repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---|
usc-isi/extra-specs | nova/api/openstack/compute/contrib/quotas.py | 1 | 3875 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import db
from nova.db.sqlalchemy import api as sqlalchemy_api
from nova import exception
from nova import quota
authorize = extensions.extension_authorizer('compute', 'quotas')
class QuotaTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('quota_set', selector='quota_set')
root.set('id')
for resource in quota.quota_resources:
elem = xmlutil.SubTemplateElement(root, resource)
elem.text = resource
return xmlutil.MasterTemplate(root, 1)
class QuotaSetsController(object):
def _format_quota_set(self, project_id, quota_set):
"""Convert the quota object to a result dict"""
result = dict(id=str(project_id))
for resource in quota.quota_resources:
result[resource] = quota_set[resource]
return dict(quota_set=result)
def _validate_quota_limit(self, limit):
# NOTE: -1 is a flag value for unlimited
if limit < -1:
msg = _("Quota limit must be -1 or greater.")
raise webob.exc.HTTPBadRequest(explanation=msg)
@wsgi.serializers(xml=QuotaTemplate)
def show(self, req, id):
context = req.environ['nova.context']
authorize(context)
try:
sqlalchemy_api.authorize_project_context(context, id)
return self._format_quota_set(id,
quota.get_project_quotas(context, id))
except exception.NotAuthorized:
raise webob.exc.HTTPForbidden()
@wsgi.serializers(xml=QuotaTemplate)
def update(self, req, id, body):
context = req.environ['nova.context']
authorize(context)
project_id = id
for key in body['quota_set'].keys():
if key in quota.quota_resources:
value = int(body['quota_set'][key])
self._validate_quota_limit(value)
try:
db.quota_update(context, project_id, key, value)
except exception.ProjectQuotaNotFound:
db.quota_create(context, project_id, key, value)
except exception.AdminRequired:
raise webob.exc.HTTPForbidden()
return {'quota_set': quota.get_project_quotas(context, project_id)}
@wsgi.serializers(xml=QuotaTemplate)
def defaults(self, req, id):
authorize(req.environ['nova.context'])
return self._format_quota_set(id, quota._get_default_quotas())
class Quotas(extensions.ExtensionDescriptor):
"""Quotas management support"""
name = "Quotas"
alias = "os-quota-sets"
namespace = "http://docs.openstack.org/compute/ext/quotas-sets/api/v1.1"
updated = "2011-08-08T00:00:00+00:00"
def get_resources(self):
resources = []
res = extensions.ResourceExtension('os-quota-sets',
QuotaSetsController(),
member_actions={'defaults': 'GET'})
resources.append(res)
return resources
| apache-2.0 | -5,099,529,885,917,966,000 | 33.598214 | 79 | 0.634065 | false |
Conan-Kudo/createrepo_c | utils/setup_for_python_metadata.py | 1 | 1178 |
from distutils.core import setup
import sys
# This is a simple and fragile way of passing the current version
# from cmake to setup as I assume no one else will use this.
#
# This script has to have the version always specified as last argument.
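#
# A hypothetical invocation (the real call is generated by cmake, so the
# command and version value below are only illustrative assumptions):
#
#     python setup_for_python_metadata.py build 0.10.0
#
# sys.argv.pop() then hands "0.10.0" to setup() below.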
version = sys.argv.pop()
setup(
name='createrepo_c',
description='C implementation of createrepo',
version=version,
license='GPLv2+',
author='RPM Software Management',
author_email='rpm-ecosystem@lists.rpm.org',
url='https://github.com/rpm-software-management',
classifiers=[
'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
'Operating System :: POSIX :: Linux',
'Programming Language :: C',
'Topic :: System :: Software Distribution',
'Topic :: System :: Systems Administration',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
)
| gpl-2.0 | 4,394,858,090,688,129,500 | 35.8125 | 85 | 0.639219 | false |
labordoc/labordoc-next | modules/miscutil/lib/plotextractor_regression_tests.py | 1 | 2132 |
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Regression tests for the plotextract script."""
__revision__ = "$Id$"
import os
from invenio.config import CFG_TMPDIR, CFG_SITE_URL
from invenio.testutils import make_test_suite, run_test_suite, InvenioTestCase
class GetDefaultsTest(InvenioTestCase):
"""Test function to get default values."""
def setUp(self):
self.arXiv_id = "arXiv:astro-ph_0104076"
self.tarball = "%s/2001/04/arXiv:astro-ph_0104076/arXiv:astro-ph_0104076" % (CFG_TMPDIR,)
def test_get_defaults(self):
"""plotextractor - get defaults"""
from invenio.shellutils import run_shell_command
from invenio.plotextractor import get_defaults
sdir_should_be = os.path.join(CFG_TMPDIR, self.arXiv_id + '_plots')
refno_should_be = "15" # Note: For ATLANTIS DEMO site
sdir, refno = get_defaults(tarball=self.tarball, sdir=None, refno_url=CFG_SITE_URL)
if sdir != None:
run_shell_command("rm -rf %s" % (sdir,))
self.assertTrue(sdir == sdir_should_be, \
"didn\'t get correct default scratch dir")
self.assertTrue(refno == refno_should_be, \
'didn\'t get correct default reference number')
TEST_SUITE = make_test_suite(GetDefaultsTest)
if __name__ == "__main__":
run_test_suite(TEST_SUITE, warn_user=True)
| gpl-2.0 | 5,976,011,825,349,743,000 | 39.226415 | 97 | 0.675422 | false |
xorpaul/check_mk | modules/automation.py | 1 | 45601 |
#!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
class MKAutomationError(Exception):
def __init__(self, reason):
self.reason = reason
def __str__(self):
return self.reason
def do_automation(cmd, args):
try:
if cmd == "get-configuration":
read_config_files(with_conf_d=False)
result = automation_get_configuration()
elif cmd == "get-check-information":
result = automation_get_check_information()
elif cmd == "get-check-manpage":
result = automation_get_check_manpage(args)
elif cmd == "get-check-catalog":
result = automation_get_check_catalog(args)
elif cmd == "notification-get-bulks":
result = automation_get_bulks(args)
else:
read_config_files()
if cmd == "try-inventory":
result = automation_try_discovery(args)
elif cmd == "inventory":
result = automation_discovery(args)
elif cmd == "analyse-service":
result = automation_analyse_service(args)
elif cmd == "active-check":
result = automation_active_check(args)
elif cmd == "get-autochecks":
result = automation_get_autochecks(args)
elif cmd == "set-autochecks":
result = automation_set_autochecks(args)
elif cmd == "reload":
result = automation_restart("reload")
elif cmd == "restart":
result = automation_restart("restart")
elif cmd == "scan-parents":
result = automation_scan_parents(args)
elif cmd == "diag-host":
result = automation_diag_host(args)
elif cmd == "delete-host":
result = automation_delete_host(args)
elif cmd == "rename-host":
result = automation_rename_host(args)
elif cmd == "create-snapshot":
result = automation_create_snapshot(args)
elif cmd == "notification-replay":
result = automation_notification_replay(args)
elif cmd == "notification-analyse":
result = automation_notification_analyse(args)
elif cmd == "update-dns-cache":
result = automation_update_dns_cache()
elif cmd == "bake-agents":
result = automation_bake_agents()
else:
raise MKAutomationError("Automation command '%s' is not implemented." % cmd)
except MKAutomationError, e:
sys.stderr.write("%s\n" % e)
if opt_debug:
raise
output_profile()
sys.exit(1)
except Exception, e:
if opt_debug:
raise
else:
sys.stderr.write("%s\n" % e)
output_profile()
sys.exit(2)
if opt_debug:
import pprint
sys.stdout.write(pprint.pformat(result)+"\n")
else:
sys.stdout.write("%r\n" % (result,))
output_profile()
sys.exit(0)
# Does inventory for *one* host. Possible values for how:
# "new" - find only new services (like -I)
# "remove" - remove exceeding services
# "fixall" - find new, remove exceeding
# "refresh" - drop all services and reinventorize
def automation_discovery(args):
# Error sensivity
if args[0] == "@raiseerrors":
args = args[1:]
on_error = "raise"
os.dup2(os.open("/dev/null", os.O_WRONLY), 2)
else:
on_error = "ignore"
# perform full SNMP scan on SNMP devices?
if args[0] == "@scan":
do_snmp_scan = True
args = args[1:]
else:
do_snmp_scan = False
# use cache files if present?
if args[0] == "@cache":
args = args[1:]
use_caches = True
else:
use_caches = False
if len(args) < 2:
raise MKAutomationError("Need two arguments: new|remove|fixall|refresh HOSTNAME")
how = args[0]
hostnames = args[1:]
counts = {}
failed_hosts = {}
for hostname in hostnames:
counts.setdefault(hostname, [0, 0, 0, 0]) # added, removed, kept, total
try:
# in "refresh" mode we first need to remove all previously discovered
# checks of the host, so that get_host_services() does show us the
# new discovered check parameters.
if how == "refresh":
counts[hostname][1] += remove_autochecks_of(hostname) # this is cluster-aware!
# Compute current state of new and existing checks
services = get_host_services(hostname, use_caches=use_caches,
do_snmp_scan=do_snmp_scan, on_error=on_error)
# Create new list of checks
new_items = {}
for (check_type, item), (check_source, paramstring) in services.items():
if check_source in ("custom", "legacy", "active", "manual"):
continue # this is not an autocheck or ignored and currently not checked
# Note discovered checks that are shadowed by manual checks will vanish
# that way.
if check_source in ("new"):
if how in ("new", "fixall", "refresh"):
counts[hostname][0] += 1 # added
counts[hostname][3] += 1 # total
new_items[(check_type, item)] = paramstring
elif check_source in ("old", "ignored"):
# keep currently existing valid services in any case
new_items[(check_type, item)] = paramstring
counts[hostname][2] += 1 # kept
counts[hostname][3] += 1 # total
elif check_source in ("obsolete", "vanished"):
# keep item, if we are currently only looking for new services
# otherwise fix it: remove ignored and non-longer existing services
if how not in ("fixall", "remove"):
new_items[(check_type, item)] = paramstring
counts[hostname][2] += 1 # kept
counts[hostname][3] += 1 # total
else:
counts[hostname][1] += 1 # removed
# Silently keep clustered services
elif check_source.startswith("clustered_"):
new_items[(check_type, item)] = paramstring
else:
raise MKGeneralException("Unknown check source '%s'" % check_source)
set_autochecks_of(hostname, new_items)
except Exception, e:
if opt_debug:
raise
failed_hosts[hostname] = str(e)
return counts, failed_hosts
def automation_try_discovery(args):
use_caches = False
do_snmp_scan = False
if args[0] == '@noscan':
args = args[1:]
do_snmp_scan = False
use_caches = True
elif args[0] == '@scan':
args = args[1:]
do_snmp_scan = True
use_caches = False
if args[0] == '@raiseerrors':
on_error = "raise"
args = args[1:]
else:
on_error = "ignore"
# TODO: Remove this unlucky option opt_use_cachefile. At least do not
# handle this option so deep in the code. It should only be handled
# by top-level functions.
global opt_use_cachefile, check_max_cachefile_age
opt_use_cachefile = use_caches
if use_caches:
check_max_cachefile_age = inventory_max_cachefile_age
hostname = args[0]
table = get_check_preview(hostname, use_caches=use_caches,
do_snmp_scan=do_snmp_scan, on_error=on_error)
return table
# Set the new list of autochecks. This list is specified by a
# table of (checktype, item). No parameters are specified. Those
# are either (1) kept from existing autochecks or (2) computed
# from a new inventory. Note: we must never convert check parameters
# from python source code to actual values.
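#
# Shape of the table read from stdin (check types, items and parameter
# strings below are illustrative assumptions):
#
#     { ("df", "/"): "{}",
#       ("cpu.loads", None): "cpuload_default_levels" }
#
# i.e. (check_type, item) -> parameter string, which is written back verbatim
# so the Python source of the parameters is never evaluated here.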
def automation_set_autochecks(args):
hostname = args[0]
new_items = eval(sys.stdin.read())
set_autochecks_of(hostname, new_items)
def set_autochecks_of(hostname, new_items):
# A Cluster does not have an autochecks file
# All of its services are located in the nodes instead
# So we cycle through all nodes remove all clustered service
# and add the ones we've got from stdin
if is_cluster(hostname):
for node in nodes_of(hostname):
new_autochecks = []
existing = parse_autochecks_file(node)
for check_type, item, paramstring in existing:
descr = service_description(check_type, item)
if hostname != host_of_clustered_service(node, descr):
new_autochecks.append((check_type, item, paramstring))
for (check_type, item), paramstring in new_items.items():
new_autochecks.append((check_type, item, paramstring))
# write new autochecks file for that host
automation_write_autochecks_file(node, new_autochecks)
else:
existing = parse_autochecks_file(hostname)
# write new autochecks file, but take paramstrings from existing ones
# for those checks which are kept
new_autochecks = []
for ct, item, paramstring in existing:
if (ct, item) in new_items:
new_autochecks.append((ct, item, paramstring))
del new_items[(ct, item)]
for (ct, item), paramstring in new_items.items():
new_autochecks.append((ct, item, paramstring))
# write new autochecks file for that host
automation_write_autochecks_file(hostname, new_autochecks)
def automation_write_autochecks_file(hostname, table):
if not os.path.exists(autochecksdir):
os.makedirs(autochecksdir)
path = "%s/%s.mk" % (autochecksdir, hostname)
f = file(path, "w")
f.write("[\n")
for check_type, item, paramstring in table:
f.write(" (%r, %r, %s),\n" % (check_type, item, paramstring))
f.write("]\n")
if inventory_check_autotrigger and inventory_check_interval:
schedule_inventory_check(hostname)
def automation_get_autochecks(args):
hostname = args[0]
result = []
for ct, item, paramstring in parse_autochecks_file(hostname):
result.append((ct, item, eval(paramstring), paramstring))
return result
def schedule_inventory_check(hostname):
try:
import socket
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect(livestatus_unix_socket)
now = int(time.time())
if 'cmk-inventory' in use_new_descriptions_for:
command = "SCHEDULE_FORCED_SVC_CHECK;%s;Check_MK Discovery;%d" % (hostname, now)
else:
# FIXME: Remove this old name handling one day
command = "SCHEDULE_FORCED_SVC_CHECK;%s;Check_MK inventory;%d" % (hostname, now)
s.send("COMMAND [%d] %s\n" % (now, command))
except Exception, e:
if opt_debug:
raise
# Determine the type of the check, and how the parameters are being
# constructed
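#
# For an inventorized service the returned dict looks roughly like this
# (all field values are illustrative assumptions):
#
#     { "origin": "auto", "checktype": "df", "item": "/",
#       "checkgroup": "filesystem", "inv_parameters": {...},
#       "factory_settings": {...}, "parameters": {...} }
#
# Manual, classical and active checks report "origin" as "static", "classic"
# and "active" respectively; an unmatched service yields {}.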
def automation_analyse_service(args):
global g_hostname
hostname = args[0]
servicedesc = args[1]
g_hostname = hostname # To be sure for all subfunctions
# We just consider types of checks that are managed via WATO.
# We have the following possible types of services:
# 1. manual checks (static_checks) (currently overriding inventorized checks)
# 2. inventorized check
# 3. classical checks
# 4. active checks
# Compute effective check table, in order to remove SNMP duplicates
check_table = get_check_table(hostname, remove_duplicates = True)
# 1. Manual checks
for nr, (checkgroup, entries) in enumerate(static_checks.items()):
for entry in entries:
entry, rule_options = get_rule_options(entry)
if rule_options.get("disabled"):
continue
# Parameters are optional
if len(entry[0]) == 2:
checktype, item = entry[0]
params = None
else:
checktype, item, params = entry[0]
if len(entry) == 3:
taglist, hostlist = entry[1:3]
else:
hostlist = entry[1]
taglist = []
if hosttags_match_taglist(tags_of_host(hostname), taglist) and \
in_extraconf_hostlist(hostlist, hostname):
descr = service_description(checktype, item)
if descr == servicedesc:
return {
"origin" : "static",
"checkgroup" : checkgroup,
"checktype" : checktype,
"item" : item,
"rule_nr" : nr,
"parameters" : params,
}
# 2. Load all autochecks of the host in question and try to find
# our service there
try:
path = "%s/%s.mk" % (autochecksdir, hostname)
for entry in eval(file(path).read()):
if len(entry) == 4: # old format
hn, ct, item, params = entry
else:
ct, item, params = entry # new format without host name
hn = hostname
if (ct, item) not in check_table:
continue # this is a removed duplicate or clustered service
descr = service_description(ct, item)
if hn == hostname and descr == servicedesc:
dlv = check_info[ct].get("default_levels_variable")
if dlv:
fs = factory_settings.get(dlv, None)
else:
fs = None
return {
"origin" : "auto",
"checktype" : ct,
"checkgroup" : check_info[ct].get("group"),
"item" : item,
"inv_parameters" : params,
"factory_settings" : fs,
"parameters" : compute_check_parameters(hostname, ct, item, params),
}
except:
if opt_debug:
raise
# 3. Classical checks
custchecks = host_extra_conf(hostname, custom_checks)
for nr, entry in enumerate(custchecks):
desc = entry["service_description"]
if desc == servicedesc:
result = {
"origin" : "classic",
"rule_nr" : nr,
}
if "command_line" in entry: # Only active checks have a command line
result["command_line"] = entry["command_line"]
return result
# 4. Active checks
for acttype, rules in active_checks.items():
entries = host_extra_conf(hostname, rules)
if entries:
act_info = active_check_info[acttype]
for params in entries:
description = act_info["service_description"](params)
if description == servicedesc:
return {
"origin" : "active",
"checktype" : acttype,
"parameters" : params,
}
return {} # not found
# TODO: What about clusters???
# TODO: Does the automatic shadowing of SNMP checks work (with dual monitoring)?
def automation_delete_host(args):
hostname = args[0]
for path in [
"%s/%s" % (precompiled_hostchecks_dir, hostname),
"%s/%s.py" % (precompiled_hostchecks_dir, hostname),
"%s/%s.mk" % (autochecksdir, hostname),
"%s/%s" % (logwatch_dir, hostname),
"%s/%s" % (counters_directory, hostname),
"%s/%s" % (tcp_cache_dir, hostname),
"%s/%s.*" % (tcp_cache_dir, hostname)]:
os.system("rm -rf '%s'" % path)
def automation_restart(job = "restart", use_rushd = True):
# make sure, Nagios does not inherit any open
# filedescriptors. This really happens, e.g. if
# check_mk is called by WATO via Apache. Nagios inherits
# the open file where Apache is listening for incoming
# HTTP connections. Really.
if monitoring_core == "nagios":
objects_file = nagios_objects_file
for fd in range(3, 256):
try:
os.close(fd)
except:
pass
else:
objects_file = var_dir + "/core/config"
if job == "restart":
job = "reload" # force reload for CMC
# os.closerange(3, 256) --> not available in older Python versions
class null_file:
def write(self, stuff):
pass
def flush(self):
pass
# Deactivate stdout by introducing fake file without filedescriptor
old_stdout = sys.stdout
sys.stdout = null_file()
try:
backup_path = None
if not lock_objects_file():
raise MKAutomationError("Cannot activate changes. "
"Another activation process is currently in progresss")
if os.path.exists(objects_file):
backup_path = objects_file + ".save"
os.rename(objects_file, backup_path)
else:
backup_path = None
try:
if monitoring_core == "nagios":
create_nagios_config(file(objects_file, "w"))
else:
do_create_cmc_config(opt_cmc_relfilename, use_rushd = use_rushd)
if "do_bake_agents" in globals() and bake_agents_on_restart:
do_bake_agents()
except Exception, e:
if backup_path:
os.rename(backup_path, objects_file)
if opt_debug:
raise
raise MKAutomationError("Error creating configuration: %s" % e)
if do_check_nagiosconfig():
if backup_path:
os.remove(backup_path)
if monitoring_core == "cmc":
do_pack_config()
else:
do_precompile_hostchecks()
do_core_action(job)
else:
if backup_path:
os.rename(backup_path, objects_file)
else:
os.remove(objects_file)
raise MKAutomationError("Configuration for monitoring core is invalid. Rolling back.")
except Exception, e:
if backup_path and os.path.exists(backup_path):
os.remove(backup_path)
if opt_debug:
raise
raise MKAutomationError(str(e))
sys.stdout = old_stdout
def automation_get_configuration():
# We read the list of variable names from stdin since
# that could be too much for the command line
variable_names = eval(sys.stdin.read())
result = {}
for varname in variable_names:
if varname in globals():
if not hasattr(globals()[varname], '__call__'):
result[varname] = globals()[varname]
return result
def automation_get_check_catalog(args):
def path_prefix_matches(p, op):
if op and not p:
return False
elif not op:
return True
else:
return p[0] == op[0] and path_prefix_matches(p[1:], op[1:])
read_manpage_catalog()
tree = {}
if len(args) > 0:
only_path = tuple(args)
else:
only_path = ()
for path, entries in g_manpage_catalog.items():
if not path_prefix_matches(path, only_path):
continue
subtree = tree
for component in path[:-1]:
subtree = subtree.setdefault(component, {})
subtree[path[-1]] = map(strip_manpage_entry, entries)
for p in only_path:
tree = tree[p]
return tree, manpage_catalog_titles
def strip_manpage_entry(entry):
return dict([ (k,v) for (k,v) in entry.items() if k in [
"name", "agents", "title"
]])
def automation_get_check_information():
manuals = all_manuals()
checks = {}
for check_type, check in check_info.items():
manfile = manuals.get(check_type)
if manfile:
title = file(manfile).readline().strip().split(":", 1)[1].strip()
else:
title = check_type
checks[check_type] = { "title" : title }
if check["group"]:
checks[check_type]["group"] = check["group"]
checks[check_type]["service_description"] = check.get("service_description","%s")
checks[check_type]["snmp"] = check_uses_snmp(check_type)
return checks
def automation_get_check_manpage(args):
if len(args) != 1:
raise MKAutomationError("Need exactly one argument.")
check_type = args[0]
manpage = load_manpage(args[0])
# Add a few informations from check_info. Note: active checks do not
# have an entry in check_info
if check_type in check_info:
manpage["type"] = "check_mk"
info = check_info[check_type]
for key in [ "snmp_info", "has_perfdata", "service_description" ]:
if key in info:
manpage[key] = info[key]
if "." in check_type:
section = check_type.split(".")[0]
if section in check_info and "snmp_info" in check_info[section]:
manpage["snmp_info"] = check_info[section]["snmp_info"]
if "group" in info:
manpage["group"] = info["group"]
# Assume active check
elif check_type.startswith("check_"):
manpage["type"] = "active"
return manpage
def automation_scan_parents(args):
settings = {
"timeout" : int(args[0]),
"probes" : int(args[1]),
"max_ttl" : int(args[2]),
"ping_probes" : int(args[3]),
}
hostnames = args[4:]
traceroute_prog = find_bin_in_path('traceroute')
if not traceroute_prog:
raise MKAutomationError("Cannot find binary <tt>traceroute</tt> in search path.")
try:
gateways = scan_parents_of(hostnames, silent=True, settings=settings)
return gateways
except Exception, e:
raise MKAutomationError(str(e))
def automation_diag_host(args):
import subprocess
hostname, test, ipaddress, snmp_community = args[:4]
agent_port, snmp_timeout, snmp_retries = map(int, args[4:7])
cmd = args[7]
if not ipaddress:
try:
ipaddress = lookup_ipaddress(hostname)
except:
raise MKGeneralException("Cannot resolve hostname %s into IP address" % hostname)
try:
if test == 'ping':
p = subprocess.Popen('ping -A -i 0.2 -c 2 -W 5 %s 2>&1' % ipaddress, shell = True, stdout = subprocess.PIPE)
response = p.stdout.read()
return (p.wait(), response)
elif test == 'agent':
if not cmd:
cmd = get_datasource_program(hostname, ipaddress)
if cmd:
return 0, get_agent_info_program(cmd)
else:
return 0, get_agent_info_tcp(hostname, ipaddress, agent_port or None)
elif test == 'traceroute':
traceroute_prog = find_bin_in_path('traceroute')
if not traceroute_prog:
return 1, "Cannot find binary <tt>traceroute</tt>."
else:
p = subprocess.Popen('traceroute -n %s 2>&1' % ipaddress, shell = True, stdout = subprocess.PIPE)
response = p.stdout.read()
return (p.wait(), response)
elif test.startswith('snmp'):
if snmp_community:
explicit_snmp_communities[hostname] = snmp_community
# override timing settings if provided
if snmp_timeout or snmp_retries:
timing = {}
if snmp_timeout:
timing['timeout'] = snmp_timeout
if snmp_retries:
timing['retries'] = snmp_retries
snmp_timing.insert(0, (timing, [], [hostname]))
# SNMP versions
global bulkwalk_hosts, snmpv2c_hosts
if test == 'snmpv2':
bulkwalk_hosts = [hostname]
elif test == 'snmpv2_nobulk':
bulkwalk_hosts = []
snmpv2c_hosts = [hostname]
elif test == 'snmpv1':
bulkwalk_hosts = []
snmpv2c_hosts = []
else:
return 1, "SNMP command not implemented"
data = get_snmp_table(hostname, ipaddress, None, ('.1.3.6.1.2.1.1', ['1.0', '4.0', '5.0', '6.0']))
if data:
return 0, 'sysDescr:\t%s\nsysContact:\t%s\nsysName:\t%s\nsysLocation:\t%s\n' % tuple(data[0])
else:
return 1, 'Got empty SNMP response'
else:
return 1, "Command not implemented"
except Exception, e:
if opt_debug:
raise
return 1, str(e)
# WATO calls this automation when a host has been renamed. We need to change
# several file and directory names.
# HIRN: Also take the new format into account here! On the other hand,
# nothing bad should really happen if the host name is *not* contained
# in the file.
def automation_rename_host(args):
oldname = args[0]
newname = args[1]
actions = []
# Autochecks: simply read and write out the file again. We do
# not store a host name here anymore - but old versions did.
# by rewriting we get rid of the host name.
acpath = autochecksdir + "/" + oldname + ".mk"
if os.path.exists(acpath):
old_autochecks = parse_autochecks_file(oldname)
out = file(autochecksdir + "/" + newname + ".mk", "w")
out.write("[\n")
for ct, item, paramstring in old_autochecks:
out.write(" (%r, %r, %s),\n" % (ct, item, paramstring))
out.write("]\n")
out.close()
os.remove(acpath) # Remove old file
actions.append("autochecks")
# At this place WATO already has changed it's configuration. All further
# data might be changed by the still running core. So we need to stop
# it now.
core_was_running = core_is_running()
if core_was_running:
do_core_action("stop", quiet=True)
# Rename temporary files of the host
for d in [ "cache", "counters" ]:
if rename_host_file(tmp_dir + "/" + d + "/", oldname, newname):
actions.append(d)
if rename_host_dir(tmp_dir + "/piggyback/", oldname, newname):
actions.append("piggyback-load")
# Rename piggy files *created* by the host
piggybase = tmp_dir + "/piggyback/"
if os.path.exists(piggybase):
for piggydir in os.listdir(piggybase):
if rename_host_file(piggybase + piggydir, oldname, newname):
actions.append("piggyback-pig")
# Logwatch
if rename_host_dir(logwatch_dir, oldname, newname):
actions.append("logwatch")
# SNMP walks
if rename_host_file(snmpwalks_dir, oldname, newname):
actions.append("snmpwalk")
# OMD-Stuff. Note: The question really is whether this should be
# included in Check_MK. The point is - however - that all these
# actions need to take place while the core is stopped.
if omd_root:
actions += omd_rename_host(oldname, newname)
# Start monitoring again. In case of CMC we need to ignore
# any configuration created by the CMC Rushahead daemon
if core_was_running:
global ignore_ip_lookup_failures
ignore_ip_lookup_failures = True # force config generation to succeed. The core *must* start.
automation_restart("start", use_rushd = False)
if monitoring_core == "cmc":
try:
os.remove(var_dir + "/core/config.rush")
os.remove(var_dir + "/core/config.rush.id")
except:
pass
if failed_ip_lookups:
actions.append("ipfail")
return actions
def rename_host_dir(basedir, oldname, newname):
import shutil
if os.path.exists(basedir + "/" + oldname):
if os.path.exists(basedir + "/" + newname):
shutil.rmtree(basedir + "/" + newname)
os.rename(basedir + "/" + oldname, basedir + "/" + newname)
return 1
return 0
def rename_host_file(basedir, oldname, newname):
if os.path.exists(basedir + "/" + oldname):
if os.path.exists(basedir + "/" + newname):
os.remove(basedir + "/" + newname)
os.rename(basedir + "/" + oldname, basedir + "/" + newname)
return 1
return 0
# This functions could be moved out of Check_MK.
def omd_rename_host(oldname, newname):
oldregex = oldname.replace(".", "[.]")
newregex = newname.replace(".", "[.]")
actions = []
# Temporarily stop processing of performance data
npcd_running = os.path.exists(omd_root + "/tmp/pnp4nagios/run/npcd.pid")
if npcd_running:
os.system("omd stop npcd >/dev/null 2>&1 </dev/null")
rrdcache_running = os.path.exists(omd_root + "/tmp/run/rrdcached.sock")
if rrdcache_running:
os.system("omd stop rrdcached >/dev/null 2>&1 </dev/null")
# Fix pathnames in XML files
dirpath = omd_root + "/var/pnp4nagios/perfdata/" + oldname
os.system("sed -i 's@/perfdata/%s/@/perfdata/%s/@' %s/*.xml 2>/dev/null" % (oldname, newname, dirpath))
# RRD files
if rename_host_dir(rrd_path, oldname, newname):
actions.append("rrd")
# entries of rrdcached journal
dirpath = omd_root + "/var/rrdcached/"
if not os.system("sed -i 's@/perfdata/%s/@/perfdata/%s/@' "
"%s/var/rrdcached/rrd.journal.* 2>/dev/null" % ( oldregex, newregex, omd_root)):
actions.append("rrdcached")
# Spoolfiles of NPCD
if not os.system("sed -i 's/HOSTNAME::%s /HOSTNAME::%s /' "
"%s/var/pnp4nagios/perfdata.dump %s/var/pnp4nagios/spool/perfdata.* 2>/dev/null" % (
oldregex, newregex, omd_root, omd_root)):
actions.append("pnpspool")
if rrdcache_running:
os.system("omd start rrdcached >/dev/null 2>&1 </dev/null")
if npcd_running:
os.system("omd start npcd >/dev/null 2>&1 </dev/null")
# Logfiles and history files of CMC and Nagios. Problem
# here: the exact place of the hostname varies between the
# various log entry lines
sed_commands = r'''
s/(INITIAL|CURRENT) (HOST|SERVICE) STATE: %(old)s;/\1 \2 STATE: %(new)s;/
s/(HOST|SERVICE) (DOWNTIME |FLAPPING |)ALERT: %(old)s;/\1 \2ALERT: %(new)s;/
s/PASSIVE (HOST|SERVICE) CHECK: %(old)s;/PASSIVE \1 CHECK: %(new)s;/
s/(HOST|SERVICE) NOTIFICATION: ([^;]+);%(old)s;/\1 NOTIFICATION: \2;%(new)s;/
''' % { "old" : oldregex, "new" : newregex }
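    # One illustrative rewrite done by the rules above (host names are
    # assumptions): "CURRENT HOST STATE: oldhost;UP;..." becomes
    # "CURRENT HOST STATE: newhost;UP;..." in the monitoring history files.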
patterns = [
"var/check_mk/core/history",
"var/check_mk/core/archive/*",
"var/nagios/nagios.log",
"var/nagios/archive/*",
]
one_matched = False
for pattern in patterns:
command = "sed -ri --file=/dev/fd/0 %s/%s >/dev/null 2>&1" % (omd_root, pattern)
p = os.popen(command, "w")
p.write(sed_commands)
if not p.close():
one_matched = True
if one_matched:
actions.append("history")
# State retention (important for Downtimes, Acknowledgements, etc.)
if monitoring_core == "nagios":
if not os.system("sed -ri 's/^host_name=%s$/host_name=%s/' %s/var/nagios/retention.dat" % (
oldregex, newregex, omd_root)):
actions.append("retention")
else: # CMC
# Create a file "renamed_hosts" with the information about the
# renaming of the hosts. The core will honor this file when it
# reads the status file with the saved state.
file(var_dir + "/core/renamed_hosts", "w").write("%s\n%s\n" % (oldname, newname))
actions.append("retention")
# NagVis maps
if not os.system("sed -i 's/^[[:space:]]*host_name=%s[[:space:]]*$/host_name=%s/' "
"%s/etc/nagvis/maps/*.cfg 2>/dev/null" % (
oldregex, newregex, omd_root)):
actions.append("nagvis")
return actions
def automation_create_snapshot(args):
try:
import tarfile, time, cStringIO, shutil, subprocess, thread, traceback, threading
from hashlib import sha256
the_data = sys.stdin.read()
data = eval(the_data)
snapshot_name = data["snapshot_name"]
snapshot_dir = var_dir + "/wato/snapshots"
work_dir = snapshot_dir + "/workdir/%s" % snapshot_name
if not os.path.exists(work_dir):
os.makedirs(work_dir)
# Open / initialize files
filename_target = "%s/%s" % (snapshot_dir, snapshot_name)
filename_work = "%s/%s.work" % (work_dir, snapshot_name)
filename_status = "%s/%s.status" % (work_dir, snapshot_name)
filename_pid = "%s/%s.pid" % (work_dir, snapshot_name)
filename_subtar = ""
current_domain = ""
file(filename_target, "w").close()
file(filename_status, "w").close()
def wipe_directory(path):
for entry in os.listdir(path):
if entry not in [ '.', '..' ]:
p = path + "/" + entry
if os.path.isdir(p):
shutil.rmtree(p)
else:
os.remove(p)
lock_status_file = threading.Lock()
def update_status_file(domain = None, infotext = None):
lock_status_file.acquire()
if os.path.exists(filename_status):
if domain:
statusinfo[domain] = infotext
statusfile = file(filename_status, "w")
statusfile.write("comment:%s\n" % data.get("comment"," ").encode("utf-8"))
status_list = list(statusinfo.items())
status_list.sort()
for status in status_list:
statusfile.write("%s.tar.gz:%s\n" % status)
lock_status_file.release()
# Set initial status info
statusinfo = {}
for name in data.get("domains", {}).keys():
statusinfo[name] = "TODO:0"
update_status_file()
# Now fork into our own process to have an asynchronous backup creation
try:
pid = os.fork()
if pid > 0:
# Exit parent process
return
# Decouple from parent environment
os.chdir("/")
os.umask(0)
os.setsid()
# Close all fd except stdin,out,err
for fd in range(3, 256):
try:
os.close(fd)
except OSError:
pass
sys.stdout.flush()
sys.stderr.flush()
si = os.open("/dev/null", os.O_RDONLY)
so = os.open("/dev/null", os.O_WRONLY)
os.dup2(si, 0)
os.dup2(so, 1)
os.dup2(so, 2)
os.close(si)
os.close(so)
except OSError, e:
raise MKAutomationError(str(e))
# Save pid of working process.
file(filename_pid, "w").write("%d" % os.getpid())
def cleanup():
wipe_directory(work_dir)
os.rmdir(work_dir)
def check_should_abort():
if not os.path.exists(filename_target):
cleanup()
sys.exit(0)
def get_basic_tarinfo(name):
tarinfo = tarfile.TarInfo(name)
tarinfo.mtime = time.time()
tarinfo.uid = 0
tarinfo.gid = 0
tarinfo.mode = 0644
tarinfo.type = tarfile.REGTYPE
return tarinfo
def update_subtar_size(seconds):
while current_domain != None:
try:
if current_domain:
if os.path.exists(path_subtar):
update_status_file(current_domain, "Processing:%d" % os.stat(path_subtar).st_size)
except:
pass
time.sleep(seconds)
def snapshot_secret():
path = default_config_dir + '/snapshot.secret'
try:
return file(path).read()
except IOError:
# create a secret during first use
try:
s = os.urandom(256)
except NotImplementedError:
                    s = sha256(str(time.time())).hexdigest()  # sha256 needs a string, not a float
file(path, 'w').write(s)
return s
#
# Initialize the snapshot tar file and populate with initial information
#
tar_in_progress = tarfile.open(filename_work, "w")
# Add comment to tar file
if data.get("comment"):
tarinfo = get_basic_tarinfo("comment")
tarinfo.size = len(data.get("comment").encode("utf-8"))
tar_in_progress.addfile(tarinfo, cStringIO.StringIO(data.get("comment").encode("utf-8")))
if data.get("created_by"):
tarinfo = get_basic_tarinfo("created_by")
tarinfo.size = len(data.get("created_by"))
tar_in_progress.addfile(tarinfo, cStringIO.StringIO(data.get("created_by")))
# Add snapshot type
snapshot_type = data.get("type")
tarinfo = get_basic_tarinfo("type")
tarinfo.size = len(snapshot_type)
tar_in_progress.addfile(tarinfo, cStringIO.StringIO(snapshot_type))
# Close tar in progress, all other files are included via command line tar
tar_in_progress.close()
#
# Process domains (sorted)
#
subtar_update_thread = thread.start_new_thread(update_subtar_size, (1,))
domains = map(lambda x: x, data.get("domains").items())
domains.sort()
subtar_info = {}
for name, info in domains:
current_domain = name # Set name for update size thread
prefix = info.get("prefix","")
exclude_options = ""
for entry in info.get("exclude", []):
exclude_options += "--exclude=%s " % entry
check_should_abort()
filename_subtar = "%s.tar.gz" % name
path_subtar = "%s/%s" % (work_dir, filename_subtar)
if info.get("backup_command"):
command = info.get("backup_command") % {
"prefix" : prefix,
"path_subtar" : path_subtar,
"work_dir" : work_dir
}
else:
paths = map(lambda x: x[1] == "" and "." or x[1], info.get("paths", []))
command = "tar czf %s --ignore-failed-read --force-local %s -C %s %s" % \
(path_subtar, exclude_options, prefix, " ".join(paths))
proc = subprocess.Popen(command, shell=True, stdin=None, close_fds=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=prefix)
stdout, stderr = proc.communicate()
exit_code = proc.wait()
# Allow exit codes 0 and 1 (files changed during backup)
if exit_code not in [0, 1]:
raise MKAutomationError("Error while creating backup of %s (Exit Code %d) - %s.\n%s" %
(current_domain, exit_code, stderr, command))
subtar_size = os.stat(path_subtar).st_size
subtar_hash = sha256(file(path_subtar).read()).hexdigest()
subtar_signed = sha256(subtar_hash + snapshot_secret()).hexdigest()
subtar_info[filename_subtar] = (subtar_hash, subtar_signed)
# Append tar.gz subtar to snapshot
command = "tar --append --file=%s %s ; rm %s" % \
(filename_work, filename_subtar, filename_subtar)
proc = subprocess.Popen(command, shell=True, cwd = work_dir)
proc.communicate()
exit_code = proc.wait()
if exit_code != 0:
raise MKAutomationError("Error on adding backup domain %s to tarfile" % current_domain)
current_domain = ""
update_status_file(name, "Finished:%d" % subtar_size)
# Now add the info file which contains hashes and signed hashes for
# each of the subtars
info = ''.join([ '%s %s %s\n' % (k, v[0], v[1]) for k, v in subtar_info.items() ]) + '\n'
tar_in_progress = tarfile.open(filename_work, "a")
tarinfo = get_basic_tarinfo("checksums")
tarinfo.size = len(info)
tar_in_progress.addfile(tarinfo, cStringIO.StringIO(info))
tar_in_progress.close()
current_domain = None
shutil.move(filename_work, filename_target)
cleanup()
except Exception, e:
cleanup()
raise MKAutomationError(str(e))
def automation_notification_replay(args):
nr = args[0]
return notification_replay_backlog(int(nr))
def automation_notification_analyse(args):
nr = args[0]
return notification_analyse_backlog(int(nr))
def automation_get_bulks(args):
only_ripe = args[0] == "1"
return find_bulks(only_ripe)
def automation_active_check(args):
hostname, plugin, item = args
actchecks = []
needed_commands = []
if plugin == "custom":
custchecks = host_extra_conf(hostname, custom_checks)
for entry in custchecks:
if entry["service_description"] == item:
command_line = replace_core_macros(hostname, entry.get("command_line", ""))
if command_line:
command_line = autodetect_plugin(command_line)
return execute_check_plugin(command_line)
else:
return -1, "Passive check - cannot be executed"
else:
rules = active_checks.get(plugin)
if rules:
entries = host_extra_conf(hostname, rules)
if entries:
act_info = active_check_info[plugin]
for params in entries:
description = act_info["service_description"](params).replace('$HOSTNAME$', hostname)
if description == item:
args = act_info["argument_function"](params)
command_line = replace_core_macros(hostname, act_info["command_line"].replace("$ARG1$", args))
return execute_check_plugin(command_line)
def load_resource_file(macros):
try:
for line in file(omd_root + "/etc/nagios/resource.cfg"):
line = line.strip()
if not line or line[0] == '#':
continue
varname, value = line.split('=', 1)
macros[varname] = value
except:
if opt_debug:
raise
# Simulate replacing some of the more important macros of hosts. We
# cannot use dynamic macros, of course. Note: this will not work
# without OMD, since we do not know the value of $USER1$ and $USER2$
# here. We could read the Nagios resource.cfg file, but we do not
# know for sure the place of that either.
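#
# Illustrative effect (host name and address below are assumptions):
#
#     replace_core_macros("myhost", "check_icmp -H $HOSTADDRESS$")
#     -> "check_icmp -H 192.168.1.17"
#
# plus whatever $USERn$ macros could be read from OMD's resource.cfg.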
def replace_core_macros(hostname, commandline):
macros = {
"$HOSTNAME$" : hostname,
"$HOSTADDRESS$" : lookup_ipaddress(hostname),
}
load_resource_file(macros)
for varname, value in macros.items():
commandline = commandline.replace(varname, value)
return commandline
def execute_check_plugin(commandline):
try:
p = os.popen(commandline + " 2>&1")
output = p.read().strip()
ret = p.close()
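        # p.close() returns None for exit code 0, otherwise the raw wait()
        # status word: exit code in the high byte, terminating signal in the
        # low byte. The masking/division below extracts the exit code and
        # maps signal deaths or out-of-range codes to UNKNOWN (3).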
if not ret:
status = 0
else:
if ret & 0xff == 0:
status = ret / 256
else:
status = 3
if status < 0 or status > 3:
status = 3
output = output.split("|",1)[0] # Drop performance data
return status, output
except Exception, e:
if opt_debug:
raise
return 3, "UNKNOWN - Cannot execute command: %s" % e
def automation_update_dns_cache():
return do_update_dns_cache()
def automation_bake_agents():
if "do_bake_agents" in globals():
return do_bake_agents()
| gpl-2.0 | -6,314,698,494,620,364,000 | 35.774194 | 120 | 0.551162 | false |
SU-ECE-17-7/ibeis | ibeis/algo/hots/word_index.py | 1 | 9442 |
# -*- coding: utf-8 -*-
"""
TODO: DEPRICATE OR REFACTOR INTO SMK
python -c "import doctest, ibeis; print(doctest.testmod(ibeis.algo.hots.word_index))"
python -m doctest -v ibeis/algo/hots/word_index.py
python -m doctest ibeis/algo/hots/word_index.py
"""
from __future__ import absolute_import, division, print_function
# Standard
import six
#from itertools import chain
# Science
import numpy as np
# UTool
import vtool
import utool
# VTool
import vtool.nearest_neighbors as nntool
(print, print_, printDBG, rrr_, profile) = utool.inject(__name__, '[entroid_index]')
NOCACHE_WORD = utool.get_argflag('--nocache-word')
# TODO:
class NeighborAssignment():
def __init__(asgn):
pass
def test_windex():
from ibeis.algo.hots.query_request import new_ibeis_query_request
import ibeis
daid_list = [7, 8, 9, 10, 11]
ibs = ibeis.opendb(db='testdb1')
qreq_ = new_ibeis_query_request(ibs, daid_list, daid_list)
windex = new_ibeis_windex(ibs, qreq_.get_internal_daids())
return windex, qreq_, ibs
def new_word_index(aid_list=[], vecs_list=[], flann_params={},
flann_cachedir=None, indexer_cfgstr='', hash_rowids=True,
use_cache=not NOCACHE_WORD, use_params_hash=True):
print('[windex] building WordIndex object')
_check_input(aid_list, vecs_list)
# Create indexes into the input aids
ax_list = np.arange(len(aid_list))
idx2_vec, idx2_ax, idx2_fx = invert_index(vecs_list, ax_list)
if hash_rowids:
# Fingerprint
aids_hashstr = utool.hashstr_arr(aid_list, '_AIDS')
cfgstr = aids_hashstr + indexer_cfgstr
else:
# Dont hash rowids when given enough info in indexer_cfgstr
cfgstr = indexer_cfgstr
# Build/Load the flann index
flann = nntool.flann_cache(idx2_vec, **{
'cache_dir': flann_cachedir,
'cfgstr': cfgstr,
'flann_params': flann_params,
'use_cache': use_cache,
'use_params_hash': use_params_hash})
ax2_aid = np.array(aid_list)
windex = WordIndex(ax2_aid, idx2_vec, idx2_ax, idx2_fx, flann)
return windex
def new_ibeis_windex(ibs, daid_list):
"""
IBEIS interface into word_index
>>> from ibeis.algo.hots.word_index import * # NOQA
>>> windex, qreq_, ibs = test_windex()
"""
daids_hashid = ibs.get_annot_hashid_visual_uuid(daid_list, 'D')
flann_cfgstr = ibs.cfg.query_cfg.flann_cfg.get_cfgstr()
feat_cfgstr = ibs.cfg.query_cfg._feat_cfg.get_cfgstr()
indexer_cfgstr = daids_hashid + flann_cfgstr + feat_cfgstr
try:
# Grab the keypoints names and image ids before query time
flann_params = ibs.cfg.query_cfg.flann_cfg.get_flann_params()
# Get annotation descriptors that will be searched
# FIXME; qreq_
vecs_list = ibs.get_annot_vecs(daid_list)
flann_cachedir = ibs.get_flann_cachedir()
windex = new_word_index(
daid_list, vecs_list, flann_params, flann_cachedir,
indexer_cfgstr, hash_rowids=False, use_params_hash=False)
return windex
except Exception as ex:
utool.printex(ex, True, msg_='cannot build inverted index', key_list=['ibs.get_infostr()'])
raise
def _check_input(aid_list, vecs_list):
assert len(aid_list) == len(vecs_list), 'invalid input'
assert len(aid_list) > 0, ('len(aid_list) == 0.'
'Cannot invert index without features!')
@six.add_metaclass(utool.ReloadingMetaclass)
class WordIndex(object):
"""
Abstract wrapper around flann
Example:
>>> from ibeis.algo.hots.word_index import * # NOQA
>>> windex, qreq_, ibs = test_windex()
"""
def __init__(windex, ax2_aid, idx2_vec, idx2_ax, idx2_fx, flann):
windex.ax2_aid = ax2_aid # (A x 1) Mapping to original annot ids
windex.idx2_vec = idx2_vec # (M x D) Descriptors to index
windex.idx2_ax = idx2_ax # (M x 1) Index into the aid_list
windex.idx2_fx = idx2_fx # (M x 1) Index into the annot's features
windex.flann = flann # Approximate search structure
def knn(windex, qfx2_vec, K, checks=1028):
"""
Args:
qfx2_vec (ndarray): (N x D) array of N, D-dimensional query vectors
K (int): number of approximate nearest words to find
Returns:
tuple of (qfx2_idx, qfx2_dist)
qfx2_idx (ndarray): (N x K) qfx2_idx[n][k] is the index of the kth
approximate nearest data vector w.r.t qfx2_vec[n]
qfx2_dist (ndarray): (N x K) qfx2_dist[n][k] is the distance to the kth
approximate nearest data vector w.r.t. qfx2_vec[n]
Example:
>>> from ibeis.algo.hots.word_index import * # NOQA
>>> windex, qreq_, ibs = test_windex()
>>> new_aid_list = [2, 3, 4]
>>> qfx2_vec = ibs.get_annot_vecs(1, config2_=qreq_.get_internal_query_config2())
>>> K = 2
>>> checks = 1028
>>> (qfx2_idx, qfx2_dist) = windex.knn(qfx2_vec, K, checks=checks)
"""
(qfx2_idx, qfx2_dist) = windex.flann.nn_index(qfx2_vec, K, checks=checks)
return (qfx2_idx, qfx2_dist)
def empty_words(K):
qfx2_idx = np.empty((0, K), dtype=np.int32)
qfx2_dist = np.empty((0, K), dtype=np.float64)
return (qfx2_idx, qfx2_dist)
def add_points(windex, new_aid_list, new_vecs_list):
"""
Example:
>>> from ibeis.algo.hots.word_index import * # NOQA
>>> windex, qreq_, ibs = test_windex()
>>> new_aid_list = [2, 3, 4]
>>> qfx2_vec = ibs.get_annot_vecs(1, config2_=qreq_.get_internal_query_config2())
>>> new_vecs_list = ibs.get_annot_vecs(new_aid_list, config2_=qreq_.get_internal_data_config2())
>>> K = 2
>>> checks = 1028
>>> (qfx2_idx1, qfx2_dist1) = windex.knn(qfx2_vec, K, checks=checks)
>>> windex.add_points(new_aid_list, new_vecs_list)
>>> (qfx2_idx2, qfx2_dist2) = windex.knn(qfx2_vec, K, checks=checks)
>>> assert qfx2_idx2.max() > qfx2_idx1.max()
"""
nAnnots = windex.num_indexed_annots()
nNew = len(new_aid_list)
new_ax_list = np.arange(nAnnots, nAnnots + nNew)
new_idx2_vec, new_idx2_ax, new_idx2_fx = \
invert_index(new_vecs_list, new_ax_list)
# Stack inverted information
_ax2_aid = np.hstack((windex.ax2_aid, new_aid_list))
_idx2_ax = np.hstack((windex.idx2_ax, new_idx2_ax))
_idx2_fx = np.hstack((windex.idx2_fx, new_idx2_fx))
_idx2_vec = np.vstack((windex.idx2_vec, new_idx2_vec))
windex.ax2_aid = _ax2_aid
windex.idx2_ax = _idx2_ax
windex.idx2_vec = _idx2_vec
windex.idx2_fx = _idx2_fx
#windex.idx2_kpts = None
#windex.idx2_oris = None
# Add new points to flann structure
windex.flann.add_points(new_idx2_vec)
def num_indexed_vecs(windex):
return len(windex.idx2_vec)
def num_indexed_annots(windex):
return len(windex.ax2_aid)
def get_nn_axs(windex, qfx2_nnidx):
#return windex.idx2_ax[qfx2_nnidx]
return windex.idx2_ax.take(qfx2_nnidx)
def get_nn_aids(windex, qfx2_nnidx):
"""
Args:
qfx2_nnidx (ndarray): (N x K) qfx2_idx[n][k] is the index of the kth
approximate nearest data vector
Returns:
ndarray: qfx2_aid - (N x K) qfx2_fx[n][k] is the annotation id index
of the kth approximate nearest data vector
"""
#qfx2_ax = windex.idx2_ax[qfx2_nnidx]
#qfx2_aid = windex.ax2_aid[qfx2_ax]
qfx2_ax = windex.idx2_ax.take(qfx2_nnidx)
qfx2_aid = windex.ax2_aid.take(qfx2_ax)
return qfx2_aid
def get_nn_featxs(windex, qfx2_nnidx):
"""
Args:
qfx2_nnidx (ndarray): (N x K) qfx2_idx[n][k] is the index of the kth
approximate nearest data vector
Returns:
ndarray: qfx2_fx - (N x K) qfx2_fx[n][k] is the feature index (w.r.t
the source annotation) of the kth approximate nearest data vector
"""
#return windex.idx2_fx[qfx2_nnidx]
return windex.idx2_fx.take(qfx2_nnidx)
def invert_index(vecs_list, ax_list):
"""
Aggregates descriptors of input annotations and returns inverted information
"""
if utool.NOT_QUIET:
print('[hsnbrx] stacking descriptors from %d annotations'
% len(ax_list))
try:
idx2_vec, idx2_ax, idx2_fx = nntool.invertible_stack(vecs_list, ax_list)
assert idx2_vec.shape[0] == idx2_ax.shape[0]
assert idx2_vec.shape[0] == idx2_fx.shape[0]
except MemoryError as ex:
utool.printex(ex, 'cannot build inverted index', '[!memerror]')
raise
if utool.NOT_QUIET:
print('stacked nVecs={nVecs} from nAnnots={nAnnots}'.format(
nVecs=len(idx2_vec), nAnnots=len(ax_list)))
return idx2_vec, idx2_ax, idx2_fx
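# Illustrative shapes (feature counts are assumptions): stacking two
# annotations with 3 and 2 descriptors yields idx2_vec of shape (5, D), with
# idx2_ax = [0, 0, 0, 1, 1] and idx2_fx = [0, 1, 2, 0, 1].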
def vlad(qfx2_vec, qfx2_cvec):
qfx2_rvec = qfx2_cvec - qfx2_vec
aggvlad = qfx2_rvec.sum(axis=0)
aggvlad_norm = vtool.l2normalize(aggvlad)
return aggvlad_norm
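# In equation form, as implemented above (v = descriptor, c = assigned word):
#     r_i = c_i - v_i,   V = sum_i r_i,   vlad = V / ||V||_2
# i.e. the residuals between each descriptor and its word are summed and the
# aggregate is L2-normalized.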
#if __name__ == '__main__':
# #python -m doctest -v ibeis/algo/hots/word_index.py
# import doctest
# doctest.testmod()
| apache-2.0 | -6,176,846,014,270,310,000 | 35.7393 | 108 | 0.596484 | false |
t3dev/odoo | addons/website_hr_recruitment/models/hr_recruitment.py | 1 | 2142 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from werkzeug import urls
from odoo import api, fields, models
from odoo.tools.translate import html_translate
class RecruitmentSource(models.Model):
_inherit = 'hr.recruitment.source'
url = fields.Char(compute='_compute_url', string='Url Parameters')
@api.one
@api.depends('source_id', 'source_id.name', 'job_id')
def _compute_url(self):
base_url = self.env['ir.config_parameter'].sudo().get_param('web.base.url')
for source in self:
source.url = urls.url_join(base_url, "%s?%s" % (source.job_id.website_url,
urls.url_encode({
'utm_campaign': self.env.ref('hr_recruitment.utm_campaign_job').name,
'utm_medium': self.env.ref('utm.utm_medium_website').name,
'utm_source': source.source_id.name
})
))
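    # With illustrative values (base URL, job URL and UTM names below are
    # assumptions, not taken from a real database), the computed tracker
    # link would look like:
    #   https://example.com/jobs/detail/3?utm_campaign=Jobs&utm_medium=Website&utm_source=LinkedIn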
class Applicant(models.Model):
_inherit = 'hr.applicant'
residence_country = fields.Char(string="Residence Country")
def website_form_input_filter(self, request, values):
if 'partner_name' in values:
values.setdefault('name', '%s\'s Application' % values['partner_name'])
return values
class Job(models.Model):
_name = 'hr.job'
_inherit = ['hr.job', 'website.seo.metadata', 'website.published.multi.mixin']
def _get_default_website_description(self):
default_description = self.env["ir.model.data"].xmlid_to_object("website_hr_recruitment.default_website_description")
return (default_description.render() if default_description else "")
website_description = fields.Html('Website description', translate=html_translate, sanitize_attributes=False, default=_get_default_website_description)
@api.multi
def _compute_website_url(self):
super(Job, self)._compute_website_url()
for job in self:
job.website_url = "/jobs/detail/%s" % job.id
@api.multi
def set_open(self):
self.write({'website_published': False})
return super(Job, self).set_open()
| gpl-3.0 | 7,004,508,762,190,151,000 | 34.114754 | 155 | 0.638189 | false |
AntoineAugusti/katas | codingame/medium/heat_detector.py | 1 | 1370 |
def packBounds(xMin, xMax, yMin, yMax):
return [[xMin, xMax], [yMin, yMax]]
def unpackBounds(bounds):
xMin, xMax = bounds[0]
yMin, yMax = bounds[1]
return [xMin, xMax, yMin, yMax]
def nextMove(width, height, x, y, direction, bounds):
xMin, xMax, yMin, yMax = unpackBounds(bounds)
if direction == "U":
yMax = y
elif direction == "UR":
xMin = x
yMax = y
elif direction == "R":
xMin = x
elif direction == "DR":
xMin = x
yMin = y
elif direction == "D":
yMin = y
elif direction == "DL":
xMax = x
yMin = y
elif direction == "L":
xMax = x
elif direction == "UL":
yMax = y
xMax = x
if "U" in direction or "D" in direction:
y = (yMax - yMin) / 2 + yMin
if "L" in direction or "R" in direction:
x = (xMax - xMin) / 2 + xMin
return [x, y, packBounds(xMin, xMax, yMin, yMax)]
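# Worked step under assumed values: with width=8, height=8, Batman at (4, 4)
# and direction "UR", the window shrinks to xMin=4 and yMax=4, and the next
# jump lands at x = (8-4)/2 + 4 = 6, y = (4-0)/2 + 0 = 2 -- a binary search
# on both axes.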
# width: width of the building.
# height: height of the building.
width, height = [int(i) for i in raw_input().split()]
N = int(raw_input()) # maximum number of turns before game over.
x, y = [int(i) for i in raw_input().split()]
xMin = 0
yMin = 0
xMax = width
yMax = height
bounds = packBounds(xMin, xMax, yMin, yMax)
# Game loop
while True:
# The direction of the bombs from batman's current location (U, UR, R, DR, D, DL, L or UL)
direction = raw_input()
x, y, bounds = nextMove(width, height, x, y, direction, bounds)
    print str(x) + " " + str(y)
| mit | -7,396,484,664,308,771,000 | 22.637931 | 91 | 0.624818 | false |
xbezdick/packstack | packstack/plugins/nova_300.py | 1 | 35617 |
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Installs and configures Nova
"""
import os
import platform
import socket
from packstack.installer import basedefs
from packstack.installer import exceptions
from packstack.installer import processors
from packstack.installer import utils
from packstack.installer import validators
from packstack.modules import common
from packstack.modules.documentation import update_params_usage
from packstack.modules.shortcuts import get_mq
from packstack.modules.ospluginutils import appendManifestFile
from packstack.modules.ospluginutils import createFirewallResources
from packstack.modules.ospluginutils import deliver_ssl_file
from packstack.modules.ospluginutils import getManifestTemplate
from packstack.modules.ospluginutils import generate_ssl_cert
from packstack.modules.ospluginutils import manifestfiles
# ------------- Nova Packstack Plugin Initialization --------------
PLUGIN_NAME = "OS-Nova"
PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
def initConfig(controller):
if platform.linux_distribution()[0] == "Fedora":
primary_netif = "em1"
secondary_netif = "em2"
else:
primary_netif = "eth0"
secondary_netif = "eth1"
nova_params = {
"NOVA": [
{"CMD_OPTION": 'nova-db-purge-enable',
"PROMPT": (
"Enter y if cron job for removing soft deleted DB rows "
"should be created"
),
"OPTION_LIST": ['y', 'n'],
"VALIDATORS": [validators.validate_not_empty],
"PROCESSORS": [processors.process_bool],
"DEFAULT_VALUE": 'y',
"MASK_INPUT": False,
"LOOSE_VALIDATION": False,
"CONF_NAME": 'CONFIG_NOVA_DB_PURGE_ENABLE',
"USE_DEFAULT": False,
"NEED_CONFIRM": True,
"CONDITION": False},
{"CMD_OPTION": "nova-db-passwd",
"PROMPT": "Enter the password for the Nova DB access",
"OPTION_LIST": [],
"VALIDATORS": [validators.validate_not_empty],
"DEFAULT_VALUE": "PW_PLACEHOLDER",
"PROCESSORS": [processors.process_password],
"MASK_INPUT": True,
"LOOSE_VALIDATION": False,
"CONF_NAME": "CONFIG_NOVA_DB_PW",
"USE_DEFAULT": False,
"NEED_CONFIRM": True,
"CONDITION": False},
{"CMD_OPTION": "nova-ks-passwd",
"PROMPT": "Enter the password for the Nova Keystone access",
"OPTION_LIST": [],
"VALIDATORS": [validators.validate_not_empty],
"DEFAULT_VALUE": "PW_PLACEHOLDER",
"PROCESSORS": [processors.process_password],
"MASK_INPUT": True,
"LOOSE_VALIDATION": False,
"CONF_NAME": "CONFIG_NOVA_KS_PW",
"USE_DEFAULT": False,
"NEED_CONFIRM": True,
"CONDITION": False},
{"CMD_OPTION": "novasched-cpu-allocation-ratio",
"PROMPT": "Enter the CPU overcommitment ratio. Set to 1.0 to "
"disable CPU overcommitment",
"OPTION_LIST": [],
"VALIDATORS": [validators.validate_float],
"DEFAULT_VALUE": 16.0,
"MASK_INPUT": False,
"LOOSE_VALIDATION": True,
"CONF_NAME": "CONFIG_NOVA_SCHED_CPU_ALLOC_RATIO",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "novasched-ram-allocation-ratio",
"PROMPT": ("Enter the RAM overcommitment ratio. Set to 1.0 to "
"disable RAM overcommitment"),
"OPTION_LIST": [],
"VALIDATORS": [validators.validate_float],
"DEFAULT_VALUE": 1.5,
"MASK_INPUT": False,
"LOOSE_VALIDATION": True,
"CONF_NAME": "CONFIG_NOVA_SCHED_RAM_ALLOC_RATIO",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "novacompute-migrate-protocol",
"PROMPT": ("Enter protocol which will be used for instance "
"migration"),
"OPTION_LIST": ['tcp', 'ssh'],
"VALIDATORS": [validators.validate_options],
"DEFAULT_VALUE": 'tcp',
"MASK_INPUT": False,
"LOOSE_VALIDATION": True,
"CONF_NAME": "CONFIG_NOVA_COMPUTE_MIGRATE_PROTOCOL",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "nova-compute-manager",
"PROMPT": ("Enter the compute manager for nova "
"migration"),
"OPTION_LIST": [],
"VALIDATORS": [validators.validate_not_empty],
"DEFAULT_VALUE": "nova.compute.manager.ComputeManager",
"MASK_INPUT": False,
"LOOSE_VALIDATION": True,
"CONF_NAME": "CONFIG_NOVA_COMPUTE_MANAGER",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "nova-ssl-cert",
"PROMPT": ("Enter the path to a PEM encoded certificate to be used "
"on the https server, leave blank if one should be "
"generated, this certificate should not require "
"a passphrase"),
"OPTION_LIST": [],
"VALIDATORS": [],
"DEFAULT_VALUE": '',
"MASK_INPUT": False,
"LOOSE_VALIDATION": True,
"CONF_NAME": "CONFIG_VNC_SSL_CERT",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "nova-ssl-key",
"PROMPT": ("Enter the SSL keyfile corresponding to the certificate "
"if one was entered"),
"OPTION_LIST": [],
"VALIDATORS": [],
"DEFAULT_VALUE": "",
"MASK_INPUT": False,
"LOOSE_VALIDATION": True,
"CONF_NAME": "CONFIG_VNC_SSL_KEY",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "nova-pci-alias",
"PROMPT": ("Enter the PCI passthrough array of hash in JSON style for controller eg. "
"[{'vendor_id':'1234', 'product_id':'5678', "
"'name':'default'}, {...}] "),
"OPTION_LIST": [],
"VALIDATORS": [],
"DEFAULT_VALUE": "",
"MASK_INPUT": False,
"LOOSE_VALIDATION": True,
"CONF_NAME": "CONFIG_NOVA_PCI_ALIAS",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "nova-pci-passthrough-whitelist",
"PROMPT": ("Enter the PCI passthrough whitelist as array of hash in JSON style for "
"controller eg. "
"[{'vendor_id':'1234', 'product_id':'5678', "
"'name':'default'}, {...}]"),
"OPTION_LIST": [],
"VALIDATORS": [],
"DEFAULT_VALUE": "",
"MASK_INPUT": False,
"LOOSE_VALIDATION": True,
"CONF_NAME": "CONFIG_NOVA_PCI_PASSTHROUGH_WHITELIST",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
],
"NOVA_NETWORK": [
{"CMD_OPTION": "novacompute-privif",
"PROMPT": ("Enter the Private interface for Flat DHCP on the Nova"
" compute servers"),
"OPTION_LIST": [],
"VALIDATORS": [],
"DEFAULT_VALUE": '',
"MASK_INPUT": False,
"LOOSE_VALIDATION": True,
"CONF_NAME": "CONFIG_NOVA_COMPUTE_PRIVIF",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "novanetwork-manager",
"PROMPT": "Enter the Nova network manager",
"OPTION_LIST": [r'^nova\.network\.manager\.\w+Manager$'],
"VALIDATORS": [validators.validate_regexp],
"DEFAULT_VALUE": "nova.network.manager.FlatDHCPManager",
"MASK_INPUT": False,
"LOOSE_VALIDATION": True,
"CONF_NAME": "CONFIG_NOVA_NETWORK_MANAGER",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "novanetwork-pubif",
"PROMPT": "Enter the Public interface on the Nova network server",
"OPTION_LIST": [],
"VALIDATORS": [validators.validate_not_empty],
"DEFAULT_VALUE": primary_netif,
"MASK_INPUT": False,
"LOOSE_VALIDATION": True,
"CONF_NAME": "CONFIG_NOVA_NETWORK_PUBIF",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "novanetwork-privif",
"PROMPT": ("Enter the Private interface for network manager on "
"the Nova network server"),
"OPTION_LIST": [],
"VALIDATORS": [],
"DEFAULT_VALUE": '',
"MASK_INPUT": False,
"LOOSE_VALIDATION": True,
"CONF_NAME": "CONFIG_NOVA_NETWORK_PRIVIF",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "novanetwork-fixed-range",
"PROMPT": "Enter the IP Range for network manager",
"OPTION_LIST": ["^[\:\.\da-fA-f]+(\/\d+){0,1}$"],
"PROCESSORS": [processors.process_cidr],
"VALIDATORS": [validators.validate_regexp],
"DEFAULT_VALUE": "192.168.32.0/22",
"MASK_INPUT": False,
"LOOSE_VALIDATION": True,
"CONF_NAME": "CONFIG_NOVA_NETWORK_FIXEDRANGE",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "novanetwork-floating-range",
"PROMPT": "Enter the IP Range for Floating IP's",
"OPTION_LIST": ["^[\:\.\da-fA-f]+(\/\d+){0,1}$"],
"PROCESSORS": [processors.process_cidr],
"VALIDATORS": [validators.validate_regexp],
"DEFAULT_VALUE": "10.3.4.0/22",
"MASK_INPUT": False,
"LOOSE_VALIDATION": True,
"CONF_NAME": "CONFIG_NOVA_NETWORK_FLOATRANGE",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "novanetwork-auto-assign-floating-ip",
"PROMPT": ("Should new instances automatically have a floating "
"IP assigned?"),
"OPTION_LIST": ["y", "n"],
"VALIDATORS": [validators.validate_options],
"DEFAULT_VALUE": "n",
"MASK_INPUT": False,
"LOOSE_VALIDATION": False,
"CONF_NAME": "CONFIG_NOVA_NETWORK_AUTOASSIGNFLOATINGIP",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
],
"NOVA_NETWORK_VLAN": [
{"CMD_OPTION": "novanetwork-vlan-start",
"PROMPT": "Enter first VLAN for private networks",
"OPTION_LIST": [],
"VALIDATORS": [validators.validate_not_empty],
"DEFAULT_VALUE": 100,
"MASK_INPUT": False,
"LOOSE_VALIDATION": True,
"CONF_NAME": "CONFIG_NOVA_NETWORK_VLAN_START",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "novanetwork-num-networks",
"PROMPT": "How many networks should be supported",
"OPTION_LIST": [],
"VALIDATORS": [validators.validate_not_empty],
"DEFAULT_VALUE": 1,
"MASK_INPUT": False,
"LOOSE_VALIDATION": True,
"CONF_NAME": "CONFIG_NOVA_NETWORK_NUMBER",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "novanetwork-network-size",
"PROMPT": "How many addresses should be in each private subnet",
"OPTION_LIST": [],
"VALIDATORS": [validators.validate_not_empty],
"DEFAULT_VALUE": 255,
"MASK_INPUT": False,
"LOOSE_VALIDATION": True,
"CONF_NAME": "CONFIG_NOVA_NETWORK_SIZE",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
],
}
update_params_usage(basedefs.PACKSTACK_DOC, nova_params)
def use_nova_network(config):
return (config['CONFIG_NOVA_INSTALL'] == 'y' and
config['CONFIG_NEUTRON_INSTALL'] != 'y')
def use_nova_network_vlan(config):
manager = 'nova.network.manager.VlanManager'
return (config['CONFIG_NOVA_INSTALL'] == 'y' and
config['CONFIG_NEUTRON_INSTALL'] != 'y' and
config['CONFIG_NOVA_NETWORK_MANAGER'] == manager)
nova_groups = [
{"GROUP_NAME": "NOVA",
"DESCRIPTION": "Nova Options",
"PRE_CONDITION": "CONFIG_NOVA_INSTALL",
"PRE_CONDITION_MATCH": "y",
"POST_CONDITION": False,
"POST_CONDITION_MATCH": True},
{"GROUP_NAME": "NOVA_NETWORK",
"DESCRIPTION": "Nova Network Options",
"PRE_CONDITION": use_nova_network,
"PRE_CONDITION_MATCH": True,
"POST_CONDITION": False,
"POST_CONDITION_MATCH": True},
{"GROUP_NAME": "NOVA_NETWORK_VLAN",
"DESCRIPTION": "Nova Network VLAN Options",
"PRE_CONDITION": use_nova_network_vlan,
"PRE_CONDITION_MATCH": True,
"POST_CONDITION": False,
"POST_CONDITION_MATCH": True},
]
for group in nova_groups:
params = nova_params[group["GROUP_NAME"]]
controller.addGroup(group, params)
def initSequences(controller):
if controller.CONF['CONFIG_NOVA_INSTALL'] != 'y':
return
if controller.CONF['CONFIG_NEUTRON_INSTALL'] == 'y':
network_title = ('Adding OpenStack Network-related '
'Nova manifest entries')
network_function = create_neutron_manifest
else:
network_title = 'Adding Nova Network manifest entries'
network_function = create_network_manifest
novaapisteps = [
{'title': 'Adding Nova API manifest entries',
'functions': [create_api_manifest]},
{'title': 'Adding Nova Keystone manifest entries',
'functions': [create_keystone_manifest]},
{'title': 'Adding Nova Cert manifest entries',
'functions': [create_cert_manifest]},
{'title': 'Adding Nova Conductor manifest entries',
'functions': [create_conductor_manifest]},
{'title': 'Creating ssh keys for Nova migration',
'functions': [create_ssh_keys]},
{'title': 'Gathering ssh host keys for Nova migration',
'functions': [gather_host_keys]},
{'title': 'Adding Nova Compute manifest entries',
'functions': [create_compute_manifest]},
{'title': 'Adding Nova Scheduler manifest entries',
'functions': [create_sched_manifest]},
{'title': 'Adding Nova VNC Proxy manifest entries',
'functions': [create_vncproxy_manifest]},
{'title': network_title,
'functions': [network_function]},
{'title': 'Adding Nova Common manifest entries',
'functions': [create_common_manifest]},
]
controller.addSequence("Installing OpenStack Nova API", [], [],
novaapisteps)
# ------------------------- helper functions -------------------------
def check_ifcfg(host, device):
"""
Raises ScriptRuntimeError if given host does not have give device.
"""
server = utils.ScriptRunner(host)
cmd = "ip addr show dev %s || ( echo Device %s does not exist && exit 1 )"
server.append(cmd % (device, device))
server.execute()
def bring_up_ifcfg(host, device):
"""
Brings given device up if it's down. Raises ScriptRuntimeError in case
of failure.
"""
server = utils.ScriptRunner(host)
server.append('ip link show up | grep "%s"' % device)
try:
server.execute()
except exceptions.ScriptRuntimeError:
server.clear()
cmd = 'ip link set dev %s up'
server.append(cmd % device)
try:
server.execute()
except exceptions.ScriptRuntimeError:
msg = ('Failed to bring up network interface %s on host %s.'
' Interface should be up so OpenStack can work'
' properly.' % (device, host))
raise exceptions.ScriptRuntimeError(msg)
def dummy_interface(host):
"""Creates dummy interface on given hosts.
Returns interface name.
"""
# Only single dummy interface will be created, hence the name is hardcoded
ifname = 'dummy'
script = (
'DEVICE={0}\n'
'BOOTPROTO=none\n'
'ONBOOT=yes\n'
'TYPE=Ethernet\n'
'NM_CONTROLLED=no\n'.format(ifname)
)
server = utils.ScriptRunner(host)
server.append(
'ip link show {ifname} || ('
'modprobe dummy && '
'ip link set name {ifname} dev dummy0 && '
'ip link set dev dummy address 06:66:DE:AF:66:60'
')'.format(**locals())
)
server.append(
'cat > /etc/sysconfig/network-scripts/ifcfg-{ifname} '
'<<EOF\n{script}EOF'.format(**locals())
)
server.execute()
return ifname
# ------------------------ Step Functions -------------------------
def create_ssh_keys(config, messages):
migration_key = os.path.join(basedefs.VAR_DIR, 'nova_migration_key')
# Generate key if it does not exist
if not os.path.exists(migration_key):
local = utils.ScriptRunner()
local.append('ssh-keygen -t rsa -b 2048 -f "%s" -N ""' % migration_key)
local.execute()
with open(migration_key) as fp:
secret = fp.read().strip()
with open('%s.pub' % migration_key) as fp:
public = fp.read().strip()
config['NOVA_MIGRATION_KEY_TYPE'] = 'ssh-rsa'
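    # the .pub file holds "<type> <base64-key> [comment]", so field 1 below is
    # the key body itself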
config['NOVA_MIGRATION_KEY_PUBLIC'] = public.split()[1]
config['NOVA_MIGRATION_KEY_SECRET'] = secret
def gather_host_keys(config, messages):
global compute_hosts
for host in compute_hosts:
local = utils.ScriptRunner()
local.append('ssh-keyscan %s' % host)
retcode, hostkey = local.execute()
config['HOST_KEYS_%s' % host] = hostkey
def create_api_manifest(config, messages):
# Since this step is running first, let's create necessary variables here
# and make them global
global compute_hosts, network_hosts
com_var = config.get("CONFIG_COMPUTE_HOSTS", "")
compute_hosts = set([i.strip() for i in com_var.split(",") if i.strip()])
net_var = config.get("CONFIG_NETWORK_HOSTS", "")
network_hosts = set([i.strip() for i in net_var.split(",") if i.strip()])
# This is a hack around us needing to generate the neutron metadata
# password, but the nova puppet plugin uses the existence of that
# password to determine whether or not to configure neutron metadata
# proxy support. So the nova_api.pp template needs to be set to None
# to disable metadata support if neutron is not being installed.
if config['CONFIG_NEUTRON_INSTALL'] != 'y':
config['CONFIG_NEUTRON_METADATA_PW_UNQUOTED'] = None
else:
config['CONFIG_NEUTRON_METADATA_PW_UNQUOTED'] = "%s" % config['CONFIG_NEUTRON_METADATA_PW']
manifestfile = "%s_api_nova.pp" % config['CONFIG_CONTROLLER_HOST']
manifestdata = getManifestTemplate("nova_api")
fw_details = dict()
key = "nova_api"
fw_details.setdefault(key, {})
fw_details[key]['host'] = "ALL"
fw_details[key]['service_name'] = "nova api"
fw_details[key]['chain'] = "INPUT"
fw_details[key]['ports'] = ['8773', '8774', '8775']
fw_details[key]['proto'] = "tcp"
config['FIREWALL_NOVA_API_RULES'] = fw_details
manifestdata += createFirewallResources('FIREWALL_NOVA_API_RULES')
appendManifestFile(manifestfile, manifestdata, 'novaapi')
def create_keystone_manifest(config, messages):
manifestfile = "%s_keystone.pp" % config['CONFIG_CONTROLLER_HOST']
manifestdata = getManifestTemplate("keystone_nova")
appendManifestFile(manifestfile, manifestdata)
def create_cert_manifest(config, messages):
manifestfile = "%s_nova.pp" % config['CONFIG_CONTROLLER_HOST']
manifestdata = getManifestTemplate("nova_cert")
appendManifestFile(manifestfile, manifestdata)
def create_conductor_manifest(config, messages):
manifestfile = "%s_nova.pp" % config['CONFIG_CONTROLLER_HOST']
manifestdata = getManifestTemplate("nova_conductor")
appendManifestFile(manifestfile, manifestdata)
def create_compute_manifest(config, messages):
global compute_hosts, network_hosts
if config["CONFIG_HORIZON_SSL"] == 'y':
config["CONFIG_VNCPROXY_PROTOCOL"] = "https"
else:
config["CONFIG_VNCPROXY_PROTOCOL"] = "http"
migrate_protocol = config['CONFIG_NOVA_COMPUTE_MIGRATE_PROTOCOL']
if migrate_protocol == 'ssh':
config['CONFIG_NOVA_COMPUTE_MIGRATE_URL'] = (
'qemu+ssh://nova@%s/system?no_verify=1&'
'keyfile=/etc/nova/ssh/nova_migration_key'
)
else:
config['CONFIG_NOVA_COMPUTE_MIGRATE_URL'] = (
'qemu+tcp://nova@%s/system'
)
ssh_hostkeys = ''
ssh_keys_details = {}
for host in compute_hosts:
try:
hostname, aliases, addrs = socket.gethostbyaddr(host)
except socket.herror:
hostname, aliases, addrs = (host, [], [])
for hostkey in config['HOST_KEYS_%s' % host].split('\n'):
hostkey = hostkey.strip()
if not hostkey:
continue
_, host_key_type, host_key_data = hostkey.split()
key = "%s.%s" % (host_key_type, hostname)
ssh_keys_details.setdefault(key, {})
ssh_keys_details[key]['ensure'] = 'present'
ssh_keys_details[key]['host_aliases'] = aliases + addrs
ssh_keys_details[key]['key'] = host_key_data
ssh_keys_details[key]['type'] = host_key_type
config['SSH_KEYS'] = ssh_keys_details
ssh_hostkeys += getManifestTemplate("sshkey")
if config['CONFIG_VMWARE_BACKEND'] == 'y':
vcenters = [i.strip() for i in
config['CONFIG_VCENTER_CLUSTER_NAMES'].split(',')
if i.strip()]
if not vcenters:
raise exceptions.ParamValidationError(
"Please specify at least one VMware vCenter cluster in"
" CONFIG_VCENTER_CLUSTER_NAMES"
)
if len(vcenters) != len(compute_hosts):
if len(vcenters) > 1:
raise exceptions.ParamValidationError(
"Number of vmware clusters %s is not same"
" as number of nova computes %s", (vcenters, compute_hosts)
)
else:
vcenters = len(compute_hosts) * [vcenters[0]]
vmware_clusters = dict(zip(compute_hosts, vcenters))
for host in compute_hosts:
if config['CONFIG_IRONIC_INSTALL'] == 'y':
cm = 'ironic.nova.compute.manager.ClusteredComputeManager'
config['CONFIG_NOVA_COMPUTE_MANAGER'] = cm
manifestdata = getManifestTemplate("nova_compute")
fw_details = dict()
cf_fw_qemu_mig_key = "FIREWALL_NOVA_QEMU_MIG_RULES_%s" % host
for c_host in compute_hosts:
key = "nova_qemu_migration_%s_%s" % (host, c_host)
fw_details.setdefault(key, {})
fw_details[key]['host'] = "%s" % c_host
fw_details[key]['service_name'] = "nova qemu migration"
fw_details[key]['chain'] = "INPUT"
fw_details[key]['ports'] = ['16509', '49152-49215']
fw_details[key]['proto'] = "tcp"
config[cf_fw_qemu_mig_key] = fw_details
manifestdata += createFirewallResources(cf_fw_qemu_mig_key)
if config['CONFIG_VMWARE_BACKEND'] == 'y':
manifestdata += ("\n$nova_vcenter_cluster_name = '%s'\n" %
vmware_clusters[host])
manifestdata += getManifestTemplate("nova_compute_vmware.pp")
elif config['CONFIG_IRONIC_INSTALL'] == 'y':
manifestdata += getManifestTemplate("nova_compute_ironic.pp")
else:
manifestdata += getManifestTemplate("nova_compute_libvirt.pp")
if (config['CONFIG_VMWARE_BACKEND'] != 'y' and
config['CONFIG_CINDER_INSTALL'] == 'y' and
'gluster' in config['CONFIG_CINDER_BACKEND']):
manifestdata += getManifestTemplate("nova_gluster")
if (config['CONFIG_VMWARE_BACKEND'] != 'y' and
config['CONFIG_CINDER_INSTALL'] == 'y' and
'nfs' in config['CONFIG_CINDER_BACKEND']):
manifestdata += getManifestTemplate("nova_nfs")
manifestfile = "%s_nova.pp" % host
if config['CONFIG_NEUTRON_INSTALL'] != 'y':
if host not in network_hosts:
manifestdata += getManifestTemplate('nova_compute_flat')
key = 'CONFIG_NOVA_COMPUTE_PRIVIF'
if not config[key].strip():
config[key] = dummy_interface(host)
if config['CONFIG_USE_SUBNETS'] == 'y':
netface = common.cidr_to_ifname(
config[key], host, config
)
else:
netface = config[key]
check_ifcfg(host, netface)
try:
bring_up_ifcfg(host, netface)
except exceptions.ScriptRuntimeError as ex:
# just warn user to do it by himself
messages.append(str(ex))
if config['CONFIG_CEILOMETER_INSTALL'] == 'y':
if config['CONFIG_AMQP_ENABLE_SSL'] == 'y':
ssl_cert_file = config['CONFIG_CEILOMETER_SSL_CERT'] = (
'/etc/pki/tls/certs/ssl_amqp_ceilometer.crt'
)
ssl_key_file = config['CONFIG_CEILOMETER_SSL_KEY'] = (
'/etc/pki/tls/private/ssl_amqp_ceilometer.key'
)
ssl_host = config['CONFIG_CONTROLLER_HOST']
service = 'ceilometer'
generate_ssl_cert(config, host, service, ssl_key_file,
ssl_cert_file)
mq_template = get_mq(config, "nova_ceilometer")
manifestdata += getManifestTemplate(mq_template)
manifestdata += getManifestTemplate("nova_ceilometer")
fw_details = dict()
key = "nova_compute"
fw_details.setdefault(key, {})
fw_details[key]['host'] = "%s" % config['CONFIG_CONTROLLER_HOST']
fw_details[key]['service_name'] = "nova compute"
fw_details[key]['chain'] = "INPUT"
fw_details[key]['ports'] = ['5900-5999']
fw_details[key]['proto'] = "tcp"
config['FIREWALL_NOVA_COMPUTE_RULES'] = fw_details
manifestdata += "\n" + createFirewallResources(
'FIREWALL_NOVA_COMPUTE_RULES'
)
manifestdata += "\n" + ssh_hostkeys
appendManifestFile(manifestfile, manifestdata)
def create_network_manifest(config, messages):
global compute_hosts, network_hosts
if config['CONFIG_NEUTRON_INSTALL'] == "y":
return
    # set default values for VlanManager in case these values are not in config
for key, value in [('CONFIG_NOVA_NETWORK_VLAN_START', 100),
('CONFIG_NOVA_NETWORK_SIZE', 255),
('CONFIG_NOVA_NETWORK_NUMBER', 1)]:
config[key] = config.get(key, value)
api_host = config['CONFIG_CONTROLLER_HOST']
multihost = len(network_hosts) > 1
config['CONFIG_NOVA_NETWORK_MULTIHOST'] = multihost and 'true' or 'false'
for host in network_hosts:
for i in ('CONFIG_NOVA_NETWORK_PRIVIF', 'CONFIG_NOVA_NETWORK_PUBIF'):
if not config[i].strip():
config[i] = dummy_interface(host)
netface = config[i]
if config['CONFIG_USE_SUBNETS'] == 'y':
netface = common.cidr_to_ifname(netface, host, config)
check_ifcfg(host, netface)
try:
bring_up_ifcfg(host, netface)
except exceptions.ScriptRuntimeError as ex:
# just warn user to do it by himself
messages.append(str(ex))
key = 'CONFIG_NOVA_NETWORK_AUTOASSIGNFLOATINGIP'
config[key] = config[key] == "y"
# We need to explicitly set the network size
routing_prefix = config['CONFIG_NOVA_NETWORK_FIXEDRANGE'].split('/')[1]
net_size = 2 ** (32 - int(routing_prefix))
config['CONFIG_NOVA_NETWORK_FIXEDSIZE'] = str(net_size)
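        # worked example: the default fixed range 192.168.32.0/22 has a /22
        # prefix, so net_size = 2 ** (32 - 22) = 1024 addresses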
manifestfile = "%s_nova.pp" % host
manifestdata = getManifestTemplate("nova_network")
# Restart libvirt if we deploy nova network on compute
if host in compute_hosts:
manifestdata += getManifestTemplate("nova_network_libvirt")
# in multihost mode each compute host runs nova-api-metadata
if multihost and host != api_host and host in compute_hosts:
manifestdata += getManifestTemplate("nova_metadata")
appendManifestFile(manifestfile, manifestdata)
def create_sched_manifest(config, messages):
manifestfile = "%s_nova.pp" % config['CONFIG_CONTROLLER_HOST']
if config['CONFIG_IRONIC_INSTALL'] == 'y':
manifestdata = getManifestTemplate("nova_sched_ironic.pp")
ram_alloc = '1.0'
config['CONFIG_NOVA_SCHED_RAM_ALLOC_RATIO'] = ram_alloc
manifestdata += getManifestTemplate("nova_sched.pp")
else:
manifestdata = getManifestTemplate("nova_sched.pp")
appendManifestFile(manifestfile, manifestdata)
def create_vncproxy_manifest(config, messages):
if config["CONFIG_HORIZON_SSL"] == 'y':
if config["CONFIG_VNC_SSL_CERT"]:
ssl_cert_file = config["CONFIG_VNC_SSL_CERT"]
ssl_key_file = config["CONFIG_VNC_SSL_KEY"]
if not os.path.exists(ssl_cert_file):
raise exceptions.ParamValidationError(
"The file %s doesn't exist" % ssl_cert_file)
if not os.path.exists(ssl_key_file):
raise exceptions.ParamValidationError(
"The file %s doesn't exist" % ssl_key_file)
final_cert = open(ssl_cert_file, 'rt').read()
final_key = open(ssl_key_file, 'rt').read()
deliver_ssl_file(final_cert, ssl_cert_file, config['CONFIG_CONTROLLER_HOST'])
deliver_ssl_file(final_key, ssl_key_file, config['CONFIG_CONTROLLER_HOST'])
else:
config["CONFIG_VNC_SSL_CERT"] = '/etc/pki/tls/certs/ssl_vnc.crt'
config["CONFIG_VNC_SSL_KEY"] = '/etc/pki/tls/private/ssl_vnc.key'
ssl_key_file = config["CONFIG_VNC_SSL_KEY"]
ssl_cert_file = config["CONFIG_VNC_SSL_CERT"]
ssl_host = config['CONFIG_CONTROLLER_HOST']
service = 'vnc'
generate_ssl_cert(config, ssl_host, service, ssl_key_file,
ssl_cert_file)
manifestfile = "%s_nova.pp" % config['CONFIG_CONTROLLER_HOST']
manifestdata = getManifestTemplate("nova_vncproxy")
appendManifestFile(manifestfile, manifestdata)
def create_common_manifest(config, messages):
global compute_hosts, network_hosts
network_type = (config['CONFIG_NEUTRON_INSTALL'] == "y" and
'neutron' or 'nova')
network_multi = len(network_hosts) > 1
dbacces_hosts = set([config.get('CONFIG_CONTROLLER_HOST')])
dbacces_hosts |= network_hosts
for manifestfile, marker in manifestfiles.getFiles():
pw_in_sqlconn = False
if manifestfile.endswith("_nova.pp"):
host, manifest = manifestfile.split('_', 1)
host = host.strip()
if host in compute_hosts and host not in dbacces_hosts:
                # we should omit the password in case we are installing only
                # nova-compute on the host
perms = "nova"
pw_in_sqlconn = False
else:
perms = "nova:%s" % config['CONFIG_NOVA_DB_PW']
pw_in_sqlconn = True
mariadb_host_url = config['CONFIG_MARIADB_HOST_URL']
sqlconn = "mysql+pymysql://%s@%s/nova" % (perms, mariadb_host_url)
if pw_in_sqlconn:
config['CONFIG_NOVA_SQL_CONN_PW'] = sqlconn
else:
config['CONFIG_NOVA_SQL_CONN_NOPW'] = sqlconn
            # for nova-network in multihost mode each compute host is the
            # metadata host, otherwise we use the api host
if (network_type == 'nova' and network_multi and
host in compute_hosts):
metadata = host
else:
metadata = config['CONFIG_CONTROLLER_HOST']
config['CONFIG_NOVA_METADATA_HOST'] = metadata
data = getManifestTemplate(get_mq(config, "nova_common"))
if pw_in_sqlconn:
data += getManifestTemplate("nova_common_pw")
else:
data += getManifestTemplate("nova_common_nopw")
appendManifestFile(os.path.split(manifestfile)[1], data)
if config['CONFIG_AMQP_ENABLE_SSL'] == 'y':
nova_hosts = compute_hosts
nova_hosts |= set([config.get('CONFIG_CONTROLLER_HOST')])
ssl_cert_file = config['CONFIG_NOVA_SSL_CERT'] = (
'/etc/pki/tls/certs/ssl_amqp_nova.crt'
)
ssl_key_file = config['CONFIG_NOVA_SSL_KEY'] = (
'/etc/pki/tls/private/ssl_amqp_nova.key'
)
service = 'nova'
for host in nova_hosts:
generate_ssl_cert(config, host, service,
ssl_key_file, ssl_cert_file)
def create_neutron_manifest(config, messages):
if config['CONFIG_NEUTRON_INSTALL'] != "y":
return
if config['CONFIG_IRONIC_INSTALL'] == 'y':
virt_driver = 'nova.virt.firewall.NoopFirewallDriver'
config['CONFIG_NOVA_LIBVIRT_VIF_DRIVER'] = virt_driver
else:
virt_driver = 'nova.virt.libvirt.vif.LibvirtGenericVIFDriver'
config['CONFIG_NOVA_LIBVIRT_VIF_DRIVER'] = virt_driver
for manifestfile, marker in manifestfiles.getFiles():
if manifestfile.endswith("_nova.pp"):
data = getManifestTemplate("nova_neutron")
appendManifestFile(os.path.split(manifestfile)[1], data)
| apache-2.0 | 5,181,757,278,690,048,000 | 39.109234 | 99 | 0.562456 | false |
graik/biskit | biskit/__init__.py | 1 | 2482 | ##
## Biskit, a toolkit for the manipulation of macromolecular structures
## Copyright (C) 2004-2007 Raik Gruenberg & Johan Leckner
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You find a copy of the GNU General Public License in the file
## license.txt along with this program; if not, write to the Free
## Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
__version__ = '3.0.0.a'
import logging
## public classes
try:
## default error handler
from biskit.errorHandler import ErrorHandler
EHandler = ErrorHandler()
from biskit.logFile import LogFile, StdLog, ErrLog
from biskit.errors import BiskitError
## from Blast2Seq import Blast2Seq
## from EDParser import EZDParser
from biskit.pdbModel import PDBModel, PDBProfiles, PDBError
from biskit.xplorModel import XplorModel
from biskit.profileCollection import ProfileCollection, ProfileError
## from ProfileMirror import ProfileMirror
from biskit.pdbCleaner import PDBCleaner, CleanerError
## from ModelList import ModelList
## from CommandLine import CommandLine
from .amberResidues import AmberResidueType, AmberPrepParser
from .amberResidueLibrary import AmberResidueLibrary,\
AmberResidueLibraryError
from .atomCharger import AtomCharger
from .pdbDope import PDBDope
## from Ramachandran import Ramachandran
from .colorspectrum import ColorSpectrum, ColorError, colorRange
from .matrixPlot import MatrixPlot
from biskit.core.localpath import LocalPath, LocalPathError
from biskit.core.dictlist import DictList
## ## PVM-dependent modules
## from QualMaster import QualMaster
## from StructureMaster import StructMaster
## from StructureSlave import StructureSlave
## from TrajFlexMaster import TrajFlexMaster, FlexError
except Exception as why:
logging.warning('Could not import all biskit modules: ' + repr(why))
raise
## clean up namespace
del logging
| gpl-3.0 | -3,805,283,938,140,448,000 | 31.657895 | 72 | 0.735294 | false |
PieterMostert/Lipgloss | view/pretty_names.py | 1 | 1617 | # LIPGLOSS - Graphical user interface for constructing glaze recipes
# Copyright (C) 2017 Pieter Mostert
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# version 3 along with this program (see LICENCE.txt). If not, see
# <http://www.gnu.org/licenses/>.
# Contact: pi.mostert@gmail.com
# Construct prettify function
pretty_dict = {'SiO2':'SiO\u2082',
'Al2O3':'Al\u2082O\u2083',
'B2O3':'B\u2082O\u2083',
'Li2O':'Li\u2082O',
'Na2O':'Na\u2082O',
'K2O':'K\u2082O',
'P2O5':'P\u2082O\u2085',
'Fe2O3':'Fe\u2082O\u2083',
'TiO2':'TiO\u2082',
'MnO2':'MnO\u2082',
'SiO2_Al2O3':'SiO\u2082 : Al\u2082O\u2083',
'cost':'Cost',
'mass_perc_':'% weight',
'mole_perc_':'% mole'}
def prettify(text):
try:
return pretty_dict[text]
except:
return text
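# Illustrative usage: prettify('SiO2') -> 'SiO\u2082', prettify('cost') -> 'Cost',
# and any key not in pretty_dict is returned unchanged.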
def pretty_entry_type(text):
if text == 'um':
return ' UMF'
elif text == 'ma':
return ' % weight'
elif text == 'mo':
return ' % mole'
else:
return ''
| gpl-3.0 | 344,815,602,470,841,860 | 32 | 70 | 0.594929 | false |
tijko/Project-Euler | py_solutions_81-90/Euler_83.py | 1 | 3306 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Find the minimal path sum in an 80x80 matrix, from the top-left node to the
bottom-right node, moving up, down, left, or right.
'''
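# Worked example (the 5x5 matrix from the Project Euler 83 problem statement,
# added here for clarity):
#
#     131 673 234 103  18
#     201  96 342 965 150
#     630 803 746 422 111
#     537 699 497 121 956
#     805 732 524  37 331
#
# the minimal path is 131 -> 201 -> 96 -> 342 -> 234 -> 103 -> 18 -> 150 ->
# 111 -> 422 -> 121 -> 37 -> 331, moving right, down, up and left as needed,
# for a total sum of 2297.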
from __future__ import print_function
import timeit
import os
try:
range = xrange
except NameError:
pass
# note: str.strip() removes a character set, not a substring, so split off the
# directory name instead to get the repository root
path = os.getcwd().rsplit('py_solutions_81-90', 1)[0]
with open(path + 'euler_txt/matrix.txt') as f:
edges = [list(map(int, v.split(','))) for v in f.readlines()]
traveled = [['inf'] * 80 for _ in range(80)]
def euler_83():
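    # Work-list relaxation: start at the top-left corner, greedily follow the
    # cheapest improving neighbour (see traverse below) and queue the other
    # improved neighbours on `heap` (a plain FIFO list) until no entries remain.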
x = y = 0
heap = [[y, x]]
while heap:
y, x = heap.pop(0)
traverse(y, x, heap)
return traveled[79][79]
def traverse(y, x, heap):
bounds = 80
r_vertex = d_vertex = u_vertex = l_vertex = False
if traveled[y][x] == 'inf':
traveled[y][x] = curr = edges[y][x]
else:
curr = traveled[y][x]
if x + 1 >= bounds and y + 1 >= bounds:
return
if y + 1 < bounds:
d_vertex = d_edge(y, x, curr)
if x + 1 < bounds:
r_vertex = r_edge(y, x, curr)
if y - 1 >= 0:
u_vertex = u_edge(y, x, curr)
if x - 1 >= 0:
l_vertex = l_edge(y, x, curr)
mvs = {d_vertex:'d_vertex',
r_vertex:'r_vertex',
u_vertex:'u_vertex',
l_vertex:'l_vertex'
}
if any(mvs):
mvs = {k:v for k,v in mvs.items() if k}
next_mv = min(mvs)
heap_mv = [mv for mv in mvs.values() if mv != mvs[next_mv]]
push_heap(y, x, heap, heap_mv)
if mvs[next_mv] == 'd_vertex':
traverse(y + 1, x, heap)
elif mvs[next_mv] == 'r_vertex':
traverse(y, x + 1, heap)
elif mvs[next_mv] == 'u_vertex':
traverse(y - 1, x, heap)
else:
traverse(y, x - 1, heap)
def d_edge(y, x, curr):
d_vertex = curr + edges[y + 1][x]
if traveled[y + 1][x] == 'inf':
traveled[y + 1][x] = d_vertex
elif d_vertex < traveled[y + 1][x]:
traveled[y + 1][x] = d_vertex
else:
d_vertex = False
return d_vertex
def r_edge(y, x, curr):
r_vertex = curr + edges[y][x + 1]
if traveled[y][x + 1] == 'inf':
traveled[y][x + 1] = r_vertex
elif r_vertex < traveled[y][x + 1]:
traveled[y][x + 1] = r_vertex
else:
r_vertex = False
return r_vertex
def u_edge(y, x, curr):
u_vertex = curr + edges[y - 1][x]
if traveled[y - 1][x] == 'inf':
traveled[y - 1][x] = u_vertex
elif u_vertex < traveled[y - 1][x]:
traveled[y - 1][x] = u_vertex
else:
u_vertex = False
return u_vertex
def l_edge(y, x, curr):
l_vertex = curr + edges[y][x - 1]
if traveled[y][x - 1] == 'inf':
traveled[y][x - 1] = l_vertex
elif l_vertex < traveled[y][x - 1]:
traveled[y][x - 1] = l_vertex
else:
l_vertex = False
return l_vertex
def push_heap(y, x, heap, heap_mv):
mv_coor = {'d_vertex':[y + 1,x],
'r_vertex':[y, x + 1],
'u_vertex':[y - 1, x],
'l_vertex':[y, x - 1]
}
heap.extend([mv_coor[i] for i in heap_mv])
if __name__ == '__main__':
start = timeit.default_timer()
print('Answer: {}'.format(euler_83()))
stop = timeit.default_timer()
print('Time: {0:9.5f}'.format(stop - start))
| mit | -351,287,273,247,766,900 | 25.238095 | 72 | 0.503327 | false |
fracpete/wekamooc | moredataminingwithweka/class-2.5.py | 1 | 1774 | # This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# More Data Mining with Weka - Class 2.5
# Copyright (C) 2014 Fracpete (fracpete at gmail dot com)
# Use the WEKAMOOC_DATA environment variable to set the location
# for the datasets
import os
data_dir = os.environ.get("WEKAMOOC_DATA")
if data_dir is None:
data_dir = "." + os.sep + "data"
import weka.core.jvm as jvm
from weka.core.converters import Loader
from weka.classifiers import Classifier, Evaluation, PredictionOutput
from weka.core.classes import Random
import weka.plot.classifiers as plc
jvm.start()
# load weather.nominal
fname = data_dir + os.sep + "weather.nominal.arff"
print("\nLoading dataset: " + fname + "\n")
loader = Loader(classname="weka.core.converters.ArffLoader")
data = loader.load_file(fname)
data.class_is_last()
# cross-validate NaiveBayes
cls = Classifier(classname="weka.classifiers.bayes.NaiveBayes")
pout = PredictionOutput(classname="weka.classifiers.evaluation.output.prediction.PlainText", options=["-distribution"])
evl = Evaluation(data)
evl.crossvalidate_model(cls, data, 10, Random(1), pout)
print(evl.summary())
print(evl.matrix())
print(pout)
plc.plot_roc(evl, wait=True)
jvm.stop()
| gpl-3.0 | 7,347,292,716,167,464,000 | 35.204082 | 119 | 0.75761 | false |
bastiandg/infrastructure | ensure_dependencies.py | 1 | 9439 | #!/usr/bin/env python
# coding: utf-8
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import sys
import os
import posixpath
import re
import io
import errno
import logging
import subprocess
import urlparse
from collections import OrderedDict
from ConfigParser import RawConfigParser
USAGE = """
A dependencies file should look like this:
# VCS-specific root URLs for the repositories
_root = hg:https://hg.adblockplus.org/ git:https://github.com/adblockplus/
# File to update this script from (optional)
_self = buildtools/ensure_dependencies.py
# Check out elemhidehelper repository into extensions/elemhidehelper directory
# at tag "1.2".
extensions/elemhidehelper = elemhidehelper 1.2
# Check out buildtools repository into buildtools directory at VCS-specific
# revision IDs.
buildtools = buildtools hg:016d16f7137b git:f3f8692f82e5
"""
class Mercurial():
def istype(self, repodir):
return os.path.exists(os.path.join(repodir, ".hg"))
def clone(self, source, target):
if not source.endswith("/"):
source += "/"
subprocess.check_call(["hg", "clone", "--quiet", "--noupdate", source, target])
def get_revision_id(self, repo, rev=None):
command = ["hg", "id", "--repository", repo, "--id"]
if rev:
command.extend(["--rev", rev])
# Ignore stderr output and return code here: if revision lookup failed we
# should simply return an empty string.
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()[0]
return result.strip()
def pull(self, repo):
subprocess.check_call(["hg", "pull", "--repository", repo, "--quiet"])
def update(self, repo, rev):
subprocess.check_call(["hg", "update", "--repository", repo, "--quiet", "--check", "--rev", rev])
def ignore(self, target, repo):
if not self.istype(target):
config_path = os.path.join(repo, ".hg", "hgrc")
ignore_path = os.path.abspath(os.path.join(repo, ".hg", "dependencies"))
config = RawConfigParser()
config.read(config_path)
if not config.has_section("ui"):
config.add_section("ui")
config.set("ui", "ignore.dependencies", ignore_path)
with open(config_path, "w") as stream:
config.write(stream)
module = os.path.relpath(target, repo)
_ensure_line_exists(ignore_path, module)
class Git():
def istype(self, repodir):
return os.path.exists(os.path.join(repodir, ".git"))
def clone(self, source, target):
source = source.rstrip("/")
if not source.endswith(".git"):
source += ".git"
subprocess.check_call(["git", "clone", "--quiet", source, target])
def get_revision_id(self, repo, rev="HEAD"):
command = ["git", "rev-parse", "--revs-only", rev + '^{commit}']
return subprocess.check_output(command, cwd=repo).strip()
def pull(self, repo):
subprocess.check_call(["git", "fetch", "--quiet", "--all", "--tags"], cwd=repo)
def update(self, repo, rev):
subprocess.check_call(["git", "checkout", "--quiet", rev], cwd=repo)
def ignore(self, target, repo):
module = os.path.relpath(target, repo)
exclude_file = os.path.join(repo, ".git", "info", "exclude")
_ensure_line_exists(exclude_file, module)
repo_types = OrderedDict((
("hg", Mercurial()),
("git", Git()),
))
def parse_spec(path, line):
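  # Illustrative example: the dependencies line
  #   buildtools = buildtools hg:016d16f7137b git:f3f8692f82e5
  # returns ("buildtools", OrderedDict with "_source" -> "buildtools",
  # "hg" -> "016d16f7137b", "git" -> "f3f8692f82e5").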
if "=" not in line:
logging.warning("Invalid line in file %s: %s" % (path, line))
return None, None
key, value = line.split("=", 1)
key = key.strip()
items = value.split()
if not len(items):
logging.warning("No value specified for key %s in file %s" % (key, path))
return key, None
result = OrderedDict()
if not key.startswith("_"):
result["_source"] = items.pop(0)
for item in items:
if ":" in item:
type, value = item.split(":", 1)
else:
type, value = ("*", item)
if type in result:
logging.warning("Ignoring duplicate value for type %s (key %s in file %s)" % (type, key, path))
else:
result[type] = value
return key, result
def read_deps(repodir):
result = {}
deps_path = os.path.join(repodir, "dependencies")
try:
with io.open(deps_path, "rt", encoding="utf-8") as handle:
for line in handle:
# Remove comments and whitespace
line = re.sub(r"#.*", "", line).strip()
if not line:
continue
key, spec = parse_spec(deps_path, line)
if spec:
result[key] = spec
return result
except IOError, e:
if e.errno != errno.ENOENT:
raise
return None
def safe_join(path, subpath):
# This has been inspired by Flask's safe_join() function
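  # e.g. safe_join("adblockplus", "buildtools") gives "adblockplus/buildtools"
  # (using the platform's separator), while absolute subpaths or anything
  # escaping the repository via ".." raise an exception.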
forbidden = set([os.sep, os.altsep]) - set([posixpath.sep, None])
if any(sep in subpath for sep in forbidden):
raise Exception("Illegal directory separator in dependency path %s" % subpath)
normpath = posixpath.normpath(subpath)
if posixpath.isabs(normpath):
raise Exception("Dependency path %s cannot be absolute" % subpath)
if normpath == posixpath.pardir or normpath.startswith(posixpath.pardir + posixpath.sep):
raise Exception("Dependency path %s has to be inside the repository" % subpath)
return os.path.join(path, *normpath.split(posixpath.sep))
def get_repo_type(repo):
for name, repotype in repo_types.iteritems():
if repotype.istype(repo):
return name
return None
def ensure_repo(parentrepo, target, roots, sourcename):
if os.path.exists(target):
return
parenttype = get_repo_type(parentrepo)
type = None
for key in roots:
if key == parenttype or (key in repo_types and type is None):
type = key
if type is None:
raise Exception("No valid source found to create %s" % target)
url = urlparse.urljoin(roots[type], sourcename)
logging.info("Cloning repository %s into %s" % (url, target))
repo_types[type].clone(url, target)
for repo in repo_types.itervalues():
if repo.istype(parentrepo):
repo.ignore(target, parentrepo)
def update_repo(target, revisions):
type = get_repo_type(target)
if type is None:
logging.warning("Type of repository %s unknown, skipping update" % target)
return
if type in revisions:
revision = revisions[type]
elif "*" in revisions:
revision = revisions["*"]
else:
logging.warning("No revision specified for repository %s (type %s), skipping update" % (target, type))
return
resolved_revision = repo_types[type].get_revision_id(target, revision)
if not resolved_revision:
logging.info("Revision %s is unknown, downloading remote changes" % revision)
repo_types[type].pull(target)
resolved_revision = repo_types[type].get_revision_id(target, revision)
if not resolved_revision:
raise Exception("Failed to resolve revision %s" % revision)
current_revision = repo_types[type].get_revision_id(target)
if resolved_revision != current_revision:
logging.info("Updating repository %s to revision %s" % (target, resolved_revision))
repo_types[type].update(target, resolved_revision)
def resolve_deps(repodir, level=0, self_update=True, overrideroots=None, skipdependencies=set()):
config = read_deps(repodir)
if config is None:
if level == 0:
logging.warning("No dependencies file in directory %s, nothing to do...\n%s" % (repodir, USAGE))
return
if level >= 10:
logging.warning("Too much subrepository nesting, ignoring %s" % repo)
if overrideroots is not None:
config["_root"] = overrideroots
for dir, revisions in config.iteritems():
if dir.startswith("_") or revisions["_source"] in skipdependencies:
continue
target = safe_join(repodir, dir)
ensure_repo(repodir, target, config.get("_root", {}), revisions["_source"])
update_repo(target, revisions)
resolve_deps(target, level + 1, self_update=False, overrideroots=overrideroots, skipdependencies=skipdependencies)
if self_update and "_self" in config and "*" in config["_self"]:
source = safe_join(repodir, config["_self"]["*"])
try:
with io.open(source, "rb") as handle:
sourcedata = handle.read()
except IOError, e:
if e.errno != errno.ENOENT:
raise
logging.warning("File %s doesn't exist, skipping self-update" % source)
return
target = __file__
with io.open(target, "rb") as handle:
targetdata = handle.read()
if sourcedata != targetdata:
logging.info("Updating %s from %s, don't forget to commit" % (source, target))
with io.open(target, "wb") as handle:
handle.write(sourcedata)
if __name__ == "__main__":
logging.info("Restarting %s" % target)
os.execv(sys.executable, [sys.executable, target] + sys.argv[1:])
else:
logging.warning("Cannot restart %s automatically, please rerun" % target)
def _ensure_line_exists(path, pattern):
with open(path, 'a+') as f:
file_content = [l.strip() for l in f.readlines()]
if not pattern in file_content:
file_content.append(pattern)
f.seek(0, os.SEEK_SET)
f.truncate()
for l in file_content:
print >>f, l
if __name__ == "__main__":
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)
repos = sys.argv[1:]
if not len(repos):
repos = [os.getcwd()]
for repo in repos:
resolve_deps(repo)
| gpl-3.0 | -3,940,309,025,700,698,600 | 32.119298 | 118 | 0.658332 | false |
lcrees/twoq | twoq/tests/auto/queuing.py | 1 | 2927 | # -*- coding: utf-8 -*-
'''auto queuing call chain test mixins'''
class AQMixin(object):
###########################################################################
## queue manipulation #####################################################
###########################################################################
def test_repr(self):
from stuf.six import strings
self.assertTrue(isinstance(
self.qclass([1, 2, 3, 4, 5, 6]).__repr__(), strings,
))
def test_ro(self):
self.assertListEqual(
self.qclass([1, 2, 3, 4, 5, 6]).ro().peek(), [1, 2, 3, 4, 5, 6],
)
def test_extend(self):
self.assertEqual(
self.qclass().extend([1, 2, 3, 4, 5, 6]).outsync().end(),
[1, 2, 3, 4, 5, 6],
)
def test_outextend(self):
self.assertEqual(
self.qclass().outextend([1, 2, 3, 4, 5, 6]).end(),
[1, 2, 3, 4, 5, 6],
)
def test_extendleft(self):
self.assertListEqual(
self.qclass().extendleft([1, 2, 3, 4, 5, 6]).outsync().end(),
[6, 5, 4, 3, 2, 1]
)
def test_append(self):
autoq = self.qclass().append('foo').outsync()
self.assertEqual(autoq.end(), 'foo')
def test_appendleft(self):
autoq = self.qclass().appendleft('foo').outsync()
self.assertEqual(autoq.end(), 'foo')
def test_inclear(self):
self.assertEqual(len(list(self.qclass([1, 2, 5, 6]).inclear())), 0)
def test_outclear(self):
self.assertEqual(
len(list(self.qclass([1, 2, 5, 6]).outclear().outgoing)), 0
)
###########################################################################
## queue balancing ########################################################
###########################################################################
def test_insync(self):
q = self.qclass([1, 2, 3, 4, 5, 6]).outshift().inclear().shift()
self.assertListEqual(list(q.incoming), list(q.outgoing))
def test_inshift(self):
q = self.qclass([1, 2, 3, 4, 5, 6]).outshift().sync()
self.assertListEqual(list(q.incoming), list(q.outgoing))
def test_outsync(self):
q = self.qclass([1, 2, 3, 4, 5, 6]).outshift()
self.assertListEqual(list(q.incoming), list(q.outgoing))
def test_outshift(self):
q = self.qclass([1, 2, 3, 4, 5, 6]).outsync()
self.assertListEqual(list(q.incoming), list(q.outgoing))
##########################################################################
# queue information ######################################################
##########################################################################
def test_results(self):
self.assertListEqual(
list(self.qclass(1, 2, 3, 4, 5, 6).outsync().results()),
[1, 2, 3, 4, 5, 6],
)
| bsd-3-clause | 2,572,901,103,623,996,000 | 33.845238 | 79 | 0.413393 | false |
rndusr/stig | stig/commands/tui/tui.py | 1 | 45516 | # This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details
# http://www.gnu.org/licenses/gpl-3.0.txt
"""Commands that work exclusively in the TUI"""
import functools
import os
import shlex
from functools import partial
from . import _mixin as mixin
from .. import CmdError, CommandMeta, utils
from ... import client, objects
from ...completion import candidates
from ._common import make_tab_title_widget
from ...logging import make_logger # isort:skip
log = make_logger(__name__)
# Import tui.main module only on demand
def _get_keymap_contexts():
from ...tui.tuiobjects import keymap
return tuple(keymap.contexts)
class BindCmd(metaclass=CommandMeta):
name = 'bind'
provides = {'tui'}
category = 'tui'
description = 'Bind keys to commands or other keys'
usage = ('bind [<OPTIONS>] <KEY> <ACTION>',)
examples = ('bind ctrl-a tab ls active',
'bind --context tabs alt-[ tab --focus left',
'bind --context tabs alt-] tab --focus right',
'bind --context torrent alt-! start --force',
"bind --context torrent 'd .' rm",
"bind --context torrent 'd+!' rm --delete-files",
'bind u <up>',
'bind d <down>')
argspecs = (
{'names': ('--context','-c'),
'description': 'Where KEY is grabbed (see CONTEXTS section)'},
{'names': ('--description','-d'),
'description': 'Explanation of what ACTION does'},
{'names': ('KEY',),
'description': 'One or more keys or key combinations (see KEYS section)'},
{'names': ('ACTION',), 'nargs': 'REMAINDER',
'description': ("Any command or '<KEY>' (including the brackets) "
'to translate one key to another')},
)
def __create_CONTEXTS_section():
lines = [
('The same key can be bound multiple times in different contexts. '
'With no context given, the default context is used. The default '
"context gets the key if it isn't mapped in any other relevant context."),
'',
'Available contexts are: ' + ', '.join(str(c) for c in _get_keymap_contexts()),
'',
'EXAMPLE',
'\tbind --context torrent ctrl-t start',
'\tbind --context tabs ctrl-t tab',
'\tbind ctrl-t <left>',
'',
('\tWhen focusing a torrent, <ctrl-t> starts the focused torrent. '
'If focus is not on a torrent but still on a tab (e.g. in an empty '
'torrent list or when reading this text) a new tab is opened. '
'Otherwise (e.g. focus is on the command prompt), <ctrl-t> does the '
'same as <left>.'),
]
return lines
more_sections = {
'CONTEXTS': __create_CONTEXTS_section,
'KEYS': (
'Single-character keys are specified as themselves (e.g. h, X, 5, !, þ, ¥, etc).',
'',
('Special key names are enter, space, tab, backspace, insert, delete, home, end, '
'up, down, left, right, pgup, pgdn and f1-12.'),
'',
("The modifiers 'ctrl', 'alt' and 'shift' are separated with '-' from the key "
"(e.g. alt-i, shift-delete, ctrl-a). shift-x is identical to X."),
'',
("Chained keys are sparated by single spaces (' ') or pluses ('+') and must be "
"given as one argument per chain."),
)
}
def run(self, context, description, KEY, ACTION):
from ...tui.tuiobjects import keymap
key = KEY
if len(ACTION) == 1 and ACTION[0][0] == '<' and ACTION[0][-1] == '>':
# ACTION is another key (e.g. 'j' -> 'down')
action = keymap.mkkey(ACTION[0])
else:
action = ' '.join(shlex.quote(x) for x in ACTION)
if context is None:
context = keymap.DEFAULT_CONTEXT
elif context not in _get_keymap_contexts():
raise CmdError('Invalid context: %r' % (context,))
try:
keymap.bind(key, action, context=context, description=description)
except ValueError as e:
raise CmdError(e)
_own_options = {('--context', '-c'): 1,
('--description', '-d'): 1}
@classmethod
def completion_candidates_posargs(cls, args):
"""Complete positional arguments"""
posargs = args.posargs(cls._own_options)
if posargs.curarg_index == 2:
# First positional argument is the key, second is the command's name
return candidates.commands()
else:
# Any other positional arguments will be passed to subcmd
subcmd = cls._get_subcmd(args)
if subcmd:
return candidates.for_args(subcmd)
@classmethod
def completion_candidates_opts(cls, args):
"""Return candidates for arguments that start with '-'"""
subcmd = cls._get_subcmd(args)
if subcmd:
# Get completion candidates from subcmd's class
return candidates.for_args(subcmd)
else:
# Parent class generates candidates for our own options
return super().completion_candidates_opts(args)
@classmethod
def completion_candidates_params(cls, option, args):
"""Complete parameters (e.g. --option parameter1,parameter2)"""
if option == '--context':
return candidates.keybinding_contexts()
@classmethod
def _get_subcmd(cls, args):
# posarg[0] is 'bind', posarg[1] is the key
subcmd_start = args.nth_posarg_index(3, cls._own_options)
# Subcmd is only relevant if the cursor is somewhere on it.
# Otherwise, we're on our own arguments.
if subcmd_start is not None and subcmd_start < args.curarg_index:
return args[subcmd_start:]
class UnbindCmd(metaclass=CommandMeta):
name = 'unbind'
provides = {'tui'}
category = 'tui'
description = 'Unbind keys so pressing them has no effect'
usage = ('unbind [<OPTIONS>] <KEY> <KEY> ...',)
examples = ('unbind --context main ctrl-l',
'unbind q')
argspecs = (
{'names': ('--context','-c'),
'description': 'Where KEY is grabbed (see "bind" command)'},
{'names': ('--all','-a'), 'action': 'store_true',
'description': 'Remove all keybindings or only those in given context'},
{'names': ('KEY',), 'nargs': 'REMAINDER',
'description': 'Keys or key combinations (see "bind" command)'},
)
more_sections = {
'COMPLETE UNBINDING': (
('For this command there is a special context called \'all\' that '
'unbinds the key for every context.'),
'',
'Note that \'unbind --all\' is very different from \'unbind --context all\''
)
}
def run(self, context, all, KEY):
from ...tui.tuiobjects import keymap
if context is not None and context not in _get_keymap_contexts():
raise CmdError('Invalid context: %r' % (context,))
if KEY:
if context:
success = self._unbind_keys(keys=KEY, context=context)
elif all:
success = self._unbind_keys(keys=KEY, context=keymap.ALL_CONTEXTS)
else:
success = self._unbind_keys(keys=KEY, context=keymap.DEFAULT_CONTEXT)
else:
success = self._unbind_all_keys(context=context)
if not success:
raise CmdError()
def _unbind_keys(self, keys, context):
from ...tui.tuiobjects import keymap
success = True
for key in keys:
try:
keymap.unbind(key, context=context)
except ValueError as e:
self.error(e)
success = False
return success
def _unbind_all_keys(self, context):
from ...tui.tuiobjects import keymap
if context is None:
keymap.clear()
else:
keymap.clear(context=context)
return True
@classmethod
def completion_candidates_posargs(cls, args):
"""Complete positional arguments"""
return candidates.keybinding_keys(args)
@classmethod
def completion_candidates_params(cls, option, args):
"""Complete parameters (e.g. --option parameter1,parameter2)"""
if option == '--context':
return candidates.keybinding_contexts()
class SetCommandCmd(mixin.placeholders, metaclass=CommandMeta):
name = 'setcommand'
aliases = ('setcmd',)
provides = {'tui'}
category = 'tui'
description = 'Open the command line and insert a command'
usage = ('setcommand [--trailing-space] <COMMAND> <ARGUMENT> <ARGUMENT> ...',)
examples = (
'setcommand --trailing-space tab ls',
'\tAsk the user for a filter before opening a new torrent list.',
'',
'setcommand move {{location}}/',
('\tMove the focused torrent, using the path of the currently focused '
'list item as a starting point.'),
'',
'setcommand move id={{id}} {{location}}/',
('\tSame as above, but make sure to move the correct torrent in case '
'it is removed from the list while typing in the new path, e.g. if '
'we\'re listing active torrents and the focused torrent stops being active.'),
)
argspecs = (
{'names': ('COMMAND',), 'nargs': 'REMAINDER',
         'description': 'Command that the user can edit before executing it (see PLACEHOLDERS)'},
{'names': ('--trailing-space', '-s'), 'action': 'store_true',
'description': 'Append a space at the end of COMMAND'},
)
more_sections = {
'PLACEHOLDERS': mixin.placeholders.HELP,
}
async def run(self, COMMAND, trailing_space):
log.debug('Unresolved command: %r', COMMAND)
args = await self.parse_placeholders(*COMMAND)
log.debug('Command with resolved placeholders: %r', args)
if args:
cmdstr = ' '.join(shlex.quote(str(arg)) for arg in args)
if trailing_space:
cmdstr += ' '
from ...tui.tuiobjects import widgets
widgets.show('cli')
widgets.cli.base_widget.edit_text = cmdstr
widgets.cli.base_widget.edit_pos = len(cmdstr)
@classmethod
def completion_candidates_posargs(cls, args):
"""Complete positional arguments"""
posargs = args.posargs()
if posargs.curarg_index == 1:
# First positional argument is the subcmd's name
return candidates.commands()
else:
# Any other positional arguments are part of subcmd
subcmd = cls._get_subcmd(args)
if subcmd:
return candidates.for_args(subcmd)
@classmethod
def completion_candidates_opts(cls, args):
"""Return candidates for arguments that start with '-'"""
subcmd = cls._get_subcmd(args)
if subcmd:
# Get completion candidates for subcmd
return candidates.for_args(subcmd)
else:
# Parent class generates candidates for our own options
return super().completion_candidates_opts(args)
@staticmethod
def _get_subcmd(args):
# First posarg is 'setcommand'
subcmd_start = args.nth_posarg_index(2)
# Subcmd is only relevant if the cursor is somewhere on it.
# Otherwise, we're on our own arguments.
if subcmd_start is not None and subcmd_start < args.curarg_index:
return args[subcmd_start:]
class InteractiveCmd(mixin.placeholders, metaclass=CommandMeta):
name = 'interactive'
provides = {'tui'}
category = 'tui'
description = 'Complete partial command with user input from a dialog'
usage = ('interactive <COMMAND> [<OPTIONS>]',)
examples = (
'interactive "move \'[{location}/]\'"',
'\tAsk for the destination directory when moving torrents.',
'',
'tab ls & interactive "limit \'[]\'" --per-change --on-cancel "tab --close --focus left"',
('\tOpen a new tab with all torrents and filter them as you type. '
'Keep the tab open if the user input field is accepted with <enter> '
'or close the tab and focus the previous one if the dialog is aborted '
'with <escape>.'),
'',
'tab ls stopped & interactive \'limit "[]"\' -p -a "mark --all & start" -x "tab --close --focus left"',
('\tSearch for stopped torrents only. When accepted, the matching torrents '
'are started. The new tab is always closed, whether the dialog is '
'accepted or not.'),
)
argspecs = (
{'names': ('COMMAND',),
'description': ('Any command with "[PREFILLED TEXT]" as marker for '
'user input field (see USER INPUT FIELDS) and '
'"{{NAM}}" as placeholder for values of the currently '
'focused list item (see PLACEHOLDERS)')},
{'names': ('--per-change', '-p'), 'action': 'store_true',
'description': 'Whether to run COMMAND every time the input is changed'},
{'names': ('--on-accept', '-a'), 'metavar': 'ACCEPT COMMAND',
'description': 'Command to run when the dialog is accepted (with <enter>)'},
{'names': ('--on-cancel', '-c'), 'metavar': 'CANCEL COMMAND',
'description': 'Command to run when the dialog is aborted (with <escape>)'},
{'names': ('--on-close', '-x'), 'metavar': 'CLOSE COMMAND',
'description': 'Command to run after the dialog is closed either way'},
{'names': ('--ignore-errors', '-i'), 'action': 'store_true',
'description': 'Whether to ignore errors from COMMAND'},
)
more_sections = {
'COMMANDS': (('For each occurrence of "[]" in any command, the user is '
'prompted for input to insert at that point. Any text between '
'"[" and "]" is used as the initial user input. "[" can be '
'escaped with "\\" in which case the corresponding "]" is also '
'interpreted literally.'),
'',
('COMMAND is called if the user presses <enter> or, if --per-change '
'is given, after any user input field is changed.'),
'',
('COMMAND must contain at least one user input field. Any of the '
'commands described below are called without user interaction if '
'they don\'t contain any user input fields.'),
'',
('ACCEPT COMMAND is called after COMMAND if the user accepts the '
'dialog by pressing <enter>.'),
'',
'CANCEL COMMAND is called if the user aborts the COMMAND dialog.',
'',
('CLOSE COMMAND is always called when the dialog is closed either '
'by accepting or by cancelling it.')),
'PLACEHOLDERS': mixin.placeholders.HELP,
}
import re
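    # Matches "[...]" user input markers that are not escaped with a backslash;
    # the capturing group makes re.split() keep each marker as its own item
    # (see _split_cmd_at_inputs() below).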
_input_regex = re.compile(r'(?<!\\)(\[.*?\])')
async def run(self, COMMAND, per_change, on_accept, on_cancel, on_close, ignore_errors):
cmd = await self._parse_cmd(COMMAND)
accept_cmd = await self._parse_cmd(on_accept) if on_accept else None
cancel_cmd = await self._parse_cmd(on_cancel) if on_cancel else None
close_cmd = await self._parse_cmd(on_close) if on_close else None
self._ignore_errors = ignore_errors
if len(cmd) == 1:
# There are no user input markers
raise CmdError('No user input fields ("[]"): %s' % COMMAND)
def close_cb():
self._run_cmd_or_open_dialog(close_cmd)
if per_change:
def accept_cb():
self._run_cmd_from_dialog()
self._run_cmd_or_open_dialog(accept_cmd)
def cancel_cb():
self._run_cmd_or_open_dialog(cancel_cmd)
self._open_dialog(cmd,
on_change=self._run_cmd_from_dialog,
on_accept=accept_cb,
on_cancel=cancel_cb,
on_close=close_cb)
else:
def accept_cb():
self._run_cmd_from_dialog()
self._run_cmd_or_open_dialog(accept_cmd)
def cancel_cb():
self._run_cmd_or_open_dialog(cancel_cmd)
self._open_dialog(cmd,
on_accept=accept_cb,
on_cancel=cancel_cb,
on_close=close_cb)
_WIDGET_NAME = 'interactive_prompt'
_MIN_EDIT_WIDTH = 25
_MAX_EDIT_WIDTH = 50
def _open_dialog(self, cmd, on_change=None, on_accept=None, on_cancel=None, on_close=None):
import urwid
from ...tui.cli import CLIEditWidget
def accept_cb(widget):
# CLIEditWidget only automatically appends to history when it gets
# an <enter> key, but only one gets it if there are multiple user
# input fields.
for part in self._edit_widgets:
part.append_to_history()
self._close_dialog()
if on_accept: on_accept()
if on_close: on_close()
def cancel_cb(widget):
self._close_dialog()
if on_cancel: on_cancel()
if on_close: on_close()
def change_cb(widget):
if on_change: on_change()
# Derive history file name from command
import re
filename = re.sub('[/\n]', '__', ''.join(cmd))
history_file_base = os.path.join(objects.localcfg['tui.cli.history-dir'].full_path, filename)
columns_args = [('pack', urwid.Text(':'))]
self._cmd_parts = []
self._edit_widgets = []
edit_index = 0
for part in cmd:
if part[0] == '[' and part[-1] == ']':
edit_index += 1
history_file = history_file_base + '.input%d' % edit_index
log.debug('History file for edit #%d: %r', edit_index, history_file)
edit_widget = CLIEditWidget(on_change=change_cb,
on_accept=accept_cb,
on_cancel=cancel_cb,
history_file=history_file)
edit_widget.edit_text = part[1:-1]
edit_widget.edit_pos = len(edit_widget.edit_text)
columns_args.append(urwid.AttrMap(edit_widget, 'prompt'))
self._cmd_parts.append(edit_widget)
self._edit_widgets.append(edit_widget)
else:
columns_args.append(('pack', urwid.Text(part)))
self._cmd_parts.append(part)
class MyColumns(urwid.Columns):
"""Use <tab> and <shift-tab> to move focus between input fields"""
def keypress(self, size, key):
def move_right():
if self.focus_position < len(self.contents) - 1:
self.focus_position += 1
else:
self.focus_position = 0
def move_left():
if self.focus_position > 0:
self.focus_position -= 1
else:
self.focus_position = len(self.contents) - 1
if key == 'tab':
move_right()
while not isinstance(self.focus.base_widget, urwid.Edit):
move_right()
elif key == 'shift-tab':
move_left()
while not isinstance(self.focus.base_widget, urwid.Edit):
move_left()
else:
log.debug('focus pos: %r', self.focus_position)
return super().keypress(size, key)
columns_widget = MyColumns(columns_args)
# Close any previously opened dialog
from ...tui.tuiobjects import widgets
if widgets.exists(self._WIDGET_NAME):
self._close_dialog()
# Focus the first empty input widget if there are any
for i,(w,_) in enumerate(columns_widget.contents):
w = w.base_widget
log.debug('%02d: %r', i, w)
if hasattr(w, 'edit_text') and w.edit_text == '':
columns_widget.focus_position = i
break
widgets.add(name=self._WIDGET_NAME,
widget=urwid.AttrMap(columns_widget, 'cli'),
position=widgets.get_position('cli'),
removable=True,
options='pack')
def _close_dialog(self):
from ...tui.tuiobjects import widgets
widgets.remove(self._WIDGET_NAME)
widgets.focus_name = 'main'
def _run_cmd_or_open_dialog(self, cmd):
if not cmd:
return
elif len(cmd) == 1:
log.debug('Running command without dialog: %r', cmd)
self._run_cmd(cmd[0])
else:
log.debug('Running command in dialog: %r', cmd)
self._open_dialog(cmd, on_accept=self._run_cmd_from_dialog)
def _run_cmd_from_dialog(self):
cmd = []
for part in self._cmd_parts:
if hasattr(part, 'edit_text'):
cmd.append(part.edit_text)
else:
cmd.append(part)
cmd = ''.join(cmd)
log.debug('Got command from current dialog: %r', cmd)
self._run_cmd(cmd)
def _run_cmd(self, cmd):
log.debug('Running cmd: %r', cmd)
if self._ignore_errors:
# Overload the error() method on the command's instance
objects.cmdmgr.run_task(cmd, error=lambda msg: None)
else:
objects.cmdmgr.run_task(cmd)
async def _parse_cmd(self, cmd):
assert isinstance(cmd, str)
args = await self.parse_placeholders(cmd)
return self._split_cmd_at_inputs(args[0])
def _split_cmd_at_inputs(self, cmd):
"""
Split `cmd` so that each input marker ("[...]") is a single item
Example result:
['somecmd --an-argument ', '[user input goes here]', ' some more arguments']
"""
log.debug('Splitting %r', cmd)
parts = [part for part in self._input_regex.split(cmd) if part]
log.debug('Split: %r', parts)
for i in range(len(parts)):
parts[i] = parts[i].replace('\\[', '[')
log.debug('Unescaped: %r', parts)
return parts
class MarkCmd(metaclass=CommandMeta):
name = 'mark'
provides = {'tui'}
category = 'tui'
description = 'Select torrents or files for an action'
usage = ('mark [<OPTIONS>]',)
argspecs = (
{'names': ('--focus-next','-n'), 'action': 'store_true',
'description': 'Move focus forward after marking or toggling'},
{'names': ('--toggle','-t'), 'action': 'store_true',
'description': 'Mark if unmarked, unmark if marked'},
{'names': ('--all','-a'), 'action': 'store_true',
'description': 'Mark or toggle all items'},
)
more_sections = {
'NOTES': (('The column "marked" must be in the "columns.*" settings. Otherwise '
'marked list items are indistinguishable from unmarked ones.'),
'',
('The character that is displayed in the "marked" column is '
'specified by the settings "tui.marked.on" and "tui.marked.off".')),
}
def run(self, focus_next, toggle, all):
from ...tui.tuiobjects import tabs
widget = tabs.focus
if not widget.has_marked_column:
raise CmdError('Nothing to mark here.')
else:
widget.mark(toggle=toggle, all=all)
if focus_next:
widget.focus_position += 1
class UnmarkCmd(metaclass=CommandMeta):
name = 'unmark'
provides = {'tui'}
category = 'tui'
description = 'Deselect torrents or files for an action'
usage = ('unmark [<OPTIONS>]',)
argspecs = (
{'names': ('--focus-next','-n'), 'action': 'store_true',
'description': 'Move focus forward after unmarking or toggling'},
{'names': ('--toggle','-t'), 'action': 'store_true',
'description': 'Mark if unmarked, unmark if marked'},
{'names': ('--all','-a'), 'action': 'store_true',
'description': 'Unmark or toggle all items'},
)
more_sections = MarkCmd.more_sections
def run(self, focus_next, toggle, all):
from ...tui.tuiobjects import tabs
widget = tabs.focus
if not widget.has_marked_column:
raise CmdError('Nothing to unmark here.')
else:
widget.unmark(toggle=toggle, all=all)
if focus_next:
widget.focus_position += 1
class QuitCmd(metaclass=CommandMeta):
name = 'quit'
provides = {'tui'}
category = 'tui'
description = 'Terminate the TUI'
def run(self):
import urwid
raise urwid.ExitMainLoop()
class FindCmd(metaclass=CommandMeta):
name = 'find'
provides = {'tui'}
category = 'tui'
description = 'Find text in the content of the focused tab'
usage = ('find [<OPTIONS>] [<PHRASE>]',)
argspecs = (
{'names': ('--clear','-c'), 'action': 'store_true',
'description': ('Remove previously applied filter; this is '
'the default if no PHRASE arguments are provided')},
{'names': ('--next','-n'), 'action': 'store_true',
'description': 'Jump to next match (call `find <PHRASE>` first)'},
{'names': ('--previous','-p'), 'action': 'store_true',
'description': 'Jump to previous match (call `find <PHRASE>` first)'},
{'names': ('PHRASE',), 'nargs': '*',
'description': 'Search phrase'},
)
def run(self, clear, next, previous, PHRASE):
from ...tui.tuiobjects import tabs
content = tabs.focus.base_widget
if not hasattr(content, 'search_phrase'):
raise CmdError('This tab does not support finding.')
elif next and previous:
raise CmdError('The options --next and --previous contradict each other.')
elif next:
if content.search_phrase is None:
raise CmdError('Set a search phrase first with `find <PHRASE>`.')
else:
content.jump_to_next_match()
elif previous:
if content.search_phrase is None:
raise CmdError('Set a search phrase first with `find <PHRASE>`.')
else:
content.jump_to_prev_match()
elif clear:
content.search_phrase = None
else:
try:
content.search_phrase = ' '.join(PHRASE)
content.maybe_jump_to_next_match()
except ValueError as e:
raise CmdError(e)
class LimitCmd(metaclass=CommandMeta):
name = 'limit'
provides = {'tui'}
category = 'tui'
description = 'Limit contents of the focused tab by applying more filters'
usage = ('limit [<OPTIONS>] [<FILTER> <FILTER> ...]',)
argspecs = (
{'names': ('--clear','-c'), 'action': 'store_true',
'description': ('Remove previously applied filter; this is '
'the default if no FILTER arguments are provided')},
{'names': ('FILTER',), 'nargs': '*',
'description': 'Filter expression (see `help filters`)'},
)
def run(self, clear, FILTER):
from ...tui.tuiobjects import tabs
content = tabs.focus.base_widget
if not hasattr(content, 'secondary_filter'):
raise CmdError('This tab does not support limiting.')
else:
if clear or not FILTER:
content.secondary_filter = None
else:
try:
content.secondary_filter = FILTER
except ValueError as e:
raise CmdError(e)
@classmethod
def completion_candidates_posargs(cls, args):
"""Complete positional arguments"""
from ...tui.tuiobjects import tabs
from ...tui.views import (TorrentListWidget, FileListWidget,
PeerListWidget, TrackerListWidget,
SettingListWidget)
widget = tabs.focus.base_widget
if hasattr(widget, 'secondary_filter'):
if isinstance(widget, TorrentListWidget):
return candidates.torrent_filter(args.curarg)
elif isinstance(widget, FileListWidget):
torrent_filter = 'id=%s' % (widget.focused_torrent_id,)
return candidates.file_filter(args.curarg, torrent_filter)
elif isinstance(widget, PeerListWidget):
return candidates.peer_filter(args.curarg, None)
elif isinstance(widget, TrackerListWidget):
torrent_filter = '|'.join('id=%s' % (itemw.torrent_id,)
for itemw in widget.items)
return candidates.tracker_filter(args.curarg, torrent_filter)
elif isinstance(widget, SettingListWidget):
return candidates.setting_filter(args.curarg)
class SortCmd(metaclass=CommandMeta):
name = 'sort'
aliases = ()
provides = {'tui'}
category = 'tui'
description = "Sort lists of torrents/peers/trackers/etc"
usage = ('sort [<OPTIONS>] [<ORDER> <ORDER> <ORDER> ...]',)
examples = ('sort tracker status !rate-down',
'sort --add eta')
argspecs = (
{'names': ('ORDER',), 'nargs': '*',
'description': 'How to sort list items (see SORT ORDERS section)'},
{'names': ('--add', '-a'), 'action': 'store_true',
'description': 'Append ORDERs to current list of sort orders instead of replacing it'},
{'names': ('--delete', '-d'), 'action': 'store_true',
'description': 'Delete ORDERs from current list of sort orders instead of replacing it'},
{'names': ('--reset', '-r'), 'action': 'store_true',
'description': 'Go back to sort order that was used when the list was created'},
{'names': ('--none', '-n'), 'action': 'store_true',
'description': 'Remove all sort orders from the list'},
)
def _list_sort_orders(title, sortcls):
return (title,) + \
tuple('\t{}\t - \t{}'.format(', '.join((sname,) + s.aliases), s.description)
for sname,s in sorted(sortcls.SORTSPECS.items()))
more_sections = {
'SORT ORDERS': (_list_sort_orders('TORRENT LISTS', client.TorrentSorter) +
('',) +
_list_sort_orders('PEER LISTS', client.PeerSorter) +
('',) +
_list_sort_orders('TRACKER LISTS', client.TrackerSorter))
}
async def run(self, add, delete, reset, none, ORDER):
from ...tui.tuiobjects import tabs
current_tab = tabs.focus.base_widget
if reset:
current_tab.sort = 'RESET'
if none:
current_tab.sort = None
if ORDER:
            # Find appropriate sorter class for focused list
sortcls = self._widget2sortcls(current_tab)
if sortcls is None:
raise CmdError('Current tab is not sortable.')
try:
new_sort = sortcls(utils.listify_args(ORDER))
except ValueError as e:
raise CmdError(e)
if add and current_tab.sort is not None:
current_tab.sort += new_sort
elif delete and current_tab.sort is not None:
current_tab.sort -= new_sort
else:
current_tab.sort = new_sort
@staticmethod
def _widget2sortcls(list_widget):
from ...tui.views import (TorrentListWidget, PeerListWidget,
TrackerListWidget, SettingListWidget)
if isinstance(list_widget, TorrentListWidget):
return client.TorrentSorter
elif isinstance(list_widget, PeerListWidget):
return client.PeerSorter
elif isinstance(list_widget, TrackerListWidget):
return client.TrackerSorter
elif isinstance(list_widget, SettingListWidget):
return client.SettingSorter
@classmethod
def completion_candidates_posargs(cls, args):
"""Complete positional arguments"""
from ...tui.tuiobjects import tabs
sortcls = cls._widget2sortcls(tabs.focus.base_widget)
if sortcls is not None:
return candidates.sort_orders(sortcls.__name__)
class TabCmd(mixin.select_torrents, metaclass=CommandMeta):
name = 'tab'
provides = {'tui'}
category = 'tui'
description = 'Open, close and focus tabs'
usage = ('tab [<OPTIONS>]',
'tab [<OPTIONS>] <COMMAND>')
examples = ('tab',
'tab -c',
'tab -c active',
'tab ls active',
'tab -b ls active',
'tab -f active',
'tab -f 3 ls active',
'tab -b -f -1 ls active')
argspecs = (
{'names': ('--background', '-b'), 'action': 'store_true',
'description': 'Do not focus new tab'},
{'names': ('--close-all', '-C'), 'action': 'store_true',
'description': 'Close all tabs'},
{'names': ('--close', '-c'), 'nargs': '?', 'default': False, 'document_default': False,
'description': 'Close focused or specified tab (see TAB IDENTIFIERS SECTION)'},
{'names': ('--focus', '-f'),
'description': 'Focus specified tab (see TAB IDENTIFIERS SECTION)'},
{'names': ('--move', '-m'),
'description': 'Move focused tab left, right or to absolute position'},
{'names': ('--title', '-t'),
'description': 'Manually set tab title instead of generating one'},
{'names': ('COMMAND',), 'nargs': 'REMAINDER',
'description': ('Command to run in tab')},
)
more_sections = {
'TAB IDENTIFIERS': (
'There are three ways to specify a tab (e.g. to close it):',
(' - \tIntegers specify the position of the tab. Positive numbers '
'start from the left and negative numbers start from the right '
'(1 (and 0) is the leftmost tab and -1 is the rightmost tab).'),
(' - \t"left" and "right" specify the tabs next to the '
'currently focused tab.'),
(' - \tAnything else is assumed to be a part of a tab title. If there '
'are multiple matches, the first match from the left wins.'),
),
}
async def run(self, background, close_all, close, focus, move, title, COMMAND):
from ...tui.tuiobjects import tabs
tabid_old = tabs.get_id()
# Find relevant tab IDs and fail immediately if unsuccessful
if focus is not None:
tabid_focus = self._get_tab_id(focus)
if tabid_focus is None:
raise CmdError('No such tab: %r' % (focus,))
if close is not False:
tabid_close = self._get_tab_id(close)
if tabid_close is None:
if close is None:
raise CmdError('No tab is open')
else:
raise CmdError('No such tab: %r' % (close,))
# COMMAND may get additional hidden arguments as instance attributes
cmd_attrs = {}
# Apply close/focus/move operations
if focus is not None:
log.debug('Focusing tab %r', tabid_focus)
tabs.focus_id = tabid_focus
if close_all is not False:
log.debug('Closing all tabs')
tabs.clear()
elif close is not False:
log.debug('Closing tab %r', tabid_close)
tabs.remove(tabid_close)
elif move and tabs.focus:
self._move_tab(tabs, move)
# If no tabs were closed, focused or moved, open a new one
if close is False and close_all is False and focus is None and not move:
titlew = make_tab_title_widget(title or 'Empty tab',
attr_unfocused='tabs.unfocused',
attr_focused='tabs.focused')
tabs.insert(titlew, position='right')
log.debug('Inserted new tab at position %d: %r', tabs.focus_position, titlew.base_widget.text)
# Maybe provide a user-specified tab title to the new command
if title:
cmd_attrs['title'] = title
if COMMAND:
# Execute command
cmd_str = ' '.join(shlex.quote(arg) for arg in COMMAND)
log.debug('Running command in tab %s with args %s: %r',
tabs.focus_position,
', '.join('%s=%r' % (k,v) for k,v in cmd_attrs.items()),
cmd_str)
success = await objects.cmdmgr.run_async(cmd_str, **cmd_attrs)
else:
success = True
if background:
tabs.focus_id = tabid_old
else:
content = tabs.focus
if content is not None and hasattr(content, 'marked_count'):
from ...tui.tuiobjects import bottombar
bottombar.marked.update(content.marked_count)
return success
def _get_tab_id(self, pos):
from ...tui.tuiobjects import tabs
if len(tabs) == 0:
return None
if pos is None:
return tabs.focus_id
def find_id_by_index(index):
try:
index = int(index)
except ValueError:
pass
else:
index_max = len(tabs) - 1
# Internally, first tab is at index 0, but for users it's 1, unless
# they gave us 0, in which case we assume they mean 1.
index = index - 1 if index > 0 else index
# Limit index to index_max, considering negative values when
# indexing from the right.
if index < 0:
index = max(index, -index_max - 1)
else:
index = min(index, index_max)
return tabs.get_id(index)
def find_right_left_id(right_or_left):
tabcount = len(tabs)
if tabcount > 1:
cur_index = tabs.focus_position
cur_index = 1 if cur_index is None else cur_index
if right_or_left == 'left':
return tabs.get_id(max(0, cur_index - 1))
elif right_or_left == 'right':
return tabs.get_id(min(tabcount - 1, cur_index + 1))
def find_id_by_title(string):
for index,title in enumerate(tabs.titles):
if string in title.original_widget.text:
return tabs.get_id(index)
# Try to use pos as an index
tabid = find_id_by_index(pos)
if tabid is not None:
log.debug('Found tab ID by index: %r -> %r', pos, tabid)
return tabid
pos_str = str(pos)
# Move to left/right tab
tabid = find_right_left_id(pos_str)
if tabid is not None:
log.debug('Found tab ID by direction: %r -> %r', pos, tabid)
return tabid
# Try to find tab title
tabid = find_id_by_title(pos_str)
if tabid is not None:
log.debug('Found tab ID by title: %r -> %r', pos, tabid)
return tabid
def _move_tab(self, tabs, move):
if move == 'left':
tabs.move(tabs.get_id(), 'left')
elif move == 'right':
tabs.move(tabs.get_id(), 'right')
else:
try:
index = int(move)
except (ValueError, TypeError):
raise CmdError('--move argument must be "left", "right" or tab index: %r' % (move,))
else:
# Positive tab index starts at 0, negative at -1
if index > 0:
index -= 1
tabs.move(tabs.get_id(), index)
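    # Our own options that consume exactly one parameter; used to separate them
    # from the subcommand's arguments when locating positional arguments.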
_own_options = {('--close', '-c'): 1,
('--focus', '-f'): 1,
('--title', '-t'): 1}
@classmethod
def completion_candidates_posargs(cls, args):
"""Complete positional arguments"""
posargs = args.posargs(cls._own_options)
if posargs.curarg_index == 1:
# First positional argument is the subcmd's name
return candidates.commands()
else:
# Any other positional arguments will be passed to subcmd
subcmd = cls._get_subcmd(args)
if subcmd:
return candidates.for_args(subcmd)
@classmethod
def completion_candidates_opts(cls, args):
"""Return candidates for arguments that start with '-'"""
subcmd = cls._get_subcmd(args)
if subcmd:
# Get completion candidates for subcmd
return candidates.for_args(subcmd)
else:
# Parent class generates candidates for our own options
return super().completion_candidates_opts(args)
@classmethod
def completion_candidates_params(cls, option, args):
"""Complete parameters (e.g. --option parameter1,parameter2)"""
if option in ('--close', '--focus'):
return candidates.tab_titles()
@classmethod
def _get_subcmd(cls, args):
# First posarg is 'tab'
subcmd_start = args.nth_posarg_index(2, cls._own_options)
# Subcmd is only relevant if the cursor is somewhere on it.
# Otherwise, we're on our own arguments.
if subcmd_start is not None and subcmd_start < args.curarg_index:
return args[subcmd_start:]
class TUICmd(metaclass=CommandMeta):
name = 'tui'
provides = {'tui'}
category = 'tui'
description = 'Show or hide parts of the text user interface'
usage = ('tui <ACTION> <ELEMENT> <ELEMENT> ...',)
examples = ('tui toggle log',
'tui hide topbar.help')
argspecs = (
{'names': ('ACTION',), 'choices': ('show', 'hide', 'toggle'),
'description': '"show", "hide" or "toggle"'},
{'names': ('ELEMENT',), 'nargs': '+',
'description': ('Name of TUI elements; '
'see ELEMENT NAMES section for a list')},
)
# HelpManager supports sequences of lines or a callable that returns them
more_sections = {'ELEMENT NAMES': lambda: ('Available TUI element names are: ' +
', '.join(_tui_element_names()),)}
def run(self, ACTION, ELEMENT):
from ...tui.tuiobjects import widgets
widget = None
success = True
for element in utils.listify_args(ELEMENT):
# Resolve path
path = element.split('.')
target_name = path.pop(-1)
current_path = []
widget = widgets
try:
for widgetname in path:
current_path.append(widgetname)
widget = getattr(widget, widgetname)
except AttributeError:
self.error('Unknown TUI element: %r' % ('.'.join(current_path),))
if widget is not None:
action = getattr(widget, ACTION)
if any(ACTION == x for x in ('hide', 'toggle')):
action = partial(action, free_space=False)
log.debug('%sing %s in %s', ACTION.capitalize(), target_name, widget)
try:
action(target_name)
except ValueError as e:
success = False
self.error(e)
else:
success = success and True
if not success:
raise CmdError()
@classmethod
def completion_candidates_posargs(cls, args):
"""Complete positional arguments"""
posargs = args.posargs()
if posargs.curarg_index == 1:
for argspec in cls.argspecs:
if 'ACTION' in argspec['names']:
return candidates.Candidates(argspec['choices'],
label='Action')
else:
return candidates.Candidates(_tui_element_names(),
label='Element')
# Lazily load element names from tui module to avoid importing TUI stuff if possible
@functools.lru_cache()
def _tui_element_names():
from ...tui import tuiobjects
return tuple(str(name) for name in sorted(tuiobjects.widgets.names_recursive))
| gpl-3.0 | -9,125,426,363,984,953,000 | 38.994728 | 111 | 0.546447 | false |
nicko7i/vcnc | api-python/velstor/vclc/__main__.py | 1 | 3489 | #!python3.5
# For command aliases prior to 3.2 - https://bugs.python.org/issue25299
#
# https://pythonconquerstheuniverse.wordpress.com/2011/08/29/lambda_tutorial/
from __future__ import print_function
import sys
import re
import json
import requests
import errno
from velstor.restapi import Session
from functools import partial
from velstor.vclc.vclc_parser import vclc_parser
from velstor.vclc.handler import Handler
from velstor.vclc.handler import error_response
from velstor.vclc.vClcException import vClcException
print_error = partial(print, file=sys.stderr)
#
# Yeah, yeah, globals are bad form...
#
quiet = False
def main(args=None):
"""The main routine."""
if args is None:
args = sys.argv[1:]
with Session() as session:
handler = Handler(session)
parser = vclc_parser(handler)
#
try:
global quiet
results = parser.parse_args(args, handler)
quiet = results.quiet
return results.action()
except requests.exceptions.RequestException as e:
#
# Requests raised an exception. Probably couldn't reach the vCNC
            # server. There is no HTTP code for this error, so we adopt 504,
# which is similar.
#
# Yes, it would have been cooler to have done this with a single
# RE.
#
details = str(e)
match_host = re.search("host='(\S+)'", details)
match_port = re.search("port=(\d+)", details)
match_error = re.search('NewConnectionError', details)
suffix = '.'
#
# If the server happens to match the vCNC server's default value,
# then add the additional suggestion to check configuration.
#
if match_host and match_port and match_error:
host = match_host.group(1)
port = match_port.group(1)
if host == 'vcnc' and port == "6130":
suffix = ''.join([
' Did you mean to set a command line switch',
' or environment variable?'])
return error_response('Could not reach vCNC server at '
+ match_host.group(1)
+ ':'
+ match_port.group(1)
+ suffix,
http_status=504,
error_sym='EHOSTDOWN')
else:
#
# We don't really know what happened. Just dump the raw data
# as the message.
#
return error_response(details)
#
#
except vClcException:
#
# Calling 'vclc' with no arguments isn't trapped as an error by
# argparse.
#
m = parser.format_usage()
m = re.sub('\n[ ]+', ' ', m)
return error_response(m, http_status=400, error_sym='EINVAL')
except SystemExit:
raise
except KeyboardInterrupt:
sys.exit(errno.EINVAL)
except BaseException:
raise
if __name__ == "__main__":
(exit_code, response) = main()
if not quiet:
print(json.dumps(response, sort_keys=True, indent=2))
sys.exit(127 if (exit_code > 127) else exit_code)
| apache-2.0 | -6,190,060,355,032,620,000 | 33.205882 | 78 | 0.524792 | false |
Rhizomatica/rccn | rccn/modules/subscription.py | 1 | 5437 | ############################################################################
#
# Copyright (C) 2014 tele <tele@rhizomatica.org>
#
# Subscription module
# This file is part of RCCN
#
# RCCN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RCCN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
############################################################################
# Python3/2 compatibility
# TODO: Remove once python2 support no longer needed.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
sys.path.append("..")
from config import *
from modules.subscriber import Subscriber, SubscriberException
from modules.sms import SMS, SMSException
class SubscriptionException(Exception):
pass
class Subscription:
def __init__(self, logger):
self.logger = logger
def subscription_info(self):
sub = Subscriber()
unpaid=self.get_unpaid_subscriptions()
print('---\n\n')
for number in unpaid:
print('PostGres: '+number[0]+':')
info=sub.print_vty_hlr_info(number)
if "No subscriber found for extension" in info:
print('OsmoHLR: '+info)
print("Checking for 5 digit extension")
info=sub.print_vty_hlr_info(number[0][-5:])
print('OsmoHLR: '+ info)
print('---\n\n')
def get_unpaid_subscriptions(self):
# get all subscribers that haven't paid yet
# Shouldn't we only do this for those who are actually authorised?
try:
cur = db_conn.cursor()
cur.execute('SELECT msisdn FROM subscribers WHERE subscription_status = 0')
count = cur.rowcount
if count > 0:
subscribers_list = cur.fetchall()
self.logger.info('Found %s subscribers with unpaid subscription to the service' % count)
db_conn.commit()
return subscribers_list
else:
db_conn.commit()
self.logger.info('PG_HLR Everyone paid, we are good to go')
except psycopg2.DatabaseError as e:
raise SubscriptionException('PG_HLR error getting subscribers subscription_status: %s' % e)
def update_subscriptions(self, status):
try:
cur = db_conn.cursor()
cur.execute('UPDATE subscribers SET subscription_status=%(status)d' % {'status': status})
count = cur.rowcount
if count > 0:
db_conn.commit()
return count
else:
self.logger.info('PG_HLR No subscribers to update status found')
except psycopg2.DatabaseError as e:
raise SubscriptionException('PG_HLR error in updating subscriptions status: %s' % e)
def deactivate_subscriptions(self, msg):
try:
sms = SMS()
sub = Subscriber()
cur = db_conn.cursor()
cur.execute('SELECT msisdn FROM subscribers WHERE subscription_status = 0 AND authorized = 1')
count = cur.rowcount
if count > 0:
self.logger.info('Found %d subscribers to be deactivated' % count)
subscribers_list = cur.fetchall()
db_conn.commit()
for mysub in subscribers_list:
self.logger.debug('Send SMS that account is deactivated to %s' % mysub[0])
sms.send(config['smsc'],mysub[0], msg)
# disable subscriber
try:
sub.authorized(mysub[0], 0)
except SubscriberException as e:
raise SubscriptionException('PG_HLR error in deactivating subscription: %s' % e)
else:
db_conn.commit()
                self.logger.info('No subscribers need to be deactivated')
except psycopg2.DatabaseError as e:
raise SubscriptionException('PG_HLR error in checking subscriptions to deactivate: %s' % e)
def send_subscription_fee_notice(self, msg):
# get all subscribers
try:
sub = Subscriber()
subscribers_list = sub.get_all()
except SubscriberException as e:
raise SubscriptionException('%s' % e)
sms = SMS()
for mysub in subscribers_list:
self.logger.debug("Send sms to %s %s" % (mysub[1], msg))
sms.send(config['smsc'],mysub[1], msg)
def send_subscription_fee_reminder(self, msg):
try:
subscribers_list = self.get_unpaid_subscriptions()
except SubscriptionException as e:
            raise SubscriptionException('ERROR in getting unpaid subscriptions: %s' % e)
sms = SMS()
for mysub in subscribers_list:
self.logger.debug("Send sms to %s %s" % (mysub[0], msg))
sms.send(config['smsc'],mysub[0], msg)
| agpl-3.0 | 3,367,383,859,288,691,700 | 37.835714 | 106 | 0.583594 | false |
akrherz/iem | htdocs/plotting/auto/scripts100/p153.py | 1 | 6880 | """Highest hourly values"""
from collections import OrderedDict
import datetime
import pandas as pd
from pandas.io.sql import read_sql
from matplotlib.font_manager import FontProperties
from pyiem.util import get_autoplot_context, get_dbconn
from pyiem.plot.use_agg import plt
from pyiem.exceptions import NoDataFound
PDICT = OrderedDict(
[
("max_dwpf", "Highest Dew Point Temperature"),
("min_dwpf", "Lowest Dew Point Temperature"),
("max_tmpf", "Highest Air Temperature"),
("min_tmpf", "Lowest Air Temperature"),
("max_feel", "Highest Feels Like Temperature"),
("min_feel", "Lowest Feels Like Temperature"),
("max_mslp", "Maximum Sea Level Pressure"),
("min_mslp", "Minimum Sea Level Pressure"),
("max_alti", "Maximum Pressure Altimeter"),
("min_alti", "Minimum Pressure Altimeter"),
]
)
UNITS = {
"max_dwpf": "F",
"max_tmpf": "F",
"min_dwpf": "F",
"min_tmpf": "F",
"min_feel": "F",
"max_feel": "F",
"max_mslp": "mb",
"min_mslp": "mb",
"max_alti": "in",
"min_alti": "in",
}
MDICT = OrderedDict(
[
("all", "No Month Limit"),
("spring", "Spring (MAM)"),
("fall", "Fall (SON)"),
("winter", "Winter (DJF)"),
("summer", "Summer (JJA)"),
("gs", "1 May to 30 Sep"),
("jan", "January"),
("feb", "February"),
("mar", "March"),
("apr", "April"),
("may", "May"),
("jun", "June"),
("jul", "July"),
("aug", "August"),
("sep", "September"),
("oct", "October"),
("nov", "November"),
("dec", "December"),
]
)
def get_description():
""" Return a dict describing how to call this plotter """
desc = dict()
desc["data"] = True
desc[
"description"
] = """This table presents the extreme hourly value of
some variable of your choice based on available observations maintained
by the IEM. Sadly, this app will likely point out some bad data points
as such points tend to be obvious at extremes. If you contact us to
point out troubles, we'll certainly attempt to fix the archive to
    remove the bad data points. Observations are arbitrarily bumped 10
    minutes into the future to place the near-top-of-the-hour obs on
that hour. For example, a 9:53 AM observation becomes the ob for 10 AM.
"""
desc["arguments"] = [
dict(
type="zstation",
name="zstation",
default="AMW",
network="IA_ASOS",
label="Select Station:",
),
dict(
type="select",
name="month",
default="all",
options=MDICT,
label="Select Month/Season/All",
),
dict(
type="select",
name="var",
options=PDICT,
default="max_dwpf",
label="Which Variable to Plot",
),
]
return desc
def plotter(fdict):
""" Go """
font0 = FontProperties()
font0.set_family("monospace")
font0.set_size(16)
font1 = FontProperties()
font1.set_size(16)
pgconn = get_dbconn("asos")
ctx = get_autoplot_context(fdict, get_description())
varname = ctx["var"]
varname2 = varname.split("_")[1]
if varname2 in ["dwpf", "tmpf", "feel"]:
varname2 = "i" + varname2
month = ctx["month"]
station = ctx["zstation"]
if month == "all":
months = range(1, 13)
elif month == "fall":
months = [9, 10, 11]
elif month == "winter":
months = [12, 1, 2]
elif month == "spring":
months = [3, 4, 5]
elif month == "summer":
months = [6, 7, 8]
elif month == "gs":
months = [5, 6, 7, 8, 9]
else:
ts = datetime.datetime.strptime("2000-" + month + "-01", "%Y-%b-%d")
        # tuple(months) below works even when only a single month is selected
months = [ts.month]
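    # Observations are pulled with a 10 minute forward offset so that e.g. a
    # :53 ob counts toward the following hour (see the description text above).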
df = read_sql(
f"""
WITH obs as (
SELECT (valid + '10 minutes'::interval) at time zone %s as ts,
tmpf::int as itmpf, dwpf::int as idwpf,
feel::int as ifeel, mslp, alti from alldata
where station = %s and
extract(month from valid at time zone %s) in %s),
agg1 as (
SELECT extract(hour from ts) as hr,
max(idwpf) as max_dwpf,
max(itmpf) as max_tmpf,
min(idwpf) as min_dwpf,
min(itmpf) as min_tmpf,
min(ifeel) as min_feel,
max(ifeel) as max_feel,
max(alti) as max_alti,
min(alti) as min_alti,
max(mslp) as max_mslp,
min(mslp) as min_mslp
from obs GROUP by hr)
SELECT o.ts, a.hr::int as hr,
a.{varname} from agg1 a JOIN obs o on
(a.hr = extract(hour from o.ts)
and a.{varname} = o.{varname2})
ORDER by a.hr ASC, o.ts DESC
""",
pgconn,
params=(
ctx["_nt"].sts[station]["tzname"],
station,
ctx["_nt"].sts[station]["tzname"],
tuple(months),
),
index_col=None,
)
if df.empty:
raise NoDataFound("No Data was found.")
y0 = 0.1
yheight = 0.8
dy = yheight / 24.0
(fig, ax) = plt.subplots(1, 1, figsize=(8, 8))
ax.set_position([0.12, y0, 0.57, yheight])
ax.barh(df["hr"], df[varname], align="center")
ax.set_ylim(-0.5, 23.5)
ax.set_yticks([0, 4, 8, 12, 16, 20])
ax.set_yticklabels(["Mid", "4 AM", "8 AM", "Noon", "4 PM", "8 PM"])
ax.grid(True)
ax.set_xlim([df[varname].min() - 5, df[varname].max() + 5])
ax.set_ylabel(
"Local Time %s" % (ctx["_nt"].sts[station]["tzname"],),
fontproperties=font1,
)
ab = ctx["_nt"].sts[station]["archive_begin"]
if ab is None:
raise NoDataFound("Unknown station metadata")
fig.text(
0.5,
0.93,
("%s [%s] %s-%s\n" "%s [%s]")
% (
ctx["_nt"].sts[station]["name"],
station,
ab.year,
datetime.date.today().year,
PDICT[varname],
MDICT[month],
),
ha="center",
fontproperties=font1,
)
ypos = y0 + (dy / 2.0)
for hr in range(24):
sdf = df[df["hr"] == hr]
if sdf.empty:
continue
row = sdf.iloc[0]
fig.text(
0.7,
ypos,
"%3.0f: %s%s"
% (
row[varname],
pd.Timestamp(row["ts"]).strftime("%d %b %Y"),
("*" if len(sdf.index) > 1 else ""),
),
fontproperties=font0,
va="center",
)
ypos += dy
ax.set_xlabel(
"%s %s, * denotes ties" % (PDICT[varname], UNITS[varname]),
fontproperties=font1,
)
return plt.gcf(), df
if __name__ == "__main__":
plotter(dict())
| mit | -6,262,800,647,354,188,000 | 28.029536 | 76 | 0.5125 | false |
munin/munin | munin/mod/launch.py | 1 | 2955 | """
Loadable.Loadable subclass
"""
# This file is part of Munin.
# Munin is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# Munin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Munin; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# This work is Copyright (C)2006 by Andreas Jacobsen
# Individual portions may be copyright by individual contributors, and
# are included in this collective work with permission of the copyright
# owners.
import re
import datetime
from munin import loadable
class launch(loadable.loadable):
def __init__(self, cursor):
super().__init__(cursor, 1)
self.paramre = re.compile(r"^\s*(\S+|\d+)\s+(\d+)")
self.usage = self.__class__.__name__ + " <class|eta> <land_tick>"
self.helptext = [
"Calculate launch tick, launch time, prelaunch tick and prelaunch modifier for a given ship class or eta, and land tick."
]
self.class_eta = {"fi": 8, "co": 8, "fr": 9, "de": 9, "cr": 10, "bs": 10}
def execute(self, user, access, irc_msg):
if access < self.level:
irc_msg.reply("You do not have enough access to use this command")
return 0
m = self.paramre.search(irc_msg.command_parameters)
if not m:
irc_msg.reply("Usage: %s" % (self.usage,))
return 0
eta = m.group(1)
land_tick = int(m.group(2))
if eta.lower() in list(self.class_eta.keys()):
eta = self.class_eta[eta.lower()]
else:
try:
eta = int(eta)
except ValueError:
irc_msg.reply("Usage: %s" % (self.usage,))
return 0
current_tick = self.current_tick(irc_msg.round)
current_time = datetime.datetime.utcnow()
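        # One game tick corresponds to one hour, so the wall-clock launch time
        # is the current time plus the number of ticks until launch.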
launch_tick = land_tick - eta
launch_time = current_time + datetime.timedelta(
hours=(launch_tick - current_tick)
)
prelaunch_tick = land_tick - eta + 1
prelaunch_mod = launch_tick - current_tick
irc_msg.reply(
"eta %d landing pt %d (currently %d) must launch at pt %d (%s), or with prelaunch tick %d (currently %+d)"
% (
eta,
land_tick,
current_tick,
launch_tick,
(launch_time.strftime("%m-%d %H:55")),
prelaunch_tick,
prelaunch_mod,
)
)
return 1
| gpl-2.0 | 4,536,700,690,433,954,300 | 31.833333 | 133 | 0.59357 | false |
donspaulding/adspygoogle | examples/adspygoogle/adwords/v201302/advanced_operations/add_ad_group_bid_modifier.py | 1 | 2456 | #!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adds an ad group level mobile bid modifier override for a campaign.
To get your ad groups, run get_ad_groups.py.
Tags: AdGroupBidModifierService.mutate
"""
__author__ = 'api.jdilallo@gmail.com (Joseph DiLallo)'
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import AdWordsClient
AD_GROUP_ID = 'INSERT_AD_GROUP_ID_HERE'
BID_MODIFIER = 'INSERT_BID_MODIFIER_HERE'
def main(client, ad_group_id, bid_modifier):
# Initialize appropriate service.
ad_group_bid_modifier_service = client.GetAdGroupBidModifierService(
version='v201302')
# Mobile criterion ID.
criterion_id = '30001'
# Prepare to add an ad group level override.
operation = {
# Use 'ADD' to add a new modifier and 'SET' to update an existing one. A
# modifier can be removed with the 'REMOVE' operator.
'operator': 'ADD',
'operand': {
'adGroupId': ad_group_id,
'criterion': {
'xsi_type': 'Platform',
'id': criterion_id
},
'bidModifier': bid_modifier
}
}
# Add ad group level mobile bid modifier.
response = ad_group_bid_modifier_service.mutate([operation])[0]
if response and response['value']:
modifier = response['value'][0]
value = modifier.get('bidModifier') or 'unset'
print ('Campaign ID %s, AdGroup ID %s, Criterion ID %s was updated with '
'ad group level modifier: %s' %
(modifier['campaignId'], modifier['adGroupId'],
modifier['criterion']['id'], value))
else:
print 'No modifiers were added.'
if __name__ == '__main__':
# Initialize client object.
client_ = AdWordsClient(path=os.path.join('..', '..', '..', '..', '..'))
main(client_, AD_GROUP_ID, BID_MODIFIER)
| apache-2.0 | -2,593,052,247,740,357,000 | 30.487179 | 78 | 0.660831 | false |
bcoding/django-docker-hostmanager | docker_hostmanager/settings.py | 1 | 2000 | import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = os.environ.get('SECRET_KEY', 'sj0q9n1_h=b8my#6-n^r=l5=hgekx4gwrl1nmaoox^-_%6=%qj')
DEBUG = True
if not DEBUG and 'SECRET_KEY' not in os.environ:
    raise Exception('production environments must have their own SECRET_KEY')
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'docker_hostmanager.api',
'docker_hostmanager.rest',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'docker_hostmanager.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'docker_hostmanager.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'hostmanager.sqlite3'),
}
}
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
STATIC_ROOT = '.'
| unlicense | 7,148,998,048,566,972,000 | 25.315789 | 95 | 0.6675 | false |
ModernMT/MMT | cli/translate.py | 1 | 7911 | import argparse
import os
import sys
import tempfile
from cli import ensure_node_running, ensure_node_has_api, CLIArgsException
from cli.mmt.engine import EngineNode, Engine
from cli.mmt.fileformats import XLIFFFileFormat
from cli.mmt.translation import ModernMTTranslate, EchoTranslate, ModernMTEnterpriseTranslate
class Translator(object):
def __init__(self, engine):
self._engine = engine
def run(self, in_stream, out_stream, threads=None, suppress_errors=False):
raise NotImplementedError
class XLIFFTranslator(Translator):
def __init__(self, engine):
Translator.__init__(self, engine)
def run(self, in_stream, out_stream, threads=None, suppress_errors=False):
temp_file = None
try:
with tempfile.NamedTemporaryFile('w', encoding='utf-8', delete=False) as temp_stream:
temp_file = temp_stream.name
temp_stream.write(in_stream.read())
xliff = XLIFFFileFormat(temp_file, self._engine.target_lang)
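            # Stream source segments out of the XLIFF file and write the
            # translations back through the writer as they arrive.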
def generator():
with xliff.reader() as reader:
for src_line, _ in reader:
yield src_line
with xliff.writer() as writer:
self._engine.translate_batch(generator(), lambda r: writer.write(None, r),
threads=threads, suppress_errors=suppress_errors)
with open(temp_file, 'r', encoding='utf-8') as result:
out_stream.write(result.read())
finally:
if temp_file is not None and os.path.exists(temp_file):
os.remove(temp_file)
class BatchTranslator(Translator):
def __init__(self, engine):
Translator.__init__(self, engine)
def run(self, in_stream, out_stream, threads=None, suppress_errors=False):
self._engine.translate_stream(in_stream, out_stream, threads=threads, suppress_errors=suppress_errors)
class InteractiveTranslator(Translator):
def __init__(self, engine):
Translator.__init__(self, engine)
print('\nModernMT Translate command line')
if isinstance(engine, ModernMTTranslate) and engine.context_vector:
print('>> Context:', ', '.join(
['%s %.1f%%' % (self._memory_to_string(score['memory']), score['score'] * 100)
for score in engine.context_vector]))
else:
print('>> No context provided.')
print(flush=True)
@staticmethod
def _memory_to_string(memory):
if isinstance(memory, int):
return '[' + str(memory) + ']'
else:
return memory['name']
def run(self, in_stream, out_stream, threads=None, suppress_errors=False):
try:
while 1:
out_stream.write('> ')
line = in_stream.readline()
if not line:
break
line = line.strip()
if len(line) == 0:
continue
translation = self._engine.translate_text(line)
out_stream.write(translation)
out_stream.write('\n')
out_stream.flush()
except KeyboardInterrupt:
pass
def parse_args(argv=None):
parser = argparse.ArgumentParser(description='Translate text with ModernMT', prog='mmt translate')
parser.add_argument('text', metavar='TEXT', help='text to be translated (optional)', default=None, nargs='?')
parser.add_argument('-s', '--source', dest='source_lang', metavar='SOURCE_LANGUAGE', default=None,
help='the source language (ISO 639-1). Can be omitted if engine is monolingual.')
parser.add_argument('-t', '--target', dest='target_lang', metavar='TARGET_LANGUAGE', default=None,
help='the target language (ISO 639-1). Can be omitted if engine is monolingual.')
# Context arguments
parser.add_argument('--context', metavar='CONTEXT', dest='context',
help='A string to be used as translation context')
parser.add_argument('--context-file', metavar='CONTEXT_FILE', dest='context_file',
help='A local file to be used as translation context')
parser.add_argument('--context-vector', metavar='CONTEXT_VECTOR', dest='context_vector',
help='The context vector with format: <document 1>:<score 1>[,<document N>:<score N>]')
# Mixed arguments
parser.add_argument('-e', '--engine', dest='engine', help='the engine name, \'default\' will be used if absent',
default='default')
parser.add_argument('--batch', action='store_true', dest='batch', default=False,
                        help='if set, the script will read the whole stdin before sending translations to MMT. '
                             'This can be used to execute translations in parallel for faster processing.')
parser.add_argument('--threads', dest='threads', default=None, type=int,
help='number of concurrent translation requests.')
parser.add_argument('--xliff', dest='is_xliff', action='store_true', default=False,
help='if set, the input is a XLIFF file.')
parser.add_argument('--split-lines', dest='split_lines', action='store_true', default=False,
help='if set, ModernMT will split input text by carriage-return char')
parser.add_argument('--quiet', dest='quiet', action='store_true', default=False,
help='if set, translation errors are suppressed and an empty translation is returned instead')
parser.add_argument('--echo', dest='echo', action='store_true', default=False,
help='if set, outputs a fake translation coming from an echo server. '
'This is useful if you want to test input format validity before '
'running the actual translation.')
parser.add_argument('--api-key', dest='api_key', default=None, help='Use ModernMT Enterprise service instead of '
'local engine using the provided API Key')
args = parser.parse_args(argv)
engine = Engine(args.engine)
if args.source_lang is None or args.target_lang is None:
if len(engine.languages) > 1:
raise CLIArgsException(parser,
'Missing language. Options "-s" and "-t" are mandatory for multilingual engines.')
args.source_lang, args.target_lang = engine.languages[0]
    return args
def main(argv=None):
args = parse_args(argv)
if args.echo:
engine = EchoTranslate(args.source_lang, args.target_lang)
elif args.api_key is not None:
engine = ModernMTEnterpriseTranslate(args.source_lang, args.target_lang, args.api_key,
context_vector=args.context_vector)
else: # local ModernMT engine
node = EngineNode(Engine(args.engine))
ensure_node_running(node)
ensure_node_has_api(node)
engine = ModernMTTranslate(node, args.source_lang, args.target_lang, context_string=args.context,
context_file=args.context_file, context_vector=args.context_vector,
split_lines=args.split_lines)
if args.text is not None:
print(engine.translate_text(args.text.strip()))
else:
if args.is_xliff:
translator = XLIFFTranslator(engine)
elif args.batch:
translator = BatchTranslator(engine)
else:
translator = InteractiveTranslator(engine)
try:
translator.run(sys.stdin, sys.stdout, threads=args.threads, suppress_errors=args.quiet)
except KeyboardInterrupt:
pass # exit
| apache-2.0 | -9,217,007,159,111,926,000 | 42.95 | 118 | 0.597902 | false |
smarinov/playground | aoc2017/spinlock.py | 1 | 2078 | """Advent of Code 2017, Day 17: Spinlock"""
import unittest
def get_num_after_zero(no_steps: int, last_number: int) -> int:
"""Quickly iterate as the spinlock and return the number right of zero.
Args:
no_steps(int): The number of steps of the pinlock after each insert.
last_number(int): The last number the spinlock wants to insert.
Returns:
int. The number located to the right of the number 0 after last insert.
"""
pos, ret = 0, 0
for number in range(1, last_number + 1):
pos = (pos + no_steps) % number + 1
if pos == 1:
ret = number
return ret
def get_num_after_last_inserted(no_steps: int, last_number: int) -> int:
"""Slowly iterate as the spinlock and return the number after last insert.
Args:
no_steps(int): The number of steps of the pinlock after each insert.
last_number(int): The last number the spinlock wants to insert.
Returns:
int. The number located to the right of the last inserted number.
"""
buff = [0]
pos = 0
for number in range(1, last_number + 1):
pos = (pos + no_steps) % len(buff) + 1
buff.insert(pos, number)
return buff[(pos + 1) % len(buff)]
class TestSpinlock(unittest.TestCase):
"""Tests the functions simulating the behaviour of the spinlock."""
def test_task_description(self):
"""Tests the solution over the sample tests in problem statement."""
self.assertEqual(get_num_after_last_inserted(3, 9), 5)
self.assertEqual(get_num_after_last_inserted(3, 2017), 638)
self.assertEqual(get_num_after_zero(3, 9), 9)
def test_additional(self):
"""Tests the solution over the additional tests for this task."""
self.assertEqual(get_num_after_zero(3, 2017), 1226)
self.assertEqual(get_num_after_zero(3, int(50e6)), 1222153)
self.assertEqual(get_num_after_last_inserted(367, 2017), 1487)
self.assertEqual(get_num_after_zero(367, int(50e6)), 25674054)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -7,848,424,267,357,272,000 | 31.46875 | 79 | 0.637151 | false |
mdraeger/gmapcatcher | gmapcatcher/widgets/widComboBoxEntry.py | 1 | 4319 | # -*- coding: utf-8 -*-
## @package gmapcatcher.widgets.widComboBoxEntry
# ComboBoxEntry widget used to collect data to search
import gtk
import re
from gmapcatcher.mapConst import *
## This widget is where we collect data to search
class ComboBoxEntry(gtk.ComboBoxEntry):
DEFAULT_TEXT = "Enter location here!"
def __init__(self, confirm_clicked, conf):
super(ComboBoxEntry, self).__init__()
self.connect('changed', self.changed_combo, confirm_clicked)
self.connect('key-press-event', self.key_press_combo)
# Launch clean_entry for all the signals/events below
self.child.connect("button-press-event", self.clean_entry)
self.child.connect("cut-clipboard", self.clean_entry)
self.child.connect("copy-clipboard", self.clean_entry)
self.child.connect("paste-clipboard", self.clean_entry)
self.child.connect("move-cursor", self.clean_entry)
self.child.connect("populate-popup", self.populate_popup, conf)
# Launch the default_entry on the focus out
self.child.connect("focus-out-event", self.default_entry)
# Start search after hit 'ENTER'
self.child.connect('activate', confirm_clicked)
## Clean out the entry box if text = default
def clean_entry(self, *args):
if (self.child.get_text() == self.DEFAULT_TEXT):
self.child.set_text("")
self.child.grab_focus()
## Reset the default text if entry is empty
def default_entry(self, *args):
if (self.child.get_text().strip() == ''):
self.child.set_text(self.DEFAULT_TEXT)
## Add a new item to the menu of the EntryBox
def populate_popup(self, w, menu, conf):
def menuitem_response(w, string, conf):
conf.match_func = string
subMenu = gtk.Menu()
for item in ENTRY_SUB_MENU:
iMenuItem = gtk.RadioMenuItem(None, item)
iMenuItem.set_active(item == conf.match_func)
iMenuItem.connect("activate", menuitem_response, item, conf)
subMenu.append(iMenuItem)
menuItem = gtk.MenuItem()
menu.append(menuItem)
menuItem = gtk.MenuItem('Auto-Completion Method')
menuItem.set_submenu(subMenu)
menu.append(menuItem)
menu.show_all()
## Show the combo list if is not empty
def combo_popup(self):
if self.get_model().get_iter_root() is not None:
self.popup()
## Handles the pressing of arrow keys
def key_press_combo(self, w, event):
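        # Keyvals 65362 and 65364 are the GDK codes for the Up and Down arrow keys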
if event.keyval in [65362, 65364]:
self.combo_popup()
return True
## Handles the change event of the ComboBox
def changed_combo(self, w, confirm_clicked):
str = self.child.get_text()
if (str.endswith(SEPARATOR)):
self.child.set_text(str.strip())
confirm_clicked(None)
## Set the auto-completion for the entry box
def set_completion(self, ctx_map, confirm_clicked, conf):
completion = gtk.EntryCompletion()
completion.connect('match-selected', self.on_completion_match, confirm_clicked)
self.child.set_completion(completion)
completion.set_model(ctx_map.completion_model())
completion.set_text_column(0)
completion.set_minimum_key_length(3)
completion.set_match_func(self.match_func, conf)
# Populate the dropdownlist
self.set_model(ctx_map.completion_model(SEPARATOR))
self.set_text_column(0)
## Automatically display after selecting
def on_completion_match(self, completion, model, iter, confirm_clicked):
self.child.set_text(model[iter][0])
confirm_clicked(None)
## Match function for the auto-completion
def match_func(self, completion, key, iter, conf):
model = completion.get_model()
key = key.lower()
text = model.get_value(iter, 0).lower()
if conf.match_func == ENTRY_SUB_MENU[STARTS_WITH]:
return text.startswith(key)
elif conf.match_func == ENTRY_SUB_MENU[ENDS_WITH]:
return text.endswith(key)
elif conf.match_func == ENTRY_SUB_MENU[REGULAR_EXPRESSION]:
p = re.compile(key, re.IGNORECASE)
return (p.search(text) is not None)
else:
return (text.find(key) != -1)
| gpl-2.0 | -1,651,217,596,343,355,100 | 38.623853 | 87 | 0.634869 | false |
corpnewt/CorpBot.py | Cogs/Xp.py | 1 | 41529 | import asyncio
import discord
import datetime
import time
import random
from discord.ext import commands
from Cogs import Settings, DisplayName, Nullify, CheckRoles, UserTime, Message, PickList
def setup(bot):
# Add the bot and deps
settings = bot.get_cog("Settings")
bot.add_cog(Xp(bot, settings))
# This is the xp module. It's likely to be retarded.
class Xp(commands.Cog):
# Init with the bot reference, and a reference to the settings var
def __init__(self, bot, settings):
self.bot = bot
self.settings = settings
self.is_current = False # Used for stopping loops
global Utils, DisplayName
Utils = self.bot.get_cog("Utils")
DisplayName = self.bot.get_cog("DisplayName")
def _can_xp(self, user, server, requiredXP = None, promoArray = None):
# Checks whether or not said user has access to the xp system
if requiredXP == None:
requiredXP = self.settings.getServerStat(server, "RequiredXPRole", None)
if promoArray == None:
promoArray = self.settings.getServerStat(server, "PromotionArray", [])
if not requiredXP:
return True
for checkRole in user.roles:
if str(checkRole.id) == str(requiredXP):
return True
# Still check if we have enough xp
userXP = self.settings.getUserStat(user, server, "XP")
for role in promoArray:
if str(role["ID"]) == str(requiredXP):
if userXP >= role["XP"]:
return True
break
return False
# Proof of concept stuff for reloading cog/extension
def _is_submodule(self, parent, child):
return parent == child or child.startswith(parent + ".")
@commands.Cog.listener()
async def on_unloaded_extension(self, ext):
# Called to shut things down
if not self._is_submodule(ext.__name__, self.__module__):
return
self.is_current = False
@commands.Cog.listener()
async def on_loaded_extension(self, ext):
# See if we were loaded
if not self._is_submodule(ext.__name__, self.__module__):
return
self.is_current = True
self.bot.loop.create_task(self.addXP())
async def addXP(self):
print("Starting XP loop: {}".format(datetime.datetime.now().time().isoformat()))
await self.bot.wait_until_ready()
while not self.bot.is_closed():
try:
await asyncio.sleep(600) # runs only every 10 minutes (600 seconds)
if not self.is_current:
# Bail if we're not the current instance
return
updates = await self.bot.loop.run_in_executor(None, self.update_xp)
t = time.time()
for update in updates:
await CheckRoles.checkroles(update["user"], update["chan"], self.settings, self.bot, **update["kwargs"])
# Sleep after for testing
except Exception as e:
print(str(e))
def update_xp(self):
responses = []
t = time.time()
print("Adding XP: {}".format(datetime.datetime.now().time().isoformat()))
# Get some values that don't require immediate query
server_dict = {}
for x in self.bot.get_all_members():
memlist = server_dict.get(str(x.guild.id), [])
memlist.append(x)
server_dict[str(x.guild.id)] = memlist
for server_id in server_dict:
server = self.bot.get_guild(int(server_id))
if not server:
continue
# Iterate through the servers and add them
xpAmount = int(self.settings.getServerStat(server, "HourlyXP"))
xpAmount = float(xpAmount/6)
xpRAmount = int(self.settings.getServerStat(server, "HourlyXPReal"))
xpRAmount = float(xpRAmount/6)
xpLimit = self.settings.getServerStat(server, "XPLimit")
xprLimit = self.settings.getServerStat(server, "XPReserveLimit")
onlyOnline = self.settings.getServerStat(server, "RequireOnline")
requiredXP = self.settings.getServerStat(server, "RequiredXPRole")
promoArray = self.settings.getServerStat(server, "PromotionArray")
xpblock = self.settings.getServerStat(server, "XpBlockArray")
targetChanID = self.settings.getServerStat(server, "DefaultChannel")
kwargs = {
"xp_promote":self.settings.getServerStat(server,"XPPromote"),
"xp_demote":self.settings.getServerStat(server,"XPDemote"),
"suppress_promotions":self.settings.getServerStat(server,"SuppressPromotions"),
"suppress_demotions":self.settings.getServerStat(server,"SuppressDemotions"),
"only_one_role":self.settings.getServerStat(server,"OnlyOneRole")
}
for user in server_dict[server_id]:
# First see if we're current - we want to bail quickly
if not self.is_current:
print("XP Interrupted, no longer current - took {} seconds.".format(time.time() - t))
return responses
if not self._can_xp(user, server, requiredXP, promoArray):
continue
bumpXP = False
if onlyOnline == False:
bumpXP = True
else:
if user.status == discord.Status.online:
bumpXP = True
# Check if we're blocked
if user.id in xpblock:
# No xp for you
continue
for role in user.roles:
if role.id in xpblock:
bumpXP = False
break
if bumpXP:
if xpAmount > 0:
# User is online add hourly xp reserve
# First we check if we'll hit our limit
skip = False
if not xprLimit == None:
# Get the current values
newxp = self.settings.getUserStat(user, server, "XPReserve")
# Make sure it's this xpr boost that's pushing us over
# This would only push us up to the max, but not remove
# any we've already gotten
if newxp + xpAmount > xprLimit:
skip = True
if newxp < xprLimit:
self.settings.setUserStat(user, server, "XPReserve", xprLimit)
if not skip:
xpLeftover = self.settings.getUserStat(user, server, "XPLeftover")
if xpLeftover == None:
xpLeftover = 0
else:
xpLeftover = float(xpLeftover)
gainedXp = xpLeftover+xpAmount
gainedXpInt = int(gainedXp) # Strips the decimal point off
xpLeftover = float(gainedXp-gainedXpInt) # Gets the < 1 value
self.settings.setUserStat(user, server, "XPLeftover", xpLeftover)
self.settings.incrementStat(user, server, "XPReserve", gainedXpInt)
if xpRAmount > 0:
# User is online add hourly xp
# First we check if we'll hit our limit
skip = False
if not xpLimit == None:
# Get the current values
newxp = self.settings.getUserStat(user, server, "XP")
# Make sure it's this xpr boost that's pushing us over
# This would only push us up to the max, but not remove
# any we've already gotten
if newxp + xpRAmount > xpLimit:
skip = True
if newxp < xpLimit:
self.settings.setUserStat(user, server, "XP", xpLimit)
if not skip:
xpRLeftover = self.settings.getUserStat(user, server, "XPRealLeftover")
if xpRLeftover == None:
xpRLeftover = 0
else:
xpRLeftover = float(xpRLeftover)
gainedXpR = xpRLeftover+xpRAmount
gainedXpRInt = int(gainedXpR) # Strips the decimal point off
xpRLeftover = float(gainedXpR-gainedXpRInt) # Gets the < 1 value
self.settings.setUserStat(user, server, "XPRealLeftover", xpRLeftover)
self.settings.incrementStat(user, server, "XP", gainedXpRInt)
# Check our default channels
targetChan = None
if len(str(targetChanID)):
# We *should* have a channel
tChan = self.bot.get_channel(int(targetChanID))
if tChan:
# We *do* have one
targetChan = tChan
responses.append({"user":user, "chan":targetChan if targetChan else self.bot.get_guild(int(server_id)), "kwargs":kwargs})
print("XP Done - took {} seconds.".format(time.time() - t))
return responses
@commands.command(pass_context=True)
async def xp(self, ctx, *, member = None, xpAmount : int = None):
"""Gift xp to other members."""
author = ctx.message.author
server = ctx.message.guild
channel = ctx.message.channel
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(server, "SuppressMentions"):
suppress = True
else:
suppress = False
usage = 'Usage: `{}xp [role/member] [amount]`'.format(ctx.prefix)
isRole = False
if member == None:
await ctx.message.channel.send(usage)
return
# Check for formatting issues
if xpAmount == None:
# Either xp wasn't set - or it's the last section
if type(member) is str:
                # It's a string - the hope continues
roleCheck = DisplayName.checkRoleForInt(member, server)
if not roleCheck:
# Returned nothing - means there isn't even an int
msg = 'I couldn\'t find *{}* on the server.'.format(Nullify.escape_all(member))
await ctx.message.channel.send(msg)
return
if roleCheck["Role"]:
isRole = True
member = roleCheck["Role"]
xpAmount = roleCheck["Int"]
else:
# Role is invalid - check for member instead
nameCheck = DisplayName.checkNameForInt(member, server)
if not nameCheck:
await ctx.message.channel.send(usage)
return
if not nameCheck["Member"]:
msg = 'I couldn\'t find *{}* on the server.'.format(Nullify.escape_all(member))
await ctx.message.channel.send(msg)
return
member = nameCheck["Member"]
xpAmount = nameCheck["Int"]
if xpAmount == None:
# Still no xp - let's run stats instead
if isRole:
await ctx.message.channel.send(usage)
else:
await ctx.invoke(self.stats, member=member)
return
if not type(xpAmount) is int:
await ctx.message.channel.send(usage)
return
# Get our user/server stats
isAdmin = author.permissions_in(channel).administrator
checkAdmin = self.settings.getServerStat(ctx.message.guild, "AdminArray")
# Check for bot admin
isBotAdmin = False
for role in ctx.message.author.roles:
for aRole in checkAdmin:
# Get the role that corresponds to the id
if str(aRole['ID']) == str(role.id):
isBotAdmin = True
break
botAdminAsAdmin = self.settings.getServerStat(server, "BotAdminAsAdmin")
adminUnlim = self.settings.getServerStat(server, "AdminUnlimited")
reserveXP = self.settings.getUserStat(author, server, "XPReserve")
requiredXP = self.settings.getServerStat(server, "RequiredXPRole")
xpblock = self.settings.getServerStat(server, "XpBlockArray")
approve = True
decrement = True
admin_override = False
# RequiredXPRole
if not self._can_xp(author, server):
approve = False
msg = 'You don\'t have the permissions to give xp.'
if xpAmount > int(reserveXP):
approve = False
msg = 'You can\'t give *{:,} xp*, you only have *{:,}!*'.format(xpAmount, reserveXP)
if author == member:
approve = False
msg = 'You can\'t give yourself xp! *Nice try...*'
if xpAmount < 0:
msg = 'Only admins can take away xp!'
approve = False
# Avoid admins gaining xp
decrement = False
if xpAmount == 0:
msg = 'Wow, very generous of you...'
approve = False
# Check bot admin
if isBotAdmin and botAdminAsAdmin:
# Approve as admin
approve = True
admin_override = True
if adminUnlim:
# No limit
decrement = False
else:
if xpAmount < 0:
# Don't decrement if negative
decrement = False
if xpAmount > int(reserveXP):
# Don't approve if we don't have enough
msg = 'You can\'t give *{:,} xp*, you only have *{:,}!*'.format(xpAmount, reserveXP)
approve = False
# Check admin last - so it overrides anything else
if isAdmin:
# No limit - approve
approve = True
admin_override = True
if adminUnlim:
# No limit
decrement = False
else:
if xpAmount < 0:
# Don't decrement if negative
decrement = False
if xpAmount > int(reserveXP):
# Don't approve if we don't have enough
msg = 'You can\'t give *{:,} xp*, you only have *{:,}!*'.format(xpAmount, reserveXP)
approve = False
# Check author and target for blocks
# overrides admin because admins set this.
if type(member) is discord.Role:
if member.id in xpblock:
msg = "That role cannot receive xp!"
approve = False
else:
# User
if member.id in xpblock:
msg = "That member cannot receive xp!"
approve = False
else:
for role in member.roles:
if role.id in xpblock:
msg = "That member's role cannot receive xp!"
approve = False
if ctx.author.id in xpblock:
msg = "You can't give xp!"
approve = False
else:
for role in ctx.author.roles:
if role.id in xpblock:
msg = "Your role cannot give xp!"
approve = False
if approve:
self.bot.dispatch("xp", member, ctx.author, xpAmount)
if isRole:
# XP was approved - let's iterate through the users of that role,
# starting with the lowest xp
#
# Work through our members
memberList = []
sMemberList = self.settings.getServerStat(server, "Members")
for amem in server.members:
if amem == author:
continue
if amem.id in xpblock:
# Blocked - only if not admin sending it
continue
roles = amem.roles
if member in roles:
# This member has our role
# Add to our list
for smem in sMemberList:
# Find our server entry
if str(smem) == str(amem.id):
# Add it.
sMemberList[smem]["ID"] = smem
memberList.append(sMemberList[smem])
memSorted = sorted(memberList, key=lambda x:int(x['XP']))
if len(memSorted):
# There actually ARE members in said role
totalXP = xpAmount
# Gather presets
xp_p = self.settings.getServerStat(server,"XPPromote")
xp_d = self.settings.getServerStat(server,"XPDemote")
xp_sp = self.settings.getServerStat(server,"SuppressPromotions")
xp_sd = self.settings.getServerStat(server,"SuppressDemotions")
xp_oo = self.settings.getServerStat(server,"OnlyOneRole")
if xpAmount > len(memSorted):
# More xp than members
leftover = xpAmount % len(memSorted)
eachXP = (xpAmount-leftover)/len(memSorted)
for i in range(0, len(memSorted)):
# Make sure we have anything to give
if leftover <= 0 and eachXP <= 0:
break
# Carry on with our xp distribution
cMember = DisplayName.memberForID(memSorted[i]['ID'], server)
if leftover>0:
self.settings.incrementStat(cMember, server, "XP", eachXP+1)
leftover -= 1
else:
self.settings.incrementStat(cMember, server, "XP", eachXP)
await CheckRoles.checkroles(
cMember,
channel,
self.settings,
self.bot,
xp_promote=xp_p,
xp_demote=xp_d,
suppress_promotions=xp_sp,
suppress_demotions=xp_sd,
only_one_role=xp_oo)
else:
for i in range(0, xpAmount):
cMember = DisplayName.memberForID(memSorted[i]['ID'], server)
self.settings.incrementStat(cMember, server, "XP", 1)
await CheckRoles.checkroles(
cMember,
channel,
self.settings,
self.bot,
xp_promote=xp_p,
xp_demote=xp_d,
suppress_promotions=xp_sp,
suppress_demotions=xp_sd,
only_one_role=xp_oo)
# Decrement if needed
if decrement:
self.settings.incrementStat(author, server, "XPReserve", (-1*xpAmount))
msg = '*{:,} collective xp* was given to *{}!*'.format(totalXP, Nullify.escape_all(member.name))
await channel.send(msg)
else:
msg = 'There are no eligible members in *{}!*'.format(Nullify.escape_all(member.name))
await channel.send(msg)
else:
# Decrement if needed
if decrement:
self.settings.incrementStat(author, server, "XPReserve", (-1*xpAmount))
# XP was approved! Let's say it - and check decrement from gifter's xp reserve
msg = '*{}* was given *{:,} xp!*'.format(DisplayName.name(member), xpAmount)
await channel.send(msg)
self.settings.incrementStat(member, server, "XP", xpAmount)
# Now we check for promotions
await CheckRoles.checkroles(member, channel, self.settings, self.bot)
else:
await channel.send(msg)
'''@xp.error
async def xp_error(self, ctx, error):
msg = 'xp Error: {}'.format(error)
await ctx.channel.send(msg)'''
@commands.command(pass_context=True)
async def defaultrole(self, ctx):
"""Lists the default role that new users are assigned."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions"):
suppress = True
else:
suppress = False
role = self.settings.getServerStat(ctx.message.guild, "DefaultRole")
if role == None or role == "":
msg = 'New users are not assigned a role on joining this server.'
await ctx.channel.send(msg)
else:
# Role is set - let's get its name
found = False
for arole in ctx.message.guild.roles:
if str(arole.id) == str(role):
found = True
msg = 'New users will be assigned to **{}**.'.format(Nullify.escape_all(arole.name))
if not found:
msg = 'There is no role that matches id: `{}` - consider updating this setting.'.format(role)
await ctx.message.channel.send(msg)
@commands.command(pass_context=True)
async def gamble(self, ctx, bet : int = None):
"""Gamble your xp reserves for a chance at winning xp!"""
author = ctx.message.author
server = ctx.message.guild
channel = ctx.message.channel
# bet must be a multiple of 10, member must have enough xpreserve to bet
msg = 'Usage: `{}gamble [xp reserve bet] (must be multiple of 10)`'.format(ctx.prefix)
if not (bet or type(bet) == int):
await channel.send(msg)
return
if not type(bet) == int:
await channel.send(msg)
return
isAdmin = author.permissions_in(channel).administrator
checkAdmin = self.settings.getServerStat(ctx.message.guild, "AdminArray")
# Check for bot admin
isBotAdmin = False
for role in ctx.message.author.roles:
for aRole in checkAdmin:
# Get the role that corresponds to the id
if str(aRole['ID']) == str(role.id):
isBotAdmin = True
break
botAdminAsAdmin = self.settings.getServerStat(server, "BotAdminAsAdmin")
adminUnlim = self.settings.getServerStat(server, "AdminUnlimited")
reserveXP = self.settings.getUserStat(author, server, "XPReserve")
minRole = self.settings.getServerStat(server, "MinimumXPRole")
requiredXP = self.settings.getServerStat(server, "RequiredXPRole")
xpblock = self.settings.getServerStat(server, "XpBlockArray")
approve = True
decrement = True
# Check Bet
if not bet % 10 == 0:
approve = False
msg = 'Bets must be in multiples of *10!*'
if bet > int(reserveXP):
approve = False
msg = 'You can\'t bet *{:,}*, you only have *{:,}* xp reserve!'.format(bet, reserveXP)
if bet < 0:
msg = 'You can\'t bet negative amounts!'
approve = False
if bet == 0:
msg = 'You can\'t bet *nothing!*'
approve = False
# RequiredXPRole
if not self._can_xp(author, server):
approve = False
msg = 'You don\'t have the permissions to gamble.'
# Check bot admin
if isBotAdmin and botAdminAsAdmin:
# Approve as admin
approve = True
if adminUnlim:
# No limit
decrement = False
else:
if bet < 0:
# Don't decrement if negative
decrement = False
if bet > int(reserveXP):
# Don't approve if we don't have enough
msg = 'You can\'t bet *{:,}*, you only have *{:,}* xp reserve!'.format(bet, reserveXP)
approve = False
# Check admin last - so it overrides anything else
if isAdmin:
# No limit - approve
approve = True
if adminUnlim:
# No limit
decrement = False
else:
if bet < 0:
# Don't decrement if negative
decrement = False
if bet > int(reserveXP):
# Don't approve if we don't have enough
msg = 'You can\'t bet *{:,}*, you only have *{:,}* xp reserve!'.format(bet, reserveXP)
approve = False
# Check if we're blocked
if ctx.author.id in xpblock:
msg = "You can't gamble for xp!"
approve = False
else:
for role in ctx.author.roles:
if role.id in xpblock:
msg = "Your role cannot gamble for xp!"
approve = False
if approve:
# Bet was approved - let's take the XPReserve right away
if decrement:
takeReserve = -1*bet
self.settings.incrementStat(author, server, "XPReserve", takeReserve)
# Bet more, less chance of winning, but more winnings!
if bet < 100:
betChance = 5
payout = int(bet/10)
elif bet < 500:
betChance = 15
payout = int(bet/4)
else:
betChance = 25
payout = int(bet/2)
            # 1-in-betChance chance that the user wins - the payout is 1/10, 1/4, or 1/2 of the bet depending on the tier above
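            # Added worked example (editor's note; the bets shown are hypothetical):
            #   bet=50   -> betChance=5  (1-in-5 win),  payout=5   (bet/10)
            #   bet=200  -> betChance=15 (1-in-15 win), payout=50  (bet/4)
            #   bet=1000 -> betChance=25 (1-in-25 win), payout=500 (bet/2)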
randnum = random.randint(1, betChance)
# print('{} : {}'.format(randnum, betChance))
if randnum == 1:
# YOU WON!!
self.settings.incrementStat(author, server, "XP", int(payout))
msg = '*{}* bet *{:,}* and ***WON*** *{:,} xp!*'.format(DisplayName.name(author), bet, int(payout))
# Now we check for promotions
await CheckRoles.checkroles(author, channel, self.settings, self.bot)
else:
msg = '*{}* bet *{:,}* and.... *didn\'t* win. Better luck next time!'.format(DisplayName.name(author), bet)
await ctx.message.channel.send(msg)
@commands.command(pass_context=True)
async def recheckroles(self, ctx):
"""Re-iterate through all members and assign the proper roles based on their xp (admin only)."""
author = ctx.message.author
server = ctx.message.guild
channel = ctx.message.channel
isAdmin = author.permissions_in(channel).administrator
# Only allow admins to change server stats
if not isAdmin:
await channel.send('You do not have sufficient privileges to access this command.')
return
# Gather presets
xp_p = self.settings.getServerStat(server,"XPPromote")
xp_d = self.settings.getServerStat(server,"XPDemote")
xp_sp = self.settings.getServerStat(server,"SuppressPromotions")
xp_sd = self.settings.getServerStat(server,"SuppressDemotions")
xp_oo = self.settings.getServerStat(server,"OnlyOneRole")
message = await ctx.channel.send('Checking roles...')
changeCount = 0
for member in server.members:
# Now we check for promotions
if await CheckRoles.checkroles(
member,
channel,
self.settings,
self.bot,
True,
xp_promote=xp_p,
xp_demote=xp_d,
suppress_promotions=xp_sp,
suppress_demotions=xp_sd,
only_one_role=xp_oo):
changeCount += 1
if changeCount == 1:
await message.edit(content='Done checking roles.\n\n*1 user* updated.')
#await channel.send('Done checking roles.\n\n*1 user* updated.')
else:
await message.edit(content='Done checking roles.\n\n*{:,} users* updated.'.format(changeCount))
#await channel.send('Done checking roles.\n\n*{} users* updated.'.format(changeCount))
@commands.command(pass_context=True)
async def recheckrole(self, ctx, *, user : discord.Member = None):
"""Re-iterate through all members and assign the proper roles based on their xp (admin only)."""
author = ctx.message.author
server = ctx.message.guild
channel = ctx.message.channel
isAdmin = author.permissions_in(channel).administrator
# Only allow admins to change server stats
if not isAdmin:
await channel.send('You do not have sufficient privileges to access this command.')
return
if not user:
user = author
# Now we check for promotions
if await CheckRoles.checkroles(user, channel, self.settings, self.bot):
await channel.send('Done checking roles.\n\n*{}* was updated.'.format(DisplayName.name(user)))
else:
await channel.send('Done checking roles.\n\n*{}* was not updated.'.format(DisplayName.name(user)))
@commands.command(pass_context=True)
async def listxproles(self, ctx):
"""Lists all roles, id's, and xp requirements for the xp promotion/demotion system."""
server = ctx.message.guild
channel = ctx.message.channel
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(server, "SuppressMentions"):
suppress = True
else:
suppress = False
# Get the array
promoArray = self.settings.getServerStat(server, "PromotionArray")
# Sort by XP first, then by name
# promoSorted = sorted(promoArray, key=itemgetter('XP', 'Name'))
promoSorted = sorted(promoArray, key=lambda x:int(x['XP']))
if not len(promoSorted):
roleText = "There are no roles in the xp role list. You can add some with the `{}addxprole [role] [xpamount]` command!\n".format(ctx.prefix)
else:
roleText = "**__Current Roles:__**\n\n"
for arole in promoSorted:
# Get current role name based on id
foundRole = False
for role in server.roles:
if str(role.id) == str(arole['ID']):
# We found it
foundRole = True
roleText = '{}**{}** : *{:,} XP*\n'.format(roleText, Nullify.escape_all(role.name), arole['XP'])
if not foundRole:
roleText = '{}**{}** : *{:,} XP* (removed from server)\n'.format(roleText, Nullify.escape_all(arole['Name']), arole['XP'])
# Get the required role for using the xp system
role = self.settings.getServerStat(ctx.message.guild, "RequiredXPRole")
if role == None or role == "":
roleText = '{}\n**Everyone** can give xp, gamble, and feed the bot.'.format(roleText)
else:
# Role is set - let's get its name
found = False
for arole in ctx.message.guild.roles:
if str(arole.id) == str(role):
found = True
vowels = "aeiou"
if arole.name[:1].lower() in vowels:
roleText = '{}\nYou need to be an **{}** to *give xp*, *gamble*, or *feed* the bot.'.format(roleText, Nullify.escape_all(arole.name))
else:
roleText = '{}\nYou need to be a **{}** to *give xp*, *gamble*, or *feed* the bot.'.format(roleText, Nullify.escape_all(arole.name))
# roleText = '{}\nYou need to be a/an **{}** to give xp, gamble, or feed the bot.'.format(roleText, arole.name)
if not found:
roleText = '{}\nThere is no role that matches id: `{}` for using the xp system - consider updating that setting.'.format(roleText, role)
await channel.send(roleText)
@commands.command(pass_context=True)
async def rank(self, ctx, *, member = None):
"""Say the highest rank of a listed member."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions"):
suppress = True
else:
suppress = False
if member is None:
member = ctx.message.author
if type(member) is str:
memberName = member
member = DisplayName.memberForName(memberName, ctx.message.guild)
if not member:
msg = 'I couldn\'t find *{}*...'.format(Nullify.escape_all(memberName))
await ctx.message.channel.send(msg)
return
# Create blank embed
stat_embed = discord.Embed(color=member.color)
promoArray = self.settings.getServerStat(ctx.message.guild, "PromotionArray")
# promoSorted = sorted(promoArray, key=itemgetter('XP', 'Name'))
promoSorted = sorted(promoArray, key=lambda x:int(x['XP']))
memName = member.name
# Get member's avatar url
avURL = member.avatar_url
if not len(avURL):
avURL = member.default_avatar_url
if member.nick:
# We have a nickname
# Add to embed
stat_embed.set_author(name='{}, who currently goes by {}'.format(member.name, member.nick), icon_url=avURL)
else:
# Add to embed
stat_embed.set_author(name='{}'.format(member.name), icon_url=avURL)
highestRole = ""
for role in promoSorted:
# We *can* have this role, let's see if we already do
currentRole = None
for aRole in member.roles:
# Get the role that corresponds to the id
if str(aRole.id) == str(role['ID']):
# We found it
highestRole = aRole.name
if highestRole == "":
msg = '*{}* has not acquired a rank yet.'.format(DisplayName.name(member))
# Add Rank
stat_embed.add_field(name="Current Rank", value='None acquired yet', inline=True)
else:
msg = '*{}* is a **{}**!'.format(DisplayName.name(member), highestRole)
# Add Rank
stat_embed.add_field(name="Current Rank", value=highestRole, inline=True)
# await ctx.message.channel.send(msg)
await ctx.message.channel.send(embed=stat_embed)
@rank.error
    async def rank_error(self, ctx, error):
msg = 'rank Error: {}'.format(error)
await ctx.channel.send(msg)
async def _show_xp(self, ctx, reverse=False):
# Helper to list xp
message = await Message.EmbedText(title="Counting Xp...",color=ctx.author).send(ctx)
sorted_array = sorted([(int(await self.bot.loop.run_in_executor(None, self.settings.getUserStat,x,ctx.guild,"XP",0)),x) for x in ctx.guild.members],key=lambda x:(x[0],x[1].id),reverse=reverse)
# Update the array with the user's place in the list
xp_array = [{
"name":"{}. {} ({}#{} {})".format(i,x[1].display_name,x[1].name,x[1].discriminator,x[1].id),
"value":"{:,} XP".format(x[0])
} for i,x in enumerate(sorted_array,start=1)]
return await PickList.PagePicker(
title="{} Xp-Holders in {} ({:,} total)".format("Top" if reverse else "Bottom",ctx.guild.name,len(xp_array)),
list=xp_array,
color=ctx.author,
ctx=ctx,
message=message
).pick()
    # List the top xp-holders
@commands.command(pass_context=True)
async def leaderboard(self, ctx):
"""List the top xp-holders."""
return await self._show_xp(ctx,reverse=True)
    # List the bottom xp-holders
@commands.command(pass_context=True)
async def bottomxp(self, ctx):
"""List the bottom xp-holders."""
return await self._show_xp(ctx,reverse=False)
# List the xp and xp reserve of a user
@commands.command(pass_context=True)
async def stats(self, ctx, *, member= None):
"""List the xp and xp reserve of a listed member."""
if member is None:
member = ctx.message.author
if type(member) is str:
memberName = member
member = DisplayName.memberForName(memberName, ctx.message.guild)
if not member:
msg = 'I couldn\'t find *{}*...'.format(Nullify.escape_all(memberName))
await ctx.message.channel.send(msg)
return
url = member.avatar_url
if not len(url):
url = member.default_avatar_url
# Create blank embed
stat_embed = Message.Embed(color=member.color,thumbnail=url,pm_after=20)
# Get user's xp
newStat = int(self.settings.getUserStat(member, ctx.message.guild, "XP"))
newState = int(self.settings.getUserStat(member, ctx.message.guild, "XPReserve"))
# Add XP and XP Reserve
stat_embed.add_field(name="XP", value="{:,}".format(newStat), inline=True)
stat_embed.add_field(name="XP Reserve", value="{:,}".format(newState), inline=True)
# Get member's avatar url
avURL = member.avatar_url
if not len(avURL):
avURL = member.default_avatar_url
if member.nick:
# We have a nickname
msg = "__***{},*** **who currently goes by** ***{}:***__\n\n".format(member.name, member.nick)
# Add to embed
stat_embed.author = '{}, who currently goes by {}'.format(member.name, member.nick)
else:
msg = "__***{}:***__\n\n".format(member.name)
# Add to embed
stat_embed.author = '{}'.format(member.name)
# Get localized user time
if member.joined_at != None:
local_time = UserTime.getUserTime(ctx.author, self.settings, member.joined_at)
j_time_str = "{} {}".format(local_time['time'], local_time['zone'])
# Add Joined
stat_embed.add_field(name="Joined", value=j_time_str, inline=True)
else:
stat_embed.add_field(name="Joined", value="Unknown", inline=True)
# Get user's current role
promoArray = self.settings.getServerStat(ctx.message.guild, "PromotionArray")
# promoSorted = sorted(promoArray, key=itemgetter('XP', 'Name'))
promoSorted = sorted(promoArray, key=lambda x:int(x['XP']))
highestRole = None
if len(promoSorted):
nextRole = promoSorted[0]
else:
nextRole = None
for role in promoSorted:
if int(nextRole['XP']) < newStat:
nextRole = role
# We *can* have this role, let's see if we already do
currentRole = None
for aRole in member.roles:
# Get the role that corresponds to the id
if str(aRole.id) == str(role['ID']):
# We found it
highestRole = aRole.name
if len(promoSorted) > (promoSorted.index(role)+1):
# There's more roles above this
nRoleIndex = promoSorted.index(role)+1
nextRole = promoSorted[nRoleIndex]
if highestRole:
msg = '{}**Current Rank:** *{}*\n'.format(msg, highestRole)
# Add Rank
stat_embed.add_field(name="Current Rank", value=highestRole, inline=True)
else:
if len(promoSorted):
# Need to have ranks to acquire one
msg = '{}They have not acquired a rank yet.\n'.format(msg)
# Add Rank
stat_embed.add_field(name="Current Rank", value='None acquired yet', inline=True)
if nextRole and (newStat < int(nextRole['XP'])):
# Get role
next_role = DisplayName.roleForID(int(nextRole["ID"]), ctx.guild)
if not next_role:
next_role_text = "Role ID: {} (Removed from server)".format(nextRole["ID"])
else:
next_role_text = next_role.name
msg = '{}\n*{:,}* more *xp* required to advance to **{}**'.format(msg, int(nextRole['XP']) - newStat, next_role_text)
# Add Next Rank
stat_embed.add_field(name="Next Rank", value='{} ({:,} more xp required)'.format(next_role_text, int(nextRole['XP'])-newStat), inline=True)
# Add status
status_text = ":green_heart:"
if member.status == discord.Status.offline:
status_text = ":black_heart:"
elif member.status == discord.Status.dnd:
status_text = ":heart:"
elif member.status == discord.Status.idle:
status_text = ":yellow_heart:"
stat_embed.add_field(name="Status", value=status_text, inline=True)
stat_embed.add_field(name="ID", value=str(member.id), inline=True)
stat_embed.add_field(name="User Name", value="{}#{}".format(member.name, member.discriminator), inline=True)
if member.premium_since:
local_time = UserTime.getUserTime(ctx.author, self.settings, member.premium_since, clock=True)
c_time_str = "{} {}".format(local_time['time'], local_time['zone'])
stat_embed.add_field(name="Boosting Since",value=c_time_str)
if member.activity and member.activity.name:
# Playing a game!
play_list = [ "Playing", "Streaming", "Listening to", "Watching" ]
try:
play_string = play_list[member.activity.type]
except:
play_string = "Playing"
stat_embed.add_field(name=play_string, value=str(member.activity.name), inline=True)
if member.activity.type == 1:
# Add the URL too
stat_embed.add_field(name="Stream URL", value="[Watch Now]({})".format(member.activity.url), inline=True)
# Add joinpos
joinedList = sorted([{"ID":mem.id,"Joined":mem.joined_at} for mem in ctx.guild.members], key=lambda x:x["Joined"].timestamp() if x["Joined"] != None else -1)
if member.joined_at != None:
try:
check_item = { "ID" : member.id, "Joined" : member.joined_at }
total = len(joinedList)
position = joinedList.index(check_item) + 1
stat_embed.add_field(name="Join Position", value="{:,} of {:,}".format(position, total), inline=True)
except:
stat_embed.add_field(name="Join Position", value="Unknown", inline=True)
else:
stat_embed.add_field(name="Join Position", value="Unknown", inline=True)
# Get localized user time
local_time = UserTime.getUserTime(ctx.author, self.settings, member.created_at, clock=False)
c_time_str = "{} {}".format(local_time['time'], local_time['zone'])
# add created_at footer
created = "Created at " + c_time_str
stat_embed.footer = created
await stat_embed.send(ctx)
@stats.error
async def stats_error(self, ctx, error):
msg = 'stats Error: {}'.format(error)
await ctx.channel.send(msg)
# List the xp and xp reserve of a user
@commands.command(pass_context=True)
async def xpinfo(self, ctx):
"""Gives a quick rundown of the xp system."""
server = ctx.message.guild
channel = ctx.message.channel
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(server, "SuppressMentions"):
suppress = True
else:
suppress = False
serverName = Nullify.escape_all(server.name)
hourlyXP = int(self.settings.getServerStat(server, "HourlyXP"))
hourlyXPReal = int(self.settings.getServerStat(server, "HourlyXPReal"))
xpPerMessage = int(self.settings.getServerStat(server, "XPPerMessage"))
xpRPerMessage = int(self.settings.getServerStat(server, "XPRPerMessage"))
if not xpPerMessage:
xpPerMessage = 0
if not xpRPerMessage:
xpRPerMessage = 0
if not hourlyXPReal:
hourlyXPReal = 0
if not hourlyXP:
hourlyXP = 0
onlyOnline = self.settings.getServerStat(server, "RequireOnline")
xpProm = self.settings.getServerStat(server, "XPPromote")
xpDem = self.settings.getServerStat(server, "XPDemote")
xpStr = None
if xpProm and xpDem:
# Bot promote and demote
xpStr = "This is what I check to handle promotions and demotions.\n"
else:
if xpProm:
xpStr = "This is what I check to handle promotions.\n"
elif xpDem:
xpStr = "This is what I check to handle demotions.\n"
msg = "__***{}'s*** **XP System**__\n\n__What's What:__\n\n".format(serverName)
msg = "{}**XP:** This is the xp you have *earned.*\nIt comes from other users gifting you xp, or if you're lucky enough to `{}gamble` and win.\n".format(msg, ctx.prefix)
if xpStr:
msg = "{}{}".format(msg, xpStr)
hourStr = None
if hourlyXPReal > 0:
hourStr = "Currently, you receive *{} xp* each hour".format(hourlyXPReal)
if onlyOnline:
hourStr = "{} (but *only* if your status is *Online*).".format(hourStr)
else:
hourStr = "{}.".format(hourStr)
if hourStr:
msg = "{}{}\n".format(msg, hourStr)
if xpPerMessage > 0:
msg = "{}Currently, you receive *{} xp* per message.\n".format(msg, xpPerMessage)
msg = "{}This can only be taken away by an *admin*.\n\n".format(msg)
msg = "{}**XP Reserve:** This is the xp you can *gift*, *gamble*, or use to *feed* me.\n".format(msg)
hourStr = None
if hourlyXP > 0:
hourStr = "Currently, you receive *{} xp reserve* each hour".format(hourlyXP)
if onlyOnline:
hourStr = "{} (but *only* if your status is *Online*).".format(hourStr)
else:
hourStr = "{}.".format(hourStr)
if hourStr:
msg = "{}{}\n".format(msg, hourStr)
if xpRPerMessage > 0:
msg = "{}Currently, you receive *{} xp reserve* per message.\n".format(msg, xpRPerMessage)
msg = "{}\n__How Do I Use It?:__\n\nYou can gift other users xp by using the `{}xp [user] [amount]` command.\n".format(msg, ctx.prefix)
msg = "{}This pulls from your *xp reserve*, and adds to their *xp*.\n".format(msg)
msg = "{}It does not change the *xp* you have *earned*.\n\n".format(msg)
msg = "{}You can gamble your *xp reserve* to have a chance to win a percentage back as *xp* for yourself.\n".format(msg)
msg = "{}You do so by using the `{}gamble [amount in multiple of 10]` command.\n".format(msg, ctx.prefix)
msg = "{}This pulls from your *xp reserve* - and if you win, adds to your *xp*.\n\n".format(msg)
msg = "{}You can also *feed* me.\n".format(msg)
msg = "{}This is done with the `{}feed [amount]` command.\n".format(msg, ctx.prefix)
msg = "{}This pulls from your *xp reserve* - and doesn't affect your *xp*.\n\n".format(msg)
msg = "{}You can check your *xp*, *xp reserve*, current role, and next role using the `{}stats` command.\n".format(msg, ctx.prefix)
msg = "{}You can check another user's stats with the `{}stats [user]` command.\n\n".format(msg, ctx.prefix)
# Get the required role for using the xp system
role = self.settings.getServerStat(server, "RequiredXPRole")
if role == None or role == "":
msg = '{}Currently, **Everyone** can *give xp*, *gamble*, and *feed* the bot.\n\n'.format(msg)
else:
# Role is set - let's get its name
found = False
for arole in server.roles:
if str(arole.id) == str(role):
found = True
vowels = "aeiou"
if arole.name[:1].lower() in vowels:
msg = '{}Currently, you need to be an **{}** to *give xp*, *gamble*, or *feed* the bot.\n\n'.format(msg, Nullify.escape_all(arole.name))
else:
msg = '{}Currently, you need to be a **{}** to *give xp*, *gamble*, or *feed* the bot.\n\n'.format(msg, Nullify.escape_all(arole.name))
if not found:
msg = '{}There is no role that matches id: `{}` for using the xp system - consider updating that setting.\n\n'.format(msg, role)
msg = "{}Hopefully that clears things up!".format(msg)
await ctx.message.channel.send(msg)
| mit | -1,479,371,113,077,877,200 | 34.143603 | 194 | 0.642081 | false |
beeftornado/sentry | src/sentry/api/endpoints/group_events.py | 1 | 5127 | from __future__ import absolute_import
import six
from datetime import timedelta
from django.utils import timezone
from rest_framework.exceptions import ParseError
from rest_framework.response import Response
from functools import partial
from sentry import eventstore
from sentry.api.base import EnvironmentMixin
from sentry.api.bases import GroupEndpoint
from sentry.api.event_search import get_filter, InvalidSearchQuery
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.api.helpers.environments import get_environments
from sentry.api.helpers.events import get_direct_hit_response
from sentry.api.serializers import EventSerializer, serialize, SimpleEventSerializer
from sentry.api.paginator import GenericOffsetPaginator
from sentry.api.utils import get_date_range_from_params, InvalidParams
from sentry.search.utils import InvalidQuery, parse_query
class NoResults(Exception):
pass
class GroupEventsError(Exception):
pass
class GroupEventsEndpoint(GroupEndpoint, EnvironmentMixin):
def get(self, request, group):
"""
List an Issue's Events
``````````````````````
This endpoint lists an issue's events.
:qparam bool full: if this is set to true then the event payload will
include the full event body, including the stacktrace.
Set to 1 to enable.
:pparam string issue_id: the ID of the issue to retrieve.
:auth: required
"""
try:
environments = get_environments(request, group.project.organization)
query, tags = self._get_search_query_and_tags(request, group, environments)
except InvalidQuery as exc:
return Response({"detail": six.text_type(exc)}, status=400)
except (NoResults, ResourceDoesNotExist):
return Response([])
try:
start, end = get_date_range_from_params(request.GET, optional=True)
except InvalidParams as e:
raise ParseError(detail=six.text_type(e))
try:
return self._get_events_snuba(request, group, environments, query, tags, start, end)
except GroupEventsError as exc:
raise ParseError(detail=six.text_type(exc))
def _get_events_snuba(self, request, group, environments, query, tags, start, end):
default_end = timezone.now()
default_start = default_end - timedelta(days=90)
params = {
"group_ids": [group.id],
"project_id": [group.project_id],
"organization_id": group.project.organization_id,
"start": start if start else default_start,
"end": end if end else default_end,
}
direct_hit_resp = get_direct_hit_response(request, query, params, "api.group-events")
if direct_hit_resp:
return direct_hit_resp
if environments:
params["environment"] = [env.name for env in environments]
full = request.GET.get("full", False)
try:
snuba_filter = get_filter(request.GET.get("query", None), params)
except InvalidSearchQuery as e:
raise ParseError(detail=six.text_type(e))
snuba_filter.conditions.append(["event.type", "!=", "transaction"])
data_fn = partial(eventstore.get_events, referrer="api.group-events", filter=snuba_filter)
serializer = EventSerializer() if full else SimpleEventSerializer()
return self.paginate(
request=request,
on_results=lambda results: serialize(results, request.user, serializer),
paginator=GenericOffsetPaginator(data_fn=data_fn),
)
def _get_search_query_and_tags(self, request, group, environments=None):
raw_query = request.GET.get("query")
if raw_query:
query_kwargs = parse_query([group.project], raw_query, request.user, environments)
query = query_kwargs.pop("query", None)
tags = query_kwargs.pop("tags", {})
else:
query = None
tags = {}
if environments:
env_names = set(env.name for env in environments)
if "environment" in tags:
# If a single environment was passed as part of the query, then
# we'll just search for that individual environment in this
# query, even if more are selected.
if tags["environment"] not in env_names:
# An event can only be associated with a single
# environment, so if the environments associated with
# the request don't contain the environment provided as a
# tag lookup, the query cannot contain any valid results.
raise NoResults
else:
# XXX: Handle legacy backends here. Just store environment as a
# single tag if we only have one so that we don't break existing
# usage.
tags["environment"] = list(env_names) if len(env_names) > 1 else env_names.pop()
return query, tags
| bsd-3-clause | 4,531,243,865,721,410,000 | 38.744186 | 98 | 0.634484 | false |
echonesis/mAiLab_Class | mAiLab_0002/RandomNumber.py | 1 | 1753 | #!/usr/bin/python
def simpleGenRandomNumber(n, llimit=0, ulimit=1):
import random
result = [random.uniform(llimit, ulimit) for i in xrange(n)]
return result
if __name__ == '__main__':
# For basic questions
# Basic #1
# In this part, the built-in Python functions would be used.
num_random = 5
print 'Basic 1> Generate', num_random, 'Random Number'
print simpleGenRandomNumber(num_random)
# Basic #2
import numpy as np
import time
print 'Basic 2>'
n1 = [10**1, 10**2, 10**3, 10**4, 10**5]
usedTime = list()
for iCnt in n1:
t1 = time.time()
result1 = simpleGenRandomNumber(iCnt, -1, 1)
usedTime.append(time.time() - t1)
print 'Case for N =', iCnt
print 'Mean =', np.mean(result1), '; STD =', np.std(result1)
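    # Reference note (added by the editor, not in the original script): a uniform
    # draw on [-1, 1] has mean 0 and standard deviation (1 - (-1))/sqrt(12) =
    # 2/sqrt(12) ~= 0.577, which the printed Mean/STD should approach as N grows.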
# Advanced #1
print 'Advanced 1>'
for i in range(len(n1)):
print 'Case for N =', n1[i]
print 'Used Sys Time =', usedTime[i], '(s)'
'''
Sample Output:
Basic 1> Generate 5 Random Number
[0.8517352415235713, 0.9608042046044872, 0.1512693660183837, 0.6074746239442333, 0.5267800150194317]
Basic 2>
Case for N = 10
Mean = -0.240647969028 ; STD = 0.424100623283
Case for N = 100
Mean = -0.0732104451873 ; STD = 0.596035030544
Case for N = 1000
Mean = 0.0287190524504 ; STD = 0.58627480244
Case for N = 10000
Mean = -0.00509101610347 ; STD = 0.578908223166
Case for N = 100000
Mean = 0.00342896915716 ; STD = 0.576555864097
Advanced 1>
Case for N = 10
Used Sys Time = 1.00135803223e-05 (s)
Case for N = 100
Used Sys Time = 4.10079956055e-05 (s)
Case for N = 1000
Used Sys Time = 0.000274896621704 (s)
Case for N = 10000
Used Sys Time = 0.00268888473511 (s)
Case for N = 100000
Used Sys Time = 0.0347440242767 (s)
'''
| mit | 6,842,228,498,127,733,000 | 27.737705 | 100 | 0.64575 | false |
AntelopeAudio/zen-launcher | zen_launcher/runner.py | 1 | 1918 | import os
import re
import shutil
import subprocess
import sys
BASE_DIR = os.path.expanduser('~/.antelope/zen/panel')
if not os.path.exists(BASE_DIR):
os.makedirs(BASE_DIR)
def get_panel_dir(ver, create=True):
stripped = ver.strip()
if re.match(r'\d+(\.\d+)*', stripped) is not None:
d = os.path.join(BASE_DIR, stripped)
if create and not os.path.exists(d):
os.makedirs(d)
return d
def sort_vers(vers):
"""Return versions sorted in descending order. Format is expected to be
consistent. For example passing ['1.10.1', '1.11'] (Note that
'1.10.1' has a micro version number and '1.11' doesn't.) will yield
incorrect results.
"""
key = lambda v: int(v.replace('.', ''))
return list(sorted(vers, key=key, reverse=True))
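# Editor's sketch (added; not part of the original module) of how the integer
# key behaves, using hypothetical version strings:
#   sort_vers(['1.9', '1.11'])    -> ['1.11', '1.9']     (keys 111 > 19, correct)
#   sort_vers(['1.10.1', '1.11']) -> ['1.10.1', '1.11']  (keys 1101 > 111, misordered)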
def get_latest_panel_version():
"""Returns None in case of no panels installed.
"""
vers = os.listdir(BASE_DIR)
srt = sort_vers(vers)
if srt:
return srt[0]
def get_latest_panel_dir():
"""Returns None in case of no panels installed.
"""
latest = get_latest_panel_version()
if latest is not None:
return os.path.join(BASE_DIR, latest)
return None
def run_version(ver):
d = get_panel_dir(ver, create=False)
if not os.path.exists(d):
raise ValueError
if sys.platform.startswith('win'):
# print('Starting {} for Windows'.format(d))
subprocess.call('cd "{}" && ZenStudio.exe'.format(d), shell=True)
elif sys.platform.startswith('darwin'):
ret = subprocess.call('cd "{}" && open ./*.app'.format(d), shell=True)
if ret != 0:
# In case of error, remove the CP directory. This way the
# next run will trigger the download process anew. Not the
# smartest thing, but the easiest. :)
shutil.rmtree(d)
else:
print('Starting {} for GNU'.format(d))
| gpl-3.0 | 7,596,631,990,808,295,000 | 27.205882 | 78 | 0.611053 | false |
brenthuisman/phd_tools | graph.spotprofiles.py | 1 | 7377 | #!/usr/bin/env python
import plot, numpy as np,auger,image,rtplan
from scipy.ndimage.filters import gaussian_filter
###########################################################################################################
smooth_param = 8.5 #20 mm FWHM
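# Editor's note (added): the usual Gaussian relation is
# FWHM = 2*sqrt(2*ln(2))*sigma ~= 2.3548*sigma, and 2.3548*8.5 ~= 20.0, so the
# "20 mm FWHM" comment matches sigma interpreted in millimetres; gaussian_filter
# itself takes sigma in samples, so that unit interpretation is an assumption here.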
volume_offset=-141.59+7.96#spot sources
pg29 = image.image('data/ct/source-ct-29.mhd')
rppg29 = image.image('data/rpct/source-rpct-29.mhd')
pg40 = image.image('data/ct/source-ct-40.mhd')
rppg40 = image.image('data/rpct/source-rpct-40.mhd')
pg61 = image.image('data/ct/source-ct-61.mhd')
rppg61 = image.image('data/rpct/source-rpct-61.mhd')
pg29.toprojection(".x", [0,1,1,1])
rppg29.toprojection(".x", [0,1,1,1])
pg40.toprojection(".x", [0,1,1,1])
rppg40.toprojection(".x", [0,1,1,1])
pg61.toprojection(".x", [0,1,1,1])
rppg61.toprojection(".x", [0,1,1,1])
pgsrc_ct_x = np.linspace(-149,149,150) #bincenters
pgsrc_ct_xhist = np.linspace(-150,150,151) #2mm voxels, endpoints
pgsrc_ct_x = pgsrc_ct_x+(volume_offset-pgsrc_ct_x[0]) #offset for pg source image
pgsrc_ct_xhist = pgsrc_ct_xhist+(volume_offset-pgsrc_ct_xhist[0]) #same
pg29_fo=auger.get_fop(pgsrc_ct_x,pg29.imdata)
rppg29_fo=auger.get_fop(pgsrc_ct_x,rppg29.imdata)
pg40_fo=auger.get_fop(pgsrc_ct_x,pg40.imdata)
rppg40_fo=auger.get_fop(pgsrc_ct_x,rppg40.imdata)
pg61_fo=auger.get_fop(pgsrc_ct_x,pg61.imdata)
rppg61_fo=auger.get_fop(pgsrc_ct_x,rppg61.imdata)
psf_pg29_fo=auger.get_fop(pgsrc_ct_x,gaussian_filter(pg29.imdata, sigma=smooth_param))
psf_rppg29_fo=auger.get_fop(pgsrc_ct_x,gaussian_filter(rppg29.imdata, sigma=smooth_param))
psf_pg40_fo=auger.get_fop(pgsrc_ct_x,gaussian_filter(pg40.imdata, sigma=smooth_param))
psf_rppg40_fo=auger.get_fop(pgsrc_ct_x,gaussian_filter(rppg40.imdata, sigma=smooth_param))
psf_pg61_fo=auger.get_fop(pgsrc_ct_x,gaussian_filter(pg61.imdata, sigma=smooth_param))
psf_rppg61_fo=auger.get_fop(pgsrc_ct_x,gaussian_filter(rppg61.imdata, sigma=smooth_param))
rtplan = rtplan.rtplan(['../doseactortest/data/plan.txt'],norm2nprim=False)#,noproc=True)
MSW=[]
for spot in rtplan.spots:
if spot[0] == 102:#
MSW.append(spot)
#### dose
dose_offset=-142.097+7.96
x = np.linspace(-149.5,149.5,300) #bincenters
xhist = np.linspace(-150,150,301) #1mm voxels, endpoints
x = x+(dose_offset-x[0]) #offset for pg source image
xhist = xhist+(dose_offset-xhist[0]) #same
dose = image.image('../doseactortest/output/new_dosespotid-ct.mhd')
dose = dose.imdata.reshape(dose.imdata.shape[::-1]).squeeze()
rpdose = image.image('../doseactortest/output/new_dosespotid-rpct.mhd')
rpdose = rpdose.imdata.reshape(rpdose.imdata.shape[::-1]).squeeze()
ct29_fo=auger.get_fop(x,dose[29])
rpct29_fo=auger.get_fop(x,rpdose[29])
ct40_fo=auger.get_fop(x,dose[40])
rpct40_fo=auger.get_fop(x,rpdose[40])
ct61_fo=auger.get_fop(x,dose[61])
rpct61_fo=auger.get_fop(x,rpdose[61])
print '###########################################################################################################'
print 'PG FOPS'
print 'pg29', pg29_fo, ', w/psf:', psf_pg29_fo
print 'rppg29', rppg29_fo, ', w/psf:', psf_rppg29_fo
print 'pg40', pg40_fo, ', w/psf:', psf_pg40_fo
print 'rppg40', rppg40_fo, ', w/psf:', psf_rppg40_fo
print 'pg61', pg61_fo, ', w/psf:', psf_pg61_fo
print 'rppg61', rppg61_fo, ', w/psf:', psf_rppg61_fo
print 'FOP shifts'
print '29, ct:', str(rpct29_fo-ct29_fo)[:4], ', pg', str(rppg29_fo-pg29_fo)[:4], ', pg+psf', str(psf_rppg29_fo-psf_pg29_fo)[:4]
print '40, ct:', str(rpct40_fo-ct40_fo)[:4], ', pg', str(rppg40_fo-pg40_fo)[:4], ', pg+psf', str(psf_rppg40_fo-psf_pg40_fo)[:4]
print '61, ct:', str(rpct61_fo-ct61_fo)[:4], ', pg', str(rppg61_fo-pg61_fo)[:4], ', pg+psf', str(psf_rppg61_fo-psf_pg61_fo)[:4]
print '###########################################################################################################'
###########################################################################################################
def yld(profile):
nr=str(profile.imdata.sum()*100.)
return nr[:3]+'\%'
def plotprof(ax,xax,emit,dete,name, **kwargs):
if name == 'CT':
color='steelblue'
elif name == 'RPCT':
color='indianred'
else:
color='black'
ax.step(xax,emit, color=color,lw=1., alpha=1, label=name+', yield: '+yld(emit), where='mid')
#ax1.step(pgsrc_ct_x,dete, color=color,lw=1., alpha=0.5, label=name+' PSF', where='mid')
return ax
###########################################################################################################
f, ((ax4,ax5,ax6),(ax1,ax2,ax3)) = plot.subplots(nrows=2, ncols=3, sharex=True, sharey=False)
ax1.step(pgsrc_ct_x,pg29.imdata, color='steelblue',lw=1., alpha=1, label='CT, yield: '+yld(pg29), where='mid')
ax1.step(pgsrc_ct_x,rppg29.imdata, color='indianred',lw=1., alpha=1, label='RPCT, yield: '+yld(rppg29), where='mid')
ax1.set_title('PG shift: '+str(rppg29_fo-pg29_fo)[:3]+' mm', fontsize=10)
ax1.legend(frameon = False,loc='upper left')
ax1.set_xlim(-80,60)
ax1.set_ylim(0,0.004)
ax1.set_ylabel('Cumulative PG emission per proton')
plot.texax(ax1)
ax2.step(pgsrc_ct_x,pg40.imdata, color='steelblue',lw=1., alpha=1, label='CT, yield: '+yld(pg40), where='mid')
ax2.step(pgsrc_ct_x,rppg40.imdata, color='indianred',lw=1., alpha=1, label='RPCT, yield: '+yld(rppg40), where='mid')
ax2.set_title('PG shift: '+str(rppg40_fo-pg40_fo)[:3]+' mm', fontsize=10)
ax2.legend(frameon = False,loc='upper left')
#ax2.set_xlim(-80,70)
ax2.set_ylim(0,0.004)
ax2.set_xlabel('Position [mm]')
plot.texax(ax2)
ax3.step(pgsrc_ct_x,pg61.imdata, color='steelblue',lw=1., alpha=1, label='CT, yield: '+yld(pg61), where='mid')
ax3.step(pgsrc_ct_x,rppg61.imdata, color='indianred',lw=1., alpha=1, label='RPCT, yield: '+yld(rppg61), where='mid')
ax3.set_title('PG shift: '+str(rppg61_fo-pg61_fo)[:2]+' mm', fontsize=10)
ax3.legend(frameon = False,loc='upper left')
#ax3.set_xlim(-80,70)
ax3.set_ylim(0,0.004)
plot.texax(ax3)
######## TopRow
ax4.step(x,dose[29]/dose[29].max(), color='steelblue',lw=1., alpha=1, label='CT', where='mid')
ax4.step(x,rpdose[29]/rpdose[29].max(), color='indianred',lw=1., alpha=1, label='RPCT', where='mid')
ax4.set_title('Spot A, Shift: '+str(rpct29_fo-ct29_fo)[:3]+' mm\n'+plot.sn(MSW[29][-1],1)+' protons', fontsize=10)
ax4.legend(frameon = False,loc='upper left')
ax4.set_xlim(-80,60)
ax4.set_ylabel('Scaled Dose [a.u.]')
plot.texax(ax4)
ax5.step(x,dose[40]/dose[40].max(), color='steelblue',lw=1., alpha=1, label='CT', where='mid')
ax5.step(x,rpdose[40]/rpdose[40].max(), color='indianred',lw=1., alpha=1, label='RPCT', where='mid')
ax5.set_title('Spot B, Shift: '+str(rpct40_fo-ct40_fo)[:3]+' mm\n'+plot.sn(MSW[40][-1],1)+' protons', fontsize=10)
ax5.legend(frameon = False,loc='upper left')
#ax5.set_xlim(-80,70)
ax5.set_xlabel('Position [mm]')
plot.texax(ax5)
ax6.step(x,dose[61]/dose[61].max(), color='steelblue',lw=1., alpha=1, label='CT', where='mid')
ax6.step(x,rpdose[61]/rpdose[61].max(), color='indianred',lw=1., alpha=1, label='RPCT', where='mid')
ax6.set_title('Spot C, Shift: '+str(rpct61_fo-ct61_fo)[:2]+' mm\n'+plot.sn(MSW[61][-1],1)+' protons', fontsize=10)
ax6.legend(frameon = False,loc='upper left')
#ax6.set_xlim(-80,70)
plot.texax(ax6)
ax4.xaxis.set_visible(False)
ax5.xaxis.set_visible(False)
ax6.xaxis.set_visible(False)
ax5.yaxis.set_visible(False)
ax6.yaxis.set_visible(False)
ax2.yaxis.set_visible(False)
ax3.yaxis.set_visible(False)
f.subplots_adjust(hspace=0.3)
f.savefig('spotprofiles.pdf', bbox_inches='tight')
plot.close('all')
| lgpl-3.0 | 6,576,384,876,859,362,000 | 41.889535 | 127 | 0.6382 | false |
lantianlz/qiexing | www/journey/interface.py | 1 | 13639 | # -*- coding: utf-8 -*-
import datetime
from django.db import transaction
from django.db.models import F
from common import utils, debug, cache
from www.misc.decorators import cache_required
from www.misc import consts
from www.message.interface import UnreadCountBase
from www.account.interface import UserBase
from www.account.interface import UserCountBase
from www.journey.models import Journey, Like
dict_err = {
20100: u'标题过于简单,稍微详述一下',
20101: u'标题过于冗长,稍微提炼一下',
20102: u'内容过于简单,稍微详述一下',
20103: u'内容过于冗长,稍微提炼一下',
20104: u'喜欢一次足矣',
20105: u'自己赞自己是自恋的表现哦,暂不支持',
20800: u'问题不存在或者已删除',
20801: u'回答不存在或者已删除',
20802: u'绝对不会让你得逞的,因为你没得权限',
}
dict_err.update(consts.G_DICT_ERROR)
JOURNEY_DB = 'default'
def journey_required(func):
def _decorator(self, journey_id_or_object, *args, **kwargs):
journey = journey_id_or_object
if not isinstance(journey_id_or_object, Journey):
try:
journey = Journey.objects.get(id=journey_id_or_object, state=True)
except Journey.DoesNotExist:
return 20800, dict_err.get(20800)
return func(self, journey, *args, **kwargs)
return _decorator
def journey_admin_required(func):
def _decorator(self, journey, user, *args, **kwargs):
flag, journey = JourneyBase().get_journey_admin_permission(journey, user)
if not flag:
return 20802, dict_err.get(20802)
return func(self, journey, user, *args, **kwargs)
return _decorator
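# Editor's sketch (added; not part of the original source): methods wrapped by
# journey_required accept either a primary key or a Journey instance, e.g.
#   JourneyBase().set_top(42)           # hypothetical id, resolved to a Journey first
#   JourneyBase().set_top(journey_obj)  # an existing Journey instance passes straight through
# and they short-circuit with (20800, dict_err.get(20800)) when the journey is missing.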
class JourneyBase(object):
def __init__(self):
pass
def format_journeys(self, journeys):
for journey in journeys:
journey.user = journey.get_user()
journey.content = utils.filter_script(journey.content)
return journeys
def validate_title(self, title, min_len=10):
if len(title) < min_len:
return 20100, dict_err.get(20100)
if len(title) > 128:
return 20101, dict_err.get(20101)
return 0, dict_err.get(0)
def validate_content(self, content, min_len=10):
if len(content) < min_len:
return 20102, dict_err.get(20102)
if len(content) > 65535:
return 20103, dict_err.get(20103)
return 0, dict_err.get(0)
def validate_journey_element(self, journey_title, journey_content, min_title_len=10):
errcode, errmsg = self.validate_title(journey_title, min_title_len)
if not errcode == 0:
return errcode, errmsg
errcode, errmsg = self.validate_content(journey_content, min_len=2)
if not errcode == 0:
return errcode, errmsg
if not all((journey_title, journey_content)):
return 99800, dict_err.get(99800)
return 0, dict_err.get(0)
@transaction.commit_manually(using=JOURNEY_DB)
def create_journey(self, user_id, journey_title, journey_content,
ip='127.0.0.1', is_hide_user=None):
try:
            # Guard against XSS
journey_title = utils.filter_script(journey_title)
journey_content = utils.filter_script(journey_content)
errcode, errmsg = self.validate_journey_element(journey_title, journey_content)
if not errcode == 0:
transaction.rollback(using=JOURNEY_DB)
return errcode, errmsg
is_hide_user = True if is_hide_user else False
journey = Journey.objects.create(user_id=user_id, title=journey_title, content=journey_content,
last_answer_time=datetime.datetime.now(), ip=ip,
is_hide_user=is_hide_user)
            # Update the user's journey count
UserCountBase().update_user_count(user_id=user_id, code='user_journey_count')
transaction.commit(using=JOURNEY_DB)
return 0, journey
except Exception, e:
debug.get_debug_detail(e)
transaction.rollback(using=JOURNEY_DB)
return 99900, dict_err.get(99900)
@journey_admin_required
@transaction.commit_manually(using=JOURNEY_DB)
def modify_journey(self, journey, user, journey_title, journey_content,
ip='127.0.0.1', is_hide_user=None):
try:
            # Guard against XSS
journey_title = utils.filter_script(journey_title)
journey_content = utils.filter_script(journey_content)
errcode, errmsg = self.validate_journey_element(journey_title, journey_content)
if not errcode == 0:
transaction.rollback(using=JOURNEY_DB)
return errcode, errmsg
journey.title = journey_title
journey.content = journey_content
journey.ip = ip
if is_hide_user:
journey.is_hide_user = True
journey.save()
            # Refresh the cached summary
self.get_journey_summary_by_id(journey, must_update_cache=True)
transaction.commit(using=JOURNEY_DB)
return 0, journey
except Exception, e:
debug.get_debug_detail(e)
transaction.rollback(using=JOURNEY_DB)
return 99900, dict_err.get(99900)
def add_journey_view_count(self, journey_id):
'''
        @note: increment the view count
'''
Journey.objects.filter(id=journey_id).update(views_count=F('views_count') + 1)
@journey_admin_required
@transaction.commit_manually(using=JOURNEY_DB)
def remove_journey(self, journey, user):
try:
journey.state = False
journey.save()
            # Update the user's journey count
UserCountBase().update_user_count(user_id=journey.user_id, code='user_journey_count', operate='minus')
transaction.commit(using=JOURNEY_DB)
return 0, dict_err.get(0)
except Exception, e:
debug.get_debug_detail(e)
transaction.rollback(using=JOURNEY_DB)
return 99900, dict_err.get(99900)
def get_journey_by_id(self, id, need_state=True):
try:
ps = dict(id=id)
if need_state:
ps.update(dict(state=True))
return Journey.objects.get(**ps)
except Journey.DoesNotExist:
return None
def get_journeys_by_user_id(self, user_id):
return Journey.objects.filter(user_id=user_id, state=True)
def get_user_journey_count(self, user_id):
return self.get_journeys_by_user_id(user_id).count()
def get_all_journeys_for_home_page(self):
return Journey.objects.filter(is_silence=False, state=True)
def get_all_important_journey(self):
pass
@journey_required
def get_journey_admin_permission(self, journey, user):
        # Also return the journey itself so the caller gets the resolved object
return journey.user_id == user.id or user.is_staff(), journey
@journey_required
@transaction.commit_manually(using=JOURNEY_DB)
def set_important(self, journey, user, title, summary, author_user_id=None, img='', img_alt=None, sort_num=0):
try:
if author_user_id and not UserBase().get_user_by_id(author_user_id):
transaction.rollback(using=JOURNEY_DB)
return 99600, dict_err.get(99600)
try:
assert journey and user and title and summary
except:
transaction.rollback(using=JOURNEY_DB)
return 99800, dict_err.get(99800)
journey.is_important = True
journey.save()
transaction.commit(using=JOURNEY_DB)
return 0, dict_err.get(0)
except Exception, e:
debug.get_debug_detail(e)
transaction.rollback(using=JOURNEY_DB)
return 99900, dict_err.get(99900)
@journey_required
@transaction.commit_manually(using=JOURNEY_DB)
def cancel_important(self, journey, user):
try:
journey.is_important = False
journey.save()
transaction.commit(using=JOURNEY_DB)
return 0, dict_err.get(0)
except Exception, e:
debug.get_debug_detail(e)
transaction.rollback(using=JOURNEY_DB)
return 99900, dict_err.get(99900)
@cache_required(cache_key='journey_summary_%s', expire=3600)
def get_journey_summary_by_id(self, journey_id_or_object, must_update_cache=False):
'''
        @note: fetch the journey summary, used when rendering feeds
'''
journey = self.get_journey_by_id(journey_id_or_object, need_state=False) if not isinstance(journey_id_or_object, Journey) else journey_id_or_object
journey_summary = {}
if journey:
journey_summary = dict(journey_id=journey.id, journey_title=journey.title,
journey_summary=journey.get_summary(), journey_answer_count=journey.answer_count)
return journey_summary
def get_journey_by_title(self, title):
'''
        Look up journeys by exact title
'''
journeys = []
if title:
journeys = Journey.objects.filter(title=title)
return self.format_journeys(journeys)
def get_all_journeys_by_order_type(self, order):
'''
        order journeys by the given statistics field
'''
return self.format_journeys(Journey.objects.all().order_by('-' + order))
def get_journeys_by_range_date(self, start_date, end_date):
'''
        query journeys created within the given date range
'''
return Journey.objects.filter(create_time__range=(start_date, end_date))
def search_journeys(self, title):
if not title:
return []
return Journey.objects.filter(title__icontains=title, state=True)[:200]
def search_user_journeys(self, user_id, title):
if not title:
return []
return Journey.objects.filter(user_id=user_id, title__icontains=title, state=True)[:200]
@journey_required
def set_top(self, journey):
try:
journeys = Journey.objects.filter(state=True).order_by("-sort_num")
max_sort_num = 1 if not journeys else (journeys[0].sort_num + 1)
journey.sort_num = max_sort_num
journey.save()
return 0, dict_err.get(0)
except Exception, e:
debug.get_debug_detail(e)
return 99900, dict_err.get(99900)
@journey_required
def cancel_top(self, journey):
journey.sort_num = 0
journey.save()
return 0, dict_err.get(0)
class LikeBase(object):
'''
    @note: wrapper around the "like" feature
'''
def format_likes(self, likes):
for like in likes:
like.from_user = UserBase().get_user_by_id(like.from_user_id)
return likes
@journey_required
@transaction.commit_manually(JOURNEY_DB)
def like_it(self, journey, from_user_id, ip):
'''
        @note: perform a "like" on a journey
'''
try:
assert all((journey, from_user_id, ip))
is_anonymous = False
if from_user_id:
if Like.objects.filter(from_user_id=from_user_id, journey=journey):
transaction.rollback(JOURNEY_DB)
return 20104, dict_err.get(20104)
else:
from_user_id = ''
is_anonymous = False
if Like.objects.filter(ip=ip, journey=journey):
transaction.rollback(JOURNEY_DB)
return 20104, dict_err.get(20104)
            # liking your own journey is not allowed
to_user_id = journey.user_id
if from_user_id == to_user_id:
transaction.rollback(JOURNEY_DB)
return 20105, dict_err.get(20105)
Like.objects.create(journey=journey, is_anonymous=is_anonymous, from_user_id=from_user_id, to_user_id=to_user_id, ip=ip)
journey.like_count += 1
journey.save()
            # update the author's "liked" count
UserCountBase().update_user_count(user_id=to_user_id, code='user_liked_count')
            # update the unread message count
UnreadCountBase().update_unread_count(to_user_id, code='received_like')
            # refresh the cached summary
JourneyBase().get_journey_summary_by_id(journey, must_update_cache=True)
transaction.commit(JOURNEY_DB)
return 0, dict_err.get(0)
except Exception, e:
debug.get_debug_detail(e)
transaction.rollback(JOURNEY_DB)
return 99900, dict_err.get(99900)
def get_likes_by_journey(self, journey, user_id=None, ip=None):
'''
        @note: get all likes under a journey, so the front end can tell whether the current logged-in user already liked it; anonymous users are matched by IP
'''
ps = dict(journey=journey)
if user_id:
ps.update(dict(from_user_id=user_id))
if ip:
ps.update(dict(ip=ip, is_anonymous=True))
return Like.objects.filter(**ps)
def get_to_user_likes(self, user_id):
return Like.objects.select_related('journey').filter(to_user_id=user_id, is_anonymous=False)
def get_likes_by_answer(self, answer):
return Like.objects.select_related('answer').filter(answer=answer, is_anonymous=False)
def get_user_liked_count(self, user_id):
return self.get_to_user_likes(user_id).count()
| gpl-2.0 | 2,520,855,221,058,252,300 | 33.556728 | 155 | 0.594793 | false |
lupyuen/RaspberryPiImage | usr/share/pyshared/ajenti/users.py | 1 | 4815 | import logging
import syslog
from passlib.hash import sha512_crypt
import ajenti
import ajenti.usersync
from ajenti.api import *
def restrict(permission):
"""
Marks a decorated function as requiring ``permission``.
If the invoking user doesn't have one, :class:`SecurityError` is raised.
"""
def decorator(fx):
def wrapper(*args, **kwargs):
UserManager.get().require_permission(extract_context(), permission)
return fx(*args, **kwargs)
return wrapper
return decorator
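# Illustrative usage sketch (the 'power' permission id below is a made-up
# example, not part of this module): a plugin method can be guarded so that
# callers without the permission get a SecurityError.
#
#   @restrict('power')
#   def reboot(self):
#       ...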
class SecurityError (Exception):
"""
Indicates that user didn't have a required permission.
.. attribute:: permission
permission ID
"""
def __init__(self, permission):
self.permission = permission
def __str__(self):
return 'Permission "%s" required' % self.permission
@plugin
@persistent
@rootcontext
class UserManager (BasePlugin):
default_classconfig = {'sync-provider': ''}
classconfig_root = True
def check_password(self, username, password, env=None):
"""
Verifies the given username/password combo
:type username: str
:type password: str
:rtype: bool
"""
if not username or not password:
return False
provider = self.get_sync_provider(fallback=True)
if username == 'root' and not provider.syncs_root:
provider = ajenti.usersync.AjentiSyncProvider.get()
if not username in ajenti.config.tree.users:
return False
try:
provider.sync()
except Exception as e:
logging.error(str(e))
result = provider.check_password(username, password)
provider_name = type(provider).__name__
ip_notion = ''
ip = env.get('REMOTE_ADDR', None) if env else None
if ip:
ip_notion = ' from %s' % ip
if not result:
msg = 'failed login attempt for %s ("%s") through %s%s' % \
(username, password, provider_name, ip_notion)
syslog.syslog(syslog.LOG_WARNING, msg)
logging.warn(msg)
else:
msg = 'user %s logged in through %s%s' % (username, provider_name, ip_notion)
syslog.syslog(syslog.LOG_INFO, msg)
logging.info(msg)
return result
def hash_password(self, password):
"""
:type password: str
:rtype: str
"""
if not password.startswith('sha512|'):
password = 'sha512|%s' % sha512_crypt.encrypt(password)
return password
def hash_passwords(self):
for user in ajenti.config.tree.users.values():
if not user.password.startswith('sha512|'):
user.password = self.hash_password(user.password)
def has_permission(self, context, permission):
"""
Checks whether the current user has a permission
:type permission: str
:rtype: bool
"""
if context.user.name == 'root':
return True
if not permission in context.user.permissions:
return False
return True
def require_permission(self, context, permission):
"""
Checks current user for given permission and
raises :class:`SecurityError` if he doesn't have one
:type permission: str
:raises: SecurityError
"""
if not self.has_permission(context, permission):
raise SecurityError(permission)
def get_sync_provider(self, fallback=False):
"""
:type fallback: bool
:rtype: ajenti.usersync.UserSyncProvider
"""
for p in ajenti.usersync.UserSyncProvider.get_classes():
p.get()
if p.id == self.classconfig['sync-provider']:
try:
p.get().test()
except:
if fallback:
return ajenti.usersync.AjentiSyncProvider.get()
return p.get()
def set_sync_provider(self, provider_id):
self.classconfig['sync-provider'] = provider_id
self.save_classconfig()
def set_password(self, username, password):
ajenti.config.tree.users[username].password = self.hash_password(password)
@interface
class PermissionProvider (object):
"""
Override to create your own set of permissions
"""
def get_permissions(self):
"""
Should return a list of permission names
:rtype: list
"""
return []
def get_name(self):
"""
Should return a human-friendly name for this set
of permissions (displayed in Configurator)
:rtype: str
"""
return ''
__all__ = ['restrict', 'PermissionProvider', 'SecurityError', 'UserManager']
| apache-2.0 | -4,075,475,881,534,769,000 | 27.157895 | 89 | 0.585462 | false |
agateau/yokadi | yokadi/tests/helptestcase.py | 1 | 1726 | # -*- coding: UTF-8 -*-
"""
Help test cases
@author: Aurélien Gâteau <mail@agateau.com>
@license: GPL v3 or later
"""
import os
import sys
import unittest
from cmd import Cmd
from contextlib import contextmanager
from yokadi.core import db
from yokadi.ycli.main import YokadiCmd
@contextmanager
def to_devnull(out):
out_fd = out.fileno()
with open(os.devnull, "wb") as null, \
os.fdopen(os.dup(out_fd), "wb") as copied:
out.flush()
os.dup2(null.fileno(), out_fd)
try:
yield
finally:
out.flush()
os.dup2(copied.fileno(), out_fd)
class HelpTestCase(unittest.TestCase):
"""
A basic test for the command helps: it just execute 'help <cmd>' on all
commands. This catches invalid format characters in the help strings.
"""
def setUp(self):
# Some help commands look into the db for default values
db.connectDatabase("", memoryDatabase=True)
db.setDefaultConfig()
def testHelp(self):
cmd = YokadiCmd()
for attr in dir(cmd):
if not attr.startswith("do_"):
continue
yokadiCommand = attr[3:]
try:
# Execute the command, but redirect stdout and stderr to
# /dev/null to avoid flooding the terminal
with to_devnull(sys.stdout), to_devnull(sys.stderr):
# We use Cmd implementation of onecmd() because YokadiCmd
# overrides it to catch exceptions
Cmd.onecmd(cmd, "help " + yokadiCommand)
except Exception:
print("'help %s' failed" % yokadiCommand)
raise
# vi: ts=4 sw=4 et
| gpl-3.0 | -7,332,356,089,390,799,000 | 27.733333 | 77 | 0.585267 | false |
noironetworks/heat | heat/common/policy.py | 1 | 6824 | #
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Based on glance/api/policy.py
"""Policy Engine For Heat."""
from oslo_config import cfg
from oslo_log import log as logging
from oslo_policy import policy
from oslo_utils import excutils
import six
from heat.common import exception
from heat.common.i18n import _
from heat import policies
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
DEFAULT_RULES = policy.Rules.from_dict({'default': '!'})
DEFAULT_RESOURCE_RULES = policy.Rules.from_dict({'default': '@'})
ENFORCER = None
class Enforcer(object):
"""Responsible for loading and enforcing rules."""
def __init__(self, scope='heat', exc=exception.Forbidden,
default_rule=DEFAULT_RULES['default'], policy_file=None):
self.scope = scope
self.exc = exc
self.default_rule = default_rule
self.enforcer = policy.Enforcer(
CONF, default_rule=default_rule, policy_file=policy_file)
self.log_not_registered = True
# register rules
self.enforcer.register_defaults(policies.list_rules())
def set_rules(self, rules, overwrite=True):
"""Create a new Rules object based on the provided dict of rules."""
rules_obj = policy.Rules(rules, self.default_rule)
self.enforcer.set_rules(rules_obj, overwrite)
def load_rules(self, force_reload=False):
"""Set the rules found in the json file on disk."""
self.enforcer.load_rules(force_reload)
def _check(self, context, rule, target, exc,
is_registered_policy=False, *args, **kwargs):
"""Verifies that the action is valid on the target in this context.
:param context: Heat request context
:param rule: String representing the action to be checked
:param target: Dictionary representing the object of the action.
:raises heat.common.exception.Forbidden: When permission is denied
(or self.exc if supplied).
:returns: A non-False value if access is allowed.
"""
do_raise = False if not exc else True
credentials = context.to_policy_values()
if is_registered_policy:
try:
return self.enforcer.authorize(rule, target, credentials,
do_raise=do_raise,
exc=exc, action=rule)
except policy.PolicyNotRegistered:
if self.log_not_registered:
with excutils.save_and_reraise_exception():
LOG.exception(_('Policy not registered.'))
else:
raise
else:
return self.enforcer.enforce(rule, target, credentials,
do_raise, exc=exc, *args, **kwargs)
def enforce(self, context, action, scope=None, target=None,
is_registered_policy=False):
"""Verifies that the action is valid on the target in this context.
:param context: Heat request context
:param action: String representing the action to be checked
:param target: Dictionary representing the object of the action.
:raises heat.common.exception.Forbidden: When permission is denied
(or self.exc if supplied).
:returns: A non-False value if access is allowed.
"""
_action = '%s:%s' % (scope or self.scope, action)
_target = target or {}
return self._check(context, _action, _target, self.exc, action=action,
is_registered_policy=is_registered_policy)
def check_is_admin(self, context):
"""Whether or not is admin according to policy.
By default the rule will check whether or not roles contains
'admin' role and is admin project.
:param context: Heat request context
:returns: A non-False value if the user is admin according to policy
"""
return self._check(context, 'context_is_admin', target={}, exc=None,
is_registered_policy=True)
def get_enforcer():
global ENFORCER
if ENFORCER is None:
ENFORCER = Enforcer()
return ENFORCER
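# Illustrative usage sketch (the scope/action names below are assumptions, not
# defined in this module): callers typically fetch the shared enforcer and let
# it raise exception.Forbidden when the check fails.
#
#   enforcer = get_enforcer()
#   enforcer.enforce(context, 'create', scope='stacks',
#                    target={}, is_registered_policy=True)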
class ResourceEnforcer(Enforcer):
def __init__(self, default_rule=DEFAULT_RESOURCE_RULES['default'],
**kwargs):
super(ResourceEnforcer, self).__init__(
default_rule=default_rule, **kwargs)
self.log_not_registered = False
def _enforce(self, context, res_type, scope=None, target=None,
is_registered_policy=False):
try:
result = super(ResourceEnforcer, self).enforce(
context, res_type,
scope=scope or 'resource_types',
target=target, is_registered_policy=is_registered_policy)
except policy.PolicyNotRegistered:
result = True
except self.exc as ex:
LOG.info(six.text_type(ex))
raise
if not result:
if self.exc:
raise self.exc(action=res_type)
return result
def enforce(self, context, res_type, scope=None, target=None,
is_registered_policy=False):
# NOTE(pas-ha): try/except just to log the exception
result = self._enforce(context, res_type, scope, target,
is_registered_policy=is_registered_policy)
if result:
# check for wildcard resource types
subparts = res_type.split("::")[:-1]
subparts.append('*')
res_type_wc = "::".join(subparts)
try:
return self._enforce(context, res_type_wc, scope, target,
is_registered_policy=is_registered_policy)
except self.exc:
raise self.exc(action=res_type)
return result
def enforce_stack(self, stack, scope=None, target=None,
is_registered_policy=False):
for res in stack.resources.values():
self.enforce(stack.context, res.type(), scope=scope, target=target,
is_registered_policy=is_registered_policy)
| apache-2.0 | -8,548,374,885,071,395,000 | 37.994286 | 79 | 0.603605 | false |
eamigo86/graphene-django-extras | tests/conftest.py | 1 | 3393 | import os
import sys
import django
from django.core import management
def pytest_addoption(parser):
parser.addoption(
"--no-pkgroot",
action="store_true",
default=False,
help="Remove package root directory from sys.path, ensuring that "
"graphene_django_extras is imported from the installed site-packages. "
"Used for testing the distribution.",
)
parser.addoption(
"--staticfiles",
action="store_true",
default=False,
help="Run tests with static files collection, using manifest "
"staticfiles storage. Used for testing the distribution.",
)
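# These flags are then supplied on the pytest command line, for example:
#
#   pytest --no-pkgroot --staticfiles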
def pytest_configure(config):
from django.conf import settings
settings.configure(
ALLOWED_HOSTS=["*"],
DEBUG_PROPAGATE_EXCEPTIONS=True,
DATABASES={
"default": {"ENGINE": "django.db.backends.sqlite3", "NAME": ":memory:"}
},
SITE_ID=1,
SECRET_KEY="not very secret in tests",
USE_I18N=True,
USE_L10N=True,
STATIC_URL="/static/",
ROOT_URLCONF="tests.urls",
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"APP_DIRS": True,
"OPTIONS": {"debug": True}, # We want template errors to raise
}
],
MIDDLEWARE=(
"django.middleware.common.CommonMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
),
INSTALLED_APPS=(
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.staticfiles",
"graphene_django",
"tests",
),
PASSWORD_HASHERS=("django.contrib.auth.hashers.MD5PasswordHasher",),
GRAPHENE={
"SCHEMA": "tests.schema.schema",
"MIDDLEWARE": ["graphene_django_extras.ExtraGraphQLDirectiveMiddleware"],
},
AUTHENTICATION_BACKENDS=(
"django.contrib.auth.backends.ModelBackend",
"guardian.backends.ObjectPermissionBackend",
),
)
# FIXME(eclar): necessary ?
if config.getoption("--no-pkgroot"):
sys.path.pop(0)
# import rest_framework before pytest re-adds the package root directory.
import graphene_django_extras
package_dir = os.path.join(os.getcwd(), "graphene_django_extras")
assert not graphene_django_extras.__file__.startswith(package_dir)
# Manifest storage will raise an exception if static files are not present (ie, a packaging failure).
if config.getoption("--staticfiles"):
import graphene_django_extras
settings.STATIC_ROOT = os.path.join(
os.path.dirname(graphene_django_extras.__file__), "static-root"
)
settings.STATICFILES_STORAGE = (
"django.contrib.staticfiles.storage.ManifestStaticFilesStorage"
)
django.setup()
if config.getoption("--staticfiles"):
management.call_command("collectstatic", verbosity=0, interactive=False)
| mit | -834,246,997,406,322,000 | 32.93 | 105 | 0.607722 | false |
NicoVarg99/daf-recipes | ckan/ckan/ckan/ckan/model/task_status.py | 2 | 1174 | # encoding: utf-8
from datetime import datetime
from sqlalchemy import types, Column, Table, UniqueConstraint
import meta
import types as _types
import domain_object
__all__ = ['TaskStatus', 'task_status_table']
task_status_table = Table('task_status', meta.metadata,
Column('id', types.UnicodeText, primary_key=True, default=_types.make_uuid),
Column('entity_id', types.UnicodeText, nullable=False),
Column('entity_type', types.UnicodeText, nullable=False),
Column('task_type', types.UnicodeText, nullable=False),
Column('key', types.UnicodeText, nullable=False),
Column('value', types.UnicodeText, nullable=False),
Column('state', types.UnicodeText),
Column('error', types.UnicodeText),
Column('last_updated', types.DateTime, default=datetime.now),
UniqueConstraint('entity_id', 'task_type', 'key')
)
class TaskStatus(domain_object.DomainObject):
@classmethod
def get(cls, reference):
'''Returns a task status object referenced by its id.'''
if not reference:
return None
task = meta.Session.query(cls).get(reference)
return task
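# Illustrative usage sketch (the id variable is a made-up example, not part of
# this module):
#
#   status = TaskStatus.get(task_status_id)   # returns None for a falsy reference
#   if status:
#       print(status.key, status.value)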
meta.mapper(TaskStatus, task_status_table)
| gpl-3.0 | -3,177,157,195,557,000,000 | 32.542857 | 80 | 0.701022 | false |
tsybulkin/dor | dor.py | 1 | 11481 | #!/usr/bin/python
#
# dor-bug robot
#
#
#######################################################
from math import sin,cos,radians,degrees,pi,asin,atan,sqrt
import numpy as np
from tools import *
from time import sleep
alpha_max = pi/2
beta_max = pi
phi_max = pi/5
small_angles = [-0.01, 0.0, 0.01 ]
big_angles = [ -0.5, -0.02, 0, 0.02, 0.5]
MAX_ATTEMPTS = 20
class Dor():
def __init__(self,X=0,Y=0):
self.actions = []
self.Ox = X
self.Oy = Y
self.Oz = 0
self.R = 5
self.legs = [ Leg(i,pi/2*i) for i in range(4) ]
self.raised_leg = 0
self.legs[0].alpha += -0.01
self.legs[2].alpha += -0.01
self.feet_distances = self.get_dist_between_feet()
self.legs[0].xy = self.get_foot(0,(0,0,0))[:2]
self.legs[1].xy = self.get_foot(1,(0,0,0))[:2]
self.legs[3].xy = self.get_foot(3,(0,0,0))[:2]
self.legs[2].xy = self.get_ground_xy(2)
self.CoM = np.array([0,0])
self.orientation = 0.0
_, self.CoM, _q = self.is_stable()
def get_foot(self, leg_index, (da,db,dp)):
Leg = self.legs[leg_index].get_copy()
Leg.alpha += da
Leg.beta += db
Leg.phi += dp
return np.array([self.R * cos(Leg.aa) + Leg.get_foot()[0] * cos(Leg.aa+Leg.phi),
self.R * sin(Leg.aa) + Leg.get_foot()[0] * sin(Leg.aa+Leg.phi),
Leg.get_foot()[1]])
def get_feet(self):
"""returns a list of foot coordinates in its own reference system
"""
return [ np.array([ self.R * cos(Leg.aa) + Leg.get_foot()[0] * cos(Leg.aa+Leg.phi),
self.R * sin(Leg.aa) + Leg.get_foot()[0] * sin(Leg.aa+Leg.phi),
Leg.get_foot()[1] ]) for Leg in self.legs]
def get_ground_xy(self,leg_index):
"""returns xy coordinates for the foot having given foot_index
in the global frame of references
"""
Matrix = np.array([[0, 1],[-1, 0]])
#print "leg:",leg_index,
#raised = self.raised_leg
leg1 = (leg_index+3)%4
leg2 = (leg_index+1)%4
#print "Legs:", leg1,leg2
feet = self.get_feet()
v1 = feet[leg2]-feet[leg1]
e1 = v1 / np.linalg.norm(v1)
#e2 = Matrix.dot(e1)
#v2 = feet[leg_index]-feet[leg1]
d3 = self.feet_distances[(leg1,leg2)]
d1 = self.feet_distances[(leg1,leg_index)]
d2 = self.feet_distances[(leg_index,leg2)]
q = (d3**2+d1**2-d2**2)/d3**2/2
h = sqrt(d1**2-(q*d3)**2)
#print "q,h:",q,h
#print "v1,v2:",v1,v2
#print q*v1+h*e2, "=", feet[leg_index]
#print "C:",C
v11 = self.legs[leg2].xy - self.legs[leg1].xy
e11 = v11/np.linalg.norm(v11)
e22 = Matrix.dot(e11)
#print "e11,e22:",e11,e22
res = self.legs[leg1].xy + q*v11 + h*e22
#print "xy =",res
return res
def get_dist_between_feet(self):
distances = {}
feet = self.get_feet()
for i in range(len(feet)):
for j in range(len(feet)-1):
d = np.linalg.norm(feet[i]-feet[j])
distances[(i,j)] = d
distances[(j,i)] = d
return distances
def find_phi(self,Action):
i = self.raised_leg
xyz1 = self.get_foot((i+1)%4,Action[1])
(da,db,_) = Action[2]
#print "\nLeg2: da,db:",da,db
p = self.legs[(i+2)%4].phi
A = -phi_max - p
B = phi_max - p
#print "A,B:",A,B
def fun(dp):
xyz2 = self.get_foot((i+2)%4,(da,db,dp))
return np.linalg.norm(xyz2-xyz1) - self.feet_distances[((i+1)%4,(i+2)%4)]
if fun(A)*fun(B) > 0:
#print "Leg2: Phi cannot be found. Fa=%.1g, Fb=%.1g" % (fun(A),fun(B))
return None
else:
return secant(A,B,fun(A),fun(B),fun)
def take_action(self, action):
"""changes state taking the given action
Assumed that action is legal
"""
self.actions.append((self.raised_leg,action[0],action[1],action[2][:2],action[3][2]))
old_raised = self.raised_leg
#print action
for k in range(4):
self.legs[(old_raised+k)%4].move_leg(action[k])
if old_raised in self.get_raised_leg():
# check CoM
stable,CoM,qs = self.is_stable()
if stable:
# no changes
self.legs[self.raised_leg].xy = self.get_ground_xy(self.raised_leg)
self.CoM = CoM
else:
# raised goes to the opposite leg
self.raised_leg = (old_raised + 2) % 4
#print "Opposite leg is raised:", self.raised_leg
self.legs[old_raised].xy = self.get_ground_xy(old_raised)
stable,CoM,qs1 = self.is_stable()
if not stable: print "qs:%s, qs1:%s" % (str(qs), str(qs1))
self.CoM = CoM
else:
# raised goes to the next leg
self.raised_leg = (old_raised + 1) % 4
self.legs[old_raised].xy = self.get_ground_xy(old_raised)
stable,CoM1,qs1 = self.is_stable()
if not stable:
# the opposit leg is raised
self.raised_leg = (old_raised + 3) % 4
#self.legs[i].xy = self.get_ground_xy(i)
stable,CoM2,qs = self.is_stable()
if not stable: print "q1:%s, q2:%s" % (qs1,qs)
self.CoM = CoM2
else:
# both could be stable
self.raised_leg = (old_raised + 3) % 4
#self.legs[i].xy = self.get_ground_xy(i)
stable,CoM2,_ = self.is_stable()
if not stable:
# the first option is true
self.raised_leg = (old_raised + 1) % 4
#self.legs[i].xy = self.get_ground_xy(i)
self.CoM = CoM1
stable = True
else:
# both stable
if np.linalg.norm(CoM1 - self.CoM) < np.linalg.norm(CoM2 - self.CoM):
self.raised_leg = (old_raised + 1) % 4
#self.legs[i].xy = self.get_ground_xy(i)
self.CoM = CoM1
else:
self.raised_leg = (old_raised + 3) % 4
#self.legs[i].xy = self.get_ground_xy(i)
self.CoM = CoM2
self.update_orientation()
self.feet_distances = self.get_dist_between_feet()
if not stable:
print "Fell"
return stable
def get_move(self, Action):
i = self.raised_leg
Res = {0:Action[0], 1:Action[1], 2:Action[2]+(0,)}
dp = self.find_phi(Res)
if dp == None: return self.get_random_action()
Res[2] = Action[2]+(dp,)
foot3 = find_foot3(self,i,Action[3],Res)
if foot3 == None:
return self.get_random_action()
a_b = find_alpha_beta(self,i,foot3)
if a_b == None:
return self.get_random_action()
alpha, beta = a_b
leg = self.legs[(i+3)%4]
da = alpha - leg.alpha
db = beta - leg.beta
if leg.alpha_is_legal(da) and leg.beta_is_legal(db):
Res[3] = (da,db,Action[3])
else:
return self.get_random_action()
return Res
def get_random_action(self):
Action = {}
i = self.raised_leg
Action[0] = get_random_action(self.legs[i],big_angles)
N = 0
while True:
if N > MAX_ATTEMPTS:
#print "Cannot move any standing leg at the current state"
Action[1] = (0,0,0)
Action[2] = (0,0,0)
Action[3] = (0,0,0)
return Action
Action[1] = get_random_action(self.legs[(i+1)%4],small_angles)
Action[2] = get_random_action(self.legs[(i+2)%4],small_angles)
# find phi
dp = self.find_phi(Action)
if not self.legs[(i+2)%4].phi_is_legal(dp):
#print "dPhi =",dp
#print "Phi found is illegal"
N += 1
continue
da,db,_ = Action[2]
Action[2] = (da,db,dp)
attempts = 5
while attempts > 0:
dp3 = choose_randomly(big_angles)
foot3 = find_foot3(self, i, dp3, Action)
if foot3 != None: break
else: attempts -= 1
else: # no solution found
#print "This random action is illegal\n"
N += 1
continue
# find alpha and beta
a_b = find_alpha_beta(self,i,foot3)
if a_b == None:
N += 1
continue
else:
alpha, beta = a_b
leg = self.legs[(i+3)%4]
da = alpha - leg.alpha
db = beta - leg.beta
if leg.alpha_is_legal(da) and leg.beta_is_legal(db):
Action[3] = (da,db,dp3)
break
else:
#print "legal da or db cannot be found\nda,da:",da,db
#print "leg3: a, b, phi:", leg.alpha, leg.beta, leg.phi
N += 1
continue
return Action
def get_raised_leg(self):
feet = self.get_feet()
self.feet = feet
v1 = feet[-1]-feet[0]
v2 = feet[1]-feet[0]
v3 = feet[2]-feet[0]
dot = np.dot(v3, np.cross(v1,v2) )
if dot == 0:
#print "all legs touch the surface\n"
raise
return []
elif dot > 0:
#print "1st and 3rd legs can be raised\n"
return [1, 3]
else:
#print "0th and 2nd legs can be raised\n"
return [0, 2]
def is_stable(self):
"""returns tuple. First element is True or Flase if robot can stand on its tree legs respectively
the second element is a projection of centre of mass onto the plane of three feet
the third is a tuple of three q - a load factor on each leg
"""
raised = self.raised_leg
feet = self.get_feet()
f1,f2,f3 = tuple([ feet[i] for i in range(4) if i != raised])
v1 = f1-f2
v2 = f3-f2
ez1 = np.cross(v2,v1)
ez1 = ez1 / np.linalg.norm(ez1)
#print "sin:",ez1
X0,Y0 = (ez1 * np.dot(ez1,f1))[:2]
#print "X0,Y0",X0,Y0
X1,Y1 = f1[:2]
X2,Y2 = f2[:2]
X3,Y3 = f3[:2]
#print "Feet:", f1[:2],f2[:2],f3[:2]
TX0 = (X0-X3)/(X1-X3)
TX2 = (X2-X3)/(X1-X3)
q2 = ( TX0 - (Y0-Y3)/(Y1-Y3) )/( TX2 - (Y2-Y3)/(Y1-Y3) )
q1 = TX0 - TX2 * q2
q3 = 1 - (q1 + q2)
xy = [ self.legs[i].xy for i in range(4) if raised != i]
CoM = xy[0]*q1+xy[1]*q2+xy[2]*q3
if q1>0 and q2>0 and q3>0: return (True, CoM, (q1,q2,q3))
else: return (False,CoM,(q1,q2,q3))
def update_orientation(self):
#print "Raised leg:", self.raised_leg
#print "CoM:",self.CoM
if self.raised_leg != 0:
f0 = self.get_foot(0,(0,0,0))
niu = atan(f0[1]/f0[0])
#print "niu:",niu,self.legs[0].xy
self.orientation = atan((self.legs[0].xy[1]-self.CoM[1])/(self.legs[0].xy[0]-self.CoM[0])) - niu
else:
f2 = self.get_foot(2,(0,0,0))
niu = atan(f2[1]/f2[0])
#print "niu3:", niu,self.legs[2].xy
self.orientation = atan((self.legs[2].xy[1]-self.CoM[1])/(self.legs[2].xy[0]-self.CoM[0])) - niu
#print "orientation:",self.orientation
#if abs(self.orientation)>1:
# raise
def measure_output(self,mode):
if mode == "forward":
raised = self.raised_leg
opposite = (raised+2)%4
return np.array([sum([l.xy[0] for l in self.legs if not l.index in [raised,opposite] ])/2,
sum([l.xy[1] for l in self.legs if not l.index in [raised,opposite] ])/2])
elif mode == "right" or mode == "left":
return float(self.orientation)
else:
return None
def draw(self,plt):
ps = plt.gca().patches
while len(ps) >1: ps.pop()
circle = plt.Circle(tuple(self.CoM), radius=self.R, fc='r')
plt.gca().add_patch(circle)
raised = self.raised_leg
for i in range(4):
f = self.legs[(raised+i)%4].xy
if i == 0:
foot = plt.Circle(tuple(f), radius=self.R/5, fc='r')
else:
foot = plt.Circle(tuple(f), radius=self.R/5, fc='b')
plt.gca().add_patch(foot)
plt.draw()
sleep(0.5)
class Leg():
def __init__(self,index,attach_angle,alpha=radians(30),beta=radians(45),phi=0,L=10):
self.index = index
self.aa = attach_angle
self.alpha = alpha
self.beta = beta
self.phi = phi
self.L = L
def get_copy(self):
copy = Leg(self.index,self.aa, self.alpha, self.beta, self.phi, self.L)
return copy
def move_leg(self,(da,db,dp)):
self.alpha += da
self.beta += db
self.phi += dp
def get_foot(self):
"""returns a xz coordinate of a foot in leg's own reference system
"""
return np.array([ self.L * ( sin(self.alpha) + sin(self.beta-self.alpha) ),
self.L * ( cos(self.alpha) - cos(self.beta-self.alpha) ) ])
def alpha_is_legal(self,da):
a = self.alpha + da
return a >= pi/15 and a < pi/2
def beta_is_legal(self,db):
b = self.beta + db
return b >= pi/9 and b < 2 * self.alpha
def phi_is_legal(self,dp):
if dp == None:
return False
p = self.phi + dp
return abs(p) < phi_max
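# Illustrative driver sketch (not part of the original script): build the robot,
# sample a random legal action and apply it, as the higher-level experiments do.
#
#   robot = Dor()
#   action = robot.get_random_action()
#   robot.take_action(action)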
| apache-2.0 | 7,863,833,608,741,897,000 | 23.221519 | 99 | 0.587579 | false |
cowhi/HFO | experiments/agents/adhocvisit_backup.py | 1 | 1171 | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 9 16:36:47 2016
@author: Felipe Leno
Loads everything from adhoc.py; this class only defines parameters for the visit-based
ad hoc advising
"""
from adhoc import AdHoc
import math
class AdHocVisit(AdHoc):
#Enum for importance metrics
VISIT_IMPORTANCE, Q_IMPORTANCE = range(2)
def __init__(self, budgetAsk=1000, budgetAdvise=1000,stateImportanceMetric=VISIT_IMPORTANCE,seed=12345, port=12345, epsilon=0.1, alpha=0.1, gamma=0.9, decayRate=0.9, serverPath = "/home/leno/HFO/bin/"):
super(AdHocVisit, self).__init__(budgetAsk,budgetAdvise,stateImportanceMetric,port = port, seed=seed,serverPath = serverPath)
def midpoint(self,typeMid):
"""Calculates the midpoint"""
if typeMid == self.ADVISE:
numVisits = 25
impMid = numVisits / (numVisits + math.log(self.scalingVisits + numVisits))
return impMid
elif typeMid == self.ASK:
numVisits = 20
impMid = numVisits / (numVisits + math.log(self.scalingVisits + numVisits))
return impMid
#Error
return None | mit | 6,629,827,150,139,662,000 | 36.806452 | 206 | 0.641332 | false |
Scille/parsec-cloud | tests/core/mountpoint/test_file_operations.py | 1 | 3497 | # Parsec Cloud (https://parsec.cloud) Copyright (c) AGPLv3 2016-2021 Scille SAS
import os
import sys
import pytest
from hypothesis.stateful import RuleBasedStateMachine, initialize, rule, run_state_machine_as_test
from hypothesis import strategies as st
# Just an arbitrary value to limit the size of data hypothesis generates
# for read/write operations
BALLPARK = 10000
@pytest.mark.slow
@pytest.mark.mountpoint
@pytest.mark.skipif(sys.platform == "darwin", reason="TODO: Infinitely looping on macOS")
def test_file_operations(tmpdir, caplog, hypothesis_settings, mountpoint_service_factory):
tentative = 0
class FileOperationsStateMachine(RuleBasedStateMachine):
@initialize()
def init(self):
nonlocal tentative
tentative += 1
caplog.clear()
wpath = None
async def _bootstrap(user_fs, mountpoint_manager):
nonlocal wpath
wid = await user_fs.workspace_create("w")
wpath = await mountpoint_manager.mount_workspace(wid)
self.mountpoint_service = mountpoint_service_factory(_bootstrap)
self.oracle_file_path = str(tmpdir / f"oracle-test-{tentative}")
self.file_path = str(wpath / "bar.txt")
self.oracle_fd = os.open(self.oracle_file_path, os.O_RDWR | os.O_CREAT)
self.fd = os.open(self.file_path, os.O_RDWR | os.O_CREAT)
def teardown(self):
self.mountpoint_service.stop()
@rule(size=st.integers(min_value=0, max_value=BALLPARK))
def read(self, size):
expected_data = os.read(self.oracle_fd, size)
data = os.read(self.fd, size)
assert data == expected_data
@rule(content=st.binary(max_size=BALLPARK))
def write(self, content):
expected_ret = os.write(self.oracle_fd, content)
ret = os.write(self.fd, content)
assert ret == expected_ret
@rule(
length=st.integers(min_value=-BALLPARK, max_value=BALLPARK),
seek_type=st.one_of(st.just(os.SEEK_SET), st.just(os.SEEK_CUR), st.just(os.SEEK_END)),
)
def seek(self, length, seek_type):
if seek_type != os.SEEK_END:
length = abs(length)
try:
pos = os.lseek(self.fd, length, seek_type)
except OSError:
# Invalid length/seek_type couple
with pytest.raises(OSError):
os.lseek(self.oracle_fd, length, seek_type)
else:
expected_pos = os.lseek(self.oracle_fd, length, seek_type)
assert pos == expected_pos
@rule(length=st.integers(min_value=0, max_value=BALLPARK))
def truncate(self, length):
os.ftruncate(self.fd, length)
os.ftruncate(self.oracle_fd, length)
@rule()
def sync(self):
os.fsync(self.fd)
os.fsync(self.oracle_fd)
@rule()
def stat(self):
stat = os.fstat(self.fd)
oracle_stat = os.fstat(self.oracle_fd)
assert stat.st_size == oracle_stat.st_size
@rule()
def reopen(self):
os.close(self.fd)
self.fd = os.open(self.file_path, os.O_RDWR)
os.close(self.oracle_fd)
self.oracle_fd = os.open(self.oracle_file_path, os.O_RDWR)
run_state_machine_as_test(FileOperationsStateMachine, settings=hypothesis_settings)
| agpl-3.0 | -9,145,865,238,727,756,000 | 34.323232 | 98 | 0.594796 | false |
astroclark/BayesSpec | waveforms/waveforms2hdf5.py | 1 | 1186 | #!/usr/bin/env python
"""
waveforms2hdf5.py loops over the list of waveforms defined in this script and
dumps out an hdf5 file for the plus polarisation. The idea is to then compute
the Shannon entropy of the waveforms using Matlab's wentropy.m function.
"""
import h5py
import numpy as np
import pmns_utils
wfs='/Users/jclark/hmns_repo/results/penultimate_waveforms.txt'
waveform_list=np.loadtxt(wfs,dtype=str)
#waveform_list=['shen_135135_lessvisc','apr_135135']
h5_file=h5py.File('waveforms.hdf5','w')
h5_snr_file=h5py.File('snr.hdf5','w')
for waveform in waveform_list:
# Generate waveform instance
wf=pmns_utils.Waveform(waveform)
# Compute the time series & SNR
wf.make_wf_timeseries()
wf.compute_characteristics()
# Zoom in on signal
peak_idx=np.argmax(wf.hplus.data.data)
wf_start_idx=np.argwhere(abs(wf.hplus.data.data)>0)[0]
wf_end_idx=np.argwhere(abs(wf.hplus.data.data)>0)[-1]
wf_reduced = wf.hplus.data.data[wf_start_idx:wf_end_idx]
h5_file[waveform] = wf_reduced
h5_snr_file[waveform] = wf.snr_plus
#h5_file[waveform]=wf_reduced
#h5_file[waveform+'_snr']=wf.snr_plus
h5_file.close()
h5_snr_file.close()
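# Illustrative read-back sketch (assumed file/waveform names, not part of the
# original script): the stored time series can be loaded again with h5py, e.g.
#
#   with h5py.File('waveforms.hdf5', 'r') as f:
#       hplus = f['apr_135135'][:]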
| gpl-2.0 | -5,496,950,762,767,886,000 | 29.410256 | 78 | 0.713322 | false |
jaumemarti/l10n-spain-txerpa | l10n_es_aeat/wizard/export_to_boe.py | 1 | 7887 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2004-2011
# Pexego Sistemas Informáticos. (http://pexego.es)
# Luis Manuel Angueira Blanco (Pexego)
#
# Copyright (C) 2013
# Ignacio Ibeas - Acysos S.L. (http://acysos.com)
#    Migration to OpenERP 7.0
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
import time
from openerp.osv import orm, fields
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT
class L10nEsAeatReportExportToBoe(orm.TransientModel):
_name = "l10n.es.aeat.report.export_to_boe"
_description = "Export Report to BOE Format"
_columns = {
'name': fields.char('File name', readonly=True),
'data': fields.binary('File', readonly=True),
'state': fields.selection([('open', 'open'), # open wizard
('get', 'get')]), # get file
}
_defaults = {
'state': 'open',
}
def _formatString(self, text, length, fill=' ', align='<'):
"""
Formats the string into a fixed length ASCII (iso-8859-1) record.
Note:
            'All alphanumeric and alphabetic fields shall be presented
            left-aligned and padded with blanks on the right, in upper case,
            without special characters and without accented vowels. For
            language-specific characters the ISO-8859-1 encoding shall be
            used; thus the letter "Ñ" has the ASCII value 209 (hex D1) and
            "Ç" (capital C-cedilla) the ASCII value 199 (hex C7).'
"""
if not text:
return fill * length
# Replace accents and convert to upper
from unidecode import unidecode
text = unidecode(unicode(text))
text = text.upper()
ascii_string = text.encode('iso-8859-1')
# Cut the string if it is too long
if len(ascii_string) > length:
ascii_string = ascii_string[:length]
# Format the string
if align == '<':
ascii_string = ascii_string.ljust(length, fill)
elif align == '>':
ascii_string = ascii_string.rjust(length, fill)
else:
            assert False, _('Wrong align option. It should be < or >')
# Sanity-check
assert len(ascii_string) == length, \
_("The formated string must match the given length")
# Return string
return ascii_string
def _formatNumber(self, number, int_length, dec_length=0,
include_sign=False):
"""Formats the number into a fixed length ASCII (iso-8859-1) record.
Note:
            'All numeric fields shall be presented right-aligned and padded
            with zeros on the left, unsigned and unpacked.'
(http://www.boe.es/boe/dias/2008/10/23/pdfs/A42154-42190.pdf)
"""
# Separate the number parts (-55.23 => int_part=55, dec_part=0.23,
# sign='N')
if number == '':
number = 0.0
number = float(number)
sign = number >= 0 and ' ' or 'N'
number = abs(number)
int_part = int(number)
# Format the string
ascii_string = ''
if include_sign:
ascii_string += sign
if dec_length > 0:
ascii_string += '%0*.*f' % (int_length + dec_length + 1,
dec_length, number)
ascii_string = ascii_string.replace('.', '')
elif int_length > 0:
ascii_string += '%.*d' % (int_length, int_part)
# Sanity-check
assert len(ascii_string) == (include_sign and 1 or 0) + int_length + \
dec_length, _("The formated string must match the given length")
# Return the string
return ascii_string
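    # Illustrative results of the two formatters above (the values are chosen
    # here as examples, not taken from any AEAT model):
    #   _formatString('Año 1', 8)                      -> 'ANO 1   '
    #   _formatNumber(-55.23, 5, 2, include_sign=True) -> 'N0005523'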
def _formatBoolean(self, value, yes='X', no=' '):
"""
Formats a boolean value into a fixed length ASCII (iso-8859-1) record.
"""
return value and yes or no
def _get_formatted_declaration_record(self, cr, uid, report, context=None):
return ''
def _get_formatted_main_record(self, cr, uid, report, context=None):
return ''
def _get_formatted_other_records(self, cr, uid, report, context=None):
return ''
def _do_global_checks(self, report, contents, context=None):
return True
def action_get_file(self, cr, uid, ids, context=None):
"""Action that exports the data into a BOE formatted text file.
@return: Action dictionary for showing exported file.
"""
if not context.get('active_id') or not context.get('active_model'):
return False
report = self.pool[context['active_model']].browse(
cr, uid, context['active_id'], context=context)
contents = ''
# Add header record
contents += self._get_formatted_declaration_record(cr, uid, report,
context=context)
# Add main record
contents += self._get_formatted_main_record(cr, uid, report,
context=context)
# Adds other fields
contents += self._get_formatted_other_records(cr, uid, report,
context=context)
# Generate the file and save as attachment
res = base64.encodestring(contents)
file_name = _("%s_report_%s.txt") % (
report.number, time.strftime(_(DEFAULT_SERVER_DATE_FORMAT)))
# Delete old files
attachment_obj = self.pool['ir.attachment']
attachment_ids = attachment_obj.search(
cr, uid, [('name', '=', file_name),
('res_model', '=', report._model._name)],
context=context)
if attachment_ids:
attachment_obj.unlink(cr, uid, attachment_ids, context=context)
attachment_obj.create(cr, uid, {"name": file_name,
"datas": res,
"datas_fname": file_name,
"res_model": report._model._name,
"res_id": report.id,
}, context=context)
self.write(cr, uid, ids,
{'state': 'get', 'data': res, 'name': file_name},
context=context)
# Force view to be the parent one
data_obj = self.pool['ir.model.data']
result = data_obj._get_id(cr, uid, 'l10n_es_aeat',
'wizard_aeat_export')
view_id = data_obj.browse(cr, uid, result, context=context).res_id
        # TODO: allow inheriting the parent view if desired
return {
'type': 'ir.actions.act_window',
'res_model': self._name,
'view_mode': 'form',
'view_type': 'form',
'view_id': [view_id],
'res_id': ids[0],
'target': 'new',
}
| agpl-3.0 | -6,541,515,499,416,233,000 | 40.829787 | 79 | 0.544888 | false |
ekholabs/ekholabs-es | service/ElasticsearchService.py | 1 | 1093 | from ElasticsearchConnection import Resource
from uuid import uuid4
class ElasticsearchIndex:
@staticmethod
def create(index_name, settings):
es = Resource().connect()
index = es.indices.create(index=index_name, ignore=400, body=settings)
return index
@staticmethod
def delete_index(index_name):
es = Resource().connect()
index = es.indices.delete(index=index_name, ignore=[400, 404])
return index
@staticmethod
def index(index_name, document_type, payload):
es = Resource().connect()
index = es.index(index=index_name, doc_type=document_type, id=uuid4(), body=payload)
return index
@staticmethod
def query(index_name, query_criteria):
es = Resource().connect()
index = es.search(index=index_name, body=query_criteria)
return index
@staticmethod
def delete_document(index_name, document_type, document_id):
es = Resource().connect()
index = es.delete(index=index_name, doc_type=document_type, id=document_id)
return index
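# Illustrative usage sketch (index and document names are made-up examples,
# not part of this service):
#
#   ElasticsearchIndex.create('articles', {'settings': {'number_of_shards': 1}})
#   ElasticsearchIndex.index('articles', 'article', {'title': 'Hello, ES'})
#   hits = ElasticsearchIndex.query('articles', {'query': {'match_all': {}}})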
| mit | -8,110,233,009,886,834,000 | 27.025641 | 92 | 0.651418 | false |
nakagami/reportlab | demos/colors/colortest.py | 1 | 2772 | #Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
__version__='''$Id: colortest.py 3959 2012-09-27 14:39:39Z robin $'''
import reportlab.pdfgen.canvas
from reportlab.lib import colors
from reportlab.lib.units import inch
def run():
c = reportlab.pdfgen.canvas.Canvas('colortest.pdf')
#do a test of CMYK interspersed with RGB
#first do RGB values
framePage(c, 'Color Demo - RGB Space and CMYK spaces interspersed' )
y = 700
c.setFillColorRGB(0,0,0)
c.drawString(100, y, 'cyan')
c.setFillColorCMYK(1,0,0,0)
c.rect(200, y, 300, 30, fill=1)
y = y - 40
c.setFillColorRGB(0,0,0)
c.drawString(100, y, 'red')
c.setFillColorRGB(1,0,0)
c.rect(200, y, 300, 30, fill=1)
y = y - 40
c.setFillColorRGB(0,0,0)
c.drawString(100, y, 'magenta')
c.setFillColorCMYK(0,1,0,0)
c.rect(200, y, 300, 30, fill=1)
y = y - 40
c.setFillColorRGB(0,0,0)
c.drawString(100, y, 'green')
c.setFillColorRGB(0,1,0)
c.rect(200, y, 300, 30, fill=1)
y = y - 40
c.setFillColorRGB(0,0,0)
c.drawString(100, y, 'yellow')
c.setFillColorCMYK(0,0,1,0)
c.rect(200, y, 300, 30, fill=1)
y = y - 40
c.setFillColorRGB(0,0,0)
c.drawString(100, y, 'blue')
c.setFillColorRGB(0,0,1)
c.rect(200, y, 300, 30, fill=1)
y = y - 40
c.setFillColorRGB(0,0,0)
c.drawString(100, y, 'black')
c.setFillColorCMYK(0,0,0,1)
c.rect(200, y, 300, 30, fill=1)
y = y - 40
c.showPage()
#do all named colors
framePage(c, 'Color Demo - RGB Space - page %d' % c.getPageNumber())
all_colors = reportlab.lib.colors.getAllNamedColors().items()
all_colors.sort() # alpha order by name
c.setFont('Times-Roman', 12)
c.drawString(72,730, 'This shows all the named colors in the HTML standard.')
y = 700
for (name, color) in all_colors:
c.setFillColor(colors.black)
c.drawString(100, y, name)
c.setFillColor(color)
c.rect(200, y-10, 300, 30, fill=1)
y = y - 40
if y < 100:
c.showPage()
framePage(c, 'Color Demo - RGB Space - page %d' % c.getPageNumber())
y = 700
c.save()
def framePage(canvas, title):
canvas.setFont('Times-BoldItalic',20)
canvas.drawString(inch, 10.5 * inch, title)
canvas.setFont('Times-Roman',10)
canvas.drawCentredString(4.135 * inch, 0.75 * inch,
'Page %d' % canvas.getPageNumber())
#draw a border
canvas.setStrokeColorRGB(1,0,0)
canvas.setLineWidth(5)
canvas.line(0.8 * inch, inch, 0.8 * inch, 10.75 * inch)
#reset carefully afterwards
canvas.setLineWidth(1)
canvas.setStrokeColorRGB(0,0,0)
if __name__ == '__main__':
run()
| bsd-3-clause | -1,089,536,343,542,370,600 | 25.4 | 81 | 0.603896 | false |
nmoutschen/linux-utils | debian/checkup.py | 1 | 2457 | #!/usr/bin/env python3
"""
This script checks up the general state of the system (updates required, CPU
usage, etc.) and returns a string that can be used in an MOTD file.
You can use this script as part of a cron job to update the MOTD file in order
to display relevant system information to sysadmins upon login.
"""
import apt
import psutil
import socket
def get_pkg_count():
"""
Returns the number of packages that needs to be updated
"""
cache = apt.Cache()
cache.update()
cache.open()
cache.upgrade()
return len(cache.get_changes())
def get_cpu_usage():
"""
Returns the CPU usage as a percentage
"""
return psutil.cpu_percent(interval=1)
def get_mem_usage():
"""
Returns the amount of used RAM as a percentage of total available RAM
"""
return psutil.virtual_memory().percent
def get_disk_usage():
"""
    Returns the total used disk space across all partitions as a percentage
    of total capacity
"""
total = 0
used = 0
for part in psutil.disk_partitions():
part_usage = psutil.disk_usage(part.mountpoint)
total += part_usage.total
used += part_usage.used
return used/total*100
def get_open_servs():
"""
Returns a list of open services currently listening
"""
def get_service(conn):
if conn.type == socket.SOCK_STREAM:
type_ = 'tcp'
else:
type_ = 'udp'
return socket.getservbyport(conn.laddr[1], type_)
services = [get_service(conn) for conn in psutil.net_connections()
if conn.status == psutil.CONN_LISTEN]
services = list(set(services))
services.sort()
return services
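# Illustrative cron usage sketch (the paths are assumptions, not part of this
# script): the output can be redirected into the MOTD file, e.g.
#
#   */30 * * * * root /usr/local/bin/checkup.py > /etc/motd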
if __name__ == "__main__":
pkg_count = get_pkg_count()
cpu_usage = get_cpu_usage()
mem_usage = get_mem_usage()
disk_usage = get_disk_usage()
open_servs = get_open_servs()
if pkg_count == 1:
print("\033[91mThere is currently 1 update available.\033[0m\n")
elif pkg_count > 1:
print("\033[91mThere are currently {} updates available.\033[0m\n".format(pkg_count))
print("\033[94mCPU Usage\033[0m: {:>5.1f}%".format(cpu_usage))
print("\033[94mMemory Usage\033[0m: {:>5.1f}%".format(mem_usage))
print("\033[94mDisk Usage\033[0m: {:>5.1f}%".format(disk_usage))
print()
print("\033[94mOpen Services\033[0m: {}".format(", ".join(open_servs)))
| mit | -1,833,325,935,985,679,600 | 23.57 | 93 | 0.625967 | false |
vpp-dev/vpp | test/test_vxlan_gpe.py | 1 | 10187 | #!/usr/bin/env python3
import socket
from util import ip4n_range, ip4_range
import unittest
from framework import VppTestCase, VppTestRunner, running_extended_tests
from template_bd import BridgeDomain
from scapy.layers.l2 import Ether
from scapy.packet import Raw
from scapy.layers.inet import IP, UDP
from scapy.layers.vxlan import VXLAN
from scapy.utils import atol
from vpp_ip_route import VppIpRoute, VppRoutePath
from vpp_ip import INVALID_INDEX
@unittest.skipUnless(running_extended_tests, "part of extended tests")
class TestVxlanGpe(BridgeDomain, VppTestCase):
""" VXLAN-GPE Test Case """
def __init__(self, *args):
BridgeDomain.__init__(self)
VppTestCase.__init__(self, *args)
def encapsulate(self, pkt, vni):
"""
Encapsulate the original payload frame by adding VXLAN-GPE header
with its UDP, IP and Ethernet fields
"""
return (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
IP(src=self.pg0.remote_ip4, dst=self.pg0.local_ip4) /
UDP(sport=self.dport, dport=self.dport, chksum=0) /
VXLAN(vni=vni, flags=self.flags) /
pkt)
def ip_range(self, start, end):
""" range of remote ip's """
return ip4_range(self.pg0.remote_ip4, start, end)
def encap_mcast(self, pkt, src_ip, src_mac, vni):
"""
Encapsulate the original payload frame by adding VXLAN-GPE header
with its UDP, IP and Ethernet fields
"""
return (Ether(src=src_mac, dst=self.mcast_mac) /
IP(src=src_ip, dst=self.mcast_ip4) /
UDP(sport=self.dport, dport=self.dport, chksum=0) /
VXLAN(vni=vni, flags=self.flags) /
pkt)
def decapsulate(self, pkt):
"""
Decapsulate the original payload frame by removing VXLAN-GPE header
"""
# check if is set I and P flag
self.assertEqual(pkt[VXLAN].flags, 0x0c)
return pkt[VXLAN].payload
# Method for checking VXLAN-GPE encapsulation.
#
def check_encapsulation(self, pkt, vni, local_only=False, mcast_pkt=False):
# Verify source MAC is VPP_MAC and destination MAC is MY_MAC resolved
# by VPP using ARP.
self.assertEqual(pkt[Ether].src, self.pg0.local_mac)
if not local_only:
if not mcast_pkt:
self.assertEqual(pkt[Ether].dst, self.pg0.remote_mac)
else:
self.assertEqual(pkt[Ether].dst, type(self).mcast_mac)
# Verify VXLAN-GPE tunnel src IP is VPP_IP and dst IP is MY_IP.
self.assertEqual(pkt[IP].src, self.pg0.local_ip4)
if not local_only:
if not mcast_pkt:
self.assertEqual(pkt[IP].dst, self.pg0.remote_ip4)
else:
self.assertEqual(pkt[IP].dst, type(self).mcast_ip4)
# Verify UDP destination port is VXLAN-GPE 4790, source UDP port
# could be arbitrary.
self.assertEqual(pkt[UDP].dport, type(self).dport)
# Verify VNI
self.assertEqual(pkt[VXLAN].vni, vni)
@classmethod
def create_vxlan_gpe_flood_test_bd(cls, vni, n_ucast_tunnels):
# Create 10 ucast vxlan tunnels under bd
ip_range_start = 10
ip_range_end = ip_range_start + n_ucast_tunnels
next_hop_address = cls.pg0.remote_ip4
for dest_ip4 in ip4_range(next_hop_address, ip_range_start,
ip_range_end):
# add host route so dest_ip4n will not be resolved
rip = VppIpRoute(cls, dest_ip4, 32,
[VppRoutePath(next_hop_address,
INVALID_INDEX)],
register=False)
rip.add_vpp_config()
dest_ip4n = socket.inet_pton(socket.AF_INET, dest_ip4)
r = cls.vapi.vxlan_gpe_add_del_tunnel(
src_addr=cls.pg0.local_ip4n,
dst_addr=dest_ip4n,
vni=vni)
cls.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=r.sw_if_index,
bd_id=vni)
@classmethod
def add_del_shared_mcast_dst_load(cls, is_add):
"""
add or del tunnels sharing the same mcast dst
to test vxlan_gpe ref_count mechanism
"""
n_shared_dst_tunnels = 20
vni_start = 1000
vni_end = vni_start + n_shared_dst_tunnels
for vni in range(vni_start, vni_end):
r = cls.vapi.vxlan_gpe_add_del_tunnel(
src_addr=cls.pg0.local_ip4n,
dst_addr=cls.mcast_ip4n,
mcast_sw_if_index=1,
vni=vni,
is_add=is_add)
if r.sw_if_index == 0xffffffff:
raise ValueError("bad sw_if_index: ~0")
@classmethod
def add_shared_mcast_dst_load(cls):
cls.add_del_shared_mcast_dst_load(is_add=1)
@classmethod
def del_shared_mcast_dst_load(cls):
cls.add_del_shared_mcast_dst_load(is_add=0)
@classmethod
def add_del_mcast_tunnels_load(cls, is_add):
"""
add or del tunnels to test vxlan_gpe stability
"""
n_distinct_dst_tunnels = 20
ip_range_start = 10
ip_range_end = ip_range_start + n_distinct_dst_tunnels
for dest_ip4n in ip4n_range(cls.mcast_ip4n, ip_range_start,
ip_range_end):
vni = bytearray(dest_ip4n)[3]
cls.vapi.vxlan_gpe_add_del_tunnel(
src_addr=cls.pg0.local_ip4n,
dst_addr=dest_ip4n,
mcast_sw_if_index=1,
vni=vni,
is_add=is_add)
@classmethod
def add_mcast_tunnels_load(cls):
cls.add_del_mcast_tunnels_load(is_add=1)
@classmethod
def del_mcast_tunnels_load(cls):
cls.add_del_mcast_tunnels_load(is_add=0)
# Class method to start the VXLAN-GPE test case.
# Overrides setUpClass method in VppTestCase class.
# Python try..except statement is used to ensure that the tear down of
# the class will be executed even if exception is raised.
# @param cls The class pointer.
@classmethod
def setUpClass(cls):
super(TestVxlanGpe, cls).setUpClass()
try:
cls.dport = 4790
cls.flags = 0x0c
# Create 2 pg interfaces.
cls.create_pg_interfaces(range(4))
for pg in cls.pg_interfaces:
pg.admin_up()
# Configure IPv4 addresses on VPP pg0.
cls.pg0.config_ip4()
# Resolve MAC address for VPP's IP address on pg0.
cls.pg0.resolve_arp()
# Our Multicast address
cls.mcast_ip4 = '239.1.1.1'
cls.mcast_ip4n = socket.inet_pton(socket.AF_INET, cls.mcast_ip4)
iplong = atol(cls.mcast_ip4)
cls.mcast_mac = "01:00:5e:%02x:%02x:%02x" % (
(iplong >> 16) & 0x7F, (iplong >> 8) & 0xFF, iplong & 0xFF)
# Create VXLAN-GPE VTEP on VPP pg0, and put vxlan_gpe_tunnel0
# and pg1 into BD.
cls.single_tunnel_bd = 11
r = cls.vapi.vxlan_gpe_add_del_tunnel(
src_addr=cls.pg0.local_ip4n,
dst_addr=cls.pg0.remote_ip4n,
vni=cls.single_tunnel_bd)
cls.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=r.sw_if_index,
bd_id=cls.single_tunnel_bd)
cls.vapi.sw_interface_set_l2_bridge(
rx_sw_if_index=cls.pg1.sw_if_index, bd_id=cls.single_tunnel_bd)
# Setup vni 2 to test multicast flooding
cls.n_ucast_tunnels = 10
cls.mcast_flood_bd = 12
cls.create_vxlan_gpe_flood_test_bd(cls.mcast_flood_bd,
cls.n_ucast_tunnels)
r = cls.vapi.vxlan_gpe_add_del_tunnel(
src_addr=cls.pg0.local_ip4n,
dst_addr=cls.mcast_ip4n,
mcast_sw_if_index=1,
vni=cls.mcast_flood_bd)
cls.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=r.sw_if_index,
bd_id=cls.mcast_flood_bd)
cls.vapi.sw_interface_set_l2_bridge(
rx_sw_if_index=cls.pg2.sw_if_index, bd_id=cls.mcast_flood_bd)
# Add and delete mcast tunnels to check stability
cls.add_shared_mcast_dst_load()
cls.add_mcast_tunnels_load()
cls.del_shared_mcast_dst_load()
cls.del_mcast_tunnels_load()
# Setup vni 3 to test unicast flooding
cls.ucast_flood_bd = 13
cls.create_vxlan_gpe_flood_test_bd(cls.ucast_flood_bd,
cls.n_ucast_tunnels)
cls.vapi.sw_interface_set_l2_bridge(
rx_sw_if_index=cls.pg3.sw_if_index, bd_id=cls.ucast_flood_bd)
except Exception:
super(TestVxlanGpe, cls).tearDownClass()
raise
@classmethod
def tearDownClass(cls):
super(TestVxlanGpe, cls).tearDownClass()
@unittest.skip("test disabled for vxlan-gpe")
def test_mcast_flood(self):
""" inherited from BridgeDomain """
pass
@unittest.skip("test disabled for vxlan-gpe")
def test_mcast_rcv(self):
""" inherited from BridgeDomain """
pass
# Method to define VPP actions before tear down of the test case.
# Overrides tearDown method in VppTestCase class.
# @param self The object pointer.
def tearDown(self):
super(TestVxlanGpe, self).tearDown()
def show_commands_at_teardown(self):
self.logger.info(self.vapi.cli("show bridge-domain 11 detail"))
self.logger.info(self.vapi.cli("show bridge-domain 12 detail"))
self.logger.info(self.vapi.cli("show bridge-domain 13 detail"))
self.logger.info(self.vapi.cli("show int"))
self.logger.info(self.vapi.cli("show vxlan-gpe"))
self.logger.info(self.vapi.cli("show trace"))
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)
| apache-2.0 | -3,846,960,124,925,577,000 | 37.587121 | 79 | 0.57112 | false |
Southpaw-TACTIC/Team | src/python/Lib/site-packages/pythonwin/pywin/idle/AutoExpand.py | 1 | 2763 | import string
import re
###$ event <<expand-word>>
###$ win <Alt-slash>
###$ unix <Alt-slash>
class AutoExpand:
keydefs = {
'<<expand-word>>': ['<Alt-slash>'],
}
unix_keydefs = {
'<<expand-word>>': ['<Meta-slash>'],
}
menudefs = [
('edit', [
('E_xpand word', '<<expand-word>>'),
]),
]
wordchars = string.letters + string.digits + "_"
def __init__(self, editwin):
self.text = editwin.text
self.text.wordlist = None # XXX what is this?
self.state = None
def expand_word_event(self, event):
curinsert = self.text.index("insert")
curline = self.text.get("insert linestart", "insert lineend")
if not self.state:
words = self.getwords()
index = 0
else:
words, index, insert, line = self.state
if insert != curinsert or line != curline:
words = self.getwords()
index = 0
if not words:
self.text.bell()
return "break"
word = self.getprevword()
self.text.delete("insert - %d chars" % len(word), "insert")
newword = words[index]
index = (index + 1) % len(words)
if index == 0:
self.text.bell() # Warn we cycled around
self.text.insert("insert", newword)
curinsert = self.text.index("insert")
curline = self.text.get("insert linestart", "insert lineend")
self.state = words, index, curinsert, curline
return "break"
def getwords(self):
word = self.getprevword()
if not word:
return []
before = self.text.get("1.0", "insert wordstart")
wbefore = re.findall(r"\b" + word + r"\w+\b", before)
del before
after = self.text.get("insert wordend", "end")
wafter = re.findall(r"\b" + word + r"\w+\b", after)
del after
if not wbefore and not wafter:
return []
words = []
dict = {}
# search backwards through words before
wbefore.reverse()
for w in wbefore:
if dict.get(w):
continue
words.append(w)
dict[w] = w
# search onwards through words after
for w in wafter:
if dict.get(w):
continue
words.append(w)
dict[w] = w
words.append(word)
return words
def getprevword(self):
line = self.text.get("insert linestart", "insert")
i = len(line)
while i > 0 and line[i-1] in self.wordchars:
i = i-1
return line[i:]
| epl-1.0 | 187,856,678,564,993,900 | 28.032609 | 69 | 0.488961 | false |
Remi-C/LOD_ordering_for_patches_of_points | script/test_octree_LOD.py | 1 | 7481 | # -*- coding: utf-8 -*-
"""
Created on Tue Dec 2 22:08:22 2014
@author: remi
"""
#trying to order points by octree with python
import numpy as np
from numpy import random, sqrt
from sklearn import preprocessing
import matplotlib.pyplot as plt
#defining a dummy entry :a random 3D pointcloud
pointcloud = random.rand(16*16,2);
index = np.arange(1,16*16+1)
#parameters
tot_level = 3 ;
#centering data so that the leftmost point is at x=0 and the bottom-most point is at y=0
pointcloud[:,0] = pointcloud[:,0]- np.amin(pointcloud[:,0]);
pointcloud[:,1] = pointcloud[:,1]- np.amin(pointcloud[:,1]);
#finding the max scaling, in X, Y or Z
max_r = max(np.amax(pointcloud[:,0])-np.amin(pointcloud[:,0]), np.amax(pointcloud[:,1])-np.amin(pointcloud[:,1]))
#dividing so the max extent is 1. Now the point cloud lies within [0,1] x [0,1]
pointcloud = pointcloud/ max_r ;
#we have to trick a little, so that for level 3 for instance, all values are between 0 and 7 included, but not reaching 8.
pointcloud_int = np.trunc(abs((pointcloud*pow(2,tot_level)-0.0001))).astype(int)
plt.plot(pointcloud[:,0],pointcloud[:,1], 'ro') ;
plt.plot(pointcloud_int[:,0],pointcloud_int[:,1], 'ro') ;
plt.axis([-1, 8, -1, 8]) ;
plt.show() ;
plt.close('all');
result_point = pointcloud_int[rec_ar[:,0]]
plt.plot(result_point[:,0],result_point[:,1], 'ro') ;
rec_ar = np.array(rec)
piv_ar = np.array(piv)
plt.plot(piv_ar[:,0], piv_ar[:,1], 'ro') ;
np.binary_repr(1)
def bin(s):
return str(s) if s<=1 else bin(s>>1) + str(s&1)
def testBit(int_type, offset):
mask = 1 << offset
return( (int_type & mask)>0 )
testBit(8,1)
pointcloud_bin = np.binary_repr(pointcloud_int)
pointcloud_int >> (tot_level-1) ;
#np.binary_repr(8)
( ((pointcloud_int >> 1 ) << 1) ) >> (tot_level-1) ;
testBit(pointcloud_int[:,1],3)
#cut the input point cloud into 8 based on l bit value starting form right to left
point_cloud_0_0_mask = np.logical_and((testBit(pointcloud_int[:,0],2)==0) , (testBit(pointcloud_int[:,1],2)==0) ) ;
pivot = np.array([pow(2,tot_level-1),pow(2,tot_level-1)])
pointcloud_centered = pointcloud_int - pivot
#coordinate to work :
toto = np.array([1,2,3])
testBit(toto,1)
(pointcloud_int >>1 )>>5
pow(2,4)
1<<4
#
# level 0
result = list() ;
pointcloud_int ;
index
pivot
cur_lev = 0
rec = [];
#find the 0 level point
min_point = np.argmin(np.sum(np.abs(pointcloud_int - pivot ),axis=1))
result.append(list((index[min_point],cur_lev)))
#compute the 4 sub parts
for b_x in list((0,1)) :
for b_y in list((0,1)) :
#looping on all 4 sub parts
print b_x, b_y
rec.append (np.logical_and(
(testBit(pointcloud_int[:,0],2)>0)==b_x
,(testBit(pointcloud_int[:,1],2)>0)==b_y
)
)
testBit(pointcloud_int[:,0],2)
print (testBit(pointcloud_int[:,0],2)>0==b_x) ;
print (testBit(pointcloud_int[:,1],2)>0==b_y) ;
rec[b_x,b_y] = np.logical_and((testBit(pointcloud_int[:,0],2)>0==b_x)
,(testBit(pointcloud_int[:,1],2)>0==b_y) )
print rec
np.binary_repr(pointcloud_int[:,0] )
#given a point cloud
#compute the closest to center
def recursive_octree_ordering(point_array,index_array, center_point, level,tot_level, result,piv):
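    # Depth-first quadtree ordering: emit the point closest to the cell centre,
    # then recurse into the four sub-cells selected by the next coordinate bit.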
#importing necessary lib
import numpy as np;
#print for debug
# print '\n\n working on level : '+str(level);
# print 'input points: \n\t',point_array ;
# print 'index_array : \n\t',index_array;
# print 'center_point : \n\t',center_point;
# print 'level : \n\t',level;
# print 'tot_level : \n\t',tot_level;
# print 'result : \n\t',result;
#stopping condition : no points:
    if len(point_array) == 0:
return;
#updatig level;
sub_part_level = level+1 ;
print 'level ',level,' , points remaining : ',len(point_array) ;
print center_point;
piv.append(center_point);
#find the closest point to pivot
min_point = np.argmin(np.sum(np.abs(point_array - center_point ),axis=1))
result.append(list((index_array[min_point],level))) ;
#removing the found point from the array of points
#np.delete(point_array, min_point, axis=0) ;
#np.delete(index_array, min_point, axis=0) ;
    #stopping if only one point remains: we won't divide further, same if we have reached max depth
if (len(point_array) ==1 )|(level >= tot_level):
return;
#compute the 4 sub parts
for b_x in list((0,1)) :
for b_y in list((0,1)) :
#looping on all 4 sub parts
print (b_x*2-1), (b_y*2-1) ;
udpate_to_pivot = np.asarray([ (b_x*2-1)*(pow(2,tot_level - level -2 ))
,(b_y*2-1)*(pow(2,tot_level - level -2 ))
]);
sub_part_center_point = center_point +udpate_to_pivot;
# we want to iterateon
# we need to update : : point_array , index_array center_point , level
#update point_array and index_array : we need to find the points that are in the subparts
#update center point, we need to add/substract to previous pivot 2^level+11
#find the points concerned :
point_in_subpart_mask = np.logical_and(
testBit(point_array[:,0],tot_level - level-1) ==b_x
, testBit(point_array[:,1],tot_level - level -1) ==b_y ) ;
sub_part_points= point_array[point_in_subpart_mask];
sub_part_index = index_array[point_in_subpart_mask];
sub_part_center_point = center_point + np.asarray([
(b_x*2-1)*(pow(2,tot_level - level -2 ))
,(b_y*2-1)*(pow(2,tot_level - level -2 ))
]);
if len(sub_part_points)>=1:
recursive_octree_ordering(sub_part_points
,sub_part_index
, sub_part_center_point
, sub_part_level
, tot_level
, result
, piv);
continue;
else:
                print 'at level ',level,'bx by:',b_x,' ',b_y,' refusing to go on, ', len(sub_part_points), ' points remaining for this'
continue;
rec = [] ;
piv = [] ;
recursive_octree_ordering(pointcloud_int,index,pivot,0,3,rec, piv );
#recursive_octree_ordering(pointcloud_int,index, np.array([2,2]),1,3,rec, piv );
piv_ar = np.array(piv)
plt.plot(piv_ar[:,0], piv_ar[:,1], 'ro') ;
plot(x=pointcloud_int[:,0].T,y=pointcloud_int[:,1].T, marker='o', color='r', ls='' )
plt.plot(pointcloud_int.T, marker='o', color='r', ls='')
plt.imsave('/')
from mpl_toolkits.mplot3d import Axes3D
plt.scatter(pointcloud[:,0], pointcloud[:,1],c='red');
plt.scatter(pointcloud_int[:,0], pointcloud_int[:,1],c='green');
plt.plot(pointcloud[:,0],pointcloud[:,1], 'ro')
plt.plot(pointcloud_int[:,0],pointcloud_int[:,1], 'ro')
plt.axis([-1, 8, -1, 8])
plt.show();
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(pointcloud_int[:,0], pointcloud_int[:,1]);
ax.scatter(pointcloud_int[:,0], pointcloud_int[:,1], pointcloud_int[:,0], zdir='z', c= 'red')
fig.show()
fig, axes = plt.subplots(1, 2, figsize=(12,3))
axes[0].scatter(pointcloud[:,0], pointcloud[:,1],c='red');
axes[1].scatter(pointcloud_int[:,0], pointcloud_int[:,1],c='green');
fig.show();
for f in list((0,1)):
(f*2-1)
import octree_ordering | lgpl-3.0 | 6,503,298,321,312,166,000 | 31.672489 | 137 | 0.586285 | false |
vgmoose/ssm | tests/unittests/test_lvm.py | 1 | 22526 | #!/usr/bin/env python
#
# (C)2011 Red Hat, Inc., Lukas Czerner <lczerner@redhat.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Unittests for the system storage manager lvm backend
import unittest
from ssmlib import main
from ssmlib import problem
from ssmlib.backends import lvm
from tests.unittests.common import *
class LvmFunctionCheck(MockSystemDataSource):
def setUp(self):
super(LvmFunctionCheck, self).setUp()
self._addDevice('/dev/sda', 11489037516)
self._addDevice('/dev/sdb', 234566451)
self._addDevice('/dev/sdc', 2684354560)
self._addDevice('/dev/sdc1', 894784853, 1)
self._addDevice('/dev/sdc2', 29826161, 2)
self._addDevice('/dev/sdc3', 1042177280, 3)
self._addDevice('/dev/sdd', 11673)
self._addDevice('/dev/sde', 1042177280)
main.SSM_DEFAULT_BACKEND = 'lvm'
def mock_run(self, cmd, *args, **kwargs):
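        # Record every issued command and fabricate pvs/vgs/lvs output from the
        # in-memory test data, so no real LVM tools are invoked.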
# Convert all parts of cmd into string
for i, item in enumerate(cmd):
if type(item) is not str:
cmd[i] = str(item)
self.run_data.append(" ".join(cmd))
output = ""
if cmd[1] == 'pvs':
for dev, data in self.dev_data.iteritems():
if 'pool_name' in data:
output += "{0}|{1}|{2}|{3}\n".format(dev, data['pool_name'],
data['dev_free'], data['dev_used'])
elif cmd[1] == 'vgs':
for pool, data in self.pool_data.iteritems():
output += "{0}|{1}|{2}|{3}|{4}\n".format(pool, data['dev_count'],
data['pool_size'], data['pool_free'], data['vol_count'])
elif cmd[1] == 'lvs':
for vol, data in self.vol_data.iteritems():
output += "{0}|{1}|{2}|{3}|{4}|{5}|{6}|{7}\n".format(data['pool_name'],
data['vol_size'], data['stripes'], data['stripesize'],
data['type'], data['dev_name'].split("/")[-1],
data['origin'], data['attr'])
if 'return_stdout' in kwargs and not kwargs['return_stdout']:
output = None
return (0, output)
def test_lvm_create(self):
default_pool = lvm.SSM_LVM_DEFAULT_POOL
# Create volume using single device from non existent default pool
self._checkCmd("ssm create", ['/dev/sda'],
"lvm lvcreate {0} -l 100%PVS -n lvol001 /dev/sda".format(default_pool))
self._cmdEq("lvm vgcreate {0} /dev/sda".format(default_pool), -2)
        # Specify backend
self._checkCmd("ssm -b lvm create", ['/dev/sda'],
"lvm lvcreate {0} -l 100%PVS -n lvol001 /dev/sda".format(default_pool))
self._cmdEq("lvm vgcreate {0} /dev/sda".format(default_pool), -2)
main.SSM_DEFAULT_BACKEND = "btrfs"
self._checkCmd("ssm --backend lvm create", ['/dev/sda'],
"lvm lvcreate {0} -l 100%PVS -n lvol001 /dev/sda".format(default_pool))
self._cmdEq("lvm vgcreate {0} /dev/sda".format(default_pool), -2)
main.SSM_DEFAULT_BACKEND = "lvm"
self._checkCmd("ssm create", ['--name myvolume', '--fstype ext4', '/dev/sda'])
self._cmdEq("mkfs.ext4 /dev/{0}/myvolume".format(default_pool))
self._cmdEq("lvm lvcreate {0} -l 100%PVS -n myvolume /dev/sda".format(default_pool), -2)
self._cmdEq("lvm vgcreate {0} /dev/sda".format(default_pool), -3)
self._checkCmd("ssm -f create", ['--fstype ext4', '/dev/sda'])
self._cmdEq("mkfs.ext4 -F /dev/{0}/lvol001".format(default_pool))
self._cmdEq("lvm lvcreate {0} -l 100%PVS -n lvol001 /dev/sda".format(default_pool), -2)
self._cmdEq("lvm vgcreate -f {0} /dev/sda".format(default_pool), -3)
self._checkCmd("ssm -v create", ['--name myvolume', '--fstype xfs', '/dev/sda'])
self._cmdEq("mkfs.xfs /dev/{0}/myvolume".format(default_pool))
self._cmdEq("lvm lvcreate -v {0} -l 100%PVS -n myvolume /dev/sda".format(default_pool), -2)
self._cmdEq("lvm vgcreate -v {0} /dev/sda".format(default_pool), -3)
self._checkCmd("ssm -v -f create", ['--name myvolume', '--fstype xfs', '/dev/sda'])
self._cmdEq("mkfs.xfs -f /dev/{0}/myvolume".format(default_pool))
self._cmdEq("lvm lvcreate -v {0} -l 100%PVS -n myvolume /dev/sda".format(default_pool), -2)
self._cmdEq("lvm vgcreate -v -f {0} /dev/sda".format(default_pool), -3)
self._checkCmd("ssm create", ['-s 2.6T', '/dev/sda'],
"lvm lvcreate {0} -L 2791728742.40K -n lvol001 /dev/sda".format(default_pool))
self._cmdEq("lvm vgcreate {0} /dev/sda".format(default_pool), -2)
self._checkCmd("ssm create", ['-r 0', '-s 2.6T', '-I 16', '/dev/sda'],
"lvm lvcreate {0} -L 2791728742.40K -n lvol001 -I 16 -i 1 /dev/sda".format(default_pool))
self._cmdEq("lvm vgcreate {0} /dev/sda".format(default_pool), -2)
self._checkCmd("ssm create", ['-r 0', '-s 2.6T', '-I 16', '/dev/sda'],
"lvm lvcreate {0} -L 2791728742.40K -n lvol001 -I 16 -i 1 /dev/sda".format(default_pool))
self._cmdEq("lvm vgcreate {0} /dev/sda".format(default_pool), -2)
# Number of stripes must not exceed number of devices
self.assertRaises(problem.GeneralError, main.main, "ssm create -r 0 -s 2.6T -I 16 -i 4 /dev/sda")
# Create volume using single device from non existent my_pool
self._checkCmd("ssm create", ['--pool my_pool', '/dev/sda'],
"lvm lvcreate my_pool -l 100%PVS -n lvol001 /dev/sda")
self._cmdEq("lvm vgcreate my_pool /dev/sda", -2)
self._checkCmd("ssm create", ['-r 0', '-p my_pool', '-s 2.6T', '-I 16',
'-i 2', '/dev/sda /dev/sdb'],
"lvm lvcreate my_pool -L 2791728742.40K -n lvol001 -I 16 -i 2 /dev/sda /dev/sdb")
self._cmdEq("lvm vgcreate my_pool /dev/sda /dev/sdb", -2)
# Create volume using multiple devices
self._checkCmd("ssm create", ['/dev/sda /dev/sdc1'],
"lvm lvcreate {0} -l 100%PVS -n lvol001 /dev/sda /dev/sdc1".format(default_pool))
self._cmdEq("lvm vgcreate {0} /dev/sda /dev/sdc1".format(default_pool), -2)
# Create volume using single devices from existing pool
self._addPool(default_pool, ['/dev/sdb', '/dev/sdd'])
self._checkCmd("ssm create", ['-r 0', '-s 2.6T', '-I 16',
'-n myvolume', '/dev/sda'],
"lvm lvcreate {0} -L 2791728742.40K -n myvolume -I 16 -i 1 /dev/sda". format(default_pool))
self._cmdEq("lvm vgextend {0} /dev/sda".format(default_pool), -2)
self._addPool("my_pool", ['/dev/sdc2', '/dev/sdc3'])
self._checkCmd("ssm create", ['-r 0', '-p my_pool', '-s 2.6T', '-I 16',
'-n myvolume', '/dev/sda'],
"lvm lvcreate my_pool -L 2791728742.40K -n myvolume -I 16 -i 1 /dev/sda")
self._cmdEq("lvm vgextend my_pool /dev/sda", -2)
        # Create volume using multiple devices, one of which is already
# in the pool
self._checkCmd("ssm create", ['-n myvolume', '/dev/sda /dev/sdb'],
"lvm lvcreate {0} -l 100%PVS -n myvolume /dev/sda /dev/sdb". format(default_pool))
self._cmdEq("lvm vgextend {0} /dev/sda".format(default_pool), -2)
self._addPool("my_pool", ['/dev/sdc2', '/dev/sdc3'])
self._checkCmd("ssm create", ['-p my_pool', '-n myvolume', '/dev/sdc2 /dev/sda'],
"lvm lvcreate my_pool -l 100%PVS -n myvolume /dev/sdc2 /dev/sda")
self._cmdEq("lvm vgextend my_pool /dev/sda", -2)
self._checkCmd("ssm create", ['-n myvolume', '/dev/sda /dev/sdb /dev/sde'],
"lvm lvcreate {0} -l 100%PVS -n myvolume /dev/sda /dev/sdb /dev/sde". format(default_pool))
self._cmdEq("lvm vgextend {0} /dev/sda /dev/sde".format(default_pool), -2)
def test_lvm_remove(self):
# Generate some storage data
self._addPool('default_pool', ['/dev/sda', '/dev/sdb'])
self._addPool('my_pool', ['/dev/sdc2', '/dev/sdc3', '/dev/sdc1'])
self._addVol('vol001', 117283225, 1, 'default_pool', ['/dev/sda'])
self._addVol('vol002', 237284225, 1, 'default_pool', ['/dev/sda'])
self._addVol('vol003', 1024, 1, 'default_pool', ['/dev/sdd'])
self._addVol('vol004', 209715200, 2, 'default_pool', ['/dev/sda',
'/dev/sdb'])
# remove volume
main.main("ssm remove /dev/default_pool/vol002")
self._cmdEq("lvm lvremove /dev/default_pool/vol002")
# remove multiple volumes
main.main("ssm remove /dev/default_pool/vol002 /dev/default_pool/vol003")
self.assertEqual(self.run_data[-2], "lvm lvremove /dev/default_pool/vol002")
self._cmdEq("lvm lvremove /dev/default_pool/vol003")
# remove pool
main.main("ssm remove my_pool")
self._cmdEq("lvm vgremove my_pool")
# remove multiple pools
main.main("ssm remove my_pool default_pool")
self.assertEqual(self.run_data[-2], "lvm vgremove my_pool")
self._cmdEq("lvm vgremove default_pool")
# remove device
main.main("ssm remove /dev/sdc1")
self._cmdEq("lvm vgreduce my_pool /dev/sdc1")
# remove multiple devices
main.main("ssm remove /dev/sdc1 /dev/sdb")
self.assertEqual(self.run_data[-2], "lvm vgreduce my_pool /dev/sdc1")
self._cmdEq("lvm vgreduce default_pool /dev/sdb")
# remove combination
main.main("ssm remove /dev/sdb my_pool /dev/default_pool/vol001")
self.assertEqual(self.run_data[-3], "lvm vgreduce default_pool /dev/sdb")
self.assertEqual(self.run_data[-2], "lvm vgremove my_pool")
self._cmdEq("lvm lvremove /dev/default_pool/vol001")
# remove all
main.main("ssm remove --all")
self.assertEqual(self.run_data[-2], "lvm vgremove default_pool")
self._cmdEq("lvm vgremove my_pool")
# remove force
main.main("ssm -f remove /dev/default_pool/vol002")
self._cmdEq("lvm lvremove -f /dev/default_pool/vol002")
# remove verbose
main.main("ssm -v remove /dev/default_pool/vol002")
self._cmdEq("lvm lvremove -v /dev/default_pool/vol002")
# remove verbose + force
main.main("ssm -v -f remove /dev/default_pool/vol002")
self._cmdEq("lvm lvremove -v -f /dev/default_pool/vol002")
def test_lvm_snapshot(self):
# Generate some storage data
self._addPool('default_pool', ['/dev/sda', '/dev/sdb'])
self._addPool('my_pool', ['/dev/sdc2', '/dev/sdc3', '/dev/sdc1'])
self._addVol('vol001', 117283225, 1, 'default_pool', ['/dev/sda'])
self._addVol('vol002', 237284225, 1, 'default_pool', ['/dev/sda'],
'/mnt/mount1')
self._addVol('vol003', 1024, 1, 'default_pool', ['/dev/sdd'])
self._addVol('vol004', 209715200, 2, 'default_pool', ['/dev/sda',
'/dev/sdb'], '/mnt/mount')
# Create snapshot
self._checkCmd("ssm snapshot --name new_snap", ['/dev/default_pool/vol001'],
"lvm lvcreate --size 23456645.0K --snapshot --name new_snap /dev/default_pool/vol001")
main.SSM_DEFAULT_BACKEND = "btrfs"
self._checkCmd("ssm snapshot --name new_snap", ['/dev/default_pool/vol001'],
"lvm lvcreate --size 23456645.0K --snapshot --name new_snap /dev/default_pool/vol001")
main.SSM_DEFAULT_BACKEND = "lvm"
# Create snapshot verbose
self._checkCmd("ssm -v snapshot --name new_snap", ['/dev/default_pool/vol001'],
"lvm lvcreate -v --size 23456645.0K --snapshot --name new_snap /dev/default_pool/vol001")
# Create snapshot force
self._checkCmd("ssm -f snapshot --name new_snap", ['/dev/default_pool/vol001'],
"lvm lvcreate -f --size 23456645.0K --snapshot --name new_snap /dev/default_pool/vol001")
# Create snapshot force verbose
self._checkCmd("ssm -f -v snapshot --name new_snap", ['/dev/default_pool/vol001'],
"lvm lvcreate -v -f --size 23456645.0K --snapshot --name new_snap /dev/default_pool/vol001")
# Create snapshot with size and name specified
self._checkCmd("ssm snapshot", ['--size 1G', '--name new_snap',
'/dev/default_pool/vol001'],
"lvm lvcreate --size 1048576.0K --snapshot --name new_snap /dev/default_pool/vol001")
def test_lvm_resize(self):
# Generate some storage data
self._addPool('default_pool', ['/dev/sda', '/dev/sdb'])
self._addPool('my_pool', ['/dev/sdc2', '/dev/sdc3'])
self._addVol('vol001', 2982616, 1, 'my_pool', ['/dev/sdc2'],
'/mnt/test1')
self._addVol('vol002', 237284225, 1, 'default_pool', ['/dev/sda'])
self._addVol('vol003', 1024, 1, 'default_pool', ['/dev/sdd'])
self._addDevice('/dev/sde', 11489037516)
# Extend Volume
self._checkCmd("ssm resize", ['--size +4m', '/dev/default_pool/vol003'],
"lvm lvresize -L 5120.0k /dev/default_pool/vol003")
# Specify backend
self._checkCmd("ssm --backend lvm resize", ['--size +4m', '/dev/default_pool/vol003'],
"lvm lvresize -L 5120.0k /dev/default_pool/vol003")
main.SSM_DEFAULT_BACKEND = "btrfs"
self._checkCmd("ssm resize", ['--size +4m', '/dev/default_pool/vol003'],
"lvm lvresize -L 5120.0k /dev/default_pool/vol003")
main.SSM_DEFAULT_BACKEND = "lvm"
# Shrink volume
self._checkCmd("ssm resize", ['-s-100G', '/dev/default_pool/vol002'],
"lvm lvresize -L 132426625.0k /dev/default_pool/vol002")
# Set volume size
self._checkCmd("ssm resize", ['-s 10M', '/dev/my_pool/vol001'],
"lvm lvresize -L 10240.0k /dev/my_pool/vol001")
# Set volume and add devices
self._checkCmd("ssm resize -s 12T /dev/default_pool/vol003 /dev/sdc1 /dev/sde",
[], "lvm lvresize -L 12884901888.0k /dev/default_pool/vol003")
self.assertEqual(self.run_data[-2],
"lvm vgextend default_pool /dev/sdc1 /dev/sde")
# Set volume size with sufficient amount of space
self._checkCmd("ssm resize -s 10G /dev/default_pool/vol003 /dev/sdc1 /dev/sde",
[], "lvm lvresize -L 10485760.0k /dev/default_pool/vol003")
self.assertNotEqual(self.run_data[-2],
"lvm vgextend default_pool /dev/sdc1 /dev/sde")
# Set volume size without sufficient amount of space
self._checkCmd("ssm resize -s 10T /dev/default_pool/vol003 /dev/sdc1 /dev/sde",
[], "lvm lvresize -L 10737418240.0k /dev/default_pool/vol003")
self.assertNotEqual(self.run_data[-2],
"lvm vgextend default_pool /dev/sdc1 /dev/sde")
# Extend volume and add devices
self._checkCmd("ssm resize -s +11T /dev/default_pool/vol003 /dev/sdc1 /dev/sde",
[], "lvm lvresize -L 11811161088.0k /dev/default_pool/vol003")
self.assertEqual(self.run_data[-2],
"lvm vgextend default_pool /dev/sdc1 /dev/sde")
        # Extend volume with enough space in pool
self._checkCmd("ssm resize -s +10G /dev/default_pool/vol003 /dev/sdc1 /dev/sde",
[], "lvm lvresize -L 10486784.0k /dev/default_pool/vol003")
self.assertNotEqual(self.run_data[-2],
"lvm vgextend default_pool /dev/sdc1 /dev/sde")
        # Extend volume without enough space in pool
self._checkCmd("ssm resize -s +20T /dev/default_pool/vol003 /dev/sdc1 /dev/sde",
[], "lvm lvresize -L 21474837504.0k /dev/default_pool/vol003")
self.assertEqual(self.run_data[-2],
"lvm vgextend default_pool /dev/sdc1 /dev/sde")
# Shrink volume with devices provided
self._checkCmd("ssm resize -s-10G /dev/default_pool/vol002 /dev/sdc1 /dev/sde",
[], "lvm lvresize -L 226798465.0k /dev/default_pool/vol002")
self.assertNotEqual(self.run_data[-2],
"lvm vgextend default_pool /dev/sdc1 /dev/sde")
# Test that we do not use devices which are already used in different
# pool
self.assertRaises(Exception, main.main, "ssm resize -s +1.5T /dev/my_pool/vol001 /dev/sdb /dev/sda")
# If the device we are able to use can cover the size, then
# it will be resized successfully
self._checkCmd("ssm resize -s +1.5T /dev/my_pool/vol001 /dev/sdb /dev/sda /dev/sdc1",
[], "lvm lvresize -L 1613595352.0k /dev/my_pool/vol001")
# Test resize on inactive volume
self._addVol('vol004', 8192, 1, 'default_pool', ['/dev/sdd'], None, False)
self._checkCmd("ssm resize", ['--size +4m', '/dev/default_pool/vol004'],
"lvm lvresize -L 12288.0k /dev/default_pool/vol004")
self.assertRaises(Exception, main.main, "ssm resize -s-2m /dev/default_pool/vol004")
# We can force it though
self._checkCmd("ssm -f resize", ['-s-2m', '/dev/default_pool/vol004'],
"lvm lvresize -f -L 6144.0k /dev/default_pool/vol004")
def test_lvm_add(self):
default_pool = lvm.SSM_LVM_DEFAULT_POOL
# Adding to non existent pool
# Add device into default pool
self._checkCmd("ssm add", ['/dev/sda'],
"lvm vgcreate {0} /dev/sda".format(default_pool))
# Specify backend
self._checkCmd("ssm --backend lvm add", ['/dev/sda'],
"lvm vgcreate {0} /dev/sda".format(default_pool))
main.SSM_DEFAULT_BACKEND = "btrfs"
self._checkCmd("ssm --backend lvm add", ['/dev/sda'],
"lvm vgcreate {0} /dev/sda".format(default_pool))
main.SSM_DEFAULT_BACKEND = "lvm"
# Add more devices into default pool
self._checkCmd("ssm add", ['/dev/sda /dev/sdc1'],
"lvm vgcreate {0} /dev/sda /dev/sdc1".format(default_pool))
# Add device into defined pool
self._checkCmd("ssm add", ['-p my_pool', '/dev/sda'],
"lvm vgcreate my_pool /dev/sda")
self._checkCmd("ssm add", ['--pool my_pool', '/dev/sda'],
"lvm vgcreate my_pool /dev/sda")
# Add more devices into defined pool
self._checkCmd("ssm add", ['-p my_pool', '/dev/sda /dev/sdc1'],
"lvm vgcreate my_pool /dev/sda /dev/sdc1")
self._checkCmd("ssm add", ['--pool my_pool', '/dev/sda /dev/sdc1'],
"lvm vgcreate my_pool /dev/sda /dev/sdc1")
# Add force
self._checkCmd("ssm -f add", ['--pool my_pool', '/dev/sda'],
"lvm vgcreate -f my_pool /dev/sda")
# Add verbose
self._checkCmd("ssm -v add", ['--pool my_pool', '/dev/sda'],
"lvm vgcreate -v my_pool /dev/sda")
# Add force and verbose
self._checkCmd("ssm -v -f add", ['--pool my_pool', '/dev/sda'],
"lvm vgcreate -v -f my_pool /dev/sda")
# Adding to existing default pool
self._addPool(default_pool, ['/dev/sdb', '/dev/sdd'])
# Add device into default pool
self._checkCmd("ssm add", ['/dev/sda'],
"lvm vgextend {0} /dev/sda".format(default_pool))
# Add more devices into default pool
self._checkCmd("ssm add", ['/dev/sda /dev/sdc1'],
"lvm vgextend {0} /dev/sda /dev/sdc1".format(default_pool))
# Adding to existing defined pool
self._addPool("my_pool", ['/dev/sdc2', '/dev/sdc3'])
# Add device into defined pool
self._checkCmd("ssm add", ['-p my_pool', '/dev/sda'],
"lvm vgextend my_pool /dev/sda")
self._checkCmd("ssm add", ['--pool my_pool', '/dev/sda'],
"lvm vgextend my_pool /dev/sda")
# Add more devices into defined pool
self._checkCmd("ssm add", ['-p my_pool', '/dev/sda /dev/sdc1'],
"lvm vgextend my_pool /dev/sda /dev/sdc1")
self._checkCmd("ssm add", ['--pool my_pool', '/dev/sda /dev/sdc1'],
"lvm vgextend my_pool /dev/sda /dev/sdc1")
# Add force
self._checkCmd("ssm -f add", ['--pool my_pool', '/dev/sda'],
"lvm vgextend -f my_pool /dev/sda")
# Add verbose
self._checkCmd("ssm -v add", ['--pool my_pool', '/dev/sda'],
"lvm vgextend -v my_pool /dev/sda")
# Add force and verbose
self._checkCmd("ssm -v -f add", ['--pool my_pool', '/dev/sda'],
"lvm vgextend -v -f my_pool /dev/sda")
# Add two devices into existing pool (one of the devices already is in
        # the pool)
self._checkCmd("ssm add", ['--pool my_pool', '/dev/sdc2 /dev/sda'],
"lvm vgextend my_pool /dev/sda")
self._checkCmd("ssm add", ['/dev/sda /dev/sdb'],
"lvm vgextend {0} /dev/sda".format(default_pool))
def test_lvm_mount(self):
self._addDir("/mnt/test")
self._addDir("/mnt/test1")
self._addDir("/mnt/test2")
# Generate some storage data
self._addPool('default_pool', ['/dev/sda', '/dev/sdb'])
self._addPool('my_pool', ['/dev/sdc2', '/dev/sdc3', '/dev/sdc1'])
self._addVol('vol001', 117283225, 1, 'default_pool', ['/dev/sda'],
'/mnt/test1')
self._addVol('vol002', 237284225, 1, 'my_pool', ['/dev/sda'])
# Simple mount test
main.main("ssm mount /dev/default_pool/vol001 /mnt/test")
self._cmdEq("mount /dev/default_pool/vol001 /mnt/test")
main.main("ssm mount -o discard /dev/default_pool/vol001 /mnt/test")
self._cmdEq("mount -o discard /dev/default_pool/vol001 /mnt/test")
main.main("ssm mount --options rw,discard,neco=44 /dev/my_pool/vol002 /mnt/test1")
self._cmdEq("mount -o rw,discard,neco=44 /dev/my_pool/vol002 /mnt/test1")
# Non existing volume
main.main("ssm mount nonexisting /mnt/test1")
self._cmdEq("mount nonexisting /mnt/test1")
main.main("ssm mount -o discard,rw nonexisting /mnt/test1")
self._cmdEq("mount -o discard,rw nonexisting /mnt/test1")
| gpl-2.0 | -2,838,055,307,035,547,600 | 49.620225 | 108 | 0.585634 | false |
algolia/algoliasearch-django | tests/settings.py | 1 | 2390 | """
Django settings for core project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'MillisecondsMatter'
DEBUG = False
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'algoliasearch_django',
'tests'
)
MIDDLEWARE = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
]
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
ROOT_URLCONF = 'tests.urls'
# Database
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
def safe_index_name(name):
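    # Suffix index names with the Travis job number so concurrent CI builds
    # do not clobber each other's indices.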
if 'TRAVIS' not in os.environ:
return name
job = os.environ['TRAVIS_JOB_NUMBER']
return '{}_travis-{}'.format(name, job)
# AlgoliaSearch settings
ALGOLIA = {
'APPLICATION_ID': os.getenv('ALGOLIA_APPLICATION_ID'),
'API_KEY': os.getenv('ALGOLIA_API_KEY'),
'INDEX_PREFIX': 'test',
'INDEX_SUFFIX': safe_index_name('django'),
'RAISE_EXCEPTIONS': True
}
| mit | 176,922,860,896,906,560 | 25.555556 | 70 | 0.667364 | false |
kinshuk4/MoocX | misc/deep_learning_notes/Proj_Centroid_Loss_LeNet/convnet_2_deep/MNIST_train.py | 1 | 3905 | import os, sys, numpy as np, tensorflow as tf
from pathlib import Path
from termcolor import colored as c, cprint
sys.path.append(str(Path(__file__).resolve().parents[1]))
import convnet_2_deep
__package__ = 'convnet_2_deep'
from . import network
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
BATCH_SIZE = 64
FILENAME = os.path.basename(__file__)
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
SUMMARIES_DIR = SCRIPT_DIR
SAVE_PATH = SCRIPT_DIR + "/network.ckpt"
### configure devices for this eval script.
USE_DEVICE = '/gpu:2'
session_config = tf.ConfigProto(log_device_placement=True)
session_config.gpu_options.allow_growth = True
# this is required if want to use GPU as device.
# see: https://github.com/tensorflow/tensorflow/issues/2292
session_config.allow_soft_placement = True
if __name__ == "__main__":
with tf.Graph().as_default() as g, tf.device(USE_DEVICE):
        # Build the graph: inference -> loss -> training op -> accuracy eval
input, deep_feature = network.inference()
labels, logits, loss_op = network.loss(deep_feature)
train, global_step = network.training(loss_op, 1)
eval = network.evaluation(logits, labels)
init = tf.initialize_all_variables()
with tf.Session(config=session_config) as sess:
# Merge all the summaries and write them out to /tmp/mnist_logs (by default)
# to see the tensor graph, fire up the tensorboard with --logdir="./train"
all_summary = tf.merge_all_summaries()
train_writer = tf.train.SummaryWriter(SUMMARIES_DIR + '/summaries/train', sess.graph)
test_writer = tf.train.SummaryWriter(SUMMARIES_DIR + '/summaries/test')
saver = tf.train.Saver()
# try:
# saver.restore(sess, SAVE_PATH)
# except ValueError:
# print('checkpoint file not found. Moving on to initializing automatically.')
# sess.run(init)
sess.run(init)
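            # Training loop: evaluate on the test set every 100 steps and
            # checkpoint the network every 500 steps.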
for i in range(500000):
batch_xs, batch_labels = mnist.train.next_batch(BATCH_SIZE)
if i % 100 == 0:
summaries, step, logits_output, loss_value, accuracy = \
sess.run(
[all_summary, global_step, logits, loss_op, eval],
feed_dict={
input: mnist.test.images,
labels: mnist.test.labels
})
test_writer.add_summary(summaries, global_step=step)
cprint(
c("#" + str(i), 'grey') +
c(" training accuracy", 'green') + " is " +
c(accuracy, 'red') + ", " +
c("loss", 'green') + " is " +
c(loss_value, 'red')
)
print('logits => ', logits_output[0])
if i % 500 == 0:
saver.save(sess, SAVE_PATH)
print('=> saved network in checkfile.')
summaries, step, _ = sess.run([all_summary, global_step, train], feed_dict={
input: batch_xs,
labels: batch_labels
})
train_writer.add_summary(summaries, global_step=step)
# now let's test!
TEST_BATCH_SIZE = np.shape(mnist.test.labels)[0]
summaries, step, logits_output, loss_value, accuracy = \
sess.run(
[all_summary, global_step, logits, loss_op, eval], feed_dict={
input: mnist.test.images,
labels: mnist.test.labels
})
test_writer.add_summary(summaries, global_step=step)
print("MNIST Test accuracy is ", accuracy)
| mit | -591,876,572,340,954,200 | 40.105263 | 97 | 0.540845 | false |
brain-research/wip-lambada-lm | lambada_lm/configs.py | 1 | 1903 | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Configurations."""
from lambada_lm.config_registry import config_registry
config_registry.register('default', {
# Data
'read' : 'continuosly_with_extra_word',
'eval_read': 'continuosly_with_extra_word',
'num_steps' : 100,
'eval_num_steps' : 100,
# Schedule
'monitoring_frequency' : 100,
'saving_frequency' : 5000,
'max_batches_per_epoch' : 5000,
'num_epochs' : 100,
'start_annealing' : 20,
'lr_decay' : 0.8,
# Model
'init_scale' : 0.1,
'forget_bias' : 0.0,
'dim' : 128,
'architecture' : 'lstm',
'act' : 'relu',
'width' : -1,
# Optimization
'optimizer' : 'GradientDescentOptimizer',
'batch_size' : 32,
'learning_rate' : 1.0,
'lr_min': 0.000001,
'momentum' : 0.9,
'epsilon' : 1e-8,
'max_grad_norm': 5.0,
'next_worker_delay' : 1500,
})
c = config_registry['default']
c['dim'] = 512
c['read'] = 'shards_continuosly_with_bos'
c['eval_read'] = 'padded_sentences_with_bos'
c['eval_num_steps'] = 210
config_registry.register('lambada', c)
c = config_registry['lambada']
c['optimizer'] = 'AdamOptimizer'
c['learning_rate'] = 0.001
config_registry.register('lambAdam', c)
c = config_registry['lambAdam']
c['architecture'] = 'conv'
c['width'] = 5
config_registry.register('lambAdamConv', c)
| apache-2.0 | 8,457,949,790,418,944,000 | 26.985294 | 80 | 0.647924 | false |
jandebleser/django-wiki | src/wiki/conf/settings.py | 1 | 10199 | from __future__ import absolute_import, unicode_literals
import bleach
from django.conf import settings as django_settings
from django.core.files.storage import default_storage
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
#: Should urls be case sensitive?
URL_CASE_SENSITIVE = getattr(django_settings, 'WIKI_URL_CASE_SENSITIVE', False)
# Non-configurable (at the moment)
WIKI_LANGUAGE = 'markdown'
#: The editor class to use -- maybe a 3rd party or your own...? You can always
#: extend the built-in editor and customize it!
EDITOR = getattr(
django_settings,
'WIKI_EDITOR',
'wiki.editors.markitup.MarkItUp')
#: Whether to use Bleach or not. It's not recommended to turn this off unless
#: you know what you're doing and you don't want to use the other options.
MARKDOWN_SANITIZE_HTML = getattr(
django_settings,
'WIKI_MARKDOWN_SANITIZE_HTML',
True)
#: Arguments for the Markdown instance, for instance a list of extensions to
#: use.
#: See: https://pythonhosted.org/Markdown/extensions/index.html
#:
#: To set a custom title for TOC's::
#:
#: WIKI_MARKDOWN_KWARGS = {'extension_configs': {'toc': _('Contents of this article')}}
MARKDOWN_KWARGS = {
'extensions': [
'footnotes',
'attr_list',
'smart_strong',
'def_list',
'tables',
'abbr',
'sane_lists',
],
'extension_configs': {
'toc': {
'title': _('Table of Contents')}},
}
MARKDOWN_KWARGS.update(getattr(django_settings, 'WIKI_MARKDOWN_KWARGS', {}))
_default_tag_whitelists = bleach.ALLOWED_TAGS + [
'figure',
'figcaption',
'br',
'hr',
'p',
'div',
'img',
'pre',
'span',
'table',
'thead',
'tbody',
'th',
'tr',
'td',
'dl',
'dt',
'dd',
] + ['h{}'.format(n) for n in range(8)]
#: List of allowed tags in Markdown article contents.
MARKDOWN_HTML_WHITELIST = _default_tag_whitelists
MARKDOWN_HTML_WHITELIST += (
getattr(
django_settings,
'WIKI_MARKDOWN_HTML_WHITELIST',
[]
)
)
_default_attribute_whitelist = bleach.ALLOWED_ATTRIBUTES
for tag in MARKDOWN_HTML_WHITELIST:
if tag not in _default_attribute_whitelist:
_default_attribute_whitelist[tag] = []
_default_attribute_whitelist[tag].append('class')
_default_attribute_whitelist[tag].append('id')
_default_attribute_whitelist['img'].append('src')
_default_attribute_whitelist['img'].append('alt')
#: Dictionary of allowed attributes in Markdown article contents.
MARKDOWN_HTML_ATTRIBUTES = _default_attribute_whitelist
MARKDOWN_HTML_ATTRIBUTES.update(
getattr(
django_settings,
'WIKI_MARKDOWN_HTML_ATTRIBUTE_WHITELIST',
{}
)
)
#: Allowed inline styles in Markdown article contents, default is no styles
#: (empty list)
MARKDOWN_HTML_STYLES = (
getattr(
django_settings,
'WIKI_MARKDOWN_HTML_STYLES',
[]
)
)
_project_defined_attrs = getattr(
django_settings,
'WIKI_MARKDOWN_HTML_ATTRIBUTE_WHITELIST',
False)
# If styles are allowed but no custom attributes are defined, we allow styles
# for all kinds of tags
if MARKDOWN_HTML_STYLES and not _project_defined_attrs:
MARKDOWN_HTML_ATTRIBUTES['*'] = 'style'
#: This slug is used in URLPath if an article has been deleted. The children of the
#: URLPath of that article are moved to lost and found. They keep their permissions
#: and all their content.
LOST_AND_FOUND_SLUG = getattr(
django_settings,
'WIKI_LOST_AND_FOUND_SLUG',
'lost-and-found')
#: When True, this blocks new slugs that resolve to non-wiki views, stopping
#: users creating articles that conflict with overlapping URLs from other apps.
CHECK_SLUG_URL_AVAILABLE = getattr(
django_settings,
'WIKI_CHECK_SLUG_URL_AVAILABLE',
True)
#: Do we want to log IPs of anonymous users?
LOG_IPS_ANONYMOUS = getattr(django_settings, 'WIKI_LOG_IPS_ANONYMOUS', True)
#: Do we want to log IPs of logged in users?
LOG_IPS_USERS = getattr(django_settings, 'WIKI_LOG_IPS_USERS', False)
####################################
# PERMISSIONS AND ACCOUNT HANDLING #
####################################
# NB! None of these callables need to handle anonymous users as they are treated
# in separate settings...
#: A function returning True/False if a user has permission to
#: read contents of an article + plugins
#: Relevance: viewing articles and plugins
CAN_READ = getattr(django_settings, 'WIKI_CAN_READ', None)
#: A function returning True/False if a user has permission to
#: change contents, ie add new revisions to an article
#: Often, plugins also use this
#: Relevance: editing articles, changing revisions, editing plugins
CAN_WRITE = getattr(django_settings, 'WIKI_CAN_WRITE', None)
#: A function returning True/False if a user has permission to assign
#: permissions on an article
#: Relevance: changing owner and group membership
CAN_ASSIGN = getattr(django_settings, 'WIKI_CAN_ASSIGN', None)
#: A function returning True/False if the owner of an article has permission to change
#: the group to a user's own groups
#: Relevance: changing group membership
CAN_ASSIGN_OWNER = getattr(django_settings, 'WIKI_ASSIGN_OWNER', None)
#: A function returning True/False if a user has permission to change
#: read/write access for groups and others
CAN_CHANGE_PERMISSIONS = getattr(
django_settings,
'WIKI_CAN_CHANGE_PERMISSIONS',
None)
#: Specifies if a user has access to soft deletion of articles
CAN_DELETE = getattr(django_settings, 'WIKI_CAN_DELETE', None)
#: A function returning True/False if a user has permission to change
#: moderate, ie. lock articles and permanently delete content.
CAN_MODERATE = getattr(django_settings, 'WIKI_CAN_MODERATE', None)
#: A function returning True/False if a user has permission to create
#: new groups and users for the wiki.
CAN_ADMIN = getattr(django_settings, 'WIKI_CAN_ADMIN', None)
#: Treat anonymous (non logged in) users as the "other" user group
ANONYMOUS = getattr(django_settings, 'WIKI_ANONYMOUS', True)
#: Globally enable write access for anonymous users, if true anonymous users will be treated
#: as the others_write boolean field on models.Article.
ANONYMOUS_WRITE = getattr(django_settings, 'WIKI_ANONYMOUS_WRITE', False)
#: Globally enable create access for anonymous users
#: Defaults to ANONYMOUS_WRITE.
ANONYMOUS_CREATE = getattr(
django_settings,
'WIKI_ANONYMOUS_CREATE',
ANONYMOUS_WRITE)
#: Default setting to allow anonymous users upload access (used in
#: plugins.attachments and plugins.images).
ANONYMOUS_UPLOAD = getattr(django_settings, 'WIKI_ANONYMOUS_UPLOAD', False)
#: Sign up, login and logout views should be accessible
ACCOUNT_HANDLING = getattr(django_settings, 'WIKI_ACCOUNT_HANDLING', True)
#: Signup allowed? If it's not allowed, logged in superusers can still access
#: the signup page to create new users.
ACCOUNT_SIGNUP_ALLOWED = ACCOUNT_HANDLING and getattr(
django_settings, 'WIKI_ACCOUNT_SIGNUP_ALLOWED', True
)
if ACCOUNT_HANDLING:
LOGIN_URL = reverse_lazy("wiki:login")
LOGOUT_URL = reverse_lazy("wiki:logout")
SIGNUP_URL = reverse_lazy("wiki:signup")
else:
LOGIN_URL = getattr(django_settings, "LOGIN_URL", "/")
LOGOUT_URL = getattr(django_settings, "LOGOUT_URL", "/")
SIGNUP_URL = getattr(django_settings, "WIKI_SIGNUP_URL", "/")
##################
# OTHER SETTINGS #
##################
#: Maximum amount of children to display in a menu before going "+more"
#: NEVER set this to 0 as it will wrongly inform the user that there are no
#: children and for instance that an article can be safely deleted.
SHOW_MAX_CHILDREN = getattr(django_settings, 'WIKI_SHOW_MAX_CHILDREN', 20)
#: User Bootstrap's select widget. Switch off if you're not using Bootstrap!
USE_BOOTSTRAP_SELECT_WIDGET = getattr(
django_settings,
'WIKI_USE_BOOTSTRAP_SELECT_WIDGET',
True)
#: dottedname of class used to construct urlpatterns for wiki.
#:
#: Default is wiki.urls.WikiURLPatterns. To customize urls or view handlers,
#: you can derive from this.
URL_CONFIG_CLASS = getattr(
django_settings,
'WIKI_URL_CONFIG_CLASS',
'wiki.urls.WikiURLPatterns')
#: Search view - dotted path denoting where the search view Class is located
SEARCH_VIEW = getattr(
django_settings,
'WIKI_SEARCH_VIEW',
'wiki.views.article.SearchView'
if 'wiki.plugins.haystack' not in django_settings.INSTALLED_APPS
else
'wiki.plugins.haystack.views.HaystackSearchView'
)
#: Seconds of timeout before renewing article cache. Articles are automatically
#: renewed whenever an edit occurs but article content may be generated from
#: other objects that are changed.
CACHE_TIMEOUT = getattr(django_settings, 'WIKI_CACHE_TIMEOUT', 600)
#: Choose the Group model to use. Defaults to django's auth.Group
GROUP_MODEL = getattr(django_settings, 'WIKI_GROUP_MODEL', 'auth.Group')
###################
# SPAM PROTECTION #
###################
#: Maximum allowed revisions per hour for any given user or IP
REVISIONS_PER_HOUR = getattr(django_settings, 'WIKI_REVISIONS_PER_HOUR', 60)
#: Maximum allowed revisions per minute for any given user or IP
REVISIONS_PER_MINUTES = getattr(
django_settings,
'WIKI_REVISIONS_PER_MINUTES',
5)
#: Maximum allowed revisions per hour for any given user or IP
REVISIONS_PER_HOUR_ANONYMOUS = getattr(
django_settings,
'WIKI_REVISIONS_PER_HOUR_ANONYMOUS',
10)
#: Maximum allowed revisions per hour for any given user or IP
REVISIONS_PER_MINUTES_ANONYMOUS = getattr(
django_settings,
'WIKI_REVISIONS_PER_MINUTES_ANONYMOUS',
2)
#: Number of minutes for looking up REVISIONS_PER_MINUTES and
#: REVISIONS_PER_MINUTES_ANONYMOUS
REVISIONS_MINUTES_LOOKBACK = getattr(
django_settings,
'WIKI_REVISIONS_MINUTES_LOOKBACK',
2)
###########
# STORAGE #
###########
#: Django Storage backend to use for images, attachments etc.
STORAGE_BACKEND = getattr(
django_settings,
'WIKI_STORAGE_BACKEND',
default_storage)
#: Use Sendfile
USE_SENDFILE = getattr(django_settings, 'WIKI_ATTACHMENTS_USE_SENDFILE', False)
| gpl-3.0 | 8,362,595,489,785,991,000 | 31.275316 | 92 | 0.702226 | false |
astrofrog/ginga | ginga/gtkw/PluginManagerGtk.py | 1 | 12463 | #
# PluginManagerGtk.py -- Simple class to manage plugins.
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import sys
import threading
import traceback
import gtk
from ginga.misc import Bunch, Future
class PluginManagerError(Exception):
pass
class PluginManager(object):
def __init__(self, logger, fitsview, ds, mm):
super(PluginManager, self).__init__()
self.logger = logger
self.fv = fitsview
self.ds = ds
self.mm = mm
self.lock = threading.RLock()
self.plugin = Bunch.caselessDict()
self.active = {}
self.focus = set([])
self.exclusive = set([])
self.focuscolor = "green"
self.hbox = None
def set_widget(self, hbox):
self.hbox = hbox
def loadPlugin(self, name, spec, chinfo=None):
try:
module = self.mm.getModule(spec.module)
className = spec.get('klass', spec.module)
klass = getattr(module, className)
if chinfo == None:
# global plug in
obj = klass(self.fv)
fitsimage = None
else:
# local plugin
fitsimage = chinfo.fitsimage
obj = klass(self.fv, fitsimage)
## # If this plugin has a GUI, add it to the Dialog pane
## vbox = None
## if hasattr(obj, 'build_gui'):
## vbox = gtk.VBox()
## obj.build_gui(vbox)
vbox = None
# Prepare configuration for module
opname = name.lower()
self.plugin[opname] = Bunch.Bunch(klass=klass, obj=obj,
widget=vbox, name=name,
spec=spec,
fitsimage=fitsimage,
chinfo=chinfo)
self.logger.info("Plugin '%s' loaded." % name)
except Exception, e:
self.logger.error("Failed to load plugin '%s': %s" % (
name, str(e)))
#raise PluginManagerError(e)
def reloadPlugin(self, plname, chinfo=None):
pInfo = self.getPluginInfo(plname)
return self.loadPlugin(pInfo.name, pInfo.spec, chinfo=chinfo)
def getPluginInfo(self, plname):
plname = plname.lower()
pInfo = self.plugin[plname]
return pInfo
def getPlugin(self, name):
pInfo = self.getPluginInfo(name)
return pInfo.obj
def getNames(self):
return self.plugin.keys()
def update_taskbar(self, localmode=True):
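        # Refresh the plugin taskbar: in local mode hide all buttons first,
        # then show the buttons of the currently active plugins.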
## with self.lock:
if localmode:
for child in self.hbox.get_children():
#self.hbox.remove(child)
child.hide()
for name in self.active.keys():
bnch = self.active[name]
#self.hbox.pack_start(bnch.widget, expand=False, fill=False)
bnch.widget.show()
def activate(self, pInfo, exclusive=True):
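        # Add a taskbar button for the plugin (a framed label with a
        # right-click menu offering Focus/Stop), unless one already exists.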
print "PINFO: ", pInfo
name = pInfo.tabname
lname = pInfo.name.lower()
if not self.active.has_key(lname):
tup = name.split(':')
lblname = ' ' + tup[0] + ':\n' + tup[1] + ' '
lbl = gtk.Label(lblname)
lbl.set_justify(gtk.JUSTIFY_CENTER)
self.fv.w.tooltips.set_tip(lbl,
"Right click for menu")
evbox = gtk.EventBox()
evbox.add(lbl)
fr = gtk.Frame()
fr.set_shadow_type(gtk.SHADOW_OUT)
fr.add(evbox)
#fr = evbox
fr.show_all()
self.hbox.pack_start(fr, expand=False, fill=False)
menu = gtk.Menu()
item = gtk.MenuItem("Focus")
item.show()
item.connect("activate", lambda w: self.set_focus(lname))
menu.append(item)
item = gtk.MenuItem("Stop")
item.show()
item.connect("activate", lambda w: self.deactivate(lname))
menu.append(item)
bnch = Bunch.Bunch(widget=fr, label=lbl, lblname=lblname,
evbox=evbox,
menu=menu, pInfo=pInfo, exclusive=exclusive)
self.active[lname] = bnch
if exclusive:
self.exclusive.add(lname)
evbox.connect("button_press_event", self.button_press_event,
lname)
def button_press_event(self, widget, event, name):
# event.button, event.x, event.y
bnch = self.active[name]
if event.button == 1:
return self.set_focus(name)
#return bnch.menu.popup(None, None, None, event.button, event.time)
elif event.button == 3:
return bnch.menu.popup(None, None, None, event.button, event.time)
return False
def deactivate(self, name):
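        # Stop the plugin, drop its taskbar entry and hand focus to another
        # active plugin if any remain.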
print "deactivating %s" % (name)
lname = name.lower()
if lname in self.focus:
self.clear_focus(lname)
if self.active.has_key(lname):
bnch = self.active[lname]
print "stopping plugin"
self.stop_plugin(bnch.pInfo)
print "removing widget"
if bnch.widget != None:
self.hbox.remove(bnch.widget)
print "removing from dict"
del self.active[lname]
# Set focus to another plugin if one is running
active = self.active.keys()
if len(active) > 0:
name = active[0]
self.set_focus(name)
def deactivate_focused(self):
names = self.get_focus()
for name in names:
self.deactivate(name)
def get_active(self):
return self.active.keys()
def is_active(self, key):
lname = key.lower()
return lname in self.get_active()
def get_focus(self):
return list(self.focus)
def get_info(self, name):
lname = name.lower()
return self.active[lname]
def set_focus(self, name):
self.logger.info("Focusing plugin '%s'" % (name))
lname = name.lower()
bnch = self.active[lname]
if bnch.exclusive:
self.logger.debug("focus=%s exclusive=%s" % (
self.focus, self.exclusive))
defocus = filter(lambda x: x in self.exclusive, self.focus)
self.logger.debug("defocus: %s" % (str(defocus)))
for xname in defocus:
self.clear_focus(xname)
pInfo = bnch.pInfo
# If this is a local plugin, raise the channel associated with the
# plug in
if pInfo.chinfo != None:
itab = pInfo.chinfo.name
self.logger.debug("raising tab %s" % (itab))
self.ds.raise_tab(itab)
pInfo.obj.resume()
self.focus.add(lname)
## bnch.label.set_markup('<span background="green">%s</span>' % (
## bnch.lblname))
bnch.evbox.modify_bg(gtk.STATE_NORMAL,
gtk.gdk.color_parse(self.focuscolor))
if pInfo.widget != None:
self.ds.raise_tab('Dialogs')
self.ds.raise_tab(pInfo.tabname)
def clear_focus(self, name):
self.logger.debug("Unfocusing plugin '%s'" % (name))
lname = name.lower()
bnch = self.active[lname]
try:
self.focus.remove(lname)
bnch.pInfo.obj.pause()
except:
pass
bnch.evbox.modify_bg(gtk.STATE_NORMAL, gtk.gdk.color_parse("grey"))
## bnch.label.set_markup('<span>%s</span>' % (bnch.lblname))
def start_plugin(self, chname, opname, alreadyOpenOk=False):
return self.start_plugin_future(chname, opname, None,
alreadyOpenOk=alreadyOpenOk)
def start_plugin_future(self, chname, opname, future,
alreadyOpenOk=False):
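        # Build the plugin GUI (if any) into a tab under 'Dialogs', start the
        # plugin, and render any build/start traceback inside that tab.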
pInfo = self.getPluginInfo(opname)
plname = chname.upper() + ': ' + pInfo.name
lname = pInfo.name.lower()
if self.active.has_key(lname):
if alreadyOpenOk:
# TODO: raise widgets, rerun start()?
return
raise PluginManagerError("Plugin %s is already active." % (
plname))
# Raise tab with GUI
pInfo.tabname = plname
vbox = None
had_error = False
try:
if hasattr(pInfo.obj, 'build_gui'):
vbox = gtk.VBox()
if future:
pInfo.obj.build_gui(vbox, future=future)
else:
pInfo.obj.build_gui(vbox)
        except Exception, e:
            had_error = True
            errstr = "Plugin UI failed to initialize: %s" % (
                str(e))
            self.logger.error(errstr)
try:
(type, value, tb) = sys.exc_info()
tb_str = "".join(traceback.format_tb(tb))
self.logger.error("Traceback:\n%s" % (tb_str))
except Exception, e:
tb_str = "Traceback information unavailable."
self.logger.error(tb_str)
textw = gtk.TextView()
buf = textw.get_buffer()
buf.set_text(errstr + '\n' + tb_str)
textw.set_editable(False)
vbox.pack_start(textw, fill=True, expand=True)
#raise PluginManagerError(e)
if not had_error:
try:
if future:
pInfo.obj.start(future=future)
else:
pInfo.obj.start()
except Exception, e:
had_error = True
errstr = "Plugin failed to start correctly: %s" % (
str(e))
self.logger.error(errstr)
try:
(type, value, tb) = sys.exc_info()
tb_str = "".join(traceback.format_tb(tb))
self.logger.error("Traceback:\n%s" % (tb_str))
except Exception, e:
tb_str = "Traceback information unavailable."
self.logger.error(tb_str)
textw = gtk.TextView()
buf = textw.get_buffer()
buf.set_text(errstr + '\n' + tb_str)
textw.set_editable(False)
vbox.pack_start(textw, fill=True, expand=True)
#raise PluginManagerError(e)
if vbox != None:
vbox.show_all()
nb = self.ds.get_nb('Dialogs')
self.ds.add_tab(nb, vbox, 2, pInfo.tabname, pInfo.tabname)
pInfo.widget = vbox
self.activate(pInfo)
self.set_focus(pInfo.name)
else:
# If this is a local plugin, raise the channel associated with the
# plug in
if pInfo.chinfo != None:
itab = pInfo.chinfo.name
print "raising tab %s" % itab
self.ds.raise_tab(itab)
def stop_plugin(self, pInfo):
self.logger.debug("stopping plugin %s" % (str(pInfo)))
wasError = False
try:
pInfo.obj.stop()
except Exception, e:
wasError = True
self.logger.error("Plugin failed to stop correctly: %s" % (
str(e)))
try:
(type, value, tb) = sys.exc_info()
tb_str = "".join(traceback.format_tb(tb))
self.logger.error("Traceback:\n%s" % (tb_str))
except Exception:
self.logger.error("Traceback information unavailable.")
if pInfo.widget != None:
self.ds.remove_tab(pInfo.tabname)
pInfo.widget.destroy()
# If there are no more dialogs present, raise Thumbs
nb = self.ds.get_nb('Dialogs')
num_dialogs = nb.get_n_pages()
if num_dialogs == 0:
try:
self.ds.raise_tab('Thumbs')
except:
# No Thumbs tab--OK
pass
if wasError:
raise PluginManagerError(e)
#END
| bsd-3-clause | -7,718,921,470,319,167,000 | 32.412869 | 79 | 0.504774 | false |
apuigsech/CryptoAPI | CryptoAPI/CryptoAPI.py | 1 | 8197 | #!/usr/bin/env python
# CryptoAPI: Python Crypto API implementation
#
# Copyright (c) 2014 - Albert Puigsech Galicia (albert@puigsech.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from CryptsyAPI import CryptsyAPI
from BittrexAPI import BittrexAPI
class CryptoAPI_iface(object):
def balances(self, currency=None, cached=None):
raise NotImplementedError( "Method not implemented" )
def marketstatus(self, market=None, depth_level=None, cached=None):
raise NotImplementedError( "Method not implemented" )
def orders(self, market=None, cached=None):
raise NotImplementedError( "Method not implemented" )
def putorder(self, market, type, pricetype, amount, price=None, simulation=None):
raise NotImplementedError( "Method not implemented" )
def delorder(self, order_id=None, simulation=None):
raise NotImplementedError( "Method not implemented" )
class CryptoAPI_cryptsy(CryptsyAPI, CryptoAPI_iface):
def __init__(self, key, secret, simulation=False, cached=False):
super(CryptoAPI_cryptsy, self).__init__(key, secret, simulation, cached)
CryptoAPI_iface.__init__(self)
def balances(self, currency=None, cached=None):
if cached == None:
cached = self.cached
ret = {
'available': {},
'hold': {},
'total': {},
}
info = self.getinfo(cached)['return']
for i in info['balances_available']:
if i == currency or (currency == None and (float(info['balances_available'][i]) > 0 or info['balances_hold'].has_key(i))):
ret['available'][i] = float(info['balances_available'][i])
ret['hold'][i] = float(info['balances_hold'][i]) if info['balances_hold'].has_key(i) else float(0)
ret['total'][i] = ret['available'][i] + ret['hold'][i]
return ret
def marketstatus(self, market=None, depth_level=None, cached=None):
if cached == None:
cached = self.cached
status = self.getmarkets(cached)['return']
ret = {}
for i in status:
marketname = '{0}-{1}'.format(i['secondary_currency_code'], i['primary_currency_code'])
if marketname == market or i['primary_currency_code'] == market or i['secondary_currency_code'] == market or market == None:
ret[marketname] = {
'id': int(i['marketid']),
'last_price': float(i['last_trade']),
'high_price': float(i['high_trade']),
'low_price': float(i['low_trade']),
'volume': float(i['current_volume']),
'depth': None
}
if depth_level != None and depth_level > 0:
depth = self.depth(i['marketid'], cached)['return']
ret[marketname]['depth'] = {
'buy': [],
'sell': [],
}
for j in depth['buy'][0:depth_level]:
ret[marketname]['depth']['buy'].append([float(j[0]),float(j[1])])
for j in depth['sell'][0:depth_level]:
ret[marketname]['depth']['sell'].append([float(j[0]),float(j[1])])
return ret
def orders(self, market=None, cached=None):
if cached == None:
cached = self.cached
orders = self.allmyorders(cached)['return']
ret = []
for i in orders:
marketname = self._getmarketfromid(i['marketid'])
ret.append({
'id': int(i['orderid']),
'market': 'TBD',
'price': i['price'],
'amount': i['orig_quantity'],
'remaining_amount': i['quantity'],
})
return ret
def putorder(self, market, type, pricetype, amount, price=None, simulation=None):
if simulation == None:
simulation = self.simulation
status = self.marketstatus(market, 1)
print status
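		# Resolve the requested price type against the order book: 'market'
		# crosses at any price, 'best' takes the top of the opposite side,
		# while the border price types sit at (or one tick past) the top of
		# this order's own side of the book.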
if pricetype == 'market':
price = 4294967296
elif pricetype == 'best':
if type == 'buy':
price = status[market]['depth']['sell'][0][0]
elif type == 'sell':
price = status[market]['depth']['buy'][0][0]
		elif pricetype == 'border' or pricetype == 'overborder':
			if type == 'buy':
				price = status[market]['depth']['buy'][0][0]
			elif type == 'sell':
				price = status[market]['depth']['sell'][0][0]
			if pricetype == 'overborder':
if type == 'buy':
price += 0.00000001
elif type == 'sell':
price -= 0.00000001
return self.createorder(status[market]['id'], type, amount, price)
def delorder(self, order_id=None, simulation=None):
return None
def _getmarketfromid(self, id):
markets = self.marketstatus(cached=True)
for marketname in markets:
if markets[marketname]['id'] == id:
return marketname
return None
def _getidfrommarket(self, market):
markets = self.marketstatus(cached=True)
if markets.has_key(market):
return markets[market]['id']
else:
return None
class CryptoAPI_bittrex(BittrexAPI, CryptoAPI_iface):
def __init__(self, key, secret, simulation=False, cached=False):
super(CryptoAPI_bittrex, self).__init__(key, secret, simulation, cached)
def balances(self, currency=None, cached=None):
if cached == None:
cached = self.cached
ret = {
'available': {},
'hold': {},
'total': {},
}
if currency==None:
info = self.getbalances(cached)['result']
else:
info = [self.getbalance(currency, cached)['result']]
for i in info:
ret['available'][i['Currency']] = float(i['Available'])
ret['hold'][i['Currency']] = float(i['Pending'])
ret['total'][i['Currency']] = float(i['Balance'])
return ret
def marketstatus(self, market=None, depth_level=None, cached=None):
if cached == None:
cached = self.cached
ret = {}
status = self.getmarkets(cached)['result']
status = self.getmarketsummaries(cached)['result']
for i in status:
marketname = i['MarketName']
#if marketname == market or market == i['BaseCurrency'] or market == i['MarketCurrency'] or market == None:
			if market == None or marketname == market or market in marketname:
if i['Volume'] == None:
i['Volume'] = 0
ret[marketname] = {
'id': marketname,
'last_price': float(i['Last']),
'high_price': float(str(i['High'])), # FIX a bug on Bittrex data returned
'low_price': float(i['Low']),
'volume': float(i['Volume']),
'depth': None
}
if depth_level != None and depth_level > 0:
depth = self.getorderbook(marketname, 'both', depth_level, cached)['result']
ret[marketname]['depth'] = {
'buy': [],
'sell': [],
}
for j in depth['buy'][0:depth_level]:
ret[marketname]['depth']['buy'].append([float(j['Rate']),float(j['Quantity'])])
for j in depth['sell'][0:depth_level]:
ret[marketname]['depth']['sell'].append([float(j['Rate']),float(j['Quantity'])])
return ret
def orders(self, market=None, cached=None):
if cached == None:
cached = self.cached
ret = []
		orders = self.getopenorders(market, cached)['result']
		for i in orders:
			# Field names assumed to follow the Bittrex v1.1 getopenorders response
			# (OrderUuid, Exchange, Limit, Quantity, QuantityRemaining).
			ret.append({
				'id': i['OrderUuid'],
				'market': i['Exchange'],
				'price': i['Limit'],
				'amount': i['Quantity'],
				'remaining_amount': i['QuantityRemaining'],
			})
		return ret
def putorder(self, market, type, pricetype, amount, price=None, simulation=None):
pass
def delorder(self, order_id=None, simulation=None):
pass
def CryptoAPI(type, key, secret, simulation=False, cached=False):
# TODO Security: type validation
code = 'CryptoAPI_{0}(key, secret, simulation, cached)'.format(type)
api = eval(code)
return api | gpl-3.0 | 8,747,078,000,745,754,000 | 27.866197 | 127 | 0.654508 | false |
justas-/pyledbat | pyledbat/ledbat/baseledbat.py | 1 | 6716 | """
Copyright 2017, J. Poderys, Technical University of Denmark
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
This is a base implementation of LEDBAT following the [RFC6817] for LEDBAT
specification. This file is not enough on its own, and must be extended to
gate the sending. An example of such extending is provided by simpleledbat
implementation and in the test application.
"""
import time
import datetime
import math
import logging
class BaseLedbat(object):
"""Base class with constante defined"""
CURRENT_FILTER = 8 # Number of elements in current delay filter
BASE_HISTORY = 10 # Number of elements in base delay history
INIT_CWND = 2 # Number of MSSes in initial cwnd value
MSS = 1500 # Maximum segment size
TARGET = 50 # Target in milliseconds. Per [RFC6817] must be <= 100ms
GAIN = 1 # Congestion window to delay response rate
ALLOWED_INCREASE = 1
MIN_CWND = 2
def __init__(self, **kwargs):
"""Initialize the instance"""
self._current_delays = BaseLedbat.CURRENT_FILTER * [1000000]
self._base_delays = BaseLedbat.BASE_HISTORY * [float('inf')]
self._flightsize = 0
self._cwnd = BaseLedbat.INIT_CWND * BaseLedbat.MSS # Congestion window
        self._last_rollover = time.time()      # Time last base-delay rollover occurred
self._cto = 1 # Congestion timeout (seconds)
self._queuing_delay = 0
self._rtt = None # Round Trip Time
self._last_data_loss = 0 # When was latest dataloss event observed
self._last_ack_received = None # When was the last ACK received
# Change defaults if given:
for key, value in kwargs.items():
if key == 'set_current_filter':
BaseLedbat.CURRENT_FILTER = value
elif key == 'set_base_history':
BaseLedbat.BASE_HISTORY = value
elif key == 'set_init_cwnd':
BaseLedbat.INIT_CWND = value
elif key == 'set_mss':
BaseLedbat.MSS = value
elif key == 'set_target':
BaseLedbat.TARGET = value
elif key == 'set_gain':
BaseLedbat.GAIN = value
elif key == 'set_allowed_increase':
BaseLedbat.ALLOWED_INCREASE = value
elif key == 'set_min_cwnd':
BaseLedbat.MIN_CWND = value
else:
# Fall through option so logging is not done
continue
logging.info('LEDBAT parameter changed: %s => %s', key, value)
def _ack_received(self, bytes_acked, ow_delays, rtt_delays):
"""Parse the received delay sample(s)
        ow_delays are one-way delays in milliseconds, rtt_delays are RTT measurements in seconds!
"""
# Update time of last ACK
self._last_ack_received = time.time()
# Process all received delay samples
for delay_sample in ow_delays:
self._update_base_delay(delay_sample)
self._update_current_delay(delay_sample)
# Update values
self._queuing_delay = self._filter_alg(self._current_delays) - min(self._base_delays)
off_target = (BaseLedbat.TARGET - self._queuing_delay) / BaseLedbat.TARGET
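        # Worked example: with TARGET = 50 ms and a queuing delay of 10 ms,
        # off_target = (50 - 10) / 50 = 0.8, so the window below grows at 80% of the
        # full GAIN rate; at exactly 50 ms it stays flat, above 50 ms it shrinks.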
self._cwnd += int(BaseLedbat.GAIN * off_target * bytes_acked * BaseLedbat.MSS / self._cwnd)
max_allowed_cwnd = self._flightsize + BaseLedbat.ALLOWED_INCREASE * BaseLedbat.MSS
self._cwnd = min([self._cwnd, max_allowed_cwnd])
self._cwnd = max([self._cwnd, BaseLedbat.MIN_CWND * BaseLedbat.MSS])
self._flightsize = max([0, self._flightsize - bytes_acked])
self._update_cto(rtt_delays)
def data_loss(self, will_retransmit=True, loss_size=None):
"""Reduce cwnd if data loss is experienced"""
# Get the current time
t_now = time.time()
if loss_size is None:
loss_size = BaseLedbat.MSS
# Prevent calling too often
if self._last_data_loss != 0:
if t_now - self._last_data_loss < self._rtt:
# At most once per RTT
return
# Save time when last dataloss event happened
self._last_data_loss = t_now
# Reduce the congestion window size
self._cwnd = min([
self._cwnd,
int(max([self._cwnd / 2, BaseLedbat.MIN_CWND * BaseLedbat.MSS]))
])
# Account for data in-flight
if not will_retransmit:
self._flightsize = self._flightsize - loss_size
def _no_ack_in_cto(self):
"""Update CWND if no ACK was received in CTO"""
self._cwnd = 1 * BaseLedbat.MSS
self._cto = 2 * self._cto
def _update_cto(self, rtt_values):
"""Calculate congestion timeout (CTO)"""
pass
def _filter_alg(self, filter_data):
"""Implements FILTER() algorithm"""
# Implemented per [RFC6817] MIN filter over a small window
# multiplied by -1 to get latest window_size values
window_size = -1 * math.ceil(self.BASE_HISTORY/4)
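        # e.g. with BASE_HISTORY = 10 and true division, -ceil(10 / 4) = -3, so only
        # the three newest samples are considered for the minimum.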
return min(filter_data[window_size:])
def _update_base_delay(self, delay):
"""Update value in base_delay tracker list"""
t_now = time.time()
# Implemented per [RFC6817]
minute_now = datetime.datetime.fromtimestamp(t_now).minute
minute_then = datetime.datetime.fromtimestamp(self._last_rollover).minute
if minute_now != minute_then:
# Shift value at next minute
self._last_rollover = t_now
self._base_delays = self._base_delays[1:]
self._base_delays.append(delay)
else:
# For each measurements during the same minute keep minimum value
# at the end of the list
self._base_delays[-1] = min([self._base_delays[-1], delay])
def _update_current_delay(self, delay):
"""Add new value to the current delays list"""
# Implemented per [RFC6817]
self._current_delays = self._current_delays[1:]
self._current_delays.append(delay)
| apache-2.0 | 1,560,566,274,252,504,300 | 38.274854 | 99 | 0.599315 | false |
fountainhead-gq/DjangoBlog | blog/models.py | 1 | 5201 | from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from redactor.fields import RedactorField
classify = {
'L': u'life',
'E': u'essay',
'T': u'tech',
}
class TimeStampedModel(models.Model):
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
class Author(models.Model):
user = models.ForeignKey(User, related_name='author')
avatar = models.ImageField(upload_to='gallery/avatar/%Y/%m/%d',
null=True,
blank=True,
help_text="Upload your photo for Avatar")
about = models.TextField(blank=True, null=True)
website = models.URLField(max_length=200, blank=True, null=True)
def __str__(self):
return self.user.username
def get_absolute_url(self):
return reverse('author_posts_page',
kwargs={'username': self.user.username})
class Meta:
verbose_name = 'Detail Author'
verbose_name_plural = 'Authors'
class Category(models.Model):
name = models.CharField(max_length=100)
slug = models.SlugField(max_length=200, unique=True)
classify = models.CharField(max_length=5, choices=classify.items(), verbose_name=u'classify')
def __str__(self):
return self.name
class Meta:
verbose_name = 'Detail Category'
verbose_name_plural = 'Category'
class Tag(models.Model):
title = models.CharField(max_length=200)
slug = models.SlugField(max_length=200, unique=True)
def __str__(self):
return self.title
@property
def get_total_posts(self):
return Post.objects.filter(tags__pk=self.pk).count()
class Meta:
verbose_name = 'Detail Tag'
verbose_name_plural = 'Tags'
class PostQuerySet(models.QuerySet):
def published(self):
return self.filter(publish=True)
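# Illustrative use once Post (below) is defined, e.g. in a view:
#   Post.objects.published().filter(category__slug='tech')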
class Post(TimeStampedModel):
author = models.ForeignKey(Author, related_name='author_post')
title = models.CharField(max_length=200)
slug = models.SlugField(max_length=200, unique=True)
cover = models.ImageField(upload_to='gallery/covers/%Y/%m/%d',
null=True,
blank=True,
help_text='Optional cover post')
description = models.TextField()
# description = RedactorField()
category = models.ForeignKey(Category)
tags = models.ManyToManyField('Tag')
keywords = models.CharField(max_length=200, null=True, blank=True,
                                help_text='Keywords separated by commas.')
meta_description = models.TextField(null=True, blank=True)
publish = models.BooleanField(default=True)
objects = PostQuerySet.as_manager()
views = models.PositiveIntegerField(default=0)
def increase_views(self):
self.views +=1
self.save(update_fields=['views'])
def get_absolute_url(self):
return reverse('detail_post_page', kwargs={'slug': self.slug})
@property
def total_visitors(self):
return Visitor.objects.filter(post__pk=self.pk).count()
def __str__(self):
return self.title
class Meta:
verbose_name = 'Detail Post'
verbose_name_plural = 'Posts'
ordering = ["-created"]
class Page(TimeStampedModel):
author = models.ForeignKey(Author, related_name='author_page')
title = models.CharField(max_length=200)
slug = models.SlugField(max_length=200, unique=True)
description = RedactorField()
publish = models.BooleanField(default=True)
def __str__(self):
return self.title
# this will be an error in /admin
# def get_absolute_url(self):
# return reverse("page_detail", kwargs={"slug": self.slug})
class Meta:
verbose_name = "Detail Page"
verbose_name_plural = "Pages"
ordering = ["-created"]
class Gallery(TimeStampedModel):
title = models.CharField(max_length=200)
attachment = models.FileField(upload_to='gallery/attachment/%Y/%m/%d')
def __str__(self):
return self.title
def check_if_image(self):
if self.attachment.name.split('.')[-1].lower() \
in ['jpg', 'jpeg', 'gif', 'png']:
return ('<img height="40" width="60" src="%s"/>' % self.attachment.url)
return ('<img height="40" width="60" src="/static/assets/icons/file-icon.png"/>')
check_if_image.short_description = 'Attachment'
check_if_image.allow_tags = True
class Meta:
verbose_name = 'Detail Gallery'
verbose_name_plural = 'Galleries'
ordering = ['-created']
class Visitor(TimeStampedModel):
post = models.ForeignKey(Post, related_name='post_visitor')
ip = models.CharField(max_length=40)
def __str__(self):
return self.post.title
class Meta:
verbose_name = 'Detail Visitor'
verbose_name_plural = 'Visitors'
ordering = ['-created']
| gpl-2.0 | 2,796,394,287,783,963,600 | 29.594118 | 97 | 0.622188 | false |
AutorestCI/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_11_01/models/express_route_circuit_sku.py | 1 | 1525 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ExpressRouteCircuitSku(Model):
"""Contains SKU in an ExpressRouteCircuit.
:param name: The name of the SKU.
:type name: str
:param tier: The tier of the SKU. Possible values are 'Standard' and
'Premium'. Possible values include: 'Standard', 'Premium'
:type tier: str or
~azure.mgmt.network.v2017_11_01.models.ExpressRouteCircuitSkuTier
:param family: The family of the SKU. Possible values are: 'UnlimitedData'
and 'MeteredData'. Possible values include: 'UnlimitedData', 'MeteredData'
:type family: str or
~azure.mgmt.network.v2017_11_01.models.ExpressRouteCircuitSkuFamily
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
'family': {'key': 'family', 'type': 'str'},
}
def __init__(self, name=None, tier=None, family=None):
super(ExpressRouteCircuitSku, self).__init__()
self.name = name
self.tier = tier
self.family = family
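# Example with illustrative values -- a Premium, metered-data circuit SKU:
#   sku = ExpressRouteCircuitSku(name='Premium_MeteredData', tier='Premium',
#                                family='MeteredData')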
| mit | -7,039,590,627,522,585,000 | 37.125 | 79 | 0.602623 | false |
basho/riak-python-client | riak/tests/test_misc.py | 1 | 1659 | # Copyright 2010-present Basho Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
class MiscTests(unittest.TestCase):
def test_timeout_validation(self):
from riak.client.operations import _validate_timeout
# valid cases
try:
_validate_timeout(None)
_validate_timeout(None, infinity_ok=True)
_validate_timeout('infinity', infinity_ok=True)
_validate_timeout(1234)
_validate_timeout(1234567898765432123456789)
except ValueError:
self.fail('_validate_timeout() unexpectedly raised ValueError')
# invalid cases
with self.assertRaises(ValueError):
_validate_timeout('infinity')
with self.assertRaises(ValueError):
_validate_timeout('infinity-foo')
with self.assertRaises(ValueError):
_validate_timeout('foobarbaz')
with self.assertRaises(ValueError):
_validate_timeout('1234')
with self.assertRaises(ValueError):
_validate_timeout(0)
with self.assertRaises(ValueError):
_validate_timeout(12.34)
| apache-2.0 | 4,406,450,344,882,683,400 | 38.5 | 75 | 0.675105 | false |
depet/scikit-learn | sklearn/utils/tests/test_extmath.py | 1 | 12828 | # Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Denis Engemann <d.engemann@fz-juelich.de>
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse
from scipy import linalg
from scipy import stats
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises, assert_raise_message
from sklearn.utils.extmath import density
from sklearn.utils.extmath import logsumexp
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.extmath import weighted_mode
from sklearn.utils.extmath import cartesian
from sklearn.utils.extmath import logistic_sigmoid
from sklearn.utils.extmath import fast_dot
from sklearn.utils.validation import NonBLASDotWarning
from sklearn.datasets.samples_generator import make_low_rank_matrix
def test_density():
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 5))
X[1, 2] = 0
X[5, 3] = 0
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_coo = sparse.coo_matrix(X)
X_lil = sparse.lil_matrix(X)
for X_ in (X_csr, X_csc, X_coo, X_lil):
assert_equal(density(X_), density(X))
def test_uniform_weights():
# with uniform weights, results should be identical to stats.mode
rng = np.random.RandomState(0)
x = rng.randint(10, size=(10, 5))
weights = np.ones(x.shape)
for axis in (None, 0, 1):
mode, score = stats.mode(x, axis)
mode2, score2 = weighted_mode(x, weights, axis)
assert_true(np.all(mode == mode2))
assert_true(np.all(score == score2))
def test_random_weights():
# set this up so that each row should have a weighted mode of 6,
# with a score that is easily reproduced
mode_result = 6
rng = np.random.RandomState(0)
x = rng.randint(mode_result, size=(100, 10))
w = rng.random_sample(x.shape)
x[:, :5] = mode_result
w[:, :5] += 1
mode, score = weighted_mode(x, w, axis=1)
assert_true(np.all(mode == mode_result))
assert_true(np.all(score.ravel() == w[:, :5].sum(1)))
def test_logsumexp():
# Try to add some smallish numbers in logspace
x = np.array([1e-40] * 1000000)
logx = np.log(x)
assert_almost_equal(np.exp(logsumexp(logx)), x.sum())
X = np.vstack([x, x])
logX = np.vstack([logx, logx])
assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0))
assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1))
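# The checks above hold because logsumexp uses the standard shift identity
# log(sum_i exp(x_i)) = m + log(sum_i exp(x_i - m)) with m = max_i x_i, which keeps
# exp() from underflowing even for inputs as small as log(1e-40).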
def test_randomized_svd_low_rank():
"""Check that extmath.randomized_svd is consistent with linalg.svd"""
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X of approximate effective rank `rank` and no noise
# component (very structured signal):
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
U, s, V = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(X, k)
assert_equal(Ua.shape, (n_samples, k))
assert_equal(sa.shape, (k,))
assert_equal(Va.shape, (k, n_features))
# ensure that the singular values of both methods are equal up to the real
# rank of the matrix
assert_almost_equal(s[:k], sa)
# check the singular vectors too (while not checking the sign)
assert_almost_equal(np.dot(U[:, :k], V[:k, :]), np.dot(Ua, Va))
# check the sparse matrix representation
X = sparse.csr_matrix(X)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(X, k)
assert_almost_equal(s[:rank], sa[:rank])
def test_randomized_svd_low_rank_with_noise():
"""Check that extmath.randomized_svd can handle noisy matrices"""
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X wity structure approximate rank `rank` and an
# important noisy component
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.05)
# compute the singular values of X using the fast approximate method with
# iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5)
# the iterated power method is helping getting rid of the noise:
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_infinite_rank():
"""Check that extmath.randomized_svd can handle noisy matrices"""
n_samples = 100
n_features = 500
rank = 5
k = 10
# let us try again without 'low_rank component': just regularly but slowly
# decreasing singular values: the rank of the data matrix is infinite
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=1.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.1)
# compute the singular values of X using the fast approximate method with
# iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5)
# the iterated power method is still managing to get most of the structure
# at the requested rank
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_transpose_consistency():
"""Check that transposing the design matrix has limit impact"""
n_samples = 100
n_features = 500
rank = 4
k = 10
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
U1, s1, V1 = randomized_svd(X, k, n_iter=3, transpose=False,
random_state=0)
U2, s2, V2 = randomized_svd(X, k, n_iter=3, transpose=True,
random_state=0)
U3, s3, V3 = randomized_svd(X, k, n_iter=3, transpose='auto',
random_state=0)
U4, s4, V4 = linalg.svd(X, full_matrices=False)
assert_almost_equal(s1, s4[:k], decimal=3)
assert_almost_equal(s2, s4[:k], decimal=3)
assert_almost_equal(s3, s4[:k], decimal=3)
assert_almost_equal(np.dot(U1, V1), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
assert_almost_equal(np.dot(U2, V2), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
# in this case 'auto' is equivalent to transpose
assert_almost_equal(s2, s3)
def test_randomized_svd_sign_flip():
a = np.array([[2.0, 0.0], [0.0, 1.0]])
u1, s1, v1 = randomized_svd(a, 2, flip_sign=True, random_state=41)
for seed in range(10):
u2, s2, v2 = randomized_svd(a, 2, flip_sign=True, random_state=seed)
assert_almost_equal(u1, u2)
assert_almost_equal(v1, v2)
assert_almost_equal(np.dot(u2 * s2, v2), a)
assert_almost_equal(np.dot(u2.T, u2), np.eye(2))
assert_almost_equal(np.dot(v2.T, v2), np.eye(2))
def test_cartesian():
"""Check if cartesian product delivers the right results"""
axes = (np.array([1, 2, 3]), np.array([4, 5]), np.array([6, 7]))
true_out = np.array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
out = cartesian(axes)
assert_array_equal(true_out, out)
# check single axis
x = np.arange(3)
assert_array_equal(x[:, np.newaxis], cartesian((x,)))
def test_logistic_sigmoid():
"""Check correctness and robustness of logistic sigmoid implementation"""
naive_logsig = lambda x: 1 / (1 + np.exp(-x))
naive_log_logsig = lambda x: np.log(naive_logsig(x))
# Simulate the previous Cython implementations of logistic_sigmoid based on
#http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression
def stable_logsig(x):
out = np.zeros_like(x)
positive = x > 0
negative = x <= 0
out[positive] = 1. / (1 + np.exp(-x[positive]))
out[negative] = np.exp(x[negative]) / (1. + np.exp(x[negative]))
return out
x = np.linspace(-2, 2, 50)
assert_array_almost_equal(logistic_sigmoid(x), naive_logsig(x))
assert_array_almost_equal(logistic_sigmoid(x, log=True),
naive_log_logsig(x))
assert_array_almost_equal(logistic_sigmoid(x), stable_logsig(x),
decimal=16)
extreme_x = np.array([-100, 100], dtype=np.float)
assert_array_almost_equal(logistic_sigmoid(extreme_x), [0, 1])
assert_array_almost_equal(logistic_sigmoid(extreme_x, log=True), [-100, 0])
assert_array_almost_equal(logistic_sigmoid(extreme_x),
stable_logsig(extreme_x),
decimal=16)
def test_fast_dot():
"""Check fast dot blas wrapper function"""
rng = np.random.RandomState(42)
A = rng.random_sample([2, 10])
B = rng.random_sample([2, 10])
    try:
        linalg.get_blas_funcs(['gemm'])
        has_blas = True
    except (AttributeError, ValueError):
        has_blas = False
if has_blas:
# test dispatch to np.dot
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', NonBLASDotWarning)
# maltyped data
for dt1, dt2 in [['f8', 'f4'], ['i4', 'i4']]:
fast_dot(A.astype(dt1), B.astype(dt2).T)
assert_true(type(w.pop(-1)) == NonBLASDotWarning)
# malformed data
# ndim == 0
E = np.empty(0)
fast_dot(E, E)
assert_true(type(w.pop(-1)) == NonBLASDotWarning)
## ndim == 1
fast_dot(A, A[0])
assert_true(type(w.pop(-1)) == NonBLASDotWarning)
## ndim > 2
fast_dot(A.T, np.array([A, A]))
assert_true(type(w.pop(-1)) == NonBLASDotWarning)
## min(shape) == 1
fast_dot(A, A[0, :][None, :])
assert_true(type(w.pop(-1)) == NonBLASDotWarning)
# test for matrix mismatch error
msg = ('Invalid array shapes: A.shape[%d] should be the same as '
'B.shape[0]. Got A.shape=%r B.shape=%r' % (A.ndim - 1,
A.shape, A.shape))
assert_raise_message(msg, fast_dot, A, A)
# test cov-like use case + dtypes
my_assert = assert_array_almost_equal
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
# col < row
C = np.dot(A.T, A)
C_ = fast_dot(A.T, A)
my_assert(C, C_)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
my_assert(C, C_)
C = np.dot(A, B.T)
C_ = fast_dot(A, B.T)
my_assert(C, C_)
# test square matrix * rectangular use case
A = rng.random_sample([2, 2])
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
C = np.dot(A, B)
C_ = fast_dot(A, B)
my_assert(C, C_)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
my_assert(C, C_)
if has_blas:
for x in [np.array([[d] * 10] * 2) for d in [np.inf, np.nan]]:
assert_raises(ValueError, fast_dot, x, x.T)
| bsd-3-clause | -6,668,223,828,831,916,000 | 33.858696 | 79 | 0.589258 | false |
direvus/ansible | test/runner/lib/executor.py | 1 | 50644 | """Execute Ansible tests."""
from __future__ import absolute_import, print_function
import json
import os
import collections
import datetime
import re
import tempfile
import time
import textwrap
import functools
import pipes
import sys
import hashlib
import lib.pytar
import lib.thread
from lib.core_ci import (
AnsibleCoreCI,
SshKey,
)
from lib.manage_ci import (
ManageWindowsCI,
ManageNetworkCI,
)
from lib.cloud import (
cloud_filter,
cloud_init,
get_cloud_environment,
get_cloud_platforms,
)
from lib.util import (
ApplicationWarning,
ApplicationError,
SubprocessError,
display,
run_command,
intercept_command,
remove_tree,
make_dirs,
is_shippable,
is_binary_file,
find_executable,
raw_command,
get_coverage_path,
get_available_port,
generate_pip_command,
find_python,
get_docker_completion,
)
from lib.docker_util import (
docker_pull,
docker_run,
get_docker_container_id,
get_docker_container_ip,
)
from lib.ansible_util import (
ansible_environment,
)
from lib.target import (
IntegrationTarget,
walk_external_targets,
walk_internal_targets,
walk_posix_integration_targets,
walk_network_integration_targets,
walk_windows_integration_targets,
walk_units_targets,
)
from lib.changes import (
ShippableChanges,
LocalChanges,
)
from lib.git import (
Git,
)
from lib.classification import (
categorize_changes,
)
from lib.config import (
TestConfig,
EnvironmentConfig,
IntegrationConfig,
NetworkIntegrationConfig,
PosixIntegrationConfig,
ShellConfig,
UnitsConfig,
WindowsIntegrationConfig,
)
from lib.metadata import (
ChangeDescription,
)
SUPPORTED_PYTHON_VERSIONS = (
'2.6',
'2.7',
'3.5',
'3.6',
'3.7',
)
HTTPTESTER_HOSTS = (
'ansible.http.tests',
'sni1.ansible.http.tests',
'fail.ansible.http.tests',
)
def check_startup():
"""Checks to perform at startup before running commands."""
check_legacy_modules()
def check_legacy_modules():
"""Detect conflicts with legacy core/extras module directories to avoid problems later."""
for directory in 'core', 'extras':
path = 'lib/ansible/modules/%s' % directory
for root, _, file_names in os.walk(path):
if file_names:
# the directory shouldn't exist, but if it does, it must contain no files
raise ApplicationError('Files prohibited in "%s". '
'These are most likely legacy modules from version 2.2 or earlier.' % root)
def create_shell_command(command):
"""
:type command: list[str]
:rtype: list[str]
"""
optional_vars = (
'TERM',
)
cmd = ['/usr/bin/env']
cmd += ['%s=%s' % (var, os.environ[var]) for var in optional_vars if var in os.environ]
cmd += command
return cmd
def install_command_requirements(args, python_version=None):
"""
:type args: EnvironmentConfig
:type python_version: str | None
"""
generate_egg_info(args)
if not args.requirements:
return
if isinstance(args, ShellConfig):
return
packages = []
if isinstance(args, TestConfig):
if args.coverage:
packages.append('coverage')
if args.junit:
packages.append('junit-xml')
if not python_version:
python_version = args.python_version
pip = generate_pip_command(find_python(python_version))
commands = [generate_pip_install(pip, args.command, packages=packages)]
if isinstance(args, IntegrationConfig):
for cloud_platform in get_cloud_platforms(args):
commands.append(generate_pip_install(pip, '%s.cloud.%s' % (args.command, cloud_platform)))
commands = [cmd for cmd in commands if cmd]
# only look for changes when more than one requirements file is needed
detect_pip_changes = len(commands) > 1
# first pass to install requirements, changes expected unless environment is already set up
changes = run_pip_commands(args, pip, commands, detect_pip_changes)
if not changes:
return # no changes means we can stop early
# second pass to check for conflicts in requirements, changes are not expected here
changes = run_pip_commands(args, pip, commands, detect_pip_changes)
if not changes:
return # no changes means no conflicts
raise ApplicationError('Conflicts detected in requirements. The following commands reported changes during verification:\n%s' %
'\n'.join((' '.join(pipes.quote(c) for c in cmd) for cmd in changes)))
def run_pip_commands(args, pip, commands, detect_pip_changes=False):
"""
:type args: EnvironmentConfig
:type pip: list[str]
:type commands: list[list[str]]
:type detect_pip_changes: bool
:rtype: list[list[str]]
"""
changes = []
after_list = pip_list(args, pip) if detect_pip_changes else None
for cmd in commands:
if not cmd:
continue
before_list = after_list
try:
run_command(args, cmd)
except SubprocessError as ex:
if ex.status != 2:
raise
# If pip is too old it won't understand the arguments we passed in, so we'll need to upgrade it.
# Installing "coverage" on ubuntu 16.04 fails with the error:
# AttributeError: 'Requirement' object has no attribute 'project_name'
# See: https://bugs.launchpad.net/ubuntu/xenial/+source/python-pip/+bug/1626258
# Upgrading pip works around the issue.
run_command(args, pip + ['install', '--upgrade', 'pip'])
run_command(args, cmd)
after_list = pip_list(args, pip) if detect_pip_changes else None
if before_list != after_list:
changes.append(cmd)
return changes
def pip_list(args, pip):
"""
:type args: EnvironmentConfig
:type pip: list[str]
:rtype: str
"""
stdout, _ = run_command(args, pip + ['list'], capture=True)
return stdout
def generate_egg_info(args):
"""
:type args: EnvironmentConfig
"""
if os.path.isdir('lib/ansible.egg-info'):
return
run_command(args, [args.python_executable, 'setup.py', 'egg_info'], capture=args.verbosity < 3)
def generate_pip_install(pip, command, packages=None):
"""
:type pip: list[str]
:type command: str
:type packages: list[str] | None
:rtype: list[str] | None
"""
constraints = 'test/runner/requirements/constraints.txt'
requirements = 'test/runner/requirements/%s.txt' % command
options = []
if os.path.exists(requirements) and os.path.getsize(requirements):
options += ['-r', requirements]
if packages:
options += packages
if not options:
return None
return pip + ['install', '--disable-pip-version-check', '-c', constraints] + options
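# For reference, generate_pip_install(['pip'], 'units') produces roughly:
#   ['pip', 'install', '--disable-pip-version-check',
#    '-c', 'test/runner/requirements/constraints.txt',
#    '-r', 'test/runner/requirements/units.txt']
# assuming the units requirements file exists and is non-empty.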
def command_shell(args):
"""
:type args: ShellConfig
"""
if args.delegate:
raise Delegate()
install_command_requirements(args)
if args.inject_httptester:
inject_httptester(args)
cmd = create_shell_command(['bash', '-i'])
run_command(args, cmd)
def command_posix_integration(args):
"""
:type args: PosixIntegrationConfig
"""
all_targets = tuple(walk_posix_integration_targets(include_hidden=True))
internal_targets = command_integration_filter(args, all_targets)
command_integration_filtered(args, internal_targets, all_targets)
def command_network_integration(args):
"""
:type args: NetworkIntegrationConfig
"""
default_filename = 'test/integration/inventory.networking'
if args.inventory:
filename = os.path.join('test/integration', args.inventory)
else:
filename = default_filename
if not args.explain and not args.platform and not os.path.exists(filename):
if args.inventory:
filename = os.path.abspath(filename)
raise ApplicationError(
'Inventory not found: %s\n'
'Use --inventory to specify the inventory path.\n'
'Use --platform to provision resources and generate an inventory file.\n'
'See also inventory template: %s.template' % (filename, default_filename)
)
all_targets = tuple(walk_network_integration_targets(include_hidden=True))
internal_targets = command_integration_filter(args, all_targets, init_callback=network_init)
instances = [] # type: list [lib.thread.WrappedThread]
if args.platform:
get_coverage_path(args) # initialize before starting threads
configs = dict((config['platform_version'], config) for config in args.metadata.instance_config)
for platform_version in args.platform:
platform, version = platform_version.split('/', 1)
config = configs.get(platform_version)
if not config:
continue
instance = lib.thread.WrappedThread(functools.partial(network_run, args, platform, version, config))
instance.daemon = True
instance.start()
instances.append(instance)
while any(instance.is_alive() for instance in instances):
time.sleep(1)
remotes = [instance.wait_for_result() for instance in instances]
inventory = network_inventory(remotes)
display.info('>>> Inventory: %s\n%s' % (filename, inventory.strip()), verbosity=3)
if not args.explain:
with open(filename, 'w') as inventory_fd:
inventory_fd.write(inventory)
success = False
try:
command_integration_filtered(args, internal_targets, all_targets)
success = True
finally:
if args.remote_terminate == 'always' or (args.remote_terminate == 'success' and success):
for instance in instances:
instance.result.stop()
def network_init(args, internal_targets):
"""
:type args: NetworkIntegrationConfig
:type internal_targets: tuple[IntegrationTarget]
"""
if not args.platform:
return
if args.metadata.instance_config is not None:
return
platform_targets = set(a for t in internal_targets for a in t.aliases if a.startswith('network/'))
instances = [] # type: list [lib.thread.WrappedThread]
# generate an ssh key (if needed) up front once, instead of for each instance
SshKey(args)
for platform_version in args.platform:
platform, version = platform_version.split('/', 1)
platform_target = 'network/%s/' % platform
if platform_target not in platform_targets:
display.warning('Skipping "%s" because selected tests do not target the "%s" platform.' % (
platform_version, platform))
continue
instance = lib.thread.WrappedThread(functools.partial(network_start, args, platform, version))
instance.daemon = True
instance.start()
instances.append(instance)
while any(instance.is_alive() for instance in instances):
time.sleep(1)
args.metadata.instance_config = [instance.wait_for_result() for instance in instances]
def network_start(args, platform, version):
"""
:type args: NetworkIntegrationConfig
:type platform: str
:type version: str
:rtype: AnsibleCoreCI
"""
core_ci = AnsibleCoreCI(args, platform, version, stage=args.remote_stage, provider=args.remote_provider)
core_ci.start()
return core_ci.save()
def network_run(args, platform, version, config):
"""
:type args: NetworkIntegrationConfig
:type platform: str
:type version: str
:type config: dict[str, str]
:rtype: AnsibleCoreCI
"""
core_ci = AnsibleCoreCI(args, platform, version, stage=args.remote_stage, provider=args.remote_provider, load=False)
core_ci.load(config)
core_ci.wait()
manage = ManageNetworkCI(core_ci)
manage.wait()
return core_ci
def network_inventory(remotes):
"""
:type remotes: list[AnsibleCoreCI]
:rtype: str
"""
groups = dict([(remote.platform, []) for remote in remotes])
net = []
for remote in remotes:
options = dict(
ansible_host=remote.connection.hostname,
ansible_user=remote.connection.username,
ansible_ssh_private_key_file=os.path.abspath(remote.ssh_key.key),
ansible_network_os=remote.platform,
ansible_connection='local'
)
groups[remote.platform].append(
'%s %s' % (
remote.name.replace('.', '-'),
' '.join('%s="%s"' % (k, options[k]) for k in sorted(options)),
)
)
net.append(remote.platform)
groups['net:children'] = net
template = ''
for group in groups:
hosts = '\n'.join(groups[group])
template += textwrap.dedent("""
[%s]
%s
""") % (group, hosts)
inventory = template
return inventory
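# The rendered inventory ends up looking roughly like this (values illustrative):
#
#   [vyos]
#   instance-name ansible_connection="local" ansible_host="203.0.113.10" ansible_network_os="vyos" ...
#
#   [net:children]
#   vyos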
def command_windows_integration(args):
"""
:type args: WindowsIntegrationConfig
"""
filename = 'test/integration/inventory.winrm'
if not args.explain and not args.windows and not os.path.isfile(filename):
raise ApplicationError('Use the --windows option or provide an inventory file (see %s.template).' % filename)
all_targets = tuple(walk_windows_integration_targets(include_hidden=True))
internal_targets = command_integration_filter(args, all_targets, init_callback=windows_init)
instances = [] # type: list [lib.thread.WrappedThread]
if args.windows:
get_coverage_path(args) # initialize before starting threads
configs = dict((config['platform_version'], config) for config in args.metadata.instance_config)
for version in args.windows:
config = configs['windows/%s' % version]
instance = lib.thread.WrappedThread(functools.partial(windows_run, args, version, config))
instance.daemon = True
instance.start()
instances.append(instance)
while any(instance.is_alive() for instance in instances):
time.sleep(1)
remotes = [instance.wait_for_result() for instance in instances]
inventory = windows_inventory(remotes)
display.info('>>> Inventory: %s\n%s' % (filename, inventory.strip()), verbosity=3)
if not args.explain:
with open(filename, 'w') as inventory_fd:
inventory_fd.write(inventory)
success = False
try:
command_integration_filtered(args, internal_targets, all_targets)
success = True
finally:
if args.remote_terminate == 'always' or (args.remote_terminate == 'success' and success):
for instance in instances:
instance.result.stop()
# noinspection PyUnusedLocal
def windows_init(args, internal_targets): # pylint: disable=locally-disabled, unused-argument
"""
:type args: WindowsIntegrationConfig
:type internal_targets: tuple[IntegrationTarget]
"""
if not args.windows:
return
if args.metadata.instance_config is not None:
return
instances = [] # type: list [lib.thread.WrappedThread]
for version in args.windows:
instance = lib.thread.WrappedThread(functools.partial(windows_start, args, version))
instance.daemon = True
instance.start()
instances.append(instance)
while any(instance.is_alive() for instance in instances):
time.sleep(1)
args.metadata.instance_config = [instance.wait_for_result() for instance in instances]
def windows_start(args, version):
"""
:type args: WindowsIntegrationConfig
:type version: str
:rtype: AnsibleCoreCI
"""
core_ci = AnsibleCoreCI(args, 'windows', version, stage=args.remote_stage, provider=args.remote_provider)
core_ci.start()
return core_ci.save()
def windows_run(args, version, config):
"""
:type args: WindowsIntegrationConfig
:type version: str
:type config: dict[str, str]
:rtype: AnsibleCoreCI
"""
core_ci = AnsibleCoreCI(args, 'windows', version, stage=args.remote_stage, provider=args.remote_provider, load=False)
core_ci.load(config)
core_ci.wait()
manage = ManageWindowsCI(core_ci)
manage.wait()
return core_ci
def windows_inventory(remotes):
"""
:type remotes: list[AnsibleCoreCI]
:rtype: str
"""
hosts = []
for remote in remotes:
options = dict(
ansible_host=remote.connection.hostname,
ansible_user=remote.connection.username,
ansible_password=remote.connection.password,
ansible_port=remote.connection.port,
)
hosts.append(
'%s %s' % (
remote.name.replace('/', '_'),
' '.join('%s="%s"' % (k, options[k]) for k in sorted(options)),
)
)
template = """
[windows]
%s
[windows:vars]
ansible_connection=winrm
ansible_winrm_server_cert_validation=ignore
# support winrm connection tests (temporary solution, does not support testing enable/disable of pipelining)
[winrm:children]
windows
# support winrm binary module tests (temporary solution)
[testhost_binary_modules:children]
windows
"""
template = textwrap.dedent(template)
inventory = template % ('\n'.join(hosts))
return inventory
def command_integration_filter(args, targets, init_callback=None):
"""
:type args: IntegrationConfig
:type targets: collections.Iterable[IntegrationTarget]
:type init_callback: (IntegrationConfig, tuple[IntegrationTarget]) -> None
:rtype: tuple[IntegrationTarget]
"""
targets = tuple(target for target in targets if 'hidden/' not in target.aliases)
changes = get_changes_filter(args)
require = (args.require or []) + changes
exclude = (args.exclude or [])
internal_targets = walk_internal_targets(targets, args.include, exclude, require)
environment_exclude = get_integration_filter(args, internal_targets)
environment_exclude += cloud_filter(args, internal_targets)
if environment_exclude:
exclude += environment_exclude
internal_targets = walk_internal_targets(targets, args.include, exclude, require)
if not internal_targets:
raise AllTargetsSkipped()
if args.start_at and not any(t.name == args.start_at for t in internal_targets):
raise ApplicationError('Start at target matches nothing: %s' % args.start_at)
if init_callback:
init_callback(args, internal_targets)
cloud_init(args, internal_targets)
if args.delegate:
raise Delegate(require=changes, exclude=exclude, integration_targets=internal_targets)
install_command_requirements(args)
return internal_targets
def command_integration_filtered(args, targets, all_targets):
"""
:type args: IntegrationConfig
:type targets: tuple[IntegrationTarget]
:type all_targets: tuple[IntegrationTarget]
"""
found = False
passed = []
failed = []
targets_iter = iter(targets)
all_targets_dict = dict((target.name, target) for target in all_targets)
setup_errors = []
setup_targets_executed = set()
for target in all_targets:
for setup_target in target.setup_once + target.setup_always:
if setup_target not in all_targets_dict:
setup_errors.append('Target "%s" contains invalid setup target: %s' % (target.name, setup_target))
if setup_errors:
raise ApplicationError('Found %d invalid setup aliases:\n%s' % (len(setup_errors), '\n'.join(setup_errors)))
test_dir = os.path.expanduser('~/ansible_testing')
if not args.explain and any('needs/ssh/' in target.aliases for target in targets):
max_tries = 20
display.info('SSH service required for tests. Checking to make sure we can connect.')
for i in range(1, max_tries + 1):
try:
run_command(args, ['ssh', '-o', 'BatchMode=yes', 'localhost', 'id'], capture=True)
display.info('SSH service responded.')
break
except SubprocessError:
if i == max_tries:
raise
seconds = 3
display.warning('SSH service not responding. Waiting %d second(s) before checking again.' % seconds)
time.sleep(seconds)
if args.inject_httptester:
inject_httptester(args)
start_at_task = args.start_at_task
results = {}
for target in targets_iter:
if args.start_at and not found:
found = target.name == args.start_at
if not found:
continue
if args.list_targets:
print(target.name)
continue
tries = 2 if args.retry_on_error else 1
verbosity = args.verbosity
cloud_environment = get_cloud_environment(args, target)
original_environment = EnvironmentDescription(args)
display.info('>>> Environment Description\n%s' % original_environment, verbosity=3)
try:
while tries:
tries -= 1
try:
if cloud_environment:
cloud_environment.setup_once()
run_setup_targets(args, test_dir, target.setup_once, all_targets_dict, setup_targets_executed, False)
start_time = time.time()
run_setup_targets(args, test_dir, target.setup_always, all_targets_dict, setup_targets_executed, True)
if not args.explain:
# create a fresh test directory for each test target
remove_tree(test_dir)
make_dirs(test_dir)
if target.script_path:
command_integration_script(args, target)
else:
command_integration_role(args, target, start_at_task)
start_at_task = None
end_time = time.time()
results[target.name] = dict(
name=target.name,
type=target.type,
aliases=target.aliases,
modules=target.modules,
run_time_seconds=int(end_time - start_time),
setup_once=target.setup_once,
setup_always=target.setup_always,
coverage=args.coverage,
coverage_label=args.coverage_label,
python_version=args.python_version,
)
break
except SubprocessError:
if cloud_environment:
cloud_environment.on_failure(target, tries)
if not original_environment.validate(target.name, throw=False):
raise
if not tries:
raise
display.warning('Retrying test target "%s" with maximum verbosity.' % target.name)
display.verbosity = args.verbosity = 6
start_time = time.time()
original_environment.validate(target.name, throw=True)
end_time = time.time()
results[target.name]['validation_seconds'] = int(end_time - start_time)
passed.append(target)
except Exception as ex:
failed.append(target)
if args.continue_on_error:
display.error(ex)
continue
display.notice('To resume at this test target, use the option: --start-at %s' % target.name)
next_target = next(targets_iter, None)
if next_target:
display.notice('To resume after this test target, use the option: --start-at %s' % next_target.name)
raise
finally:
display.verbosity = args.verbosity = verbosity
if not args.explain:
results_path = 'test/results/data/%s-%s.json' % (args.command, re.sub(r'[^0-9]', '-', str(datetime.datetime.utcnow().replace(microsecond=0))))
data = dict(
targets=results,
)
with open(results_path, 'w') as results_fd:
results_fd.write(json.dumps(data, sort_keys=True, indent=4))
if failed:
raise ApplicationError('The %d integration test(s) listed below (out of %d) failed. See error output above for details:\n%s' % (
len(failed), len(passed) + len(failed), '\n'.join(target.name for target in failed)))
def start_httptester(args):
"""
:type args: EnvironmentConfig
:rtype: str, list[str]
"""
# map ports from remote -> localhost -> container
# passing through localhost is only used when ansible-test is not already running inside a docker container
ports = [
dict(
remote=8080,
container=80,
),
dict(
remote=8443,
container=443,
),
]
container_id = get_docker_container_id()
if container_id:
display.info('Running in docker container: %s' % container_id, verbosity=1)
else:
for item in ports:
item['localhost'] = get_available_port()
docker_pull(args, args.httptester)
httptester_id = run_httptester(args, dict((port['localhost'], port['container']) for port in ports if 'localhost' in port))
if container_id:
container_host = get_docker_container_ip(args, httptester_id)
display.info('Found httptester container address: %s' % container_host, verbosity=1)
else:
container_host = 'localhost'
ssh_options = []
for port in ports:
ssh_options += ['-R', '%d:%s:%d' % (port['remote'], container_host, port.get('localhost', port['container']))]
return httptester_id, ssh_options
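# When running outside a container this yields ssh options along the lines of
#   ['-R', '8080:localhost:<local port>', '-R', '8443:localhost:<local port>']
# so the remote host reaches the httptester container through local port forwards.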
def run_httptester(args, ports=None):
"""
:type args: EnvironmentConfig
:type ports: dict[int, int] | None
:rtype: str
"""
options = [
'--detach',
]
if ports:
for localhost_port, container_port in ports.items():
options += ['-p', '%d:%d' % (localhost_port, container_port)]
httptester_id, _ = docker_run(args, args.httptester, options=options)
if args.explain:
httptester_id = 'httptester_id'
else:
httptester_id = httptester_id.strip()
return httptester_id
def inject_httptester(args):
"""
:type args: CommonConfig
"""
comment = ' # ansible-test httptester\n'
append_lines = ['127.0.0.1 %s%s' % (host, comment) for host in HTTPTESTER_HOSTS]
with open('/etc/hosts', 'r+') as hosts_fd:
original_lines = hosts_fd.readlines()
if not any(line.endswith(comment) for line in original_lines):
hosts_fd.writelines(append_lines)
# determine which forwarding mechanism to use
pfctl = find_executable('pfctl', required=False)
iptables = find_executable('iptables', required=False)
if pfctl:
kldload = find_executable('kldload', required=False)
if kldload:
try:
run_command(args, ['kldload', 'pf'], capture=True)
except SubprocessError:
pass # already loaded
rules = '''
rdr pass inet proto tcp from any to any port 80 -> 127.0.0.1 port 8080
rdr pass inet proto tcp from any to any port 443 -> 127.0.0.1 port 8443
'''
cmd = ['pfctl', '-ef', '-']
try:
run_command(args, cmd, capture=True, data=rules)
except SubprocessError:
pass # non-zero exit status on success
elif iptables:
ports = [
(80, 8080),
(443, 8443),
]
for src, dst in ports:
rule = ['-o', 'lo', '-p', 'tcp', '--dport', str(src), '-j', 'REDIRECT', '--to-port', str(dst)]
try:
# check for existing rule
cmd = ['iptables', '-t', 'nat', '-C', 'OUTPUT'] + rule
run_command(args, cmd, capture=True)
except SubprocessError:
# append rule when it does not exist
cmd = ['iptables', '-t', 'nat', '-A', 'OUTPUT'] + rule
run_command(args, cmd, capture=True)
else:
raise ApplicationError('No supported port forwarding mechanism detected.')
def run_setup_targets(args, test_dir, target_names, targets_dict, targets_executed, always):
"""
:type args: IntegrationConfig
:type test_dir: str
:type target_names: list[str]
:type targets_dict: dict[str, IntegrationTarget]
:type targets_executed: set[str]
:type always: bool
"""
for target_name in target_names:
if not always and target_name in targets_executed:
continue
target = targets_dict[target_name]
if not args.explain:
# create a fresh test directory for each test target
remove_tree(test_dir)
make_dirs(test_dir)
if target.script_path:
command_integration_script(args, target)
else:
command_integration_role(args, target, None)
targets_executed.add(target_name)
def integration_environment(args, target, cmd):
"""
:type args: IntegrationConfig
:type target: IntegrationTarget
:type cmd: list[str]
:rtype: dict[str, str]
"""
env = ansible_environment(args)
if args.inject_httptester:
env.update(dict(
HTTPTESTER='1',
))
integration = dict(
JUNIT_OUTPUT_DIR=os.path.abspath('test/results/junit'),
ANSIBLE_CALLBACK_WHITELIST='junit',
ANSIBLE_TEST_CI=args.metadata.ci_provider,
)
if args.debug_strategy:
env.update(dict(ANSIBLE_STRATEGY='debug'))
if 'non_local/' in target.aliases:
if args.coverage:
display.warning('Skipping coverage reporting for non-local test: %s' % target.name)
env.update(dict(ANSIBLE_TEST_REMOTE_INTERPRETER=''))
env.update(integration)
cloud_environment = get_cloud_environment(args, target)
if cloud_environment:
cloud_environment.configure_environment(env, cmd)
return env
def command_integration_script(args, target):
"""
:type args: IntegrationConfig
:type target: IntegrationTarget
"""
display.info('Running %s integration test script' % target.name)
cmd = ['./%s' % os.path.basename(target.script_path)]
if args.verbosity:
cmd.append('-' + ('v' * args.verbosity))
env = integration_environment(args, target, cmd)
cwd = target.path
intercept_command(args, cmd, target_name=target.name, env=env, cwd=cwd)
def command_integration_role(args, target, start_at_task):
"""
:type args: IntegrationConfig
:type target: IntegrationTarget
:type start_at_task: str | None
"""
display.info('Running %s integration test role' % target.name)
vars_file = 'integration_config.yml'
if isinstance(args, WindowsIntegrationConfig):
inventory = 'inventory.winrm'
hosts = 'windows'
gather_facts = False
elif isinstance(args, NetworkIntegrationConfig):
inventory = args.inventory or 'inventory.networking'
hosts = target.name[:target.name.find('_')]
gather_facts = False
else:
inventory = 'inventory'
hosts = 'testhost'
gather_facts = True
cloud_environment = get_cloud_environment(args, target)
if cloud_environment:
hosts = cloud_environment.inventory_hosts or hosts
playbook = '''
- hosts: %s
gather_facts: %s
roles:
- { role: %s }
''' % (hosts, gather_facts, target.name)
with tempfile.NamedTemporaryFile(dir='test/integration', prefix='%s-' % target.name, suffix='.yml') as pb_fd:
pb_fd.write(playbook.encode('utf-8'))
pb_fd.flush()
filename = os.path.basename(pb_fd.name)
display.info('>>> Playbook: %s\n%s' % (filename, playbook.strip()), verbosity=3)
cmd = ['ansible-playbook', filename, '-i', inventory, '-e', '@%s' % vars_file]
if start_at_task:
cmd += ['--start-at-task', start_at_task]
if args.tags:
cmd += ['--tags', args.tags]
if args.skip_tags:
cmd += ['--skip-tags', args.skip_tags]
if args.diff:
cmd += ['--diff']
if isinstance(args, NetworkIntegrationConfig):
if args.testcase:
cmd += ['-e', 'testcase=%s' % args.testcase]
if args.verbosity:
cmd.append('-' + ('v' * args.verbosity))
env = integration_environment(args, target, cmd)
cwd = 'test/integration'
env['ANSIBLE_ROLES_PATH'] = os.path.abspath('test/integration/targets')
intercept_command(args, cmd, target_name=target.name, env=env, cwd=cwd)
def command_units(args):
"""
:type args: UnitsConfig
"""
changes = get_changes_filter(args)
require = (args.require or []) + changes
include, exclude = walk_external_targets(walk_units_targets(), args.include, args.exclude, require)
if not include:
raise AllTargetsSkipped()
if args.delegate:
raise Delegate(require=changes)
version_commands = []
for version in SUPPORTED_PYTHON_VERSIONS:
# run all versions unless version given, in which case run only that version
if args.python and version != args.python_version:
continue
if args.requirements_mode != 'skip':
install_command_requirements(args, version)
env = ansible_environment(args)
cmd = [
'pytest',
'--boxed',
'-r', 'a',
'-n', 'auto',
'--color',
'yes' if args.color else 'no',
'--junit-xml',
'test/results/junit/python%s-units.xml' % version,
]
if args.collect_only:
cmd.append('--collect-only')
if args.verbosity:
cmd.append('-' + ('v' * args.verbosity))
if exclude:
cmd += ['--ignore=%s' % target.path for target in exclude]
cmd += [target.path for target in include]
version_commands.append((version, cmd, env))
if args.requirements_mode == 'only':
sys.exit()
for version, command, env in version_commands:
display.info('Unit test with Python %s' % version)
try:
intercept_command(args, command, target_name='units', env=env, python_version=version)
except SubprocessError as ex:
# pytest exits with status code 5 when all tests are skipped, which isn't an error for our use case
if ex.status != 5:
raise
def get_changes_filter(args):
"""
:type args: TestConfig
:rtype: list[str]
"""
paths = detect_changes(args)
if not args.metadata.change_description:
if paths:
changes = categorize_changes(args, paths, args.command)
else:
changes = ChangeDescription()
args.metadata.change_description = changes
if paths is None:
return [] # change detection not enabled, do not filter targets
if not paths:
raise NoChangesDetected()
if args.metadata.change_description.targets is None:
raise NoTestsForChanges()
return args.metadata.change_description.targets
def detect_changes(args):
"""
:type args: TestConfig
:rtype: list[str] | None
"""
if args.changed and is_shippable():
display.info('Shippable detected, collecting parameters from environment.')
paths = detect_changes_shippable(args)
elif args.changed_from or args.changed_path:
paths = args.changed_path or []
if args.changed_from:
with open(args.changed_from, 'r') as changes_fd:
paths += changes_fd.read().splitlines()
elif args.changed:
paths = detect_changes_local(args)
else:
return None # change detection not enabled
if paths is None:
return None # act as though change detection not enabled, do not filter targets
display.info('Detected changes in %d file(s).' % len(paths))
for path in paths:
display.info(path, verbosity=1)
return paths
def detect_changes_shippable(args):
"""Initialize change detection on Shippable.
:type args: TestConfig
:rtype: list[str] | None
"""
git = Git(args)
result = ShippableChanges(args, git)
if result.is_pr:
job_type = 'pull request'
elif result.is_tag:
job_type = 'tag'
else:
job_type = 'merge commit'
display.info('Processing %s for branch %s commit %s' % (job_type, result.branch, result.commit))
if not args.metadata.changes:
args.metadata.populate_changes(result.diff)
return result.paths
def detect_changes_local(args):
"""
:type args: TestConfig
:rtype: list[str]
"""
git = Git(args)
result = LocalChanges(args, git)
display.info('Detected branch %s forked from %s at commit %s' % (
result.current_branch, result.fork_branch, result.fork_point))
if result.untracked and not args.untracked:
display.warning('Ignored %s untracked file(s). Use --untracked to include them.' %
len(result.untracked))
if result.committed and not args.committed:
display.warning('Ignored %s committed change(s). Omit --ignore-committed to include them.' %
len(result.committed))
if result.staged and not args.staged:
display.warning('Ignored %s staged change(s). Omit --ignore-staged to include them.' %
len(result.staged))
if result.unstaged and not args.unstaged:
display.warning('Ignored %s unstaged change(s). Omit --ignore-unstaged to include them.' %
len(result.unstaged))
names = set()
if args.tracked:
names |= set(result.tracked)
if args.untracked:
names |= set(result.untracked)
if args.committed:
names |= set(result.committed)
if args.staged:
names |= set(result.staged)
if args.unstaged:
names |= set(result.unstaged)
if not args.metadata.changes:
args.metadata.populate_changes(result.diff)
for path in result.untracked:
if is_binary_file(path):
args.metadata.changes[path] = ((0, 0),)
continue
with open(path, 'r') as source_fd:
line_count = len(source_fd.read().splitlines())
args.metadata.changes[path] = ((1, line_count),)
return sorted(names)
def get_integration_filter(args, targets):
"""
:type args: IntegrationConfig
:type targets: tuple[IntegrationTarget]
:rtype: list[str]
"""
if args.tox:
# tox has the same exclusions as the local environment
return get_integration_local_filter(args, targets)
if args.docker:
return get_integration_docker_filter(args, targets)
if args.remote:
return get_integration_remote_filter(args, targets)
return get_integration_local_filter(args, targets)
def common_integration_filter(args, targets, exclude):
"""
:type args: IntegrationConfig
:type targets: tuple[IntegrationTarget]
:type exclude: list[str]
"""
override_disabled = set(target for target in args.include if target.startswith('disabled/'))
if not args.allow_disabled:
skip = 'disabled/'
override = [target.name for target in targets if override_disabled & set(target.aliases)]
skipped = [target.name for target in targets if skip in target.aliases and target.name not in override]
if skipped:
exclude.extend(skipped)
display.warning('Excluding tests marked "%s" which require --allow-disabled or prefixing with "disabled/": %s'
% (skip.rstrip('/'), ', '.join(skipped)))
override_unsupported = set(target for target in args.include if target.startswith('unsupported/'))
if not args.allow_unsupported:
skip = 'unsupported/'
override = [target.name for target in targets if override_unsupported & set(target.aliases)]
skipped = [target.name for target in targets if skip in target.aliases and target.name not in override]
if skipped:
exclude.extend(skipped)
display.warning('Excluding tests marked "%s" which require --allow-unsupported or prefixing with "unsupported/": %s'
% (skip.rstrip('/'), ', '.join(skipped)))
override_unstable = set(target for target in args.include if target.startswith('unstable/'))
if args.allow_unstable_changed:
override_unstable |= set(args.metadata.change_description.focused_targets or [])
if not args.allow_unstable:
skip = 'unstable/'
override = [target.name for target in targets if override_unstable & set(target.aliases)]
skipped = [target.name for target in targets if skip in target.aliases and target.name not in override]
if skipped:
exclude.extend(skipped)
display.warning('Excluding tests marked "%s" which require --allow-unstable or prefixing with "unstable/": %s'
% (skip.rstrip('/'), ', '.join(skipped)))
def get_integration_local_filter(args, targets):
"""
:type args: IntegrationConfig
:type targets: tuple[IntegrationTarget]
:rtype: list[str]
"""
exclude = []
common_integration_filter(args, targets, exclude)
if not args.allow_root and os.getuid() != 0:
skip = 'needs/root/'
skipped = [target.name for target in targets if skip in target.aliases]
if skipped:
exclude.append(skip)
display.warning('Excluding tests marked "%s" which require --allow-root or running as root: %s'
% (skip.rstrip('/'), ', '.join(skipped)))
override_destructive = set(target for target in args.include if target.startswith('destructive/'))
if not args.allow_destructive:
skip = 'destructive/'
override = [target.name for target in targets if override_destructive & set(target.aliases)]
skipped = [target.name for target in targets if skip in target.aliases and target.name not in override]
if skipped:
exclude.extend(skipped)
display.warning('Excluding tests marked "%s" which require --allow-destructive or prefixing with "destructive/" to run locally: %s'
% (skip.rstrip('/'), ', '.join(skipped)))
if args.python_version.startswith('3'):
python_version = 3
else:
python_version = 2
skip = 'skip/python%d/' % python_version
skipped = [target.name for target in targets if skip in target.aliases]
if skipped:
exclude.append(skip)
display.warning('Excluding tests marked "%s" which are not supported on python %d: %s'
% (skip.rstrip('/'), python_version, ', '.join(skipped)))
return exclude
def get_integration_docker_filter(args, targets):
"""
:type args: IntegrationConfig
:type targets: tuple[IntegrationTarget]
:rtype: list[str]
"""
exclude = []
common_integration_filter(args, targets, exclude)
skip = 'skip/docker/'
skipped = [target.name for target in targets if skip in target.aliases]
if skipped:
exclude.append(skip)
display.warning('Excluding tests marked "%s" which cannot run under docker: %s'
% (skip.rstrip('/'), ', '.join(skipped)))
if not args.docker_privileged:
skip = 'needs/privileged/'
skipped = [target.name for target in targets if skip in target.aliases]
if skipped:
exclude.append(skip)
display.warning('Excluding tests marked "%s" which require --docker-privileged to run under docker: %s'
% (skip.rstrip('/'), ', '.join(skipped)))
python_version = 2 # images are expected to default to python 2 unless otherwise specified
python_version = int(get_docker_completion().get(args.docker_raw).get('python', str(python_version)))
if args.python: # specifying a numeric --python option overrides the default python
if args.python.startswith('3'):
python_version = 3
elif args.python.startswith('2'):
python_version = 2
skip = 'skip/python%d/' % python_version
skipped = [target.name for target in targets if skip in target.aliases]
if skipped:
exclude.append(skip)
display.warning('Excluding tests marked "%s" which are not supported on python %d: %s'
% (skip.rstrip('/'), python_version, ', '.join(skipped)))
return exclude
def get_integration_remote_filter(args, targets):
"""
:type args: IntegrationConfig
:type targets: tuple[IntegrationTarget]
:rtype: list[str]
"""
parts = args.remote.split('/', 1)
platform = parts[0]
exclude = []
common_integration_filter(args, targets, exclude)
skip = 'skip/%s/' % platform
skipped = [target.name for target in targets if skip in target.aliases]
if skipped:
exclude.append(skip)
display.warning('Excluding tests marked "%s" which are not supported on %s: %s'
% (skip.rstrip('/'), platform, ', '.join(skipped)))
python_version = 2 # remotes are expected to default to python 2
skip = 'skip/python%d/' % python_version
skipped = [target.name for target in targets if skip in target.aliases]
if skipped:
exclude.append(skip)
display.warning('Excluding tests marked "%s" which are not supported on python %d: %s'
% (skip.rstrip('/'), python_version, ', '.join(skipped)))
return exclude
class EnvironmentDescription(object):
"""Description of current running environment."""
def __init__(self, args):
"""Initialize snapshot of environment configuration.
:type args: IntegrationConfig
"""
self.args = args
if self.args.explain:
self.data = {}
return
versions = ['']
versions += SUPPORTED_PYTHON_VERSIONS
versions += list(set(v.split('.')[0] for v in SUPPORTED_PYTHON_VERSIONS))
python_paths = dict((v, find_executable('python%s' % v, required=False)) for v in sorted(versions))
python_versions = dict((v, self.get_version([python_paths[v], '-V'])) for v in sorted(python_paths) if python_paths[v])
pip_paths = dict((v, find_executable('pip%s' % v, required=False)) for v in sorted(versions))
pip_versions = dict((v, self.get_version([pip_paths[v], '--version'])) for v in sorted(pip_paths) if pip_paths[v])
pip_interpreters = dict((v, self.get_shebang(pip_paths[v])) for v in sorted(pip_paths) if pip_paths[v])
known_hosts_hash = self.get_hash(os.path.expanduser('~/.ssh/known_hosts'))
self.data = dict(
python_paths=python_paths,
python_versions=python_versions,
pip_paths=pip_paths,
pip_versions=pip_versions,
pip_interpreters=pip_interpreters,
known_hosts_hash=known_hosts_hash,
)
def __str__(self):
"""
:rtype: str
"""
return json.dumps(self.data, sort_keys=True, indent=4)
def validate(self, target_name, throw):
"""
:type target_name: str
:type throw: bool
:rtype: bool
"""
current = EnvironmentDescription(self.args)
original_json = str(self)
current_json = str(current)
if original_json == current_json:
return True
message = ('Test target "%s" has changed the test environment!\n'
'If these changes are necessary, they must be reverted before the test finishes.\n'
'>>> Original Environment\n'
'%s\n'
'>>> Current Environment\n'
'%s' % (target_name, original_json, current_json))
if throw:
raise ApplicationError(message)
display.error(message)
return False
@staticmethod
def get_version(command):
"""
:type command: list[str]
:rtype: str
"""
try:
stdout, stderr = raw_command(command, capture=True, cmd_verbosity=2)
except SubprocessError:
return None # all failures are equal, we don't care why it failed, only that it did
return (stdout or '').strip() + (stderr or '').strip()
@staticmethod
def get_shebang(path):
"""
:type path: str
:rtype: str
"""
with open(path) as script_fd:
return script_fd.readline()
@staticmethod
def get_hash(path):
"""
:type path: str
:rtype: str | None
"""
if not os.path.exists(path):
return None
file_hash = hashlib.md5()
with open(path, 'rb') as file_fd:
file_hash.update(file_fd.read())
return file_hash.hexdigest()
class NoChangesDetected(ApplicationWarning):
"""Exception when change detection was performed, but no changes were found."""
def __init__(self):
super(NoChangesDetected, self).__init__('No changes detected.')
class NoTestsForChanges(ApplicationWarning):
"""Exception when changes detected, but no tests trigger as a result."""
def __init__(self):
super(NoTestsForChanges, self).__init__('No tests found for detected changes.')
class Delegate(Exception):
"""Trigger command delegation."""
def __init__(self, exclude=None, require=None, integration_targets=None):
"""
:type exclude: list[str] | None
:type require: list[str] | None
:type integration_targets: tuple[IntegrationTarget] | None
"""
super(Delegate, self).__init__()
self.exclude = exclude or []
self.require = require or []
self.integration_targets = integration_targets or tuple()
class AllTargetsSkipped(ApplicationWarning):
"""All targets skipped."""
def __init__(self):
super(AllTargetsSkipped, self).__init__('All targets skipped.')
| gpl-3.0 | 2,075,956,924,192,913,200 | 29.693333 | 150 | 0.611346 | false |
mahabs/nitro | nssrc/com/citrix/netscaler/nitro/resource/config/dns/dnspolicy_dnspolicylabel_binding.py | 1 | 6004 | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class dnspolicy_dnspolicylabel_binding(base_resource) :
""" Binding class showing the dnspolicylabel that can be bound to dnspolicy.
"""
def __init__(self) :
self._boundto = ""
self._priority = 0
self._activepolicy = 0
self._gotopriorityexpression = ""
self._labeltype = ""
self._labelname = ""
self._name = ""
self.___count = 0
@property
def boundto(self) :
"""Location where policy is bound.
"""
try :
return self._boundto
except Exception as e:
raise e
@boundto.setter
def boundto(self, boundto) :
"""Location where policy is bound.
"""
try :
self._boundto = boundto
except Exception as e:
raise e
@property
def name(self) :
"""Name of the DNS policy.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
"""Name of the DNS policy.
"""
try :
self._name = name
except Exception as e:
raise e
@property
def priority(self) :
"""Specifies the priority of the policy.
"""
try :
return self._priority
except Exception as e:
raise e
@property
def labelname(self) :
"""Name of the label to invoke if the current policy rule evaluates to TRUE.
"""
try :
return self._labelname
except Exception as e:
raise e
@property
def gotopriorityexpression(self) :
"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
"""
try :
return self._gotopriorityexpression
except Exception as e:
raise e
@property
def labeltype(self) :
"""Type of policy label invocation.<br/>Possible values = reqvserver, resvserver, policylabel.
"""
try :
return self._labeltype
except Exception as e:
raise e
@property
def activepolicy(self) :
"""Indicates whether policy is bound or not.
"""
try :
return self._activepolicy
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(dnspolicy_dnspolicylabel_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.dnspolicy_dnspolicylabel_binding
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.name) :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def get(cls, service, name) :
""" Use this API to fetch dnspolicy_dnspolicylabel_binding resources.
"""
try :
obj = dnspolicy_dnspolicylabel_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
""" Use this API to fetch filtered set of dnspolicy_dnspolicylabel_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = dnspolicy_dnspolicylabel_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
""" Use this API to count dnspolicy_dnspolicylabel_binding resources configued on NetScaler.
"""
try :
obj = dnspolicy_dnspolicylabel_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
""" Use this API to count the filtered set of dnspolicy_dnspolicylabel_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = dnspolicy_dnspolicylabel_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Labeltype:
reqvserver = "reqvserver"
resvserver = "resvserver"
policylabel = "policylabel"
class dnspolicy_dnspolicylabel_binding_response(base_response) :
def __init__(self, length=1) :
self.dnspolicy_dnspolicylabel_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.dnspolicy_dnspolicylabel_binding = [dnspolicy_dnspolicylabel_binding() for _ in range(length)]
| apache-2.0 | -9,121,447,943,516,050,000 | 26.167421 | 134 | 0.698368 | false |
adongy/adminradius | admin_radius/models.py | 1 | 4418 | from django.db import models
from .raw_models import *
from django.core.urlresolvers import reverse
import datetime
from django.core.exceptions import ValidationError
class RadPassManager(models.Manager):
def get_queryset(self):
return super(RadPassManager, self).get_queryset().filter(attribute='NT-Password', op=':=')
class RadStartDateManager(models.Manager):
def get_queryset(self):
return super(RadStartDateManager, self).get_queryset().filter(attribute='User-Start-Date', op=':=')
class RadEndDateManager(models.Manager):
def get_queryset(self):
return super(RadEndDateManager, self).get_queryset().filter(attribute='User-End-Date', op=':=')
class RadPass(Radcheck):
objects = RadPassManager()
def __init__(self, *args, **kwargs):
self._meta.get_field('attribute').default = 'NT-Password'
self._meta.get_field('op').default = ':='
super(RadPass, self).__init__(*args, **kwargs)
def clean_fields(self, exclude=None):
super(RadPass, self).clean_fields(exclude)
if self.value and len(self.value) != 32:
raise ValidationError(_("Hash is incorrectly formatted. Input as a 32 hexadecimal character string without a leading '0x' prefix."))
class Meta:
proxy = True
class RadStartDate(Radcheck):
objects = RadStartDateManager()
def __init__(self, *args, **kwargs):
self._meta.get_field('attribute').default = 'User-Start-Date'
self._meta.get_field('op').default = ':='
super(RadStartDate, self).__init__(*args, **kwargs)
def clean_fields(self, exclude=None):
super(RadStartDate, self).clean_fields(exclude)
if self.value:
try:
datetime.datetime.strptime(self.value, '%Y%m%d')
except ValueError:
raise ValidationError(_("Input date is not formatted as YYYYMMDD."))
def get_date(self):
if self.value:
return datetime.datetime.strptime(self.value, '%Y%m%d')
else:
return None
def get_absolute_url(self):
return reverse('admin_radius:user_edit', args=(self.username,))
class Meta:
proxy = True
class RadEndDate(Radcheck):
objects = RadEndDateManager()
def __init__(self, *args, **kwargs):
self._meta.get_field('attribute').default = 'User-End-Date'
self._meta.get_field('op').default = ':='
super(RadEndDate, self).__init__(*args, **kwargs)
def clean_fields(self, exclude=None):
super(RadEndDate, self).clean_fields(exclude)
if self.value:
try:
datetime.datetime.strptime(self.value, '%Y%m%d')
except ValueError:
raise ValidationError(_("Input date is not formatted as YYYYMMDD."))
def get_date(self):
if self.value:
return datetime.datetime.strptime(self.value, '%Y%m%d')
else:
return None
def get_absolute_url(self):
return reverse('admin_radius:user_edit', args=(self.username,))
class Meta:
proxy = True
class RadUser(models.Model):
username = models.CharField(max_length=64, unique=True)
start_date = models.OneToOneField(RadStartDate)
end_date = models.OneToOneField(RadEndDate)
password = models.OneToOneField(RadPass, blank=True, null=True)
@property
def is_online(self):
return Radacct.objects.filter(
username=self.username,
acctstoptime=None).exists()
"""
def clean(self):
# username must be consistent
if self.start_date and self.username and self.start_date.username != self.username:
raise ValidationError({'start_date': _('Usernames do not match.')})
if self.end_date and self.username and self.end_date.username != self.username:
raise ValidationError({'end_date': _('Usernames do not match.')})
if self.password and self.username and self.password.username != self.username:
raise ValidationError({'password': _('Usernames do not match.')})
"""
def get_absolute_url(self):
return reverse('admin_radius:user_edit', args=(self.username,))
def __str__(self):
return "<Raduser {}>".format(self.username) | mit | 73,345,521,450,151,820 | 36.449153 | 144 | 0.611815 | false |
rtfd/readthedocs.org | readthedocs/builds/tests/test_build_queryset.py | 1 | 5384 | import pytest
import django_dynamic_fixture as fixture
from django.conf import settings
from readthedocs.builds.querysets import BuildQuerySet
from readthedocs.builds.models import Build, Version
from readthedocs.organizations.models import Organization
from readthedocs.projects.models import Project, Feature
@pytest.mark.django_db
class TestBuildQuerySet:
def test_concurrent_builds(self):
project = fixture.get(
Project,
max_concurrent_builds=None,
main_language_project=None,
)
for state in ('triggered', 'building', 'cloning', 'finished'):
fixture.get(
Build,
project=project,
state=state,
)
assert (False, 2, 4) == Build.objects.concurrent(project)
for state in ('building', 'cloning'):
fixture.get(
Build,
project=project,
state=state,
)
assert (True, 4, 4) == Build.objects.concurrent(project)
def test_concurrent_builds_project_limited(self):
project = fixture.get(
Project,
max_concurrent_builds=2,
main_language_project=None,
)
for state in ('triggered', 'building', 'cloning', 'finished'):
fixture.get(
Build,
project=project,
state=state,
)
assert (True, 2, 2) == Build.objects.concurrent(project)
def test_concurrent_builds_translations(self):
project = fixture.get(
Project,
max_concurrent_builds=None,
main_language_project=None,
)
translation = fixture.get(
Project,
max_concurrent_builds=None,
main_language_project=project,
)
for state in ('triggered', 'building', 'cloning', 'finished'):
fixture.get(
Build,
project=project,
state=state,
)
assert (False, 2, 4) == Build.objects.concurrent(translation)
for state in ('building', 'cloning'):
fixture.get(
Build,
project=translation,
state=state,
)
assert (True, 4, 4) == Build.objects.concurrent(translation)
assert (True, 4, 4) == Build.objects.concurrent(project)
def test_concurrent_builds_organization(self):
organization = fixture.get(
Organization,
max_concurrent_builds=None,
)
for _ in range(2):
project = fixture.get(
Project,
max_concurrent_builds=None,
main_language_project=None,
)
organization.projects.add(project)
for project in organization.projects.all():
for state in ('triggered', 'building', 'cloning', 'finished'):
fixture.get(
Build,
project=project,
state=state,
)
project = organization.projects.first()
assert (True, 4, 4) == Build.objects.concurrent(project)
for state in ('building', 'cloning'):
fixture.get(
Build,
project=project,
state=state,
)
assert (True, 6, 4) == Build.objects.concurrent(project)
def test_concurrent_builds_organization_limited(self):
organization = fixture.get(
Organization,
max_concurrent_builds=10,
)
project_with_builds = fixture.get(
Project,
max_concurrent_builds=None,
main_language_project=None,
)
project_without_builds = fixture.get(
Project,
max_concurrent_builds=None,
main_language_project=None,
)
organization.projects.add(project_with_builds)
organization.projects.add(project_without_builds)
for state in ('triggered', 'building', 'cloning', 'finished'):
fixture.get(
Build,
project=project_with_builds,
state=state,
)
# Calling it with ``project_without_builds`` should count the builds
# from ``project_with_builds`` as well
assert (False, 2, 10) == Build.objects.concurrent(project_without_builds)
def test_concurrent_builds_organization_and_project_limited(self):
organization = fixture.get(
Organization,
max_concurrent_builds=10,
)
project_limited = fixture.get(
Project,
max_concurrent_builds=2,
main_language_project=None,
)
project_not_limited = fixture.get(
Project,
max_concurrent_builds=None,
main_language_project=None,
)
organization.projects.add(project_limited)
organization.projects.add(project_not_limited)
for state in ('triggered', 'building', 'cloning', 'finished'):
fixture.get(
Build,
project=project_limited,
state=state,
)
assert (True, 2, 2) == Build.objects.concurrent(project_limited)
assert (False, 2, 10) == Build.objects.concurrent(project_not_limited)
| mit | -6,256,736,176,756,667,000 | 32.440994 | 81 | 0.545691 | false |
bwc126/MLND-interview-practice | Q4.py | 1 | 4809 | # Find the least common ancestor between two nodes on a binary search tree. The least common ancestor is the farthest node from the root that is an ancestor of both nodes. For example, the root is a common ancestor of all nodes on the tree, but if both nodes are descendents of the root's left child, then that left child might be the lowest common ancestor. You can assume that both nodes are in the tree, and the tree itself adheres to all BST properties. The function definition should look like question4(T, r, n1, n2), where T is the tree represented as a matrix, where the index of the list is equal to the integer stored in that node and a 1 represents a child node, r is a non-negative integer representing the root, and n1 and n2 are non-negative integers representing the two nodes in no particular order. For example, one test case might be
#
# question4([[0, 1, 0, 0, 0],
# [0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0],
# [1, 0, 0, 0, 1],
# [0, 0, 0, 0, 0]],
# 3,
# 1,
# 4)
#
# and the answer would be 3.
# The matrix contains a number of rows equal to one plus the highest number in the BST. The index of a list within the matrix corresponds to each node's value. If the list for a node contains a 1, that node has a child whose value is the index of the position of the 1 within the list. For the example given above, the BST would have a 3 at the root, 0 on the left, 4 on the right. The 0 would have a child of 1 on the right.
# The signature question4(T, r, n1, n2) includes T, a binary matrix as described previously, r, a non-neg integer corresponding to the value of the root. n1, n2 are each non-neg ints representing the two nodes for which we need to find the greatest common ancestor node. We can assume n1, n2 might be in any order, that both nodes are in fact within the tree, and the BST conforms to standard rules for a BST.
import copy
def question4(T, r, n1, n2):
# We'll need to keep track of the lesser and greater node values to take full advantage of BST properties later on.
n_1 = min(n1,n2)
n_2 = max(n1,n2)
# Lacking a BST matrix is a non-starter.
if not T:
return
# Start by discarding trivial rows, storing the remaining rows with their node value in a dictionary as a key, and a list of their children as values. 0: [1] would be one such dictionary entry for the example in the question definition.
nodes = {}
# print T
M = copy.deepcopy(T)
for row in range(len(M)):
if 1 in M[row]:
children = []
for child in range(M[row].count(1)):
loc = M[row].index(1)
children.append(loc)
M[row][loc] = 0
nodes[row] = children
print nodes
# This is strictly for handling the cases where n1 or n2 aren't in the BST. We build a simple list of all nodes in the tree to make sure n1 and n2 are actually in it before doing any more unnecessary computation.
all_nodes = []
for children in nodes.values():
for node in children:
all_nodes.append(node)
all_nodes.extend(nodes.keys())
print all_nodes
if n1 not in all_nodes or n2 not in all_nodes:
return
# We could look through the keys of 'nodes', which will be every node that is a parent of any node in the tree, and the first one we find that has a value between n1, n2 is our LCA. This assumes the keys are in order of their level on the tree, but they don't need to be in order relative to the other nodes on their level, because only nodes between n1 and n2 in value can be a parent of both.
for parent in nodes.keys():
if parent < n_2 and parent > n_1:
return parent
# Test Cases
matrix = [[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], # 0
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,1,0,0,0,0,1,0,0,0,0,0,0,0,0], # 3
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,1,0,0,1,0,0,0,0,0,0,0], # 6
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,1,0,0,0,0,0,0,1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], # 9
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,1],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], # 12
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,1,0]]
root = 8
n_1, n_2 = 4, 7
print question4(matrix, root, n_1, n_2) # Should be 6
n_3, n_4 = 1, 6
print question4(matrix, root, n_3, n_4) # Should be 3
n_5, n_6 = 4, 10
print question4(matrix, root, n_5, n_6) # Should be 8
n_7, n_8 = 16, 0
print question4(matrix, root, n_7, n_8) # Edge case: should be None
n_9, n_10 = 4, 10
print question4(None, root, n_9, n_10) # Edge case: should be None
| gpl-3.0 | 289,486,961,979,072,200 | 56.939759 | 851 | 0.6309 | false |
robwarm/gpaw-symm | tools/niflheim-agts.py | 1 | 5426 | import os
import sys
import glob
import shutil
import subprocess
def cmd(c):
x = os.system(c)
assert x == 0, c
def fail(subject, email=None, filename='/dev/null', mailer='mail'):
assert mailer in ['mailx', 'mail', 'mutt']
import os
if email is not None:
if filename == '/dev/null':
assert os.system('mail -s "%s" %s < %s' %
(subject, email, filename)) == 0
else: # attachments
filenames = filename.split()
if mailer == 'mailx': # new mailx (12?)
attach = ''
for f in filenames:
attach += ' -a %s ' % f
# send with empty body
assert os.system('echo | mail %s -s "%s" %s' %
(attach, subject, email)) == 0
elif mailer == 'mail': # old mailx (8?)
attach = '('
for f in filenames:
ext = os.path.splitext(f)[-1]
if ext:
flog = os.path.basename(f).replace(ext, '.log')
else:
flog = f
attach += 'uuencode %s %s&&' % (f, flog)
# remove final &&
attach = attach[:-2]
attach += ')'
assert os.system('%s | mail -s "%s" %s' %
(attach, subject, email)) == 0
else: # mutt
attach = ''
for f in filenames:
attach += ' -a %s ' % f
# send with empty body
assert os.system('mutt %s -s "%s" %s < /dev/null' %
(attach, subject, email)) == 0
raise SystemExit
if '--dir' in sys.argv:
i = sys.argv.index('--dir')
dir = os.path.abspath(sys.argv[i+1])
else:
dir = 'agts'
if '--email' in sys.argv:
i = sys.argv.index('--email')
email = sys.argv[i+1]
else:
email = None
assert os.path.isdir(dir)
gpawdir = os.path.join(dir, 'gpaw')
# remove the old run directory
if os.path.isdir(dir):
shutil.rmtree(dir)
os.mkdir(dir)
os.chdir(dir)
cmd('svn checkout https://svn.fysik.dtu.dk/projects/gpaw/trunk gpaw')
# a version of gpaw is needed for imports from within this script!
cmd("\
cd " + gpawdir + "&& \
source /home/camp/modulefiles.sh&& \
module load NUMPY&& \
python setup.py build_ext 2>&1 > build_ext.log")
# import gpaw from where it was installed
sys.path.insert(0, gpawdir)
cmd("echo '\
cd '" + gpawdir + "'&& \
source /home/camp/modulefiles.sh&& \
module load NUMPY&& \
module load open64/4.2.3-0 && \
module load openmpi/1.3.3-1.el5.fys.open64.4.2.3 && \
module load hdf5/1.8.6-5.el5.fys.open64.4.2.3.openmpi.1.3.3 && \
python setup.py --remove-default-flags --customize=\
doc/install/Linux/Niflheim/el5-xeon-open64-acml-4.4.0-acml-4.4.0-hdf-SL-2.0.1.py \
build_ext 2>&1 > thul.log' | ssh thul bash")
cmd("echo '\
cd '" + gpawdir + "'&& \
source /home/camp/modulefiles.sh&& \
module load NUMPY&& \
module load open64/4.2.3-0 && \
python setup.py --remove-default-flags --customize=\
doc/install/Linux/Niflheim/el5-opteron-open64-acml-4.4.0-acml-4.4.0-hdf-SL-2.0.1.py \
build_ext 2>&1 > fjorm.log' | ssh fjorm bash")
cmd("""wget --no-check-certificate --quiet \
http://wiki.fysik.dtu.dk/gpaw-files/gpaw-setups-latest.tar.gz && \
tar xzf gpaw-setups-latest.tar.gz && \
rm gpaw-setups-latest.tar.gz && \
mv gpaw-setups-[0-9]* gpaw/gpaw-setups""")
cmd('svn export https://svn.fysik.dtu.dk/projects/ase/trunk ase')
# ase needed
sys.path.insert(0, '%s/ase' % dir)
from gpaw.test.big.agts import AGTSQueue
from gpaw.test.big.niflheim import NiflheimCluster
queue = AGTSQueue()
queue.collect()
cluster = NiflheimCluster(asepath=os.path.join(dir, 'ase'),
setuppath=os.path.join(gpawdir, 'gpaw-setups'))
# Example below is confusing: job.script must NOT be the *.agts.py script,
# but the actual python script to be run!
# testsuite.agts.py does both: see gpaw/test/big/miscellaneous/testsuite.agts.py
#queue.jobs = [job for job in queue.jobs if job.script == 'testsuite.agts.py']
nfailed = queue.run(cluster)
gfiles = os.path.join(dir, 'gpaw-files')
if not os.path.isdir(gfiles):
os.mkdir(gfiles)
queue.copy_created_files(gfiles)
# make files readable by go
files = glob.glob(gfiles + '/*')
for f in files:
os.chmod(f, 0644)
from gpaw.version import version
subject = 'AGTS GPAW %s: ' % str(version)
# Send mail:
sfile = os.path.join(dir, 'status.log')
attach = sfile
if not nfailed:
subject += ' succeeded'
fail(subject, email, attach, mailer='mutt')
else:
subject += ' failed'
# attach failed tests error files
ft = [l.split()[0] for l in open(sfile).readlines() if 'FAILED' in l]
for t in ft:
ef = glob.glob(os.path.join(dir, t) + '.e*')
for f in ef:
attach += ' ' + f
fail(subject, email, attach, mailer='mutt')
if 0:
# Analysis:
import matplotlib
matplotlib.use('Agg')
from gpaw.test.big.analysis import analyse
user = os.environ['USER']
analyse(queue,
'../analysis/analyse.pickle', # file keeping history
'../analysis', # Where to dump figures
rev=niflheim.revision,
#mailto='gpaw-developers@listserv.fysik.dtu.dk',
mailserver='servfys.fysik.dtu.dk',
attachment='status.log')
| gpl-3.0 | -8,642,778,261,663,894,000 | 30.546512 | 85 | 0.572429 | false |
avedaee/DIRAC | Core/Utilities/MySQL.py | 1 | 58931 | ########################################################################
# $HeadURL$
########################################################################
""" DIRAC Basic MySQL Class
It provides access to the basic MySQL methods in a multithread-safe mode
keeping used connections in a python Queue for further reuse.
These are the coded methods:
__init__( host, user, passwd, name, [maxConnsInQueue=10] )
Initializes the Queue and tries to connect to the DB server,
using the _connect method.
"maxConnsInQueue" defines the size of the Queue of open connections
that are kept for reuse. It also defines the maximum number of open
connections available from the object.
maxConnsInQueue = 0 means unlimited and it is not supported.
_except( methodName, exception, errorMessage )
Helper method for exceptions: the "methodName" and the "errorMessage"
are printed with ERROR level, then the "exception" is printed (with
full description if it is a MySQL Exception) and S_ERROR is returned
with the errorMessage and the exception.
_connect()
Attempts connection to DB and sets the _connected flag to True upon success.
Returns S_OK or S_ERROR.
_query( cmd, [conn] )
Executes SQL command "cmd".
Gets a connection from the Queue (or open a new one if none is available),
the used connection is back into the Queue.
If a connection to the DB is passed as second argument this connection
is used and is not in the Queue.
Returns S_OK with fetchall() out in Value or S_ERROR upon failure.
_update( cmd, [conn] )
Executes SQL command "cmd" and issue a commit
Gets a connection from the Queue (or open a new one if none is available),
the used connection is back into the Queue.
If a connection to the DB is passed as second argument this connection
is used and is not in the Queue
Returns S_OK with the number of updated rows in Value or S_ERROR upon failure.
_createTables( tableDict )
Create a new Table in the DB
_getConnection()
Gets a connection from the Queue (or open a new one if none is available)
Returns S_OK with connection in Value or S_ERROR
the calling method is responsible for closing this connection once it is no
longer needed.
Some high level methods have been added to avoid the need to write SQL
statement in most common cases. They should be used instead of low level
_insert, _update methods whenever possible.
buildCondition( self, condDict = None, older = None, newer = None,
timeStamp = None, orderAttribute = None, limit = False,
greater = None, smaller = None ):
Build SQL condition statement from provided condDict and other extra check on
a specified time stamp.
The conditions dictionary specifies for each attribute one or a List of possible
values
greater and smaller are dictionaries in which the keys are the names of the fields,
that are requested to be >= or < than the corresponding value.
For compatibility with current usage it uses Exceptions to exit in case of
invalid arguments
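      A minimal usage sketch (illustrative only; the table, field names and the
      exact SQL text below are assumptions, not part of this interface description):

        cond = db.buildCondition( condDict = { 'Status' : [ 'Waiting', 'Running' ] },
                                  newer = '2014-01-01 00:00:00',
                                  timeStamp = 'LastUpdateTime',
                                  orderAttribute = 'JobID:DESC',
                                  limit = 100 )

      would return a WHERE clause string similar to:

        WHERE `Status` IN ( "Waiting", "Running" ) AND `LastUpdateTime` >= "2014-01-01 00:00:00"
        ORDER BY `JobID` DESC LIMIT 100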
insertFields( self, tableName, inFields = None, inValues = None, conn = None, inDict = None ):
Insert a new row in "tableName" assigning the values "inValues" to the
fields "inFields".
Alternatively inDict can be used
String type values will be appropriately escaped.
updateFields( self, tableName, updateFields = None, updateValues = None,
condDict = None,
limit = False, conn = None,
updateDict = None,
older = None, newer = None,
timeStamp = None, orderAttribute = None ):
Update "updateFields" from "tableName" with "updateValues".
updateDict alternative way to provide the updateFields and updateValues
N records can match the condition
return S_OK( number of updated rows )
if limit is not False, the given limit is set
String type values will be appropriately escaped.
deleteEntries( self, tableName,
condDict = None,
limit = False, conn = None,
older = None, newer = None,
timeStamp = None, orderAttribute = None ):
Delete rows from "tableName" with
N records can match the condition
if limit is not False, the given limit is set
String type values will be appropriately escaped, they can be single values or lists of values.
getFields( self, tableName, outFields = None,
condDict = None,
limit = False, conn = None,
older = None, newer = None,
timeStamp = None, orderAttribute = None ):
Select "outFields" from "tableName" with condDict
N records can match the condition
return S_OK( tuple(Field,Value) )
if limit is not False, the given limit is set
String type values will be appropriately escaped, they can be single values or lists of values.
for compatibility with other methods condDict keyed argument is added
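      A short sketch tying these helpers together (table and field names are
      invented for illustration, not taken from any real schema):

        result = db.insertFields( 'Jobs', inDict = { 'Owner' : 'some_user', 'Status' : 'Received' } )
        if not result['OK']:
          return result
        result = db.updateFields( 'Jobs', updateDict = { 'Status' : 'Running' },
                                  condDict = { 'Owner' : 'some_user' } )
        result = db.getFields( 'Jobs', outFields = [ 'JobID', 'Status' ],
                               condDict = { 'Owner' : 'some_user' }, limit = 10 )
        # on success result['Value'] holds a tuple of rows, each row a tuple with the
        # requested fields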
getCounters( self, table, attrList, condDict = None, older = None,
newer = None, timeStamp = None, connection = False ):
Count the number of records on each distinct combination of AttrList, selected
with condition defined by condDict and time stamps
getDistinctAttributeValues( self, table, attribute, condDict = None, older = None,
newer = None, timeStamp = None, connection = False ):
Get distinct values of a table attribute under specified conditions
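      For example (again a sketch with invented names, return formats as described above):

        result = db.getCounters( 'Jobs', [ 'Status' ], { 'Owner' : 'some_user' } )
        # result['Value'] could look like: [ ( { 'Status' : 'Running' }, 3 ), ( { 'Status' : 'Done' }, 12 ) ]
        result = db.getDistinctAttributeValues( 'Jobs', 'Status', { 'Owner' : 'some_user' } )
        # result['Value'] could look like: [ 'Done', 'Running' ]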
"""
__RCSID__ = "$Id$"
from DIRAC import gLogger
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities.DataStructures import MutableStruct
from DIRAC.Core.Utilities import Time
# Get rid of the annoying Deprecation warning of the current MySQLdb
# FIXME: compile a newer MySQLdb version
import warnings
with warnings.catch_warnings():
warnings.simplefilter( 'ignore', DeprecationWarning )
import MySQLdb
# This is for proper initialization of embedded server, it should only be called once
MySQLdb.server_init( ['--defaults-file=/opt/dirac/etc/my.cnf', '--datadir=/opt/mysql/db'], ['mysqld'] )
gInstancesCount = 0
gDebugFile = None
import collections
import time
import threading
from types import StringTypes, DictType, ListType, TupleType
MAXCONNECTRETRY = 10
def _checkQueueSize( maxQueueSize ):
"""
Helper to check maxQueueSize
"""
if maxQueueSize <= 0:
raise Exception( 'MySQL.__init__: maxQueueSize must be positive' )
try:
maxQueueSize - 1
except Exception:
raise Exception( 'MySQL.__init__: wrong type for maxQueueSize' )
def _checkFields( inFields, inValues ):
"""
Helper to check match between inFields and inValues lengths
"""
if inFields == None and inValues == None:
return S_OK()
try:
assert len( inFields ) == len( inValues )
except:
return S_ERROR( 'Mismatch between inFields and inValues.' )
return S_OK()
def _quotedList( fieldList = None ):
"""
Quote a list of MySQL Field Names with "`"
Return a comma separated list of quoted Field Names
To be use for Table and Field Names
"""
if fieldList == None:
return None
quotedFields = []
try:
for field in fieldList:
quotedFields.append( '`%s`' % field.replace( '`', '' ) )
except Exception:
return None
if not quotedFields:
return None
return ', '.join( quotedFields )
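# Example: _quotedList( [ 'JobID', 'Status' ] ) returns the string '`JobID`, `Status`',
# ready to be embedded in SELECT or CREATE TABLE statements; any backticks already
# present in the names are stripped and invalid input returns None.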
class MySQL:
"""
Basic multithreaded DIRAC MySQL Client Class
"""
__initialized = False
class ConnectionPool( object ):
"""
Management of connections per thread
"""
__connData = MutableStruct( 'ConnData', [ 'conn', 'dbName', 'last', 'intrans' ] )
def __init__( self, host, user, passwd, port = 3306, graceTime = 600 ):
self.__host = host
self.__user = user
self.__passwd = passwd
self.__port = port
self.__graceTime = graceTime
self.__spares = collections.deque()
self.__maxSpares = 10
self.__lastClean = 0
self.__assigned = {}
@property
def __thid( self ):
return threading.current_thread()
def __newConn( self ):
conn = MySQLdb.connect( host = self.__host,
port = self.__port,
user = self.__user,
passwd = self.__passwd )
self.__execute( conn, "SET AUTOCOMMIT=1" )
return conn
def __execute( self, conn, cmd ):
cursor = conn.cursor()
res = cursor.execute( cmd )
cursor.close()
return res
def get( self, dbName, retries = 10 ):
retries = max( 0, min( MAXCONNECTRETRY, retries ) )
self.clean()
result = self.__getWithRetry( dbName, retries, retries )
if not result[ 'OK' ]:
return result
return S_OK( result[ 'Value' ].conn )
def __getWithRetry( self, dbName, totalRetries = 10, retriesLeft = 10 ):
sleepTime = 5 * ( totalRetries - retriesLeft )
if sleepTime > 0:
time.sleep( sleepTime )
try:
connData, thid = self.__innerGet()
except MySQLdb.MySQLError, excp:
if retriesLeft >= 0:
return self.__getWithRetry( dbName, totalRetries, retriesLeft - 1 )
return S_ERROR( "Could not connect: %s" % excp )
if not connData.intrans and not self.__ping( connData.conn ):
try:
self.__assigned.pop( thid )
except KeyError:
pass
if retriesLeft >= 0:
return self.__getWithRetry( dbName, totalRetries, retriesLeft )
return S_ERROR( "Could not connect" )
if connData.dbName != dbName:
try:
connData.conn.select_db( dbName )
connData.dbName = dbName
except MySQLdb.MySQLError, excp:
try:
self.__assigned.pop( thid ).conn.close()
except Exception:
pass
if retriesLeft >= 0:
return self.__getWithRetry( dbName, totalRetries, retriesLeft - 1 )
return S_ERROR( "Could not select db %s: %s" % ( dbName, excp ) )
return S_OK( connData )
def __ping( self, conn ):
try:
conn.ping( True )
return True
except:
return False
def __innerGet( self ):
thid = self.__thid
now = time.time()
try:
data = self.__assigned[ thid ]
data.last = now
return data, thid
except KeyError:
pass
#Not cached
try:
connData = self.__spares.pop()
except IndexError:
connData = self.__connData( self.__newConn(), "", now, False )
self.__assigned[ thid ] = connData
return self.__assigned[ thid ], thid
def __pop( self, thid ):
try:
connData = self.__assigned.pop( thid )
except KeyError:
return
if not connData.intrans and len( self.__spares ) < self.__maxSpares:
self.__spares.append( connData )
else:
connData.conn.close()
def clean( self, now = False ):
if not now:
now = time.time()
self.__lastClean = now
for thid in list( self.__assigned ):
if not thid.isAlive():
self.__pop( thid )
continue
try:
data = self.__assigned[ thid ]
except KeyError:
continue
if now - data.last > self.__graceTime:
self.__pop( thid )
def transactionStart( self, dbName ):
print "TRANS START"
result = self.__getWithRetry( dbName )
if not result[ 'OK' ]:
return result
connData = result[ 'Value' ]
try:
if connData.intrans:
raise RuntimeError( "Staring a MySQL transaction inside another one" )
self.__execute( connData.conn, "SET AUTOCOMMIT=0" )
self.__execute( connData.conn, "START TRANSACTION WITH CONSISTENT SNAPSHOT" )
connData.intrans = True
return S_OK()
except MySQLdb.MySQLError, excp:
return S_ERROR( "Could not begin transaction: %s" % excp )
def transactionCommit( self, dbName ):
print "TRANS COMMIT"
return self.__endTransaction( dbName, True )
def transactionRollback( self, dbName ):
print "TRANS ROLLBACK"
return self.__endTransaction( dbName, False )
def __endTransaction( self, dbName, commit ):
result = self.__getWithRetry( dbName )
if not result[ 'OK' ]:
return result
connData = result[ 'Value' ]
try:
if not connData.intrans:
gLogger.warn( "MySQL connection has reconnected. Transaction may be inconsistent" )
if commit:
result = connData.conn.commit()
else:
result = connData.conn.rollback()
self.__execute( connData.conn, "SET AUTOCOMMIT=1" )
connData.conn.commit()
connData.intrans = False
return S_OK( result )
except MySQLdb.MySQLError, excp:
return S_ERROR( "Could not end transaction: %s" % excp )
__connectionPools = {}
def __init__( self, hostName, userName, passwd, dbName, port = 3306, maxQueueSize = 3, debug = False ):
"""
set MySQL connection parameters and try to connect
"""
global gInstancesCount, gDebugFile
gInstancesCount += 1
self._connected = False
if 'log' not in dir( self ):
self.log = gLogger.getSubLogger( 'MySQL' )
self.logger = self.log
# let the derived class decide what to do with if is not 1
self._threadsafe = MySQLdb.thread_safe()
self.log.debug( 'thread_safe = %s' % self._threadsafe )
_checkQueueSize( maxQueueSize )
self.__hostName = str( hostName )
self.__userName = str( userName )
self.__passwd = str( passwd )
self.__dbName = str( dbName )
self.__port = port
cKey = ( self.__hostName, self.__userName, self.__passwd, self.__port )
if cKey not in MySQL.__connectionPools:
MySQL.__connectionPools[ cKey ] = MySQL.ConnectionPool( *cKey )
self.__connectionPool = MySQL.__connectionPools[ cKey ]
self.__initialized = True
result = self._connect()
if not result[ 'OK' ]:
gLogger.error( "Cannot connect to to DB: %s" % result[ 'Message' ] )
if debug:
try:
gDebugFile = open( "%s.debug.log" % self.__dbName, "w" )
except IOError:
pass
def __del__( self ):
global gInstancesCount
try:
gInstancesCount -= 1
except Exception:
pass
def _except( self, methodName, x, err ):
"""
print MySQL error or exception
return S_ERROR with Exception
"""
try:
raise x
except MySQLdb.Error, e:
self.log.debug( '%s: %s' % ( methodName, err ),
'%d: %s' % ( e.args[0], e.args[1] ) )
return S_ERROR( '%s: ( %d: %s )' % ( err, e.args[0], e.args[1] ) )
except Exception, e:
self.log.debug( '%s: %s' % ( methodName, err ), str( e ) )
return S_ERROR( '%s: (%s)' % ( err, str( e ) ) )
def __escapeString( self, myString ):
"""
To be used for escaping any MySQL string before passing it to the DB
this should prevent passing non-MySQL accepted characters to the DB
It also includes quotation marks " around the given string
"""
retDict = self.__getConnection()
if not retDict['OK']:
return retDict
connection = retDict['Value']
specialValues = ( 'UTC_TIMESTAMP', 'TIMESTAMPADD', 'TIMESTAMPDIFF' )
try:
myString = str( myString )
except ValueError:
return S_ERROR( "Cannot escape value!" )
try:
for sV in specialValues:
if myString.find( sV ) == 0:
return S_OK( myString )
escape_string = connection.escape_string( str( myString ) )
self.log.debug( '__escape_string: returns', '"%s"' % escape_string )
return S_OK( '"%s"' % escape_string )
except Exception, x:
self.log.debug( '__escape_string: Could not escape string', '"%s"' % myString )
return self._except( '__escape_string', x, 'Could not escape string' )
def __checkTable( self, tableName, force = False ):
table = _quotedList( [tableName] )
if not table:
return S_ERROR( 'Invalid tableName argument' )
cmd = 'SHOW TABLES'
retDict = self._query( cmd, debug = True )
if not retDict['OK']:
return retDict
if ( tableName, ) in retDict['Value']:
if not force:
# the requested exist and table creation is not force, return with error
return S_ERROR( 'Requested table %s already exists' % tableName )
else:
cmd = 'DROP TABLE %s' % table
retDict = self._update( cmd, debug = True )
if not retDict['OK']:
return retDict
return S_OK()
def _escapeString( self, myString, conn = None ):
"""
Wrapper around the internal method __escapeString
"""
self.log.debug( '_escapeString:', '"%s"' % str( myString ) )
return self.__escapeString( myString )
def _escapeValues( self, inValues = None ):
"""
Escapes all strings in the list of values provided
"""
self.log.debug( '_escapeValues:', inValues )
inEscapeValues = []
if not inValues:
return S_OK( inEscapeValues )
for value in inValues:
if type( value ) in StringTypes:
retDict = self.__escapeString( value )
if not retDict['OK']:
return retDict
inEscapeValues.append( retDict['Value'] )
elif type( value ) == TupleType or type( value ) == ListType:
tupleValues = []
for v in list( value ):
retDict = self.__escapeString( v )
if not retDict['OK']:
return retDict
tupleValues.append( retDict['Value'] )
inEscapeValues.append( '(' + ', '.join( tupleValues ) + ')' )
else:
retDict = self.__escapeString( str( value ) )
if not retDict['OK']:
return retDict
inEscapeValues.append( retDict['Value'] )
return S_OK( inEscapeValues )
def _connect( self ):
"""
open connection to MySQL DB and put Connection into Queue
set connected flag to True and return S_OK
return S_ERROR upon failure
"""
if not self.__initialized:
error = 'DB not properly initialized'
gLogger.error( error )
return S_ERROR( error )
self.log.debug( '_connect:', self._connected )
if self._connected:
return S_OK()
self.log.debug( '_connect: Attempting to access DB',
'[%s@%s] by user %s/%s.' %
( self.__dbName, self.__hostName, self.__userName, self.__passwd ) )
try:
self.log.verbose( '_connect: Connected.' )
self._connected = True
return S_OK()
except Exception, x:
return self._except( '_connect', x, 'Could not connect to DB.' )
def _query( self, cmd, conn = None, debug = False ):
"""
execute MySQL query command
return S_OK structure with fetchall result as tuple
it returns an empty tuple if no matching rows are found
return S_ERROR upon error
"""
if debug:
self.logger.debug( '_query:', cmd )
else:
if self.logger._minLevel == self.logger._logLevels.getLevelValue( 'DEBUG' ):
self.logger.verbose( '_query:', cmd )
else:
self.logger.verbose( '_query:', cmd[:min( len( cmd ) , 512 )] )
if gDebugFile:
start = time.time()
retDict = self.__getConnection()
if not retDict['OK']:
return retDict
connection = retDict[ 'Value' ]
try:
cursor = connection.cursor()
if cursor.execute( cmd ):
res = cursor.fetchall()
else:
res = ()
# Log the result limiting it to just 10 records
if len( res ) <= 10:
if debug:
self.logger.debug( '_query: returns', res )
else:
self.logger.verbose( '_query: returns', res )
else:
if debug:
self.logger.debug( '_query: Total %d records returned' % len( res ) )
self.logger.debug( '_query: %s ...' % str( res[:10] ) )
else:
self.logger.verbose( '_query: Total %d records returned' % len( res ) )
self.logger.verbose( '_query: %s ...' % str( res[:10] ) )
retDict = S_OK( res )
except Exception , x:
self.log.warn( '_query:', cmd )
retDict = self._except( '_query', x, 'Execution failed.' )
try:
cursor.close()
except Exception:
pass
if gDebugFile:
print >> gDebugFile, time.time() - start, cmd.replace( '\n', '' )
gDebugFile.flush()
return retDict
def _update( self, cmd, conn = None, debug = False ):
""" execute MySQL update command
return S_OK with the number of updated rows upon success
return S_ERROR upon error
"""
if debug:
self.logger.debug( '_update:', cmd )
else:
if self.logger._minLevel == self.logger._logLevels.getLevelValue( 'DEBUG' ):
self.logger.verbose( '_update:', cmd )
else:
self.logger.verbose( '_update:', cmd[:min( len( cmd ) , 512 )] )
if gDebugFile:
start = time.time()
retDict = self.__getConnection( conn = conn )
if not retDict['OK']:
return retDict
connection = retDict['Value']
try:
cursor = connection.cursor()
res = cursor.execute( cmd )
# connection.commit()
if debug:
self.log.debug( '_update:', res )
else:
self.log.verbose( '_update:', res )
retDict = S_OK( res )
if cursor.lastrowid:
retDict[ 'lastRowId' ] = cursor.lastrowid
except Exception, x:
self.log.warn( '_update: %s: %s' % ( cmd, str( x ) ) )
retDict = self._except( '_update', x, 'Execution failed.' )
try:
cursor.close()
except Exception:
pass
if gDebugFile:
print >> gDebugFile, time.time() - start, cmd.replace( '\n', '' )
gDebugFile.flush()
return retDict
def _transaction( self, cmdList, conn = None ):
""" dummy transaction support
:param self: self reference
:param list cmdList: list of queries to be executed within the transaction
:param MySQLDB.Connection conn: connection
:return: S_OK( [ ( cmd1, ret1 ), ... ] ) or S_ERROR
"""
if type( cmdList ) != ListType:
return S_ERROR( "_transaction: wrong type (%s) for cmdList" % type( cmdList ) )
# # get connection
connection = conn
if not connection:
retDict = self.__getConnection()
if not retDict['OK']:
return retDict
connection = retDict[ 'Value' ]
# # list with cmds and their results
cmdRet = []
try:
cursor = connection.cursor()
for cmd in cmdList:
cmdRet.append( ( cmd, cursor.execute( cmd ) ) )
connection.commit()
except Exception, error:
self.logger.exception( error )
# # rollback, put back connection to the pool
connection.rollback()
return S_ERROR( error )
# # close cursor, put back connection to the pool
cursor.close()
return S_OK( cmdRet )
def _createViews( self, viewsDict, force = False ):
""" create view based on query
:param dict viewsDict: { 'ViewName': { "Fields" : { "`a`" : "`tblA.a`", "`sumB`" : "SUM(`tblB.b`)" },
"SelectFrom" : "tblA join tblB on tblA.id = tblB.id",
"Clauses" : [ "`tblA.a` > 10", "`tblB.Status` = 'foo'" ], ## WILL USE AND CLAUSE
"GroupBy": [ "`a`" ],
"OrderBy": [ "`b` DESC" ] } }
"""
if force:
gLogger.debug( viewsDict )
for viewName, viewDict in viewsDict.items():
viewQuery = [ "CREATE OR REPLACE VIEW `%s`.`%s` AS" % ( self.__dbName, viewName ) ]
columns = ",".join( [ "%s AS %s" % ( colDef, colName )
for colName, colDef in viewDict.get( "Fields", {} ).items() ] )
tables = viewDict.get( "SelectFrom", "" )
if columns and tables:
viewQuery.append( "SELECT %s FROM %s" % ( columns, tables ) )
where = " AND ".join( viewDict.get( "Clauses", [] ) )
if where:
viewQuery.append( "WHERE %s" % where )
groupBy = ",".join( viewDict.get( "GroupBy", [] ) )
if groupBy:
viewQuery.append( "GROUP BY %s" % groupBy )
orderBy = ",".join( viewDict.get( "OrderBy", [] ) )
if orderBy:
viewQuery.append( "ORDER BY %s" % orderBy )
viewQuery.append( ";" )
viewQuery = " ".join( viewQuery )
self.log.debug( "`%s` VIEW QUERY IS: %s" % ( viewName, viewQuery ) )
createView = self._query( viewQuery )
if not createView["OK"]:
gLogger.error( createView["Message"] )
return createView
return S_OK()
def _createTables( self, tableDict, force = False, okIfTableExists = True ):
"""
tableDict:
tableName: { 'Fields' : { 'Field': 'Description' },
'ForeignKeys': {'Field': 'Table.key' },
'PrimaryKey': 'Id',
'Indexes': { 'Index': [] },
'UniqueIndexes': { 'Index': [] },
'Engine': 'InnoDB' }
only 'Fields' is a mandatory key.
Creates a new Table for each key in tableDict, "tableName" in the DB with
the provided description.
It allows to create:
- flat tables if no "ForeignKeys" key defined.
- tables with foreign keys to auxiliary tables holding the values
of some of the fields
Arguments:
tableDict: dictionary of dictionaries with description of tables to be created.
Only "Fields" is a mandatory key in the table description.
"Fields": Dictionary with Field names and description of the fields
"ForeignKeys": Dictionary with Field names and name of auxiliary tables.
The auxiliary tables must be defined in tableDict.
"PrimaryKey": Name of PRIMARY KEY for the table (if exist).
"Indexes": Dictionary with definition of indexes, the value for each
index is the list of fields to be indexed.
"UniqueIndexes": Dictionary with definition of indexes, the value for each
index is the list of fields to be indexed. These indexes will be declared
unique.
"Engine": use the given DB engine, InnoDB is the default if not present.
force:
if True, requested tables are DROP if they exist.
if False (default), tables are not overwritten
okIfTableExists:
if True (default), returns S_OK if table exists
if False, returns S_ERROR if table exists
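      Illustrative tableDict (a sketch only, all names are invented):
        { 'Owners' : { 'Fields' : { 'OwnerID' : 'INTEGER NOT NULL AUTO_INCREMENT',
                                    'OwnerName' : 'VARCHAR(64) NOT NULL' },
                       'PrimaryKey' : 'OwnerID' },
          'Jobs' : { 'Fields' : { 'JobID' : 'INTEGER NOT NULL AUTO_INCREMENT',
                                  'OwnerID' : 'INTEGER NOT NULL',
                                  'Status' : 'VARCHAR(32) DEFAULT "Received"' },
                     'PrimaryKey' : 'JobID',
                     'ForeignKeys' : { 'OwnerID' : 'Owners.OwnerID' },
                     'Indexes' : { 'StatusIndex' : [ 'Status' ] },
                     'Engine' : 'InnoDB' } }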
"""
# First check consistency of request
if type( tableDict ) != DictType:
return S_ERROR( 'Argument is not a dictionary: %s( %s )'
% ( type( tableDict ), tableDict ) )
tableList = tableDict.keys()
if len( tableList ) == 0:
return S_OK( 0 )
for table in tableList:
thisTable = tableDict[table]
# Check if Table is properly described with a dictionary
if type( thisTable ) != DictType:
return S_ERROR( 'Table description is not a dictionary: %s( %s )'
% ( type( thisTable ), thisTable ) )
if not 'Fields' in thisTable:
return S_ERROR( 'Missing `Fields` key in `%s` table dictionary' % table )
tableCreationList = [[]]
auxiliaryTableList = []
i = 0
extracted = True
while tableList and extracted:
# iterate extracting tables from list if they only depend on
# already extracted tables.
extracted = False
auxiliaryTableList += tableCreationList[i]
i += 1
tableCreationList.append( [] )
for table in list( tableList ):
toBeExtracted = True
thisTable = tableDict[table]
if 'ForeignKeys' in thisTable:
thisKeys = thisTable['ForeignKeys']
for key, auxTable in thisKeys.items():
forTable = auxTable.split( '.' )[0]
forKey = key
if forTable != auxTable:
forKey = auxTable.split( '.' )[1]
if forTable not in auxiliaryTableList:
toBeExtracted = False
break
if not key in thisTable['Fields']:
return S_ERROR( 'ForeignKey `%s` -> `%s` not defined in Primary table `%s`.'
% ( key, forKey, table ) )
if not forKey in tableDict[forTable]['Fields']:
return S_ERROR( 'ForeignKey `%s` -> `%s` not defined in Auxiliary table `%s`.'
% ( key, forKey, forTable ) )
if toBeExtracted:
self.log.debug( 'Table %s ready to be created' % table )
extracted = True
tableList.remove( table )
tableCreationList[i].append( table )
if tableList:
return S_ERROR( 'Recursive Foreign Keys in %s' % ', '.join( tableList ) )
createdTablesList = []
for tableList in tableCreationList:
for table in tableList:
# Check if Table exists
retDict = self.__checkTable( table, force = force )
if not retDict['OK']:
message = 'The requested table already exists'
if retDict['Message'] == message and okIfTableExists:
continue
return retDict
thisTable = tableDict[table]
cmdList = []
for field in thisTable['Fields'].keys():
cmdList.append( '`%s` %s' % ( field, thisTable['Fields'][field] ) )
if thisTable.has_key( 'PrimaryKey' ):
if type( thisTable['PrimaryKey'] ) in StringTypes:
cmdList.append( 'PRIMARY KEY ( `%s` )' % thisTable['PrimaryKey'] )
else:
cmdList.append( 'PRIMARY KEY ( %s )' % ", ".join( [ "`%s`" % str( f ) for f in thisTable['PrimaryKey'] ] ) )
if thisTable.has_key( 'Indexes' ):
indexDict = thisTable['Indexes']
for index in indexDict:
indexedFields = '`, `'.join( indexDict[index] )
cmdList.append( 'INDEX `%s` ( `%s` )' % ( index, indexedFields ) )
if thisTable.has_key( 'UniqueIndexes' ):
indexDict = thisTable['UniqueIndexes']
for index in indexDict:
indexedFields = '`, `'.join( indexDict[index] )
cmdList.append( 'UNIQUE INDEX `%s` ( `%s` )' % ( index, indexedFields ) )
if 'ForeignKeys' in thisTable:
thisKeys = thisTable['ForeignKeys']
for key, auxTable in thisKeys.items():
forTable = auxTable.split( '.' )[0]
forKey = key
if forTable != auxTable:
forKey = auxTable.split( '.' )[1]
# cmdList.append( '`%s` %s' % ( forTable, tableDict[forTable]['Fields'][forKey] )
cmdList.append( 'FOREIGN KEY ( `%s` ) REFERENCES `%s` ( `%s` )'
' ON DELETE RESTRICT' % ( key, forTable, forKey ) )
if thisTable.has_key( 'Engine' ):
engine = thisTable['Engine']
else:
engine = 'InnoDB'
cmd = 'CREATE TABLE `%s` (\n%s\n) ENGINE=%s' % (
table, ',\n'.join( cmdList ), engine )
retDict = self._update( cmd, debug = True )
if not retDict['OK']:
return retDict
self.log.debug( 'Table %s created' % table )
createdTablesList.append( table )
return S_OK( createdTablesList )
def _getFields( self, tableName, outFields = None,
inFields = None, inValues = None,
limit = False, conn = None,
older = None, newer = None,
timeStamp = None, orderAttribute = None ):
"""
Wrapper to the new method for backward compatibility
"""
self.log.warn( '_getFields:', 'deprecation warning, use getFields methods instead of _getFields.' )
retDict = _checkFields( inFields, inValues )
if not retDict['OK']:
self.log.warn( '_getFields:', retDict['Message'] )
return retDict
condDict = {}
if inFields != None:
try:
condDict.update( [ ( inFields[k], inValues[k] ) for k in range( len( inFields ) )] )
except Exception, x:
return S_ERROR( x )
return self.getFields( tableName, outFields, condDict, limit, conn, older, newer, timeStamp, orderAttribute )
def _insert( self, tableName, inFields = None, inValues = None, conn = None ):
"""
Wrapper to the new method for backward compatibility
"""
self.log.warn( '_insert:', 'deprecation warning, use insertFields methods instead of _insert.' )
return self.insertFields( tableName, inFields, inValues, conn )
def _to_value( self, param ):
"""
Convert to string
"""
return str( param[0] )
def _to_string( self, param ):
"""
"""
return param[0].tostring()
def _getConnection( self ):
"""
Return a new connection to the DB
It uses the private method __getConnection
"""
self.log.debug( '_getConnection:' )
retDict = self.__getConnection( trial = 0 )
return retDict
def __getConnection( self, conn = None, trial = 0 ):
"""
Return a new connection to the DB.
If conn is provided, just return it.
Otherwise try the Queue; if it is empty, add a new connection to the Queue and retry.
It will retry MAXCONNECTRETRY times to open a new connection and will return
an error if it fails.
"""
self.log.debug( '__getConnection:' )
if not self.__initialized:
error = 'DB not properly initialized'
gLogger.error( error )
return S_ERROR( error )
return self.__connectionPool.get( self.__dbName )
########################################################################################
#
# Transaction functions
#
########################################################################################
def transactionStart( self ):
return self.__connectionPool.transactionStart( self.__dbName )
def transactionCommit( self ):
return self.__connectionPool.transactionCommit( self.__dbName )
def transactionRollback( self ):
return self.__connectionPool.transactionRollback( self.__dbName )
@property
def transaction( self ):
""" Transaction guard """
class TransactionGuard( object ):
def __init__( self, db ):
self.__db = db
self.__ok = False
def __enter__( self ):
self.__db.transactionStart()
def commitWard( *args ):
self.__ok = True
return args
return commitWard
def __exit__( self, exType, exValue, traceback ):
if exValue or not self.__ok:
self.__db.transactionRollback()
else:
self.__db.transactionCommit()
return TransactionGuard( self )
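# Usage sketch (illustrative, not from the original code): __enter__ returns a
# "commit ward"; calling it before the block ends commits, otherwise the guard
# rolls back on exit.
#
#   with db.transaction as commit:
#       db.insertFields( 'SomeTable', inDict = { 'Name': 'example' } )
#       commit()    # omit this call (or raise) to roll back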
########################################################################################
#
# Utility functions
#
########################################################################################
def countEntries( self, table, condDict, older = None, newer = None, timeStamp = None, connection = False,
greater = None, smaller = None ):
"""
Count the number of entries with the given conditions
"""
table = _quotedList( [table] )
if not table:
error = 'Invalid table argument'
self.log.debug( 'countEntries:', error )
return S_ERROR( error )
try:
cond = self.buildCondition( condDict = condDict, older = older, newer = newer, timeStamp = timeStamp,
greater = greater, smaller = smaller )
except Exception, x:
return S_ERROR( x )
cmd = 'SELECT COUNT(*) FROM %s %s' % ( table, cond )
res = self._query( cmd , connection, debug = True )
if not res['OK']:
return res
return S_OK( res['Value'][0][0] )
########################################################################################
def getCounters( self, table, attrList, condDict, older = None, newer = None, timeStamp = None, connection = False,
greater = None, smaller = None ):
"""
Count the number of records on each distinct combination of AttrList, selected
with condition defined by condDict and time stamps
"""
table = _quotedList( [table] )
if not table:
error = 'Invalid table argument'
self.log.debug( 'getCounters:', error )
return S_ERROR( error )
attrNames = _quotedList( attrList )
if attrNames == None:
error = 'Invalid updateFields argument'
self.log.debug( 'getCounters:', error )
return S_ERROR( error )
try:
cond = self.buildCondition( condDict = condDict, older = older, newer = newer, timeStamp = timeStamp,
greater = greater, smaller = smaller )
except Exception, x:
return S_ERROR( x )
cmd = 'SELECT %s, COUNT(*) FROM %s %s GROUP BY %s ORDER BY %s' % ( attrNames, table, cond, attrNames, attrNames )
res = self._query( cmd , connection, debug = True )
if not res['OK']:
return res
resultList = []
for raw in res['Value']:
attrDict = {}
for i in range( len( attrList ) ):
attrDict[attrList[i]] = raw[i]
item = ( attrDict, raw[len( attrList )] )
resultList.append( item )
return S_OK( resultList )
#########################################################################################
def getDistinctAttributeValues( self, table, attribute, condDict = None, older = None,
newer = None, timeStamp = None, connection = False,
greater = None, smaller = None ):
"""
Get distinct values of a table attribute under specified conditions
"""
table = _quotedList( [table] )
if not table:
error = 'Invalid table argument'
self.log.debug( 'getDistinctAttributeValues:', error )
return S_ERROR( error )
attributeName = _quotedList( [attribute] )
if not attributeName:
error = 'Invalid attribute argument'
self.log.debug( 'getDistinctAttributeValues:', error )
return S_ERROR( error )
try:
cond = self.buildCondition( condDict = condDict, older = older, newer = newer, timeStamp = timeStamp,
greater = greater, smaller = smaller )
except Exception, x:
return S_ERROR( x )
cmd = 'SELECT DISTINCT( %s ) FROM %s %s ORDER BY %s' % ( attributeName, table, cond, attributeName )
res = self._query( cmd, connection, debug = True )
if not res['OK']:
return res
attr_list = [ x[0] for x in res['Value'] ]
return S_OK( attr_list )
#############################################################################
def buildCondition( self, condDict = None, older = None, newer = None,
timeStamp = None, orderAttribute = None, limit = False,
greater = None, smaller = None, offset = None ):
""" Build SQL condition statement from provided condDict and other extra check on
a specified time stamp.
The conditions dictionary specifies for each attribute one or a List of possible
values
greater and smaller are dictionaries in which the keys are the names of the fields,
that are requested to be >= or < than the corresponding value.
For compatibility with current usage it uses Exceptions to exit in case of
invalid arguments
"""
condition = ''
conjunction = "WHERE"
if condDict != None:
for aName, attrValue in condDict.items():
if type( aName ) in StringTypes:
attrName = _quotedList( [aName] )
elif type( aName ) == TupleType:
attrName = '('+_quotedList( list( aName ) )+')'
if not attrName:
error = 'Invalid condDict argument'
self.log.warn( 'buildCondition:', error )
raise Exception( error )
if type( attrValue ) == ListType:
retDict = self._escapeValues( attrValue )
if not retDict['OK']:
self.log.warn( 'buildCondition:', retDict['Message'] )
raise Exception( retDict['Message'] )
else:
escapeInValues = retDict['Value']
multiValue = ', '.join( escapeInValues )
condition = ' %s %s %s IN ( %s )' % ( condition,
conjunction,
attrName,
multiValue )
conjunction = "AND"
else:
retDict = self._escapeValues( [ attrValue ] )
if not retDict['OK']:
self.log.warn( 'buildCondition:', retDict['Message'] )
raise Exception( retDict['Message'] )
else:
escapeInValue = retDict['Value'][0]
condition = ' %s %s %s = %s' % ( condition,
conjunction,
attrName,
escapeInValue )
conjunction = "AND"
if timeStamp:
timeStamp = _quotedList( [timeStamp] )
if not timeStamp:
error = 'Invalid timeStamp argument'
self.log.warn( 'buildCondition:', error )
raise Exception( error )
if newer:
retDict = self._escapeValues( [ newer ] )
if not retDict['OK']:
self.log.warn( 'buildCondition:', retDict['Message'] )
raise Exception( retDict['Message'] )
else:
escapeInValue = retDict['Value'][0]
condition = ' %s %s %s >= %s' % ( condition,
conjunction,
timeStamp,
escapeInValue )
conjunction = "AND"
if older:
retDict = self._escapeValues( [ older ] )
if not retDict['OK']:
self.log.warn( 'buildCondition:', retDict['Message'] )
raise Exception( retDict['Message'] )
else:
escapeInValue = retDict['Value'][0]
condition = ' %s %s %s < %s' % ( condition,
conjunction,
timeStamp,
escapeInValue )
if type( greater ) == DictType:
for attrName, attrValue in greater.items():
attrName = _quotedList( [attrName] )
if not attrName:
error = 'Invalid greater argument'
self.log.warn( 'buildCondition:', error )
raise Exception( error )
retDict = self._escapeValues( [ attrValue ] )
if not retDict['OK']:
self.log.warn( 'buildCondition:', retDict['Message'] )
raise Exception( retDict['Message'] )
else:
escapeInValue = retDict['Value'][0]
condition = ' %s %s %s >= %s' % ( condition,
conjunction,
attrName,
escapeInValue )
conjunction = "AND"
if type( smaller ) == DictType:
for attrName, attrValue in smaller.items():
attrName = _quotedList( [attrName] )
if not attrName:
error = 'Invalid smaller argument'
self.log.warn( 'buildCondition:', error )
raise Exception( error )
retDict = self._escapeValues( [ attrValue ] )
if not retDict['OK']:
self.log.warn( 'buildCondition:', retDict['Message'] )
raise Exception( retDict['Message'] )
else:
escapeInValue = retDict['Value'][0]
condition = ' %s %s %s < %s' % ( condition,
conjunction,
attrName,
escapeInValue )
conjunction = "AND"
orderList = []
orderAttrList = orderAttribute
if type( orderAttrList ) != ListType:
orderAttrList = [ orderAttribute ]
for orderAttr in orderAttrList:
if orderAttr == None:
continue
if type( orderAttr ) not in StringTypes:
error = 'Invalid orderAttribute argument'
self.log.warn( 'buildCondition:', error )
raise Exception( error )
orderField = _quotedList( orderAttr.split( ':' )[:1] )
if not orderField:
error = 'Invalid orderAttribute argument'
self.log.warn( 'buildCondition:', error )
raise Exception( error )
if len( orderAttr.split( ':' ) ) == 2:
orderType = orderAttr.split( ':' )[1].upper()
if orderType in [ 'ASC', 'DESC']:
orderList.append( '%s %s' % ( orderField, orderType ) )
else:
error = 'Invalid orderAttribute argument'
self.log.warn( 'buildCondition:', error )
raise Exception( error )
else:
orderList.append( orderAttr )
if orderList:
condition = "%s ORDER BY %s" % ( condition, ', '.join( orderList ) )
if limit:
if offset:
condition = "%s LIMIT %d OFFSET %d" % ( condition, limit, offset )
else:
condition = "%s LIMIT %d" % ( condition, limit )
return condition
#############################################################################
def getFields( self, tableName, outFields = None,
condDict = None,
limit = False, conn = None,
older = None, newer = None,
timeStamp = None, orderAttribute = None,
greater = None, smaller = None ):
"""
Select "outFields" from "tableName" with condDict
N records can match the condition
return S_OK( tuple(Field,Value) )
if outFields == None all fields in "tableName" are returned
if limit is not False, the given limit is set
inValues are properly escaped using the _escape_string method, they can be single values or lists of values.
"""
table = _quotedList( [tableName] )
if not table:
error = 'Invalid tableName argument'
self.log.warn( 'getFields:', error )
return S_ERROR( error )
quotedOutFields = '*'
if outFields:
quotedOutFields = _quotedList( outFields )
if quotedOutFields == None:
error = 'Invalid outFields arguments'
self.log.warn( 'getFields:', error )
return S_ERROR( error )
self.log.verbose( 'getFields:', 'selecting fields %s from table %s.' %
( quotedOutFields, table ) )
if condDict == None:
condDict = {}
try:
try:
mylimit = limit[0]
myoffset = limit[1]
except:
mylimit = limit
myoffset = None
condition = self.buildCondition( condDict = condDict, older = older, newer = newer,
timeStamp = timeStamp, orderAttribute = orderAttribute, limit = mylimit,
greater = greater, smaller = smaller, offset = myoffset )
except Exception, x:
return S_ERROR( x )
return self._query( 'SELECT %s FROM %s %s' %
( quotedOutFields, table, condition ), conn, debug = True )
#############################################################################
def deleteEntries( self, tableName,
condDict = None,
limit = False, conn = None,
older = None, newer = None,
timeStamp = None, orderAttribute = None,
greater = None, smaller = None ):
"""
Delete rows from "tableName" with
N records can match the condition
if limit is not False, the given limit is set
String type values will be appropriately escaped, they can be single values or lists of values.
"""
table = _quotedList( [tableName] )
if not table:
error = 'Invalid tableName argument'
self.log.warn( 'deleteEntries:', error )
return S_ERROR( error )
self.log.verbose( 'deleteEntries:', 'deleting rows from table %s.' % table )
try:
condition = self.buildCondition( condDict = condDict, older = older, newer = newer,
timeStamp = timeStamp, orderAttribute = orderAttribute, limit = limit,
greater = greater, smaller = smaller )
except Exception, x:
return S_ERROR( x )
return self._update( 'DELETE FROM %s %s' % ( table, condition ), conn, debug = True )
#############################################################################
def updateFields( self, tableName, updateFields = None, updateValues = None,
condDict = None,
limit = False, conn = None,
updateDict = None,
older = None, newer = None,
timeStamp = None, orderAttribute = None,
greater = None, smaller = None ):
"""
Update "updateFields" from "tableName" with "updateValues".
updateDict is an alternative way to provide the updateFields and updateValues.
N records can match the condition
return S_OK( number of updated rows )
if limit is not False, the given limit is set
String type values will be appropriately escaped.
"""
if not updateFields and not updateDict:
return S_OK( 0 )
table = _quotedList( [tableName] )
if not table:
error = 'Invalid tableName argument'
self.log.warn( 'updateFields:', error )
return S_ERROR( error )
retDict = _checkFields( updateFields, updateValues )
if not retDict['OK']:
error = 'Mismatch between updateFields and updateValues.'
self.log.warn( 'updateFields:', error )
return S_ERROR( error )
if updateFields == None:
updateFields = []
updateValues = []
if updateDict:
if type( updateDict ) != DictType:
error = 'updateDict must be of type DictType'
self.log.warn( 'updateFields:', error )
return S_ERROR( error )
try:
updateFields += updateDict.keys()
updateValues += [updateDict[k] for k in updateDict.keys()]
except TypeError:
error = 'updateFields and updateValues must be lists'
self.log.warn( 'updateFields:', error )
return S_ERROR( error )
updateValues = self._escapeValues( updateValues )
if not updateValues['OK']:
self.log.warn( 'updateFields:', updateValues['Message'] )
return updateValues
updateValues = updateValues['Value']
self.log.verbose( 'updateFields:', 'updating fields %s from table %s.' %
( ', '.join( updateFields ), table ) )
try:
condition = self.buildCondition( condDict = condDict, older = older, newer = newer,
timeStamp = timeStamp, orderAttribute = orderAttribute, limit = limit,
greater = greater, smaller = smaller )
except Exception, x:
return S_ERROR( x )
updateString = ','.join( ['%s = %s' % ( _quotedList( [updateFields[k]] ),
updateValues[k] ) for k in range( len( updateFields ) ) ] )
return self._update( 'UPDATE %s SET %s %s' %
( table, updateString, condition ), conn, debug = True )
#############################################################################
def insertFields( self, tableName, inFields = None, inValues = None, conn = None, inDict = None ):
"""
Insert a new row in "tableName" assigning the values "inValues" to the
fields "inFields".
String type values will be appropriately escaped.
"""
table = _quotedList( [tableName] )
if not table:
error = 'Invalid tableName argument'
self.log.warn( 'insertFields:', error )
return S_ERROR( error )
retDict = _checkFields( inFields, inValues )
if not retDict['OK']:
self.log.warn( 'insertFields:', retDict['Message'] )
return retDict
if inFields == None:
inFields = []
inValues = []
if inDict:
if type( inDict ) != DictType:
error = 'inDict must be of type DictType'
self.log.warn( 'insertFields:', error )
return S_ERROR( error )
try:
inFields += inDict.keys()
inValues += [inDict[k] for k in inDict.keys()]
except TypeError:
error = 'inFields and inValues must be lists'
self.log.warn( 'insertFields:', error )
return S_ERROR( error )
inFieldString = _quotedList( inFields )
if inFieldString == None:
error = 'Invalid inFields arguments'
self.log.warn( 'insertFields:', error )
return S_ERROR( error )
inFieldString = '( %s )' % inFieldString
retDict = self._escapeValues( inValues )
if not retDict['OK']:
self.log.warn( 'insertFields:', retDict['Message'] )
return retDict
inValueString = ', '.join( retDict['Value'] )
inValueString = '( %s )' % inValueString
self.log.verbose( 'insertFields:', 'inserting %s into table %s'
% ( inFieldString, table ) )
return self._update( 'INSERT INTO %s %s VALUES %s' %
( table, inFieldString, inValueString ), conn, debug = True )
#####################################################################################
#
# This is a test code for this class, it requires access to a MySQL DB
#
if __name__ == '__main__':
import os
import sys
from DIRAC.Core.Utilities import Time
from DIRAC.Core.Base.Script import parseCommandLine
parseCommandLine()
if 'PYTHONOPTIMIZE' in os.environ and os.environ['PYTHONOPTIMIZE']:
gLogger.info( 'Unset python optimization "PYTHONOPTIMIZE"' )
sys.exit( 0 )
gLogger.info( 'Testing MySQL class...' )
HOST = '127.0.0.1'
USER = 'Dirac'
PWD = 'Dirac'
DB = 'AccountingDB'
TESTDB = MySQL( HOST, USER, PWD, DB )
assert TESTDB._connect()['OK']
TESTDICT = { 'TestTable' : { 'Fields': { 'ID' : "INTEGER UNIQUE NOT NULL AUTO_INCREMENT",
'Name' : "VARCHAR(256) NOT NULL DEFAULT 'Yo'",
'Surname' : "VARCHAR(256) NOT NULL DEFAULT 'Tu'",
'Count' : "INTEGER NOT NULL DEFAULT 0",
'Time' : "DATETIME",
},
'PrimaryKey': 'ID'
}
}
NAME = 'TestTable'
FIELDS = [ 'Name', 'Surname' ]
NEWVALUES = [ 'Name2', 'Surn2' ]
SOMEFIELDS = [ 'Name', 'Surname', 'Count' ]
ALLFIELDS = [ 'ID', 'Name', 'Surname', 'Count', 'Time' ]
ALLVALUES = [ 1, 'Name1', 'Surn1', 1, 'UTC_TIMESTAMP()' ]
ALLDICT = dict( Name = 'Name1', Surname = 'Surn1', Count = 1, Time = 'UTC_TIMESTAMP()' )
COND0 = {}
COND10 = {'Count': range( 10 )}
try:
RESULT = TESTDB._createTables( TESTDICT, force = True )
assert RESULT['OK']
print 'Table Created'
RESULT = TESTDB.getCounters( NAME, FIELDS, COND0 )
assert RESULT['OK']
assert RESULT['Value'] == []
RESULT = TESTDB.getDistinctAttributeValues( NAME, FIELDS[0], COND0 )
assert RESULT['OK']
assert RESULT['Value'] == []
RESULT = TESTDB.getFields( NAME, FIELDS )
assert RESULT['OK']
assert RESULT['Value'] == ()
print 'Inserting'
for J in range( 100 ):
RESULT = TESTDB.insertFields( NAME, SOMEFIELDS, ['Name1', 'Surn1', J] )
assert RESULT['OK']
assert RESULT['Value'] == 1
assert RESULT['lastRowId'] == J + 1
print 'Querying'
RESULT = TESTDB.getCounters( NAME, FIELDS, COND0 )
assert RESULT['OK']
assert RESULT['Value'] == [( {'Surname': 'Surn1', 'Name': 'Name1'}, 100L )]
RESULT = TESTDB.getDistinctAttributeValues( NAME, FIELDS[0], COND0 )
assert RESULT['OK']
assert RESULT['Value'] == ['Name1']
RESULT = TESTDB.getFields( NAME, FIELDS )
assert RESULT['OK']
assert len( RESULT['Value'] ) == 100
RESULT = TESTDB.getFields( NAME, SOMEFIELDS, COND10 )
assert RESULT['OK']
assert len( RESULT['Value'] ) == 10
RESULT = TESTDB.getFields( NAME, limit = 1 )
assert RESULT['OK']
assert len( RESULT['Value'] ) == 1
RESULT = TESTDB.getFields( NAME, ['Count'], orderAttribute = 'Count:DESC', limit = 1 )
assert RESULT['OK']
assert RESULT['Value'] == ( ( 99, ), )
RESULT = TESTDB.getFields( NAME, ['Count'], orderAttribute = 'Count:ASC', limit = 1 )
assert RESULT['OK']
assert RESULT['Value'] == ( ( 0, ), )
RESULT = TESTDB.getCounters( NAME, FIELDS, COND10 )
assert RESULT['OK']
assert RESULT['Value'] == [( {'Surname': 'Surn1', 'Name': 'Name1'}, 10L )]
RESULT = TESTDB._getFields( NAME, FIELDS, COND10.keys(), COND10.values() )
assert RESULT['OK']
assert len( RESULT['Value'] ) == 10
RESULT = TESTDB.updateFields( NAME, FIELDS, NEWVALUES, COND10 )
assert RESULT['OK']
assert RESULT['Value'] == 10
RESULT = TESTDB.updateFields( NAME, FIELDS, NEWVALUES, COND10 )
assert RESULT['OK']
assert RESULT['Value'] == 0
print 'Removing'
RESULT = TESTDB.deleteEntries( NAME, COND10 )
assert RESULT['OK']
assert RESULT['Value'] == 10
RESULT = TESTDB.deleteEntries( NAME )
assert RESULT['OK']
assert RESULT['Value'] == 90
RESULT = TESTDB.getCounters( NAME, FIELDS, COND0 )
assert RESULT['OK']
assert RESULT['Value'] == []
RESULT = TESTDB.insertFields( NAME, inFields = ALLFIELDS, inValues = ALLVALUES )
assert RESULT['OK']
assert RESULT['Value'] == 1
time.sleep( 1 )
RESULT = TESTDB.insertFields( NAME, inDict = ALLDICT )
assert RESULT['OK']
assert RESULT['Value'] == 1
time.sleep( 2 )
RESULT = TESTDB.getFields( NAME, older = 'UTC_TIMESTAMP()', timeStamp = 'Time' )
assert RESULT['OK']
assert len( RESULT['Value'] ) == 2
RESULT = TESTDB.getFields( NAME, newer = 'UTC_TIMESTAMP()', timeStamp = 'Time' )
assert len( RESULT['Value'] ) == 0
RESULT = TESTDB.getFields( NAME, older = Time.toString(), timeStamp = 'Time' )
assert RESULT['OK']
assert len( RESULT['Value'] ) == 2
RESULT = TESTDB.getFields( NAME, newer = Time.dateTime(), timeStamp = 'Time' )
assert RESULT['OK']
assert len( RESULT['Value'] ) == 0
RESULT = TESTDB.deleteEntries( NAME )
assert RESULT['OK']
assert RESULT['Value'] == 2
print 'OK'
except AssertionError:
print 'ERROR ',
if not RESULT['OK']:
print RESULT['Message']
else:
print RESULT
| gpl-3.0 | -1,148,100,160,074,171,400 | 33.767552 | 120 | 0.569072 | false |
lexelby/apiary | historical/sqllog_lengths.py | 1 | 2216 | #
# $LicenseInfo:firstyear=2010&license=mit$
#
# Copyright (c) 2010, Linden Research, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# $/LicenseInfo$
#
import bisect
import sys
from sqllog import *
from stattools import StatValue
class SequenceLengths(FollowSequences):
def __init__(self):
FollowSequences.__init__(self)
self.num_sequences = 0
self.bucket_times = [ float(1<<i) for i in xrange(16) ]
self.bucket_counts = [ 0 for i in self.bucket_times ]
def addingSequence(self, s, e):
self.num_sequences += 1
def notingEvent(self, s, e):
pass
def removingSequence(self, s, e):
t = float(s.time())
i = bisect.bisect_left(self.bucket_times, t)
self.bucket_counts[i] += 1
def writestats(self):
print "%30s: %8d" % ('num_sequences', self.num_sequences)
print "Histogram of sequence lengths (log scale):"
for i in xrange(len(self.bucket_times)):
print "%30.1f: %8d" % (self.bucket_times[i], self.bucket_counts[i])
if __name__ == '__main__':
f = SequenceLengths()
f.replay(input_events(sys.argv[1:]))
f.writestats()
| mit | -5,608,589,848,218,999,000 | 35.327869 | 81 | 0.680957 | false |
abe-winter/pg13-py | test_pg13/test_sqex.py | 1 | 3380 | import pytest
from pg13 import sqex, pgmock, sqparse2, treepath
def test_sub_arraylit():
from pg13.sqparse2 import ArrayLit,Literal,SubLit
arlit=ArrayLit([Literal('a'),SubLit,Literal('b')])
path,=treepath.sub_slots(arlit, lambda x:x is sqparse2.SubLit)
assert path==(('vals',1),)
arlit[path] = Literal('hello')
assert arlit.vals==[Literal('a'),Literal('hello'),Literal('b')] # this is checking that the setter closure didn't capture the end of the loop
# todo: test recursion *into* array
def test_sub_assignx():
# todo: test the rest of the SUBSLOT_ATTRS classes
from pg13.sqparse2 import SubLit,AssignX,Literal
asx=AssignX(None,SubLit)
path,=treepath.sub_slots(asx, lambda x:x is sqparse2.SubLit)
assert path==('expr',)
asx[path] = Literal('hello')
assert asx.expr==Literal('hello')
def test_sub_stmt():
# warning: a thorough test of this needs to exercise every syntax type. yikes. test_subslot_classes isn't enough.
from pg13.sqparse2 import Literal,CommaX
xsel=sqparse2.parse('select *,z-%s from t1 where x=%s')
p1,p2=treepath.sub_slots(xsel, lambda x:x is sqparse2.SubLit)
xsel[p1] = Literal(9)
xsel[p2] = Literal(10)
assert xsel.cols.children[1].right==Literal(9) and xsel.where.right==Literal(10)
xins=sqparse2.parse('insert into t1 values (%s,%s)')
p1,p2=treepath.sub_slots(xins, lambda x:x is sqparse2.SubLit)
xins[p1] = Literal('a')
xins[p2] = Literal('b')
assert xins.values==[Literal('a'), Literal('b')]
x2 = sqparse2.parse('coalesce(max(col),0)')
assert sqex.contains(x2,sqex.consumes_rows) # checking that sub_slots can descend into CallX.args
def test_sub_recurse():
exp = sqparse2.parse('select a + b + c + d from t1')
def matchfn(exp):
return isinstance(exp, sqparse2.BinX) and exp.op.op == '+'
recurse_paths = treepath.sub_slots(exp, matchfn, recurse_into_matches=True)
norecurse_paths = treepath.sub_slots(exp, matchfn, recurse_into_matches=False)
assert len(recurse_paths) == 3
assert len(norecurse_paths) == 1
def test_decompose_select():
# basics
nix,where = sqex.decompose_select(sqparse2.parse('select * from t1, t2'))
assert where ==[] and nix.table_order==['t1','t2']
# where from 'join on'
nix,where = sqex.decompose_select(sqparse2.parse('select * from t1 join t2 on x=y'))
assert nix.table_order==['t1','t2'] and isinstance(where[0],sqparse2.BinX)
def test_dfs():
from pg13.sqparse2 import Literal,ArrayLit
with pytest.raises(ValueError):
sqex.depth_first_sub(sqparse2.parse('select * from t1 where x=%s'), (10,[1,2]))
xsel = sqex.depth_first_sub(sqparse2.parse('select a+%s from t1 where x=%s'), (10,[1,2]))
assert xsel.cols.children[0].right==Literal(10)
assert xsel.where.right==ArrayLit((1,2))
def test_nix_aonly():
"NameIndexer support for nested select (i.e. alias-only table)"
ex = sqparse2.parse('select * from (select * from t1) as aonly')
nix = sqex.NameIndexer.ctor_fromlist(ex.tables)
assert isinstance(nix.aonly['aonly'],sqparse2.SelectX)
def test_eliminateseqchildren():
def get_paths(ex):
return treepath.sub_slots(ex, lambda x:isinstance(x,(sqparse2.AttrX,sqparse2.NameX)), match=True)
def transform(string):
return sqex.eliminate_sequential_children(get_paths(sqparse2.parse(string)))
assert [()]==transform('a.b')
assert [()]==transform('a')
assert [()]==transform('a.*')
assert []==transform('*')
| mit | 61,941,680,966,113,890 | 42.896104 | 143 | 0.702959 | false |
js850/PyGMIN | pygmin/gui/double_ended_connect_runner.py | 1 | 10710 | """
tools to run the double ended connect in a separte process and
make sure the the minima and transition states found are
incorporated back into the master database
"""
import multiprocessing as mp
import sys
import signal
import logging
import numpy as np
from PyQt4 import QtCore, QtGui
from pygmin.utils.events import Signal
class UnboundMinimum(object):
def __init__(self, minimum):
self._id = minimum._id
self.energy = minimum.energy
self.coords = minimum.coords.copy()
class UnboundTransitionState(object):
def __init__(self, ts):
self._id = ts._id
self.energy = ts.energy
self.coords = ts.coords.copy()
self.eigenvec = ts.eigenvec
self.eigenval = ts.eigenval
self._minimum1_id = ts._minimum1_id
self._minimum2_id = ts._minimum2_id
class OutLog(object):
"""for redirecting stdout or stderr
Every time something is written to this object, it is sent through
the pipe `conn`.
from http://www.riverbankcomputing.com/pipermail/pyqt/2009-February/022025.html
"""
def __init__(self, conn):
self.conn = conn
self.message = ""
def write(self, m):
if len(m) > 0:
self.conn.send(("stdout", m))
return
## sys.stderr.write(":sending message:"+ m)
# self.message += m
## if len(self.message) > 100:
## self.flush()
### self.conn.send(("stdout", m))
## if len(self.mes)
# if self.message[-1] == "\n":
# self.flush()
def flush(self):
# self.conn.send(("stdout", self.message))
# self.message = ""
pass
class DECProcess(mp.Process):
"""This object will run in a separate process and will actually do the connect run
when the run is finished the minima and transition states found will be sent
back through the pipe as UnboundMinimum and UnboundTransitionState objects
Parameters
----------
comm : pipe
child end of a mp.Pipe()
system :
min1, min2 :
the minima to try to connect
pipe_stdout : bool
if true log messages will be sent back through the pipe
return_smoothed_path : bool
if the run ends successfully the smoothed path will be sent
back through the pipe
"""
def __init__(self, comm, system, min1, min2, pipe_stdout=True,
return_smoothed_path=True):
mp.Process.__init__(self)
#QtCore.QThread.__init__(self)
self.comm = comm
self.system = system
self.min1, self.min2 = min1, min2
self.pipe_stdout = pipe_stdout
self.return_smoothed_path = return_smoothed_path
self.started = False
self.finished = False
def get_smoothed_path(self):
mints, S, energies = self.connect.returnPath()
clist = [m.coords for m in mints]
smoothpath = self.system.smooth_path(clist)
return smoothpath, S, energies
def test_success(self):
return self.connect.graph.areConnected(self.m1local, self.m2local)
def clean_up(self):
"send the lists of transition states and minima back to the parent process"
minima = [UnboundMinimum(m) for m in self.db.minima()]
tslist = [UnboundTransitionState(ts) for ts in self.db.transition_states()]
self.comm.send(("new coords", minima, tslist))
# return the success status
success = self.test_success()
self.comm.send(("success", success))
if success:
# return the smoothed path, or None if not successful
pathdata = self.get_smoothed_path()
self.comm.send(("smoothed path", pathdata))
# send signal we're done here
self.finished = True
self.comm.send(("finished",))
def terminate_early(self, *args, **kwargs):
sys.stderr.write("caught signal, cleaning up and exiting\n")
if self.started and not self.finished:
sys.stderr.write("starting clean up\n")
self.clean_up()
sys.stderr.write("finished clean up\n")
sys.stderr.write("exiting\n")
sys.exit(0)
def do_double_ended_connect(self):
db = self.system.create_database()
self.db = db
# min1 and min2 are associated with the old database, we need to create
# the minima again using the new database
self.m1local = db.addMinimum(self.min1.energy, self.min1.coords)
self.m2local = db.addMinimum(self.min2.energy, self.min2.coords)
self.started = True
self.connect = self.system.get_double_ended_connect(self.m1local, self.m2local, db,
fresh_connect=True, load_no_distances=True)
self.connect.connect()
def run(self):
signal.signal(signal.SIGTERM, self.terminate_early)
signal.signal(signal.SIGINT, self.terminate_early)
if self.pipe_stdout:
# print >> sys.stderr, "stderr"
self.mylog = OutLog(self.comm)
sys.stdout = self.mylog
logger = logging.getLogger("pygmin")
handles = logger.handlers
for h in handles:
# print >> sys.stderr, "removing handler", h
logger.removeHandler(h)
sh = logging.StreamHandler(self.mylog)
logger.addHandler(sh)
# import pygmin
# logger.removeHandler(pygmin.h)
# print >> sys.stderr, "stderr2"
self.do_double_ended_connect()
self.clean_up()
class DECRunner(QtCore.QObject):
"""Spawn a double ended connect run in a child process
This will spawn a new process and deal with the communication
Parameters
----------
system :
database : Database
The minima and transition states found will be added to the
database after the connect run is finished
min1, min2 : Minimum objects
the minima to try to connect
outstream : an object with attribute `outstream.write(mystring)`
the log messages from the connect run will be redirected here
return_smoothed_path : bool
if True the final smoothed path will be calculated
Attributes
----------
on_finished : Signal
this signal will be called when the connect job is finished
"""
def __init__(self, system, database, min1, min2, outstream=None,
return_smoothed_path=True, daemon=True):
QtCore.QObject.__init__(self)
self.system = system
self.database = database
self.min1, self.min2 = min1, min2
self.return_smoothed_path = return_smoothed_path
self.daemon = daemon
self.outstream = outstream
self.on_finished = Signal()
self.decprocess = None
self.newminima = set()
self.newtransition_states = set()
self.success = False
self.killed_early = False
self.is_running = False
def poll(self):
"""this does the checking in the background to see if any messages have been passed"""
# if not self.decprocess.is_alive():
# self.refresh_timer.stop()
# return
if not self.parent_conn.poll():
return
message = self.parent_conn.recv()
self.process_message(message)
def start(self):
"""start the connect job"""
if(self.decprocess):
if(self.decprocess.is_alive()):
return
parent_conn, child_conn = mp.Pipe()
self.conn = parent_conn
self.parent_conn = parent_conn
self.decprocess = DECProcess(child_conn, self.system, self.min1, self.min2,
pipe_stdout=(self.outstream is not None))
self.decprocess.daemon = self.daemon
self.decprocess.start()
# self.poll_thread = PollThread(self, parent_conn)
# self.poll_thread.start()
self.refresh_timer = QtCore.QTimer()
self.refresh_timer.timeout.connect(self.poll)
self.refresh_timer.start(1.)
self.is_running = True
def add_minima_transition_states(self, new_minima, new_ts):
"""Add the minima and transition states found to the database
convert the UnboundMinimum and UnboundTransitionStates to ones
bound to self.database
"""
print "processing new minima and ts"
self.newminima = set()
self.newtransition_states = set()
old2new = dict()
self.system.params.gui._sort_lists = False
for m in new_minima:
mnew = self.database.addMinimum(m.energy, m.coords)
old2new[m._id] = mnew
self.newminima.add(mnew)
for ts in new_ts:
m1id = ts._minimum1_id
m2id = ts._minimum2_id
m1new = old2new[m1id]
m2new = old2new[m2id]
tsnew = self.database.addTransitionState(ts.energy, ts.coords, m1new,
m2new, eigenval=ts.eigenval,
eigenvec=ts.eigenvec)
self.newtransition_states.add(tsnew)
nmin = len(new_minima)
nts = len(new_ts)
print "finished connect run: adding", nmin, "minima, and", nts, "transition states to database"
self.system.params.gui._sort_lists = True
def terminate_early(self):
self.killed_early = True
self.decprocess.terminate()
print "finished terminating"
self.is_running = False
# self.decprocess.join()
# print "done killing job"
# self.on_finished()
def finished(self):
"""the job is finished, do some clean up"""
self.decprocess.join()
self.decprocess.terminate()
self.decprocess.join()
self.refresh_timer.stop()
# print "done killing job"
self.on_finished()
self.is_running = False
def process_message(self, message):
if message[0] == "stdout":
self.outstream.write(message[1])
elif message[0] == "new coords":
new_minima, new_ts = message[1:]
self.add_minima_transition_states(new_minima, new_ts)
elif message[0] == "success":
self.success = message[1]
elif message[0] == "smoothed path":
pathdata = message[1]
self.smoothed_path, self.S, self.energies = pathdata
elif message[0] == "finished":
self.finished()
| gpl-3.0 | -897,367,410,067,978,000 | 33.111465 | 103 | 0.585528 | false |
kazuyamashi/cReComp | crecomp/userlogic.py | 1 | 5873 | # -*- coding: utf-8 -*-
# userlogicutil.py
# Kazushi Yamashina
import os
import sys
from veriloggen import *
from jinja2 import Environment, FileSystemLoader
import re
from crecomp import *
import verilog as vl
TEMPLATE = os.path.dirname(os.path.abspath(__file__)) + '/template'
class Info( ):
def __init__(self):
self.name = ""
self.ports = []
self.classname = ""
self.filepath = ""
def get_userlogicinfo(self, userlogicfile):
self.filepath = userlogicfile
name = os.path.basename(userlogicfile).replace(".v","")
userlogic = from_verilog.read_verilog_module(userlogicfile)
self.name = name
self.classname = (name[0]).upper() + name[1:]
m = userlogic[name]
ports = m.get_ports()
portnames = ports.keys()
a_port = None
for x in xrange(0,len(portnames)):
port_name = str(ports[portnames[x]])
sig_type = ports[portnames[x]].__class__.__name__
classtype = ports[portnames[x]].bit_length().__class__.__name__
if (classtype != "NoneType"):
bit_width = self.calc_bitwidth(str(ports[portnames[x]].width_msb)) + 1
else:
bit_width = 1
if sig_type == "Input":
a_port = vl.Input(port_name, bit_width)
elif sig_type == "Output":
a_port = vl.Output(port_name, bit_width)
elif sig_type == "Inout":
a_port = vl.Inout(port_name, bit_width)
self.ports.append(a_port)
def calc_bitwidth(self, bit_string):
elem_list = re.split(r"[\ ]",bit_string.translate(None, "()"))
if ("*" in elem_list) or ("/" in elem_list):
print "Error! \"*\" or \"/\" are included in bit definition "
print "Please remove \"*\" or \"/\" in user logic"
if elem_list[0].isdigit() :
bit_width = elem_list[0]
bit_width = 0
op = None
for x in xrange(0,len(elem_list)):
if elem_list[x].isdigit() :
if op is "+":
bit_width = bit_width + int(elem_list[x])
elif op is "-":
bit_width = bit_width - int(elem_list[x])
else:
bit_width = int(elem_list[x])
else:
if elem_list[x] is "+":
op = "+"
continue
elif elem_list[x] is "-":
op = "-"
continue
return bit_width
class Util():
def __init__(self):
pass
def get_portnames(self):
ret = []
for port in self.ports:
ret.append(port.name)
return ret
def assign(self, signame_u, signame_c):
self.assignlist.append((signame_u, signame_c))
class UserlogicBase(Util):
def __init__(self,name,uut):
self.name = name
self.filepath = ""
self.uut = uut
self.ports =[]
self.assignlist = []
def check_ulassign(ul, module):
ul_assign = {}
for assign in ul.assignlist:
(signame_u, signame_c) = assign
ul_assign.update({signame_u: signame_c})
ul_ports = ul.ports
checked_input = False
checked_output = False
checked_inout = False
checked_reg = False
checked_wire = False
for ulport in ul_ports:
checked_input = False
checked_output = False
checked_inout = False
checked_reg = False
checked_wire = False
for sig in module["input"]:
if sig.name == ul_assign[ulport.name]:
if sig.bit != ulport.bit:
raise Exception("Wrong assign ! Bit width is wrong \"%s = %s\" "%(sig.name, ulport.name))
if ulport.__class__.__name__ == "Output" or ulport.__class__.__name__ == "Inout":
raise Exception("Wrong signal type %s %s can't be assigned %s %s"%(sig.__class__.__name__, sig.name, ulport.__class__.__name__, ulport.name))
checked_input = True
break
if checked_input == True:
continue
for sig in module["output"]:
if sig.name == ul_assign[ulport.name]:
if sig.bit != ulport.bit:
raise Exception("Wrong assign ! Bit width is wrong \"%s = %s\" "%(sig.name, ulport.name))
if ulport.__class__.__name__ == "Input" or ulport.__class__.__name__ == "Inout":
raise Exception("Wrong signal type %s %s can't be assigned %s %s"%(sig.__class__.__name__, sig.name, ulport.__class__.__name__, ulport.name))
checked_output = True
break
if checked_output == True:
continue
for sig in module["inout"]:
if sig.name == ul_assign[ulport.name]:
if sig.bit != ulport.bit:
raise Exception("Wrong assign ! Bit width is wrong \"%s = %s\" "%(sig.name, ulport.name))
if ulport.__class__.__name__ != "Inout":
raise Exception("Wrong signal type %s %s can't be assigned %s %s"%(sig.__class__.__name__, sig.name, ulport.__class__.__name__, ulport.name))
checked_inout = True
break
if checked_inout == True:
continue
for sig in module["reg"]:
if sig.name == ul_assign[ulport.name]:
if sig.bit != ulport.bit:
raise Exception("Wrong assign ! Bit width is wrong \"%s = %s\" "%(sig.name, ulport.name))
checked_reg = True
break
if checked_reg == True:
continue
for sig in module["wire"]:
if sig.name == ul_assign[ulport.name]:
if sig.bit != ulport.bit:
raise Exception("Wrong assign ! Bit width is wrong \"%s = %s\" "%(sig.name, ulport.name))
checked_wire = True
break
if checked_wire == True:
continue
raise Exception("Wrong assign ! \"%s\" is not found in signal definition"%ul_assign[ulport.name])
def generate_ulpyclass(filename, userlogic):
template = open(filename, "w")
env = Environment(loader=FileSystemLoader(TEMPLATE))
tpl = env.get_template('ulclassmodel.jinja2')
gen = tpl.render({'userlogicmodule': userlogic, 'component_name': filename.replace(".py","") })
template.write(gen)
def generate_testbench(module_name, userlogicfile_path):
fd = open(module_name, "w")
module_name = os.path.basename(module_name).replace(".v","")
ul = Info()
ul.get_userlogicinfo(userlogicfile_path)
env = Environment(loader=FileSystemLoader(TEMPLATE))
tpl = env.get_template('testbench.jinja2')
gen = tpl.render({'ul' : ul, 'module_name' : module_name})
fd.write(gen)
return gen
if __name__ == '__main__':
ui = Info()
# ui.get_verilogports("../verilog/pwm_ctl.v")
ui.get_userlogicports("../verilog/sonic_sensor.v")
print ui.ports | bsd-3-clause | 1,356,792,465,663,143,400 | 27.935961 | 146 | 0.63528 | false |
qedsoftware/commcare-hq | corehq/apps/userreports/sql/data_source.py | 1 | 5578 | import numbers
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext
from dimagi.utils.decorators.memoized import memoized
from sqlagg.columns import SimpleColumn
from sqlagg.sorting import OrderBy
from corehq.apps.reports.sqlreport import SqlData, DatabaseColumn
from corehq.apps.userreports.decorators import catch_and_raise_exceptions
from corehq.apps.userreports.exceptions import InvalidQueryColumn
from corehq.apps.userreports.mixins import ConfigurableReportDataSourceMixin
from corehq.apps.userreports.reports.sorting import ASCENDING
from corehq.apps.userreports.reports.specs import CalculatedColumn
from corehq.apps.userreports.reports.util import get_expanded_columns
from corehq.apps.userreports.sql.connection import get_engine_id
from corehq.sql_db.connections import connection_manager
class ConfigurableReportSqlDataSource(ConfigurableReportDataSourceMixin, SqlData):
@property
def engine_id(self):
return get_engine_id(self.config)
@property
def filters(self):
return filter(None, [fv.to_sql_filter() for fv in self._filter_values.values()])
@property
def filter_values(self):
return {k: v for fv in self._filter_values.values() for k, v in fv.to_sql_values().items()}
@property
def group_by(self):
# ask each column for its group_by contribution and combine to a single list
return [
group_by for col_id in self.aggregation_columns
for group_by in self._get_db_column_ids(col_id)
]
@property
def order_by(self):
# allow throwing exception if the report explicitly sorts on an unsortable column type
if self._order_by:
return [
OrderBy(order_by, is_ascending=(order == ASCENDING))
for sort_column_id, order in self._order_by
for order_by in self._get_db_column_ids(sort_column_id)
]
elif self.top_level_columns:
try:
return [
OrderBy(order_by, is_ascending=True)
for order_by in self._get_db_column_ids(self.top_level_columns[0].column_id)
]
except InvalidQueryColumn:
pass
return []
@property
def columns(self):
# This explicitly only includes columns that resolve to database queries.
# The name is a bit confusing but is hard to change due to its dependency in SqlData
db_columns = [c for c in self.inner_columns if not isinstance(c, CalculatedColumn)]
fields = {c.slug for c in db_columns}
return db_columns + [
DatabaseColumn('', SimpleColumn(deferred_filter.field))
for deferred_filter in self._deferred_filters.values()
if deferred_filter.field not in fields]
@memoized
@method_decorator(catch_and_raise_exceptions)
def get_data(self, start=None, limit=None):
ret = super(ConfigurableReportSqlDataSource, self).get_data(start=start, limit=limit)
for report_column in self.top_level_db_columns:
report_column.format_data(ret)
for computed_column in self.top_level_computed_columns:
for row in ret:
row[computed_column.column_id] = computed_column.wrapped_expression(row)
return ret
@method_decorator(catch_and_raise_exceptions)
def get_total_records(self):
qc = self.query_context()
for c in self.columns:
# TODO - don't append columns that are not part of filters or group bys
qc.append_column(c.view)
session = connection_manager.get_scoped_session(self.engine_id)
return qc.count(session.connection(), self.filter_values)
@method_decorator(catch_and_raise_exceptions)
def get_total_row(self):
def _clean_total_row(val, col):
if isinstance(val, numbers.Number):
return val
elif col.calculate_total:
return 0
return ''
def _get_relevant_column_ids(col, column_id_to_expanded_column_ids):
return column_id_to_expanded_column_ids.get(col.column_id, [col.column_id])
expanded_columns = get_expanded_columns(self.top_level_columns, self.config)
qc = self.query_context()
for c in self.columns:
qc.append_column(c.view)
session = connection_manager.get_scoped_session(self.engine_id)
totals = qc.totals(
session.connection(),
[
column_id
for col in self.top_level_columns for column_id in _get_relevant_column_ids(col, expanded_columns)
if col.calculate_total
],
self.filter_values
)
total_row = [
_clean_total_row(totals.get(column_id), col)
for col in self.top_level_columns for column_id in _get_relevant_column_ids(
col, expanded_columns
)
]
if total_row and total_row[0] is '':
total_row[0] = ugettext('Total')
return total_row
def _get_db_column_ids(self, column_id):
# for columns that end up being complex queries (e.g. aggregate dates)
# there could be more than one column ID and they may specify aliases
if column_id in self._column_configs:
return self._column_configs[column_id].get_query_column_ids()
else:
# if the column isn't found just treat it as a normal field
return [column_id]
| bsd-3-clause | 1,145,583,826,430,436,100 | 37.736111 | 114 | 0.6436 | false |
maikito26/script.foscam | resources/lib/utils.py | 1 | 6718 | import os
import time
import glob
import xbmc
import xbmcaddon
import xbmcgui
import requests
__addon__ = xbmcaddon.Addon()
__id__ = __addon__.getAddonInfo('id')
__icon__ = __addon__.getAddonInfo('icon').decode("utf-8")
__version__ = __addon__.getAddonInfo('version')
addon_name = __addon__.getLocalizedString(32000)
TEXTURE_FMT = os.path.join(__addon__.getAddonInfo('path'), 'resources', 'media', '{0}.png')
ACTION_PREVIOUS_MENU = 10
ACTION_BACKSPACE = 110
ACTION_NAV_BACK = 92
ACTION_STOP = 13
ACTION_SELECT_ITEM = 7
INVALID_PASSWORD_CHARS = ('{', '}', ':', ';', '!', '?', '@', '\\', '/')
INVALID_USER_CHARS = ('@',)
def log(message, level=xbmc.LOGNOTICE):
xbmc.log("{0} v{1}: {2}".format(__id__, __version__, message), level=level)
def log_normal(message):
if int(__addon__.getSetting('debug')) > 0:
log(message)
def log_verbose(message):
if int(__addon__.getSetting('debug')) == 2:
log(message)
def log_error(message):
log(message, xbmc.LOGERROR)
def notify(msg, time=10000):
xbmcgui.Dialog().notification(addon_name, msg, __icon__, time)
def addon_info(info):
return __addon__.getAddonInfo(info)
def get_string(ident):
return __addon__.getLocalizedString(ident)
def get_setting(ident):
return __addon__.getSetting(ident)
def get_bool_setting(ident):
return get_setting(ident) == "true"
def get_int_setting(ident):
try:
return int(get_setting(ident))
except ValueError:
return None
def get_float_setting(ident):
return float(get_setting(ident))
def set_setting(ident, value):
__addon__.setSetting(ident, value)
def open_settings(callback=None):
if callback is not None:
callback()
__addon__.openSettings()
def invalid_char(credential, chars, stringid, show_dialog):
for char in chars:
if char in credential:
if show_dialog:
xbmcgui.Dialog().ok(get_string(32000), get_string(stringid),
" ", " ".join(chars))
return char
return False
def invalid_password_char(password, show_dialog=False):
return invalid_char(password, INVALID_PASSWORD_CHARS, 32105, show_dialog)
def invalid_user_char(user, show_dialog=False):
return invalid_char(user, INVALID_USER_CHARS, 32106, show_dialog)
def error_dialog(msg):
xbmcgui.Dialog().ok(get_string(32000), msg, " ", get_string(32102))
open_settings()
class SnapShot(object):
def __init__(self, path, interval, get_data):
self.time = time.time()
self.interval = interval
self.filename = os.path.join(path, "{0}.jpg".format(self.time))
self.get_data = get_data
def __enter__(self):
return self
def save(self):
with open(self.filename, 'wb') as output:
log_verbose("Snapshot {0}".format(self.filename))
data = self.get_data()
if data:
output.write(data)
return self.filename
else:
return ""
def __exit__(self, exc_type, exc_value, traceback):
current_time = time.time()
elapsed = current_time - self.time
log_verbose("Retrieving snapshot took {0:.2f} seconds".format(elapsed))
remaining = int(self.interval - elapsed*1000)
sleep = max(200, remaining)
log_verbose("Sleeping for {0} milliseconds".format(sleep))
xbmc.sleep(sleep)
try:
os.remove(self.filename)
except:
pass
else:
log_verbose("Deleted {0}".format(self.filename))
def get_mjpeg_frame(stream):
content_length = ""
try:
while not "Length" in content_length:
content_length = stream.readline()
log_verbose("Stream Readline: " + content_length)
bytes = int(content_length.split(':')[-1])
log_verbose("Stream JPEG Read Size: " + str(bytes))
content_length = stream.readline()
log_verbose("Stream Readline: " + content_length)
return stream.read(bytes)
except requests.RequestException as e:
log_error(str(e))
return None
class ExtractMJPEGFrames(object):
def __init__(self, path, duration, stream, callback, *args):
self.path = path
self.duration = duration
self.stream = stream
self.callback = callback
self.callback_args = args
self._stop = False
def __enter__(self):
return self
def stop(self):
self._stop = True
def start(self):
start_time = time.time()
current_time = start_time
frames = 0
while current_time < start_time + self.duration and not self._stop:
xbmc.sleep(1)
frame = get_mjpeg_frame(self.stream)
if frame:
filename = os.path.join(self.path, "snapshot.{0}.jpg".format(time.time()))
with open(filename, 'wb') as jpeg_file:
jpeg_file.write(frame)
self.callback(filename, *self.callback_args)
log_verbose("Snapshot {0}".format(filename))
current_time = time.time()
frames += 1
duration = current_time - start_time
log_normal("Average fps: {0:.2f}".format(frames / duration))
return int(duration)
def __exit__(self, exc_type, exc_value, traceback):
self.stream.close()
for jpg in glob.glob(os.path.join(self.path, "snapshot.*.jpg")):
try:
os.remove(jpg)
except:
log_verbose("Unable to delete {0}".format(jpg))
else:
log_verbose("Deleted {0}".format(jpg))
class Monitor(xbmc.Monitor):
def __init__(self, updated_settings_callback):
xbmc.Monitor.__init__(self)
self.updated_settings_callback = updated_settings_callback
def onSettingsChanged(self):
self.updated_settings_callback()
class StopResumePlayer(xbmc.Player):
def maybe_stop_current(self):
if self.isPlaying():
self.resume_time = self.getTime()
self.previous_file = self.getPlayingFile()
self.stop()
log_normal("Stopped {0}".format(self.previous_file))
else:
self.previous_file = None
def maybe_resume_previous(self):
if self.previous_file is not None:
resume_time_str = "{0:.1f}".format(self.resume_time - 10.)
log_normal("Resuming {0} at {1}".format(self.previous_file, resume_time_str))
listitem = xbmcgui.ListItem()
listitem.setProperty('StartOffset', resume_time_str)
self.play(self.previous_file, listitem)
| gpl-3.0 | 8,232,375,941,994,584,000 | 28.991071 | 91 | 0.591694 | false |
mlperf/training_results_v0.7 | Fujitsu/benchmarks/resnet/implementations/implementation_open/mxnet/3rdparty/tvm/vta/apps/tsim_example/python/tsim.py | 1 | 1886 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import ctypes
import os.path as osp
from sys import platform
def get_ext():
return ".dylib" if platform == "darwin" else ".so"
def load_dll(dll):
try:
return [ctypes.CDLL(dll, ctypes.RTLD_GLOBAL)]
except OSError:
return []
def load_sw():
cur_path = osp.dirname(osp.abspath(osp.expanduser(__file__)))
sw_libname = "libsw" + get_ext()
sw_lib = osp.join(cur_path, "..", "build", sw_libname)
load_dll(sw_lib)
def init(hw_backend):
"""Init hardware and software shared library for accelerator
Parameters
------------
hw_backend : str
Hardware backend can be verilog or chisel
"""
cur_path = osp.dirname(osp.abspath(osp.expanduser(__file__)))
hw_libname = "libhw" + get_ext()
if hw_backend in ("verilog", "chisel"):
hw_lib = osp.join(cur_path, "..", "hardware", hw_backend, "build", hw_libname)
m = tvm.module.load(hw_lib, "vta-tsim")
load_sw()
f = tvm.get_global_func("tvm.vta.tsim.init")
f(m)
def load_module():
load_sw()
return tvm.get_global_func("tvm.vta.driver")
| apache-2.0 | -4,434,247,329,592,706,000 | 31.517241 | 86 | 0.677094 | false |
redhat-openstack/python-openstackclient | openstackclient/identity/v2_0/catalog.py | 1 | 3147 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Identity v2 Service Catalog action implementations"""
import six
from openstackclient.common import command
from openstackclient.common import utils
from openstackclient.i18n import _
def _format_endpoints(eps=None):
if not eps:
return ""
ret = ''
    for ep in eps:
        region = ep.get('region')
        if region is None:
            region = '<none>'
        ret += region + '\n'
        for endpoint_type in ['publicURL', 'internalURL', 'adminURL']:
            url = ep.get(endpoint_type)
            if url:
                ret += "  %s: %s\n" % (endpoint_type, url)
return ret
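# Illustrative note (added, not part of the original source): for a service with a
# single region the helper above renders a block roughly like
#
#   RegionOne
#     publicURL: http://controller:8774/v2/<tenant-id>
#     internalURL: http://controller:8774/v2/<tenant-id>
#     adminURL: http://controller:8774/v2/<tenant-id>
#
# where the URLs are placeholders taken from the catalog entry.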
class ListCatalog(command.Lister):
"""List services in the service catalog"""
def take_action(self, parsed_args):
# This is ugly because if auth hasn't happened yet we need
# to trigger it here.
sc = self.app.client_manager.session.auth.get_auth_ref(
self.app.client_manager.session,
).service_catalog
data = sc.get_data()
columns = ('Name', 'Type', 'Endpoints')
return (columns,
(utils.get_dict_properties(
s, columns,
formatters={
'Endpoints': _format_endpoints,
},
) for s in data))
class ShowCatalog(command.ShowOne):
"""Display service catalog details"""
def get_parser(self, prog_name):
parser = super(ShowCatalog, self).get_parser(prog_name)
parser.add_argument(
'service',
metavar='<service>',
help=_('Service to display (type or name)'),
)
return parser
def take_action(self, parsed_args):
# This is ugly because if auth hasn't happened yet we need
# to trigger it here.
sc = self.app.client_manager.session.auth.get_auth_ref(
self.app.client_manager.session,
).service_catalog
data = None
for service in sc.get_data():
if (service.get('name') == parsed_args.service or
service.get('type') == parsed_args.service):
data = service
data['endpoints'] = _format_endpoints(data['endpoints'])
if 'endpoints_links' in data:
data.pop('endpoints_links')
break
if not data:
self.app.log.error(_('service %s not found\n') %
parsed_args.service)
return ([], [])
return zip(*sorted(six.iteritems(data)))
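# Illustrative note (added): ListCatalog and ShowCatalog back the
# "openstack catalog list" and "openstack catalog show <service>" CLI commands,
# assuming the usual python-openstackclient entry-point registration.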
| apache-2.0 | 2,654,443,084,731,837,000 | 31.78125 | 77 | 0.579282 | false |
dahlia/iterfzf | setup.py | 1 | 12011 | import distutils.core
import distutils.errors
import json
import os
import os.path
import platform
import re
import shutil
import sys
import tarfile
import tempfile
import warnings
import zipfile
try:
import urllib2
except ImportError:
from urllib import request as urllib2
from setuptools import setup
fzf_version = '0.20.0'
version = '0.5.' + fzf_version
release_url = ('https://api.github.com/repos/junegunn/fzf-bin/releases/tags/' +
fzf_version)
asset_filename_re = re.compile(
r'^fzf-(?P<ver>\d+\.\d+\.\d+)-'
r'(?P<plat>[^-]+)_(?P<arch>[^.]+)'
r'.(?P<ext>tgz|tar\.gz|tar\.bz2|zip)$'
)
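# Illustrative note (added): the pattern above is meant to match release asset names
# such as "fzf-0.20.0-linux_amd64.tgz" or "fzf-0.20.0-windows_386.zip", capturing the
# version, platform, architecture and archive extension.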
fzf_bin_path = os.path.join(os.path.dirname(__file__), 'iterfzf', 'fzf')
fzf_windows_bin_path = os.path.join(os.path.dirname(__file__),
'iterfzf', 'fzf.exe')
urllib_retry = 3
def readme():
path = os.path.join(os.path.dirname(__file__), 'README.rst')
try:
with open(path) as f:
return f.read()
except IOError:
pass
def get_fzf_release(access_token=None):
filename = 'fzf-{0}-release.json'.format(fzf_version)
filepath = os.path.join(os.path.dirname(__file__), filename)
try:
with open(filepath) as f:
d = f.read()
except IOError:
if access_token:
request = urllib2.Request(
release_url,
headers={'Authorization': 'token ' + access_token},
)
else:
request = release_url
try:
r = urllib2.urlopen(request)
except urllib2.HTTPError as e:
            if e.code == 403 and e.info().get('X-RateLimit-Remaining') == '0':
raise RuntimeError(
                    'GitHub rate limit reached. To increase the limit use '
'-g/--github-access-token option.\n ' + str(e)
)
elif e.code == 401 and access_token:
raise RuntimeError('Invalid GitHub access token.')
raise
d = r.read()
r.close()
mode = 'w' + ('b' if isinstance(d, bytes) else '')
try:
with open(filename, mode) as f:
f.write(d)
except IOError:
pass
try:
return json.loads(d)
except TypeError:
return json.loads(d.decode('utf-8'))
def get_fzf_binary_url(plat, arch, access_token=None):
release = get_fzf_release(access_token=access_token)
for asset in release['assets']:
m = asset_filename_re.match(asset['name'])
if not m:
warnings.warn('unmatched filename: ' + repr(asset['name']))
continue
elif m.group('ver') != fzf_version:
warnings.warn('unmatched version: ' + repr(asset['name']))
continue
elif m.group('plat') == plat and m.group('arch') == arch:
return asset['browser_download_url'], m.group('ext')
def extract(stream, ext, extract_to):
with tempfile.NamedTemporaryFile() as tmp:
shutil.copyfileobj(stream, tmp)
tmp.flush()
tmp.seek(0)
if ext == 'zip':
z = zipfile.ZipFile(tmp, 'r')
try:
info, = z.infolist()
with open(extract_to, 'wb') as f:
f.write(z.read(info))
finally:
z.close()
elif ext == 'tgz' or ext.startswith('tar.'):
tar = tarfile.open(fileobj=tmp)
try:
member, = [m for m in tar.getmembers() if m.isfile()]
rf = tar.extractfile(member)
with open(extract_to, 'wb') as wf:
shutil.copyfileobj(rf, wf)
finally:
tar.close()
else:
raise ValueError('unsupported file format: ' + repr(ext))
def download_fzf_binary(plat, arch, overwrite=False, access_token=None):
bin_path = fzf_windows_bin_path if plat == 'windows' else fzf_bin_path
if overwrite or not os.path.isfile(bin_path):
asset = get_fzf_binary_url(plat, arch, access_token)
url, ext = asset
if access_token:
url = '{0}?access_token={1}'.format(url, access_token)
try:
r = urllib2.urlopen(url)
except urllib2.HTTPError as e:
            if e.code == 403 and e.info().get('X-RateLimit-Remaining') == '0':
raise RuntimeError(
                    'GitHub rate limit reached. To increase the limit use '
'-g/--github-access-token option.\n ' + str(e)
)
elif e.code == 401 and access_token:
raise RuntimeError('Invalid GitHub access token.')
raise
extract(r, ext, bin_path)
r.close()
mode = os.stat(bin_path).st_mode
if not (mode & 0o111):
os.chmod(bin_path, mode | 0o111)
def get_current_plat_arch():
archs = {
'i686': '386', 'i386': '386',
'x86_64': 'amd64', 'amd64': 'amd64',
}
machine = platform.machine()
if not machine and sys.platform in ('win32', 'cygwin'):
bits, linkage = platform.architecture()
try:
machine = {'32bit': 'i386', '64bit': 'amd64'}[bits]
except KeyError:
raise ValueError('unsupported architecture: ' +
repr((bits, linkage)))
machine = machine.lower()
if sys.platform.startswith('linux'):
archs.update(
armv5l='arm5', armv6l='arm6', armv7l='arm7', armv8l='arm8',
)
try:
arch = archs[machine]
except KeyError:
raise ValueError('unsupported machine: ' + repr(machine))
if sys.platform.startswith('linux'):
return 'linux', arch
elif sys.platform.startswith('freebsd'):
return 'freebsd', arch
    elif sys.platform.startswith('openbsd'):
        return 'openbsd', arch
elif sys.platform == 'darwin':
return 'darwin', arch
elif sys.platform in ('win32', 'cygwin'):
return 'windows', arch
else:
raise ValueError('unsupported platform: ' + repr(sys.platform))
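# Illustrative note (added): on a typical 64-bit Linux host this helper returns
# ("linux", "amd64"), on 64-bit Windows ("windows", "amd64"); bundle_fzf below uses
# these pairs as its default --plat/--arch values.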
class bundle_fzf(distutils.core.Command):
description = 'download and bundle a fzf binary'
user_options = [
('plat=', 'p', 'platform e.g. windows, linux, freebsd, darwin'),
('arch=', 'a', 'architecture e.g. 386, amd64, arm8'),
('no-overwrite', 'O', 'do not overwrite if fzf binary exists'),
(
'github-access-token=', 'g',
            'GitHub API access token to increase the rate limit',
),
]
boolean_options = ['no-overwrite']
def initialize_options(self):
try:
self.plat, self.arch = get_current_plat_arch()
except ValueError:
self.plat = None
self.arch = None
self.no_overwrite = None
self.github_access_token = None
self.plat_name = None
def finalize_options(self):
if self.plat is None:
raise distutils.errors.DistutilsOptionError(
'-p/--plat option is required but missing'
)
if self.arch is None:
raise distutils.errors.DistutilsOptionError(
'-a/--arch option is required but missing'
)
try:
self.plat_name = self.get_plat_name()
except ValueError as e:
raise distutils.errors.DistutilsOptionError(str(e))
distutils.log.info('plat_name: %s', self.plat_name)
def get_plat_name(self, plat=None, arch=None):
plat = plat or self.plat
arch = arch or self.arch
if plat == 'linux':
arch_tags = {
'386': 'i686', 'amd64': 'x86_64',
'arm5': 'armv5l', 'arm6': 'armv6l',
'arm7': 'armv7l', 'arm8': 'armv8l',
}
try:
arch_tag = arch_tags[arch]
except KeyError:
raise ValueError('unsupported arch: ' + repr(arch))
return 'manylinux1_' + arch_tag
elif plat in ('freebsd', 'openbsd'):
arch_tags = {'386': 'i386', 'amd64': 'amd64'}
try:
arch_tag = arch_tags[arch]
except KeyError:
raise ValueError('unsupported arch: ' + repr(arch))
return '{0}_{1}'.format(plat, arch_tag)
elif plat == 'darwin':
if arch == '386':
archs = 'i386',
elif arch == 'amd64':
archs = 'intel', 'x86_64'
else:
raise ValueError('unsupported arch: ' + repr(arch))
macs = 10, 11, 12
return '.'.join('macosx_10_{0}_{1}'.format(mac, arch)
for mac in macs for arch in archs)
elif plat == 'windows':
if arch == '386':
return 'win32'
elif arch == 'amd64':
return 'win_amd64'
else:
raise ValueError('unsupported arch: ' + repr(arch))
else:
raise ValueError('unsupported plat: ' + repr(plat))
def run(self):
dist = self.distribution
try:
bdist_wheel = dist.command_options['bdist_wheel']
except KeyError:
self.warn(
                'this command is intended to be used together with bdist_wheel'
' (e.g. "{0} {1} bdist_wheel")'.format(
dist.script_name, ' '.join(dist.script_args)
)
)
else:
typename = type(self).__name__
bdist_wheel.setdefault('universal', (typename, True))
plat_name = self.plat_name
bdist_wheel.setdefault('plat_name', (typename, plat_name))
bdist_wheel_cls = dist.cmdclass['bdist_wheel']
get_tag_orig = bdist_wheel_cls.get_tag
def get_tag(self): # monkeypatch bdist_wheel.get_tag()
if self.plat_name_supplied and self.plat_name == plat_name:
return get_tag_orig(self)[:2] + (plat_name,)
return get_tag_orig(self)
bdist_wheel_cls.get_tag = get_tag
download_fzf_binary(self.plat, self.arch,
overwrite=not self.no_overwrite,
access_token=self.github_access_token)
if dist.package_data is None:
dist.package_data = {}
dist.package_data.setdefault('iterfzf', []).append(
'fzf.exe' if self.plat == 'windows' else 'fzf'
)
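# Illustrative usage note (added, not part of the original file): the command is meant
# to be combined with bdist_wheel so the downloaded fzf binary ends up inside a
# platform-tagged wheel, e.g.
#
#     python setup.py bundle_fzf --plat linux --arch amd64 bdist_wheel
#
# (option names follow the user_options declared on the class above)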
setup(
name='iterfzf',
version=version,
description='Pythonic interface to fzf',
long_description=readme(),
url='https://github.com/dahlia/iterfzf',
author='Hong Minhee',
author_email='hong.minhee' '@' 'gmail.com',
license='GPLv3 or later',
packages=['iterfzf'],
package_data={'iterfzf': ['py.typed']},
cmdclass={'bundle_fzf': bundle_fzf},
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4',
install_requires=['setuptools'],
zip_safe=False,
include_package_data=True,
download_url='https://github.com/dahlia/iterfzf/releases',
keywords='fzf',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console :: Curses',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)', # noqa: E501
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: BSD :: FreeBSD',
'Operating System :: POSIX :: BSD :: OpenBSD',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Terminals',
]
)
| gpl-3.0 | 289,778,294,269,218,750 | 34.535503 | 99 | 0.53784 | false |
maelnor/cinder | cinder/tests/fake_flags.py | 1 | 1947 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder import flags
FLAGS = flags.FLAGS
flags.DECLARE('iscsi_num_targets', 'cinder.volume.drivers.lvm')
flags.DECLARE('policy_file', 'cinder.policy')
flags.DECLARE('volume_driver', 'cinder.volume.manager')
flags.DECLARE('xiv_proxy', 'cinder.volume.drivers.xiv')
flags.DECLARE('backup_driver', 'cinder.backup.manager')
def_vol_type = 'fake_vol_type'
def set_defaults(conf):
conf.set_default('default_volume_type', def_vol_type)
conf.set_default('volume_driver',
'cinder.tests.fake_driver.FakeISCSIDriver')
conf.set_default('iscsi_helper', 'fake')
conf.set_default('connection_type', 'fake')
conf.set_default('fake_rabbit', True)
conf.set_default('rpc_backend', 'cinder.openstack.common.rpc.impl_fake')
conf.set_default('iscsi_num_targets', 8)
conf.set_default('verbose', True)
conf.set_default('connection', 'sqlite://', group='database')
conf.set_default('sqlite_synchronous', False)
conf.set_default('policy_file', 'cinder/tests/policy.json')
conf.set_default('xiv_proxy', 'cinder.tests.test_xiv.XIVFakeProxyDriver')
conf.set_default('backup_driver', 'cinder.tests.backup.fake_service')
| apache-2.0 | 2,221,961,948,262,156,300 | 42.266667 | 78 | 0.721109 | false |
VirToReal/VirToCut | Python Software/CommandsStack.py | 1 | 50840 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#VirToCut - Control software for a dynamic plate saw machine
#Copyright (C) 2016 Benjamin Hirmer - hardy at virtoreal.net
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
#TODO
# Account for backlash on the Y and Z axes
# Set accelerations for the hardware buttons
# Set accelerations in Marlin + Abstand_Saegeblatt_zum_Materialanschlag with "print bed" offset
# Interpret the cutting speed automatically while cutting; return travel always at maximum speed
# Switch the power supply on and off automatically (ControlGUI.py)
# Discard the generated cutting template when the feed is moved
safety_blade_distance = 2 # distance between the cutting blade and the material during feed when the saw cuts in the opposite direction
class CommandsStack: # Class for sending predefined G-code sequences
    # Pre-initialised values of private variables:
    _verbose = False
    def __init__ (self, verbose, tools, serialsession, scale, material, label_position, schneidvorlage_items, status_items, gpioner): # Receives the connection parameters when the class is instantiated
        self._verbose = verbose # take over verbose mode
        self._serial = serialsession # instance of the current serial connection
        self._scale = scale
        self._material = material
        self._label_position = label_position # tuple with the GTK labels showing the current X/Y/Z position
        self._schneidvorlage_items = schneidvorlage_items # tuple with all GTK elements required for the cutting template
        self._status_items = status_items # tuple with GTK elements used by the status display
        self.gpioner = gpioner # instance of the initialised GPIO port of the Raspberry Pi
        self.svprogress = False # initialise processing of the cutting template as disabled
        self.blockbuttons = False # allow access to hardware/software buttons in principle
        self.feedsvalue = 0 # initialise the total feed of a generated interpreter instance with '0'
        self.__cutvalue = False # treat the maximum saw position as fully extended when cutting in the opposite direction
        self.tools = tools # take over the Tools class
        self.load() # load the current G-codes right away
        self.cutting_template_editstate = False # pre-initialised state of the cutting template
        self._schneivorlage_filepath = None # pre-initialised file path for cutting templates
    def load (self): # Load the current G-codes and settings from the YAML file
try: # Versuche G-Code Datei zu laden
self._gcode = self.tools.yaml_load(self._verbose, ['Config'], 'GCode')
self.checkmaxcut_value = self.checkmaxcut() # Prüfe ob die Säge entgegengesetzt schneidet
except:
self.tools.verbose(self._verbose, "Konnte GCode-Datei nicht laden, evtl exisitiert diese noch nicht")
self._gcode = None
try: # Versuche Einstellungen zu laden
self._settings = self.tools.yaml_load(self._verbose, ['Config'], 'Einstellungen') #TODO ist das notwendig?
except:
self.tools.verbose(self._verbose, "Konnte Einstellungs-Datei nicht laden, evtl exisitiert diese noch nicht")
self._settings = None
    def checkgcode (self, section): # Check whether G-code is available
if self._gcode:
if section == 'ALL': # Prüfe ob für alle Arbeitsschritte G-Code angelegt worden ist
if self.checkgcode('HOME') and self.checkgcode('VORHERIG') and self.checkgcode('NACHFOLGEND') and self.checkgcode('ANPRESSEN') and self.checkgcode('SCHNEIDEN') and self.checkgcode('VORSCHUB') and self.checkgcode('RUECKFAHRT') and self.checkgcode('FREIGEBEN'):
return True
else:
return False
elif self._gcode[section]: # Prüfe Arbeitsschritt ob G-Code angelegt wurde
return True
else:
self.tools.verbose(self._verbose, "kein G-Code für diesen Arbeitsschritt '" + section + "' vorhanden, kann Befehl nicht ausführen. Bitte G-Code Satz unter 'Einstellungen' anlegen")
return False
else:
self.tools.verbose(self._verbose, "keine G-Codes unter 'Einstellungen' angelegt")
    def checkvalue (self, gcode, value, placeholder, constant, section): # Replaces the placeholders <placeholder>/<time_saw>/<time_vac> with the corresponding value
gcode = gcode.replace('<time_saw>', str(self._settings['PDS']['Nachlaufzeit_Saege'])) # Ersetzt <time_saw> mit Wert aus Einstellungen
gcode = gcode.replace('<time_vac>', str(self._settings['PDS']['Nachlaufzeit_Staubsauger'])) # Ersetzt <time_vac> mit Wert aus Einstellungen
if constant: # Wenn nur auf Konstanten geprüft werden soll, Platzhalter für <value> ignorieren
return gcode
else:
if '<' + placeholder + '>' in gcode: # Prüfe ob Platzhalter im G-Code
if value != None:
return gcode.replace('<' + placeholder + '>', str(value))
else:
self.tools.verbose(self._verbose, "kein Distanzwert für diesen Arbeitsschritt '" + section + "' erhalten, wird aber in diesen Arbeitsschritt dringend benötigt!")
return False
else:
self.tools.verbose(self._verbose, "kein '<value>' Platzhalter für diesen Arbeitsschritt '" + section + "' gefunden, wird aber in diesen Arbeitsschritt dringend benötigt!")
return False
    def checkmaxcut (self): # Check whether the saw cuts in the opposite direction by checking the 'SCHNEIDEN' (cut) work step for the '<max_cut>' placeholder
if self.checkgcode('SCHNEIDEN'): #Prüfe ob Arbeitsschritt angelegt wurde
if any(x in self._gcode['SCHNEIDEN'] for x in ['<max_cut>']): #Prüfe Arbeitsschritt "Schneiden" auf Platzhalter
self._status_items[2].set_reveal_child(True) #Zeige Togglebutton zum überschreiben der maximalen Sägenposition an um diese nicht vollständig ausfahren zu müssen
return True # Gebe 'True' zurück falls vorhanden
else:
self._status_items[2].set_reveal_child(False) #Deaktiviere Togglebutton zum überschreiben der maximalen Sägenposition
return False # andernfalls 'False'
else:
return False # Gebe 'False' zurück falls nicht angelegt
    def toggle_ms (self, force=False, deactivate=False): # Toggles the option to treat the current saw position as fully extended
status = self._status_items[3].get_active() # Prüfe welchen Status der ToggleButton hat
if status and not deactivate: # Wenn eingeschaltet
if force: # Überschreiben wird vom Anwender angefordert
xvalue = float(self._label_position[0].get_text())
if xvalue < self._settings['PDS']['Abstand_Saegeblatt_zum_Materialanschlag']: # Säge befindet sich noch im eingefahrenen Bereich
self.tools.verbose(self._verbose, "Säge ist noch eingefahren, warum Sägenposition als max. ausgefahren ansehen?", True)
self._status_items[3].set_active(False) # ToggleButton deaktivieren
else:
self.__cutvalue = xvalue
else: # andernfalls Überschreiben vom Anwender bestätigen lassen
self.tools.infobar('QUESTION', 'Wirklich momentane Sägenposition als maximal ausgefahren ansehen? Auf Sägeblatt aufpassen!', 'YES/NO')
elif deactivate:
self._status_items[3].set_active(False) # ToggleButton deaktivieren
else:
self.tools.infobar('','','HIDE') # Falls Infobar bereits geöffnet, diese wieder deaktivieren
    def getmaterialthickness (self): # Gets the selected material thickness and returns it
self.materiallist = self._settings['VFM']['Vorhandene_Materialstaerken']
selected = self._material.get_active_text() # Text der Materialauswahl
for i in self.materiallist: # Durchlaufe alle Einträge und vergleiche mit der Auswahl
if i[0] == selected: #Gebe Materialstärke zurück
return i[1]
break
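    # Illustrative note (added, not part of the original source): the setting
    # 'Vorhandene_Materialstaerken' is assumed to hold (label, thickness) pairs, e.g.
    # [["MDF 19mm", 19.0], ["Multiplex 12mm", 12.0]]; the method above returns the
    # thickness whose label matches the current combo-box selection.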
    def BUTTON (self, buttontype, function, secfunction=False): # Function handling a button press in the software or on the hardware
try:
scalevalue = float(self._scale.get_text()) # Hole Schrittweite
except:
self.tools.verbose(self._verbose, "Schrittweite noch nicht gesetzt, bitte korrigieren!")
scalevalue = 0
xvalue = float(self._label_position[0].get_text()) # Hole Position X-Achse
yvalue = float(self._label_position[1].get_text()) # Hole Position Y-Achse
if not self.blockbuttons and (xvalue != "N/V" or yvalue != "N/V"): # Prüfen ob die Buttons freigegeben sind
if buttontype == "SW": # Software-Buttons
if function == "GV": # Software-Button - Vorschub ganz vor
self.vorschub(2, 1000) #Code 2 = Benutzerausgeführt
elif function == "V": # Software-Button - Vorschub vor
self.vorschub(2, scalevalue) #Code 2 = Benutzerausgeführt
elif function == "Z": # Software-Button - Vorschub zurück
self.rueckfahrt(2, scalevalue) #Code 2 = Benutzerausgeführt
elif function == "GZ": # Software-Button - Vorschub ganz zurück
self.rueckfahrt(2, 0) #Code 2 = Benutzerausgeführt
elif function == "S" and self.svprogress and self.__confirmedstate == "SW": #Software-Button - "Schneiden" wurde im Programmmodus betätigt
if self.gcodeblock == 0:
self.sequenced_sending(1, 'SW') #Bestätige ersten G-Code Block zum abfertigen
elif self.gcodeblock > 0:
self.sequenced_sending(2, 'SW') #Bestätige ersten G-Code Block zum abfertigen
elif function == "S": # Software-Button - Schneiden
self.schneiden(True, xvalue, self.getmaterialthickness())
elif function == "H": # Software-Button - Homen
self.home(1) #Code 1 = Benutzerseitig ausgeführt
elif function == "AP": # Software-Button - Anpressen
self.anpressen(2, self.getmaterialthickness()) #Code 2 = Benutzerausgeführt
elif function == "AH": # Software-Button - Anheben
self.freigeben(2) #Code 2 = Benutzerausgeführt
elif buttontype == "HW": # Hardware-Buttons
if function == "VV": # Hardware-Button - Vorschub vor
self.vorschub(2, self._settings['HPDS']['Schrittweite_Vorschub']) #Code 2 = Benutzerausgeführt
elif function == "VZ": # Hardware-Button - Vorschub zurück
if secfunction:
self.rueckfahrt(2, 0) #Code 2 = Benutzerausgeführt
else:
self.rueckfahrt(2, self._settings['HPDS']['Schrittweite_Vorschub']) #Code 2 = Benutzerausgeführt
elif function == "SV": # Hardware-Button - Säge vor
if secfunction: # Hardware-Button - "Säge vor" lange gedrückt -> Säge ganz ausfahren
distance = self._settings['PDS']['Fahrbare_Strecke']
else: # Hardware-Button - "Säge vor" kurz gedrückt -> Säge um Schrittweite ausfahren
distance = self._settings['HPDS']['Schrittweite_Saege']
if xvalue == self._settings['PDS']['Fahrbare_Strecke']: # Säge ist bereits ganz ausgefahren
self.gpioner.ButtonPressed(0, 1, 'MovementError', 3) #Lasse Bewegungs-Buttons auf Bedienpaneel 3x blinken
cdist = 0
elif distance + xvalue > self._settings['PDS']['Fahrbare_Strecke']: #Prüfe ob Platz frei um vor zu fahren
cdist = self._settings['PDS']['Fahrbare_Strecke'] - xvalue
else:
cdist = distance
if cdist > 0:
self._serial.sending('G91\nG0 X%s\nG90' % str(cdist), 2)
self._label_position[0].set_text(str(xvalue + cdist))
elif function == "SZ": # Hardware-Button - Säge zurück
if secfunction: # Hardware-Button - "Säge zurück" lange gedrückt -> Säge ganz einfahren
distance = self._settings['PDS']['Fahrbare_Strecke']
else: # Hardware-Button - "Säge zurück" kurz gedrückt -> Säge um Schrittweite einfahren
distance = self._settings['HPDS']['Schrittweite_Saege']
if xvalue == 0: # Säge ist bereits ganz eingefahren
self.gpioner.ButtonPressed(0, 1, 'MovementError', 3) #Lasse Bewegungs-Buttons auf Bedienpaneel 3x blinken
cdist = 0
elif xvalue < distance: #Prüfe ob Platz frei um zurück zu fahren
cdist = xvalue
else:
cdist = distance
if cdist > 0:
self._serial.sending('G91\nG0 X-%s\nG90' % str(cdist), 2)
self._label_position[0].set_text(str(xvalue - cdist))
elif function == "S" and self.svprogress and self.__confirmedstate == "HW": #Hardware-Button - "Schneiden" wurde im Programmmodus betätigt
if self.gcodeblock == 0:
self.sequenced_sending(1, 'HW') #Bestätige ersten G-Code Block zum abfertigen
elif self.gcodeblock > 0:
self.sequenced_sending(2, 'HW') #Bestätige ersten G-Code Block zum abfertigen
elif function == "S": # Hardware-Button - Schneiden
self.schneiden(True, 0, self.getmaterialthickness())
else:
self.tools.verbose(self._verbose, "Die Buttons werden momentan von der Software blockiert")
    def cutting_template_interpreter (self, cutting_template): # Interprets the cutting template and converts it into G-code
# Hole die Momentan-Einstellungen die vor dem Programmstart anliegen
materialthickness = self.getmaterialthickness() # Hole Auswahl der Materialdicke
sawbladethickness = self._settings['PDS']['Schnittbreite'] # Hole Schnittbreite aus Einstellungen
xvalue = float(self._label_position[0].get_text()) # Hole Position X-Achse
yvalue = float(self._label_position[1].get_text()) # Hole Position Y-Achse
zvalue = float(self._label_position[2].get_text()) # Hole Position Z-Achse
self.__error = False # evtl. anstehende Fehler von vorherigen Schneidvorlage zurücksetzen
self.gcodeblock = 0 # Setze abgearbeitete G-Code Blöcke auf 0
self.feedsvalue = 0 # Setzt den Gesamtvorschub einer erzeugten Interpreter-Instanz auf 0
self.gcodestack = [] # Liste aller zu erzeugenden Abläufe
self.maxvlstack = [] # Liste aller maximalen Schnittweiten je G-Code Block
neuezeile = '\n' # Kommando für eine neue Zeile
rotated = False # Variable die den Urzustand definiert und nur zur Nachkontrolle verändert wird
#TODO Sachen im Interpreter zu erledigen
# - Aufteilung ändern wenn "Schnitte manuell bestätigen" und die dazugehörige Staubsauger/Sägenschaltung
maxvalues = [] # Temporär gespeicherte maximale Schnittweite eines G-Code Blocks
vorherig = self.vorherig() # 'Vorherig' in GCode-Vorlage einfügen wenn vorhanden
nachfolgend = self.nachfolgend() # 'Nachfolgend' in GCode-Vorlage einfügen wenn vorhanden
rowcount = 1 #Anfangen mit Zeilenummer 1
if vorherig:
gcodestring = vorherig + neuezeile
else:
gcodestring = None
for l in cutting_template.split('\n'): # Lese Zeile für Zeile aus Schneidvorlage
if not l == '': # Leerzeilen überspringen
checkrow = self.tools.check_template(str(l))
if checkrow: #Prüfe ob etwas auswertbares erzeugt wurde
if checkrow[0] == 0: # Tupel mit Parametern erhalten
if checkrow[1]: # Distanz für Vorschub liegt vor
bevor = gcodestring
vorschub = self.vorschub(False, checkrow[1] + sawbladethickness) # Übergebe Arbeitsschrittfunktion die Vorschubdistanz mit der Sägeblattbreite
if bevor == None and vorschub: # GCode "Vorschieben" zusammenfügen
gcodestring = vorschub + neuezeile
elif bevor and vorschub:
gcodestring = bevor + vorschub + neuezeile
if checkrow[2]: # Distanz für Schnittlänge liegt vor
bevor = gcodestring
anpressen = self.anpressen(False, materialthickness)
schneiden = self.schneiden(False, checkrow[2])
maxvalues.append(checkrow[2]) # Füge Schnittweite der temporären Schnittlängenliste an
freigeben = self.freigeben(False)
if bevor == None and anpressen and schneiden and freigeben:
gcodestring = anpressen + neuezeile + schneiden + neuezeile + freigeben + neuezeile
elif bevor and anpressen and schneiden and freigeben:
gcodestring = bevor + anpressen + neuezeile + schneiden + neuezeile + freigeben + neuezeile
#TODO self._settings['ZDS']['Schnitte_manuell_bestaetigen'] // Prüfen ob einzelne Schnitte durchgeführt werden müssen, falls ja, in Blöcke aufteilen
elif checkrow[0] == 1: # Aufforderung zum Drehen des Materials erhalten
rotated = True
self.gcodestack.append(gcodestring) # Füge ersten manuellen Arbeitsschritt (Material drehen) in GCode-Stack
self.maxvlstack.append(max(maxvalues)) # Füge maximale Schnittweite dieses G-Code Blocks in Liste hinzu
maxvalues = [] # Setzte temporäre Liste zurück
gcodestring = None
elif checkrow[0] == 2: # Aufforderung zur Aktion des Benutzers erhalten (Material richtig einlegen)
if not rotated: # Prüfe auf einen inkonsistenten Zustand der auftreten kann wenn die Schneivorlage falsch ist
self.tools.verbose(self._verbose, "Aufforderung zum drehen des Materials noch nicht erfolgt, Schneidvorlage scheint fehlerhaft zu sein")
self.__error = True #Teile Benutzer mit, das Probleme aufgetreten sind
else:
self.gcodestack.append(gcodestring)
self.maxvlstack.append(max(maxvalues)) # Füge maximale Schnittweite dieses G-Code Blocks in Liste hinzu
maxvalues = [] # Setzte temporäre Liste zurück
gcodestring = None
else:
self.tools.verbose(self._verbose, "Nichts auswertbares in Schneidvorlage Zeile: '" + str(rowcount) + "' gefunden")
self.__error = True #Teile Benutzer mit, das Probleme aufgetreten sind
rowcount += 1 #Fehlerangabenseite hochzählen
if self.__error: #Fehler aufgetreten beim überprüfen der Schneidvorlage
if self.__error == '1': #Fehlercode 1 / Typischer Fehler: Nur die Materialstärke wurde vergessen
self.tools.verbose(self._verbose, "Schneidvorlage nicht akzeptiert, Sie haben kein Material ausgewählt", True)
else:
self.tools.verbose(self._verbose, "Schneidvorlage nicht akzeptiert, sie produziert Fehler - Debug aktivieren für Details", True)
else:
self.tools.infobar('INFO', "Schneidvorlage akzeptiert, Sie können die Schneidvorlage senden")
if not gcodestring == '' and not rotated: # Interpreter braucht keinen Block erzeugen da nur ein Durchlauf
self._schneidvorlage_items[7].set_sensitive(True) #Button "Schneidvorlage an Säge senden" aktivieren
gcodestring = gcodestring + neuezeile + nachfolgend
self.gcodestack.append(gcodestring) #Bringe G-Code in einheitliches System
self.maxvlstack.append(max(maxvalues)) #Bringe maximale Schnittweiten in einheitliches System
self.tools.verbose(self._verbose, "Der Interpreter hat folgenden G-Code generiert:\n" + gcodestring + "\nDabei hat er folgende maximale Schnittweite ermittelt:\n" + str(self.maxvlstack[0]))
elif self.gcodestack:
self.gcodestack.append(gcodestring + neuezeile + nachfolgend) #letzten gcodestring an Liste anhängen
self.maxvlstack.append(max(maxvalues)) #letzten maxmimalen Schnittweiten-Wert an Liste anhängen
self._schneidvorlage_items[7].set_sensitive(True) #Button "Schneidvorlage an Säge senden" aktivieren
self.tools.verbose(self._verbose, "Der Interpreter hat folgende G-Code Abfolge generiert:\n" + str(self.gcodestack) + "\nDabei hat er folgende maximalen Schnittweiten ermittelt:\n" + str(self.maxvlstack))
else:
self.tools.verbose(self._verbose, "Schneidvorlage gibt vor nur horizontale Schnitte vorzulegen, will jedoch das Material drehen lassen, Schneidvorlage scheint fehlerhaft zu sein", True)
    def sequenced_sending (self, step, confirmed=False): # Sends G-code sequences to the Arduino on command
if step == 1 and not self.svprogress: #Ersten G-Code Block Abarbeiten
self._status_items[0].set_reveal_child(True) #Zeige Fortschrittsanzeige
self.svprogress = True #Aktiviere Fortschritt
self._schneidvorlage_items[7].set_image(self._schneidvorlage_items[8]) #Wandle Button "Schneidvorlage an Säge senden" in "Sägevorgang abbrechen" um
self._schneidvorlage_items[7].set_label("Sägevorgang abbrechen")
if self.gcodeblock == 0 and not confirmed: #Sicherstellen ob am Anfang der G-Code Blöcke
if self._settings['HPDS']['Start_anfordern']: #Prüfen ob Hardware-Button gedrückt werden muss
self.tools.infobar('INFO', "Bitte Vor-Ort an der Säge den Start bestätigen") #Weise Anwender darauf hin, das er den Beginn über die Hardware bestätigen muss
self.gpioner.ButtonBlink(23, 1, "ONOFF", True) #Lasse 'Schneiden' Button blinken bis Anwender darauf drückt
self.__confirmedstate = 'HW' #Bestätigungen sollten über die Hardware erfolgen
else:
gcode = self.gcodestack[0].replace('<max_cut>', str(self.maxvlstack[0] + safety_blade_distance)) # Ersetzt <max_cut> mit maximaler Schnittweite des ersten G-Code Blocks wenn vorhanden
self._serial.sending(gcode, 0) #Sende direkt ersten Block an Maschine
self.gcodeblock += 1 #Abgearbeiteten Block hochzählen
self.blockbuttons = True #Alle Buttons sperren
self.__confirmedstate = 'SW' #Bestätigungen sollten über die Software erfolgen
elif self.gcodeblock == 0 and confirmed == self.__confirmedstate:
gcode = self.gcodestack[0].replace('<max_cut>', str(self.maxvlstack[0] + safety_blade_distance)) # Ersetzt <max_cut> mit maximaler Schnittweite des ersten G-Code Blocks wenn vorhanden
self._serial.sending(gcode, 0) #Sende nach Bestätigung über Hardware ersten Block an Maschine
self.gcodeblock += 1 #Abgearbeiteten Block hochzählen
else:
self.tools.verbose(self._verbose, "Es sollen G-Code Sequenzen gesendet werden, jedoch von der falschen Funktion -> Fehler im Programm", True)
elif step == 2 and self.svprogress: #Nächsten Schritt abarbeiten
if self.gcodeblock > 0: #Sicherstellen ob 1. G-Code Block schon abgearbeitet wurde
if self._settings['HPDS']['Start_anfordern']: #Prüfen ob dies Vor-Ort geschehen muss
if self.__confirmedstate == 'HW' and not confirmed: #Fortschritt fordert ersten Block
self.tools.infobar('INFO', "Bitte Vor-Ort an der Säge den nächsten Schnitt bestätigen")
self.blockbuttons = False #Alle Buttons freigeben
elif self.__confirmedstate == confirmed: #Anwender bestätigt neuen Schnitt
gcode = self.gcodestack[self.gcodeblock].replace('<max_cut>', str(self.maxvlstack[self.gcodeblock] + safety_blade_distance)) # Ersetzt <max_cut> mit maximaler Schnittweite des jeweils nächsten G-Code Blocks wenn vorhanden
self._serial.sending(gcode, 0) #Sende jeweils nächsten G-Code Block
self.gcodeblock += 1 #Abgearbeiteten Block hochzählen
self.blockbuttons = True #Alle Buttons sperren
else:
self.tools.verbose(self._verbose, "Einstellungen fordern Vor-Ort Bestätigung, Programm hat diese im 1. Schritt jedoch nicht erhalten.", True)
else:
if self.__confirmedstate == 'SW' and not confirmed: #Fortschritt fordert neuen Block
self.tools.infobar('INFO', "Bitte in der Software den nächsten Schnitt bestätigen")
self.blockbuttons = False #Alle Buttons freigeben
elif self.__confirmedstate == confirmed: #Anwender bestätigt neuen Schnitt
gcode = self.gcodestack[self.gcodeblock].replace('<max_cut>', str(self.maxvlstack[self.gcodeblock] + safety_blade_distance)) # Ersetzt <max_cut> mit maximaler Schnittweite des jeweils nächsten G-Code Blocks wenn vorhanden
self._serial.sending(gcode, 0) #Sende jeweils nächsten G-Code Block
self.gcodeblock += 1 #Abgearbeiteten Block hochzählen
self.blockbuttons = True #Alle Buttons sperren
else:
self.tools.verbose(self._verbose, "Erster G-Code Block noch nicht abgearbeitet, es wird jedoch schon der nächste aufgerufen -> Fehler im Programm", True)
elif step == 3 and self.svprogress: #Alle Schritte abgearbeitet
self.tools.infobar('INFO', "Schneidvorlage vollständig abgearbeitet!")
self._schneidvorlage_items[7].set_image(self._schneidvorlage_items[9]) #Wandle Button "Sägevorgang abbrechen" in "Schneidvorlage an Säge senden" um
self._schneidvorlage_items[7].set_label("Schneidvorlage an Säge senden")
self.svprogress = False #Deaktiviere Fortschrittsanzeige
self._status_items[0].set_reveal_child(False) #Verstecke Fortschrittsanzeige
self.__confirmedstate = None
self.gcodeblock = 0
self._status_items[1].set_value(0) #GtkLevelBar für G-Code Block Fortschritt wieder auf 0 setzen
self.blockbuttons = False #Alle Buttons freigeben
else: # Falls Button "Sägevorgang abbrechen" gedrückt wird
self.tools.verbose(self._verbose, "Sägevorgang wird abgebrochen", True)
if self._serial.stopsending(): #Töte Sendethread und resette Arduino
self.tools.verbose(self._verbose, "Sägevorgang wurde abgebrochen", True)
self._schneidvorlage_items[7].set_image(self._schneidvorlage_items[9]) #Wandle Button "Sägevorgang abbrechen" in "Schneidvorlage an Säge senden" um
self._schneidvorlage_items[7].set_label("Schneidvorlage an Säge senden")
self.svprogress = False #Deaktiviere Fortschrittsanzeige
self._status_items[0].set_reveal_child(False) #Verstecke Fortschrittsanzeige
self.__confirmedstate = None
self.gcodeblock = 0 #Setze abzuarbeitende G-Code Blöcke wieder auf 0
self._status_items[1].set_value(0) #GtkLevelBar für G-Code Block Fortschritt wieder auf 0 setzen
    def get_transmission_status (self): # Returns the number of G-code blocks and the number of processed blocks, and updates the G-code block progress bar
stackcount = len(self.gcodestack)
percentage = self.gcodeblock / stackcount #Wert für G-Code Block Fortschrittsanzeige 0-1
self._status_items[1].set_value(percentage) #Wert für GtkLevelBar
return (stackcount, self.gcodeblock)
    def cutting_template_load (self, filepath): # Load a cutting template from a file into a text buffer for display and hand it to the interpreter, which checks it for validity
if self.tools.check_file(self._verbose, filepath):
self._schneivorlage_filepath = filepath
self._schneidvorlage_items[0].set_text("Datei geladen: " + str(filepath)) # Stelle geladene Dateipfad in Schneidvorlage dar
with open (filepath, 'r') as f:
data = f.read()
self._schneidvorlage_items[2].set_text(data) #Schneidvorlagen TextBuffer mit Dateiinhalt füllen
self.cutting_template_interpreter(data) # Lasse Schneidvorlage vom Interpreter überprüfen
self._schneidvorlage_items[6].set_sensitive(True) #Button "Bearbeiten" aktivieren
self._schneidvorlage_items[4].set_sensitive(True) #Menüitem "Schneivorlage speichern" aktivieren
self._schneidvorlage_items[5].set_sensitive(True) #Menüitem "Schneivorlage speichern unter" aktivieren
    def cutting_template_edit (self): # Edit the cutting template
if self._schneivorlage_filepath:
titlepath = str(self._schneivorlage_filepath)
else:
titlepath = "neue Schneidvorlage"
if not self.cutting_template_editstate:
self._schneidvorlage_items[0].set_text("Bearbeite: " + titlepath + '*') # Stelle geladene Dateipfad in Schneidvorlage dar
self._schneidvorlage_items[7].set_sensitive(False) #Button "Schneidvorlage an Säge senden" deaktivieren
self._schneidvorlage_items[1].set_sensitive(True) #TextView Widget aktivieren
self._schneidvorlage_items[6].set_label('Fertig') #Button "Bearbeiten" in "Fertig" umbeschriften
self.cutting_template_editstate = True
else:
self._schneidvorlage_items[0].set_text("Bearbeitet: " + titlepath + '*') # Stelle geladene Dateipfad in Schneidvorlage dar
self._schneidvorlage_items[1].set_sensitive(False) #TextView Widget deaktivieren
self._schneidvorlage_items[6].set_label('Bearbeiten') #Button "Fertig" in "Bearbeiten" umbeschriften
self.cutting_template_interpreter(self.tools.fetch_textbuffer(self._verbose, self._schneidvorlage_items[2])) # Lasse bearbeitete Schneidvorlage vom Interpreter überprüfen
self.cutting_template_editstate = False
    def cutting_template_save (self): # Overwrite the opened cutting template with the new content
if self._schneivorlage_filepath: # Wenn Datei geöffnet, neuen TextBuffer-Inhalt in Datei schreiben
text = self.tools.fetch_textbuffer(self._verbose, self._schneidvorlage_items[2]) # Hole Text aus Schneidvorlagen Text-Buffer
with open(self._schneivorlage_filepath, 'w') as f:
f.write(text)
self._schneidvorlage_items[0].set_text("Datei gespeichert: " + str(self._schneivorlage_filepath)) # Stelle gespeicherten Dateipfad in Schneidvorlage dar
    def cutting_template_save_as (self, filepath): # Save the opened cutting template to a new location
self._schneivorlage_filepath = self.tools.check_format(self._verbose, filepath, 1, '.vtc') #Pfad von Schneidvorlge
self._schneidvorlage_items[4].set_sensitive(True) #Menüitem "Schneivorlage speichern" aktivieren
text = self.tools.fetch_textbuffer(self._verbose, self._schneidvorlage_items[2]) # Hole Text aus Schneidvorlagen Text-Buffer
status = self.tools.save_file(self._verbose, text, self._schneivorlage_filepath, ) #Speichere Datei
self._schneidvorlage_items[0].set_text("Datei gespeichert: " + str(self._schneivorlage_filepath)) # Stelle gespeicherten Dateipfad in Schneidvorlage dar
    def cutting_template_new (self): # Create a new cutting template
self._schneidvorlage_items[0].set_text("neue Schneidvorlage") #Beschrifte die neue Schneidvorlage
self._schneidvorlage_items[2].set_text('') #TextView leeren
self._schneidvorlage_items[1].set_sensitive(True) #TextView Widget aktivieren
self._schneidvorlage_items[6].set_sensitive(True) #Button "Bearbeiten" aktivieren
self._schneidvorlage_items[6].set_label('Fertig') #Button "Bearbeiten" in "Fertig" umbeschriften
self._schneidvorlage_items[7].set_sensitive(False) #Button "Schneidvorlage an Säge senden" deaktivieren
#TODO Button "Schneidvorlage an Säge senden" nicht deaktivieren, wenn säge bereits sägt, würde sonst das Stoppen verhindern
self._schneidvorlage_items[5].set_sensitive(True) #Menüitem "Schneivorlage speichern unter" aktivieren
self.cutting_template_editstate = True
    # The following functions handle the execution of the individual work steps and check that the required variables are complete
    # They can partly be executed directly by the user, but are mainly called by the program interpreter
    def home (self, user): # Work step - homing
if self.checkgcode('HOME'):
if user:
self._serial.sending(self._gcode['HOME'], user) #Wenn von Benutzer ausgelöst, direkt an die serielle Schnittstelle senden
else:
return self._gcode['HOME']
else:
self.__error = True #Teile Benutzer mit, das Probleme aufgetreten sind
    def vorherig (self): # Work step - G-code executed before the program starts
return self.checkvalue(self._gcode['VORHERIG'], None, 'value', True, 'VORHERIG') #G-Code zurück zum Programmgenerator
    def nachfolgend (self): # Work step - G-code executed after the program
return self.checkvalue(self._gcode['NACHFOLGEND'], None, 'value', True, 'NACHFOLGEND') #G-Code zurück zum Programmgenerator
    def anpressen (self, user, materialthickness): # Work step - G-code generated for clamping the material
if self.checkgcode('ANPRESSEN'):
if materialthickness != None: #Materialstärke sollte ausgewählt sein
newgcode = self.checkvalue(self._gcode['ANPRESSEN'], float(self._settings['PDA']['Fahrbare_Strecke']) - float(materialthickness), 'value', False, 'ANPRESSEN') #Ersetze <value> mit den Wert der fahrbaren Strecke abz. der Materialdicke
if newgcode:
if user:
self._serial.sending(newgcode, user) #Wenn von Benutzer ausgelöst, direkt an die serielle Schnittstelle senden
else:
return newgcode #Angepassten G-Code zurück zum Programmgenerator
else:
self.__error = True #Teile Benutzer mit, das Probleme aufgetreten sind
else:
self.tools.verbose(self._verbose, "kann Material nicht arretieren, da keine Särke für das Material zum anpressen erhalten")
self.__error = '1' #Teile Benutzer mit, das Probleme aufgetreten sind
else:
self.__error = True #Teile Benutzer mit, das Probleme aufgetreten sind
    def schneiden (self, user, distance, materialthickness=None): # Work step - G-code generated for cutting
if self.checkgcode('SCHNEIDEN') and self.checkgcode('ANPRESSEN') and self.checkgcode('FREIGEBEN'):
if distance < self._settings['PDS']['Fahrbare_Strecke']:
xvalue = float(self._label_position[0].get_text()) #Hole Position Säge
if user: #Bei manuellen 'schneiden' Material auch anpressen
if materialthickness != None: #Materialstärke sollte ausgewählt sein
if self.__cutvalue and xvalue == self.__cutvalue: # Prüfe ob ein manuelles "Überschreiben" der max. Sägenposition vom Anwender vorliegt und auch an richtiger Position steht
maxdistance = self.__cutvalue #<max_cut> als Ausgangspunkt akzeptieren
distance = xvalue #Säge komplett einfahren lassen
elif self.checkmaxcut_value and xvalue == self._settings['PDS']['Fahrbare_Strecke']: # Säge schneidet in rückwärts-Richtung und ist vollständig ausgefahren
maxdistance = self._settings['PDS']['Fahrbare_Strecke'] #momentane Position als Ausgangspunkt akzeptieren
distance = xvalue #Säge komplett einfahren lassen
elif not self.checkmaxcut_value and xvalue <= 0: # Säge schneidet in vorwärts-Richtung und befindet sich im Schutzbereich
maxdistance = self._settings['PDS']['Fahrbare_Strecke'] #Platzhalter hier eigentlich nicht nötig, bei Verwendung dennoch belegen
distance = self._settings['PDS']['Fahrbare_Strecke'] #Säge komplett ausfahren lassen
else: # Säge ist falsch positioniert
self.tools.verbose(self._verbose, "Säge läuft Gefahr vom Vorschub beschädigt zu werden, Aktion verweigert!", True)
if self.checkmaxcut_value:
self.toggle_ms(False, True) # Deaktiviere max. Sägenposition "Überschreiben" Togglebutton
return False # Funktion verlassen
try:
gcode = self.checkvalue(self._gcode['SCHNEIDEN'], distance, 'value', False, 'SCHNEIDEN') #Hänge angepassten G-Code für das 'Schneiden' an
gcode = self.checkvalue(gcode, maxdistance, 'max_cut', False, 'SCHNEIDEN') #Hänge angepassten G-Code für das 'Schneiden' an
newgcode = self.vorschub(False, self._settings['PDS']['Schnittbreite']) #Schiebe Material um eine Sägeblattbreite nach vorn
newgcode = newgcode + "\n" + self.anpressen(False, materialthickness) #Hole 'Anpressen' G-Code
newgcode = newgcode + "\n" + gcode #Optimierten 'Schneiden' G-Code einsetzen
newgcode = newgcode + "\n" + self.freigeben(False) #Hänge 'Freigeben' G-Code an
self._serial.sending(newgcode, user) #Anpressen + Schneiden + Freigeben an den seriellen Port schicken
except:
self.tools.verbose(self._verbose, "beim Versuch die Säge zu Bewegen ist ein Fehler aufgetreten", True)
else:
self.tools.verbose(self._verbose, "kann keinen Schneiddurchlauf starten, da keine Särke für das Material zum anpressen erhalten", True)
else: #Bei automatischen 'schneiden'
newgcode = self.checkvalue(self._gcode['SCHNEIDEN'], distance, 'value', False, 'SCHNEIDEN')
if newgcode:
return newgcode #Angepassten G-Code zurück zum Programmgenerator
else:
self.__error = True #Teile Benutzer mit, das Probleme aufgetreten sind
else:
self.tools.verbose(self._verbose, "Schnittlänge überschreitet die Eingestellte fahrbare Stecke der Säge")
self.__error = True #Teile Benutzer mit, das Probleme aufgetreten sind
else:
self.__error = True #Teile Benutzer mit, das Probleme aufgetreten sind
    def vorschub (self, user, distance): # Work step - G-code generated for the feed
if user: #Bei manuellem Vorschub
if not 'N/V' in self._label_position[3].get_text():
xvalue = float(self._label_position[0].get_text())
if self.__cutvalue: # Prüfe ob ein manuelles "Überschreiben" der max. Sägenposition vom Anwender vorliegt
if xvalue == self.__cutvalue: # Befindet sich Säge an der Position zum Zeitpunkt der Aktivierung, manuelle "maximale" Sägendistanz beibehalten
maxdistance = self.__cutvalue
else: # Ansonsten den ToggleButton deaktivieren und max. Sägenposition wieder auf voll ausgefahren definieren
self.toggle_ms(False, True) # Deaktiviere max. Sägenposition "Überschreiben" Togglebutton
maxdistance = self._settings['PDS']['Fahrbare_Strecke']
else: # Ansonsten als vollständig auszufahren ansehen
maxdistance = self._settings['PDS']['Fahrbare_Strecke']
minxdistance = self._settings['PDS']['Abstand_Saegeblatt_zum_Materialanschlag']
if not self.svprogress: #Wenn kein Programm läuft
if self.checkmaxcut_value: #Prüfe ob Säge entgegengesetzt schneidet
max_cut = maxdistance # Säge muss ganz ausgefahren sein
errortext = "Säge muss vollständig ausgefahren sein"
else:
max_cut = False #Säge muss eingeparkt sein
errortext = "Säge muss eingefahren sein"
else: #Falls programm läuft
if self.checkmaxcut_value: #Prüfe ob Säge entgegengesetzt schneidet
max_cut = self.maxvlstack[self.gcodeblock] #Die maximale Plattenbreite dem aktiven Block entnehmen
errortext = "Säge muss mindestens " + str(max_cut) + "mm ausgefahren sein"
else:
max_cut = False #Säge muss eingeparkt sein
errortext = "Säge muss eingefahren sein"
if (xvalue < minxdistance and not max_cut) or (xvalue >= max_cut and max_cut): #Prüfe ob beim Vorschub das Sägeblatt nicht beschädigt wird
if distance > 999: #Vorschub nach ganz vorne
distance = self._settings['PDV']['Fahrbare_Strecke'] - float(self._label_position[3].get_text()) #Bilde restliche Distanz aus momentaner Position und eingestellter fahrbaren Strecke
self._serial.sending('G91\nG0 Y' + str(distance) + '\nG90\nM114', user) #Vorschub an den seriellen Port senden
self._label_position[1].set_text(str(float(self._label_position[1].get_text()) + distance)) #Y-Distanz hochzählen
self._label_position[3].set_text(str(self._settings['PDV']['Fahrbare_Strecke'])) #Absolute Y-Distanz hochzählen
else:
if distance + float(self._label_position[3].get_text()) <= self._settings['PDV']['Fahrbare_Strecke']:
self._serial.sending('G91\nG0 Y' + str(distance) + '\nG90', user) #Vorschub an den seriellen Port
self._label_position[1].set_text(str(float(self._label_position[1].get_text()) + distance)) #Y-Distanz hochzählen
self._label_position[3].set_text(str(float(self._label_position[3].get_text()) + distance)) #Absolute Y-Distanz hochzählen
else:
self.tools.verbose(self._verbose, "Vorschubdistanz überschreitet die eingestellte fahrbare Stecke des Vorschubs", True)
self.gpioner.ButtonPressed(0, 1, 'MovementError', 3) #Lasse Bewegungs-Buttons auf Bedienpaneel 3x blinken
else:
self.tools.verbose(self._verbose, "Vorschub nicht möglich, " + errortext, True)
else:
self.tools.verbose(self._verbose, "keine absolute Position des Vorschubs vorhanden, bitte Maschine vorher 'homen'!", True)
else: #Bei automatischen Vorschub
if distance + self.feedsvalue + float(self._label_position[3].get_text()) <= self._settings['PDV']['Fahrbare_Strecke']:
if self.checkgcode('VORSCHUB'):
newgcode = self.checkvalue(self._gcode['VORSCHUB'], distance, 'value', False, 'VORSCHUB') #G-Code zurück zum Programmgenerator
if newgcode:
return newgcode #Angepassten G-Code zurück zum Programmgenerator
else:
self.__error = True #Teile Benutzer mit, das Probleme aufgetreten sind
else:
self.__error = True #Teile Benutzer mit, das Probleme aufgetreten sind
else:
self.tools.verbose(self._verbose, "Vorschubdistanz überschreitet die eingestellte fahrbare Stecke des Vorschubs")
self.__error = True #Teile Benutzer mit, das Probleme aufgetreten sind
self.feedsvalue += distance # Addiere Vorschubdistanz auf momentane Interpreter-Instanz
    def rueckfahrt (self, user, distance): # Work step - G-code generated for the return travel
if user:
if not 'N/V' in self._label_position[3].get_text():
if distance == 0: #Vorschub ganz zurück
distance = self._label_position[3].get_text() #Hole absolut zurück gelegte Y-Stecke und fahre danach zu '0'
self._serial.sending('G92 Y' + distance + '\nG0 Y0', user) #Rückfahrt an den seriellen Port senden
self._label_position[1].set_text('0') #Y-Distanz auf '0' setzen
self._label_position[3].set_text('0') #Absolute Y-Distanz auf '0' setzen
else:
if float(self._label_position[1].get_text()) == 0 and float(self._label_position[3].get_text()) > 0: #Ausgangspunkt von relativen Vorschub erreicht
if float(self._label_position[3].get_text()) >= distance: #Die definierte Distanz passt noch in den zurückzulegenden Weg
self._serial.sending('G92 Y' + str(distance) + '\nG0 Y0', user) #Rückfahrt an den seriellen Port senden
self._label_position[3].set_text(str(float(self._label_position[3].get_text()) - distance)) #Absolute Y-Distanz herunterzählen
elif float(self._label_position[3].get_text()) < distance: #Die definierte Distanz passt nicht mehr in den zurückzulegenden Weg, Reststrecke wird ermittelt und gesendet
self._serial.sending('G92 Y' + self._label_position[3].get_text() + '\nG0 Y0', user) #Rückfahrt an den seriellen Port senden
self._label_position[3].set_text('0') #Absolute Y-Distanz auf '0' setzen
self.gpioner.ButtonPressed(0, 1, 'MovementError', 2) #Lasse Bewegungs-Buttons auf Bedienpaneel 2x blinken um Anwender zu signalisieren das er nun mit dem Vorschub auf absolut '0' steht
elif float(self._label_position[1].get_text()) < distance: #Die relative Distanz passt nicht mehr in die zurückzulegende Strecke, Reststrecke wird ermittelt und gesendet
self._serial.sending('G91\nG0 Y-' + self._label_position[1].get_text() + '\nG90', user) #Rückfahrt an den seriellen Port senden
self._label_position[1].set_text('0') #Y-Distanz auf '0' setzen
self._label_position[3].set_text(str(float(self._label_position[3].get_text()) - float(self._label_position[1].get_text()))) #Absolute Y-Distanz herunterzählen
self.gpioner.ButtonPressed(0, 1, 'MovementError', 1) #Lasse Bewegungs-Buttons auf Bedienpaneel 1x blinken um Anwender zu signalisieren das er nun am Ausgangspunkt des Vorschubs ist
elif float(self._label_position[1].get_text()) >= distance:
self._serial.sending('G91\nG0 Y-' + str(distance) + '\nG90', user) #Rückfahrt an den seriellen Port senden
self._label_position[1].set_text(str(float(self._label_position[1].get_text()) - distance)) #Y-Distanz herunterzählen
self._label_position[3].set_text(str(float(self._label_position[3].get_text()) - distance)) #Absolute Y-Distanz herunterzählen
else:
self.tools.verbose(self._verbose, "keine absolute Position des Vorschubs vorhanden, bitte Maschine vorher 'homen'!", True)
else:
if self.checkgcode('RUECKFAHRT'):
newgcode = self.checkvalue(self._gcode['RUECKFAHRT'], distance, 'value', False, 'RUECKFAHRT') #G-Code zurück zum Programmgenerator
if newgcode:
return newgcode #Angepassten G-Code zurück zum Programmgenerator
else:
self.__error = True #Teile Benutzer mit, das Probleme aufgetreten sind
else:
self.__error = True #Teile Benutzer mit, das Probleme aufgetreten sind
    def freigeben (self, user): # Work step - G-code generated for releasing the material
if self.checkgcode('FREIGEBEN'):
newgcode = self.checkvalue(self._gcode['FREIGEBEN'], 0, 'value', False, 'FREIGEBEN')
if newgcode:
if user:
self._serial.sending(newgcode, user) #Wenn von Benutzer ausgelöst, direkt an die serielle Schnittstelle senden
else:
return newgcode #Angepassten G-Code zurück zum Programmgenerator
else:
self.__error = True #Teile Benutzer mit, das Probleme aufgetreten sind
else:
self.__error = True #Teile Benutzer mit, das Probleme aufgetreten sind
| gpl-3.0 | 6,914,094,647,843,214,000 | 76.344564 | 275 | 0.639231 | false |
okomarov/lsptf | setup.py | 1 | 3767 | # Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name = 'lsptf',
version = '0.1.0-alpha',
description = 'Long short investment portfolios',
long_description = long_description,
url='https://github.com/okomarov/lsptf',
author='Oleg Komarov',
author_email='oleg.komarov@hotmail.it',
license='BSD',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Financial and Insurance Industry',
'Intended Audience :: Science/Research',
'Intended Audience :: Education',
'Intended Audience :: End Users/Desktop',
'Topic :: Office/Business :: Financial :: Investment',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: BSD License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
#'Programming Language :: Python :: 2',
#'Programming Language :: Python :: 2.6',
#'Programming Language :: Python :: 2.7',
#'Programming Language :: Python :: 3',
#'Programming Language :: Python :: 3.2',
#'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
# What does your project relate to?
keywords='finance investment neutral long short backtest',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
#packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['numpy'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
#extras_require={
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
#},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
#package_data={
# 'sample': ['package_data.dat'],
#},
    # Although 'package_data' is the preferred approach, in some cases you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
#data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
#entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
#},
) | bsd-3-clause | 3,907,107,188,002,606,000 | 37.845361 | 94 | 0.648527 | false |
dougbenjamin/panda-harvester | pandaharvester/harvestercore/db_proxy.py | 1 | 241342 | """
database connection
"""
import os
import re
import sys
import copy
import random
import inspect
import time
import datetime
import threading
from future.utils import iteritems
from .command_spec import CommandSpec
from .job_spec import JobSpec
from .work_spec import WorkSpec
from .file_spec import FileSpec
from .event_spec import EventSpec
from .cache_spec import CacheSpec
from .seq_number_spec import SeqNumberSpec
from .panda_queue_spec import PandaQueueSpec
from .job_worker_relation_spec import JobWorkerRelationSpec
from .process_lock_spec import ProcessLockSpec
from .diag_spec import DiagSpec
from .service_metrics_spec import ServiceMetricSpec
from .queue_config_dump_spec import QueueConfigDumpSpec
from . import core_utils
from pandaharvester.harvesterconfig import harvester_config
# logger
_logger = core_utils.setup_logger('db_proxy')
# table names
commandTableName = 'command_table'
jobTableName = 'job_table'
workTableName = 'work_table'
fileTableName = 'file_table'
cacheTableName = 'cache_table'
eventTableName = 'event_table'
seqNumberTableName = 'seq_table'
pandaQueueTableName = 'pq_table'
jobWorkerTableName = 'jw_table'
processLockTableName = 'lock_table'
diagTableName = 'diag_table'
queueConfigDumpTableName = 'qcdump_table'
serviceMetricsTableName = 'sm_table'
# connection lock
conLock = threading.Lock()
# connection class
class DBProxy(object):
# constructor
def __init__(self, thr_name=None, read_only=False):
self.thrName = thr_name
self.verbLog = None
self.useInspect = False
if harvester_config.db.verbose:
self.verbLog = core_utils.make_logger(_logger, method_name='execute')
if self.thrName is None:
currentThr = threading.current_thread()
if currentThr is not None:
self.thrName = currentThr.ident
if hasattr(harvester_config.db, 'useInspect') and harvester_config.db.useInspect is True:
self.useInspect = True
if harvester_config.db.engine == 'mariadb':
if hasattr(harvester_config.db, 'host'):
host = harvester_config.db.host
else:
host = '127.0.0.1'
if hasattr(harvester_config.db, 'port'):
port = harvester_config.db.port
else:
port = 3306
if hasattr(harvester_config.db, 'useMySQLdb') and harvester_config.db.useMySQLdb is True:
import MySQLdb
import MySQLdb.cursors
class MyCursor (MySQLdb.cursors.Cursor):
def fetchone(self):
tmpRet = MySQLdb.cursors.Cursor.fetchone(self)
if tmpRet is None:
return None
tmpRet = core_utils.DictTupleHybrid(tmpRet)
tmpRet.set_attributes([d[0] for d in self.description])
return tmpRet
def fetchall(self):
tmpRets = MySQLdb.cursors.Cursor.fetchall(self)
if len(tmpRets) == 0:
return tmpRets
newTmpRets = []
attributes = [d[0] for d in self.description]
for tmpRet in tmpRets:
tmpRet = core_utils.DictTupleHybrid(tmpRet)
tmpRet.set_attributes(attributes)
newTmpRets.append(tmpRet)
return newTmpRets
self.con = MySQLdb.connect(user=harvester_config.db.user, passwd=harvester_config.db.password,
db=harvester_config.db.schema, host=host, port=port,
cursorclass=MyCursor)
self.cur = self.con.cursor()
else:
import mysql.connector
self.con = mysql.connector.connect(user=harvester_config.db.user, passwd=harvester_config.db.password,
db=harvester_config.db.schema, host=host, port=port)
self.cur = self.con.cursor(named_tuple=True, buffered=True)
else:
import sqlite3
if read_only:
fd = os.open(harvester_config.db.database_filename, os.O_RDONLY)
database_filename = '/dev/fd/{0}'.format(fd)
else:
database_filename = harvester_config.db.database_filename
self.con = sqlite3.connect(database_filename,
detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES,
check_same_thread=False)
core_utils.set_file_permission(harvester_config.db.database_filename)
# change the row factory to use Row
self.con.row_factory = sqlite3.Row
self.cur = self.con.cursor()
self.cur.execute('PRAGMA journal_mode')
resJ = self.cur.fetchone()
if resJ[0] != 'wal':
self.cur.execute('PRAGMA journal_mode = WAL')
# read to avoid database lock
self.cur.fetchone()
self.lockDB = False
# using application side lock if DB doesn't have a mechanism for exclusive access
if harvester_config.db.engine == 'mariadb':
self.usingAppLock = False
else:
self.usingAppLock = True
# exception handler for type of DBs
def _handle_exception(self, exc, retry_time=30):
tmpLog = core_utils.make_logger(_logger, 'thr={0}'.format(self.thrName), method_name='_handle_exception')
if harvester_config.db.engine == 'mariadb':
tmpLog.warning('exception of mysql {0} occurred'.format(exc.__class__.__name__))
            # case where we try to renew the connection
isOperationalError = False
if hasattr(harvester_config.db, 'useMySQLdb') and harvester_config.db.useMySQLdb is True:
import MySQLdb
if isinstance(exc, MySQLdb.OperationalError):
isOperationalError = True
else:
import mysql.connector
if isinstance(exc, mysql.connector.errors.OperationalError):
isOperationalError = True
if isOperationalError:
try_timestamp = time.time()
while time.time() - try_timestamp < retry_time:
try:
self.__init__()
tmpLog.info('renewed connection')
break
except Exception as e:
tmpLog.error('failed to renew connection; {0}'.format(e))
time.sleep(1)
# convert param dict to list
def convert_params(self, sql, varmap):
# lock database if application side lock is used
if self.usingAppLock and \
(re.search('^INSERT', sql, re.I) is not None
or re.search('^UPDATE', sql, re.I) is not None
or re.search(' FOR UPDATE', sql, re.I) is not None
or re.search('^DELETE', sql, re.I) is not None
):
self.lockDB = True
# remove FOR UPDATE for sqlite
if harvester_config.db.engine == 'sqlite':
sql = re.sub(' FOR UPDATE', ' ', sql, re.I)
sql = re.sub('INSERT IGNORE', 'INSERT OR IGNORE', sql, re.I)
else:
sql = re.sub('INSERT OR IGNORE', 'INSERT IGNORE', sql, re.I)
        # no conversion unless dict
if not isinstance(varmap, dict):
# using the printf style syntax for mariaDB
if harvester_config.db.engine == 'mariadb':
sql = re.sub(':[^ $,)]+', '%s', sql)
return sql, varmap
paramList = []
# extract placeholders
items = re.findall(':[^ $,)]+', sql)
for item in items:
if item not in varmap:
raise KeyError('{0} is missing in SQL parameters'.format(item))
if item not in paramList:
paramList.append(varmap[item])
# using the printf style syntax for mariaDB
if harvester_config.db.engine == 'mariadb':
sql = re.sub(':[^ $,)]+', '%s', sql)
return sql, paramList
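    # Illustrative sketch (hypothetical values): with the mariadb engine the call
    #   self.convert_params("SELECT 1 FROM tab WHERE id=:id", {':id': 123})
    # returns ("SELECT 1 FROM tab WHERE id=%s", [123]); with sqlite the named
    # placeholders are kept and the values are passed as a list in order of first
    # appearance.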
# wrapper for execute
def execute(self, sql, varmap=None):
sw = core_utils.get_stopwatch()
if varmap is None:
varmap = dict()
# get lock if application side lock is used
if self.usingAppLock and not self.lockDB:
if harvester_config.db.verbose:
self.verbLog.debug('thr={0} locking'.format(self.thrName))
conLock.acquire()
if harvester_config.db.verbose:
self.verbLog.debug('thr={0} locked'.format(self.thrName))
# execute
try:
# verbose
if harvester_config.db.verbose:
if not self.useInspect:
self.verbLog.debug('thr={2} sql={0} var={1}'.format(sql, str(varmap), self.thrName))
else:
self.verbLog.debug('thr={3} sql={0} var={1} exec={2}'.format(sql, str(varmap),
inspect.stack()[1][3],
self.thrName))
# convert param dict
newSQL, params = self.convert_params(sql, varmap)
# execute
try:
retVal = self.cur.execute(newSQL, params)
except Exception as e:
self._handle_exception(e)
if harvester_config.db.verbose:
self.verbLog.debug('thr={0} exception during execute'.format(self.thrName))
raise
finally:
# release lock
if self.usingAppLock and not self.lockDB:
if harvester_config.db.verbose:
self.verbLog.debug('thr={0} release'.format(self.thrName))
conLock.release()
# return
if harvester_config.db.verbose:
self.verbLog.debug('thr={0} {1} sql=[{2}]'.format(self.thrName, sw.get_elapsed_time(),
newSQL.replace('\n', ' ').strip()))
return retVal
# wrapper for executemany
def executemany(self, sql, varmap_list):
# get lock
if self.usingAppLock and not self.lockDB:
if harvester_config.db.verbose:
self.verbLog.debug('thr={0} locking'.format(self.thrName))
conLock.acquire()
if harvester_config.db.verbose:
self.verbLog.debug('thr={0} locked'.format(self.thrName))
try:
# verbose
if harvester_config.db.verbose:
if not self.useInspect:
self.verbLog.debug('thr={2} sql={0} var={1}'.format(sql, str(varmap_list), self.thrName))
else:
self.verbLog.debug('thr={3} sql={0} var={1} exec={2}'.format(sql, str(varmap_list),
inspect.stack()[1][3],
self.thrName))
# convert param dict
paramList = []
newSQL = sql
for varMap in varmap_list:
if varMap is None:
varMap = dict()
newSQL, params = self.convert_params(sql, varMap)
paramList.append(params)
# execute
try:
if harvester_config.db.engine == 'sqlite':
retVal = []
iList = 0
nList = 5000
while iList < len(paramList):
retVal += self.cur.executemany(newSQL, paramList[iList:iList+nList])
iList += nList
else:
retVal = self.cur.executemany(newSQL, paramList)
except Exception as e:
self._handle_exception(e)
if harvester_config.db.verbose:
self.verbLog.debug('thr={0} exception during executemany'.format(self.thrName))
raise
finally:
# release lock
if self.usingAppLock and not self.lockDB:
if harvester_config.db.verbose:
self.verbLog.debug('thr={0} release'.format(self.thrName))
conLock.release()
# return
return retVal
# commit
def commit(self):
try:
self.con.commit()
except Exception as e:
self._handle_exception(e)
if harvester_config.db.verbose:
self.verbLog.debug('thr={0} exception during commit'.format(self.thrName))
raise
if self.usingAppLock and self.lockDB:
if harvester_config.db.verbose:
self.verbLog.debug('thr={0} release with commit'.format(self.thrName))
conLock.release()
self.lockDB = False
# rollback
def rollback(self):
try:
self.con.rollback()
except Exception as e:
self._handle_exception(e)
if harvester_config.db.verbose:
self.verbLog.debug('thr={0} exception during rollback'.format(self.thrName))
finally:
if self.usingAppLock and self.lockDB:
if harvester_config.db.verbose:
self.verbLog.debug('thr={0} release with rollback'.format(self.thrName))
conLock.release()
self.lockDB = False
# type conversion
def type_conversion(self, attr_type):
# remove decorator
attr_type = attr_type.split('/')[0]
attr_type = attr_type.strip()
if attr_type == 'timestamp':
# add NULL attribute to disable automatic update
attr_type += ' null'
# type conversion
if harvester_config.db.engine == 'mariadb':
if attr_type.startswith('text'):
attr_type = attr_type.replace('text', 'varchar(256)')
elif attr_type.startswith('blob'):
attr_type = attr_type.replace('blob', 'longtext')
elif attr_type.startswith('integer'):
attr_type = attr_type.replace('integer', 'bigint')
attr_type = attr_type.replace('autoincrement', 'auto_increment')
elif harvester_config.db.engine == 'sqlite':
if attr_type.startswith('varchar'):
attr_type = re.sub('varchar\(\d+\)', 'text', attr_type)
attr_type = attr_type.replace('auto_increment', 'autoincrement')
return attr_type
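    # Examples of the mapping (sketch): with mariadb, 'text' becomes 'varchar(256)',
    # 'blob' becomes 'longtext', and 'integer autoincrement' becomes
    # 'bigint auto_increment'; 'timestamp' additionally gets a trailing 'null' to
    # disable automatic updates.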
# check if index is needed
def need_index(self, attr):
isIndex = False
isUnique = False
# look for separator
if '/' in attr:
decorators = attr.split('/')[-1].split()
if 'index' in decorators:
isIndex = True
if 'unique' in decorators:
isIndex = True
isUnique = True
return isIndex, isUnique
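    # For example (sketch): need_index('taskID:integer / index') returns (True, False)
    # and need_index('workerID:integer / unique') returns (True, True); attributes
    # without a '/' decorator return (False, False).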
# make table
def make_table(self, cls, table_name):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='make_table')
tmpLog.debug('table={0}'.format(table_name))
# check if table already exists
varMap = dict()
varMap[':name'] = table_name
if harvester_config.db.engine == 'mariadb':
varMap[':schema'] = harvester_config.db.schema
sqlC = 'SELECT * FROM information_schema.tables WHERE table_schema=:schema AND table_name=:name '
else:
varMap[':type'] = 'table'
sqlC = 'SELECT name FROM sqlite_master WHERE type=:type AND tbl_name=:name '
self.execute(sqlC, varMap)
resC = self.cur.fetchone()
indexes = []
uniques = set()
# not exists
if resC is None:
# sql to make table
sqlM = 'CREATE TABLE {0}('.format(table_name)
# collect columns
for attr in cls.attributesWithTypes:
# split to name and type
attrName, attrType = attr.split(':')
attrType = self.type_conversion(attrType)
# check if index is needed
isIndex, isUnique = self.need_index(attr)
if isIndex:
indexes.append(attrName)
if isUnique:
uniques.add(attrName)
sqlM += '{0} {1},'.format(attrName, attrType)
sqlM = sqlM[:-1]
sqlM += ')'
# make table
self.execute(sqlM)
# commit
self.commit()
tmpLog.debug('made {0}'.format(table_name))
else:
# check table
missingAttrs = self.check_table(cls, table_name, True)
if len(missingAttrs) > 0:
for attr in cls.attributesWithTypes:
# split to name and type
attrName, attrType = attr.split(':')
attrType = self.type_conversion(attrType)
                    # only missing
if attrName not in missingAttrs:
continue
# check if index is needed
isIndex, isUnique = self.need_index(attr)
if isIndex:
indexes.append(attrName)
if isUnique:
uniques.add(attrName)
# add column
sqlA = 'ALTER TABLE {0} ADD COLUMN '.format(table_name)
sqlA += '{0} {1}'.format(attrName, attrType)
try:
self.execute(sqlA)
# commit
self.commit()
tmpLog.debug('added {0} to {1}'.format(attr, table_name))
except Exception:
core_utils.dump_error_message(tmpLog)
# make indexes
for index in indexes:
indexName = 'idx_{0}_{1}'.format(index, table_name)
if index in uniques:
sqlI = "CREATE UNIQUE INDEX "
else:
sqlI = "CREATE INDEX "
sqlI += "{0} ON {1}({2}) ".format(indexName, table_name, index)
try:
self.execute(sqlI)
# commit
self.commit()
tmpLog.debug('added {0}'.format(indexName))
except Exception:
core_utils.dump_error_message(tmpLog)
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
return self.check_table(cls, table_name)
# make tables
def make_tables(self, queue_config_mapper):
outStrs = []
outStrs += self.make_table(CommandSpec, commandTableName)
outStrs += self.make_table(JobSpec, jobTableName)
outStrs += self.make_table(WorkSpec, workTableName)
outStrs += self.make_table(FileSpec, fileTableName)
outStrs += self.make_table(EventSpec, eventTableName)
outStrs += self.make_table(CacheSpec, cacheTableName)
outStrs += self.make_table(SeqNumberSpec, seqNumberTableName)
outStrs += self.make_table(PandaQueueSpec, pandaQueueTableName)
outStrs += self.make_table(JobWorkerRelationSpec, jobWorkerTableName)
outStrs += self.make_table(ProcessLockSpec, processLockTableName)
outStrs += self.make_table(DiagSpec, diagTableName)
outStrs += self.make_table(QueueConfigDumpSpec, queueConfigDumpTableName)
outStrs += self.make_table(ServiceMetricSpec, serviceMetricsTableName)
# dump error messages
if len(outStrs) > 0:
errMsg = "ERROR : Definitions of some database tables are incorrect. "
errMsg += "Please add missing columns, or drop those tables "
errMsg += "so that harvester automatically re-creates those tables."
errMsg += "\n"
print (errMsg)
for outStr in outStrs:
print (outStr)
sys.exit(1)
# add sequential numbers
self.add_seq_number('SEQ_workerID', 1)
self.add_seq_number('SEQ_configID', 1)
# fill PandaQueue table
queue_config_mapper.load_data()
# delete process locks
self.clean_process_locks()
# check table
def check_table(self, cls, table_name, get_missing=False):
# get columns in DB
varMap = dict()
if harvester_config.db.engine == 'mariadb':
varMap[':name'] = table_name
sqlC = 'SELECT column_name,column_type FROM information_schema.columns WHERE table_name=:name '
else:
sqlC = 'PRAGMA table_info({0}) '.format(table_name)
self.execute(sqlC, varMap)
resC = self.cur.fetchall()
colMap = dict()
for tmpItem in resC:
if harvester_config.db.engine == 'mariadb':
if hasattr(tmpItem, '_asdict'):
tmpItem = tmpItem._asdict()
columnName, columnType = tmpItem['column_name'], tmpItem['column_type']
else:
columnName, columnType = tmpItem[1], tmpItem[2]
colMap[columnName] = columnType
self.commit()
# check with class definition
outStrs = []
for attr in cls.attributesWithTypes:
attrName, attrType = attr.split(':')
if attrName not in colMap:
if get_missing:
outStrs.append(attrName)
else:
attrType = self.type_conversion(attrType)
outStrs.append('{0} {1} is missing in {2}'.format(attrName, attrType, table_name))
return outStrs
# insert jobs
def insert_jobs(self, jobspec_list):
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='insert_jobs')
tmpLog.debug('{0} jobs'.format(len(jobspec_list)))
try:
# sql to insert a job
sqlJ = "INSERT INTO {0} ({1}) ".format(jobTableName, JobSpec.column_names())
sqlJ += JobSpec.bind_values_expression()
# sql to insert a file
sqlF = "INSERT INTO {0} ({1}) ".format(fileTableName, FileSpec.column_names())
sqlF += FileSpec.bind_values_expression()
# sql to delete job
sqlDJ = "DELETE FROM {0} ".format(jobTableName)
sqlDJ += "WHERE PandaID=:PandaID "
# sql to delete files
sqlDF = "DELETE FROM {0} ".format(fileTableName)
sqlDF += "WHERE PandaID=:PandaID "
# sql to delete events
sqlDE = "DELETE FROM {0} ".format(eventTableName)
sqlDE += "WHERE PandaID=:PandaID "
# sql to delete relations
sqlDR = "DELETE FROM {0} ".format(jobWorkerTableName)
sqlDR += "WHERE PandaID=:PandaID "
# loop over all jobs
varMapsJ = []
varMapsF = []
for jobSpec in jobspec_list:
# delete job just in case
varMap = dict()
varMap[':PandaID'] = jobSpec.PandaID
self.execute(sqlDJ, varMap)
iDel = self.cur.rowcount
if iDel > 0:
# delete files
self.execute(sqlDF, varMap)
# delete events
self.execute(sqlDE, varMap)
# delete relations
self.execute(sqlDR, varMap)
# commit
self.commit()
# insert job and files
varMap = jobSpec.values_list()
varMapsJ.append(varMap)
for fileSpec in jobSpec.inFiles:
varMap = fileSpec.values_list()
varMapsF.append(varMap)
# insert
self.executemany(sqlJ, varMapsJ)
self.executemany(sqlF, varMapsF)
# commit
self.commit()
# return
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(tmpLog)
# return
return False
# get job
def get_job(self, panda_id):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'PandaID={0}'.format(panda_id), method_name='get_job')
tmpLog.debug('start')
# sql to get job
sql = "SELECT {0} FROM {1} ".format(JobSpec.column_names(), jobTableName)
sql += "WHERE PandaID=:pandaID "
# get job
varMap = dict()
varMap[':pandaID'] = panda_id
self.execute(sql, varMap)
resJ = self.cur.fetchone()
if resJ is None:
jobSpec = None
else:
# make job
jobSpec = JobSpec()
jobSpec.pack(resJ)
# get files
sqlF = "SELECT {0} FROM {1} ".format(FileSpec.column_names(), fileTableName)
sqlF += "WHERE PandaID=:PandaID "
varMap = dict()
varMap[':PandaID'] = panda_id
self.execute(sqlF, varMap)
resFileList = self.cur.fetchall()
for resFile in resFileList:
fileSpec = FileSpec()
fileSpec.pack(resFile)
jobSpec.add_file(fileSpec)
# commit
self.commit()
tmpLog.debug('done')
# return
return jobSpec
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return None
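    # Hypothetical usage sketch (names assumed):
    #   proxy = DBProxy()
    #   jobSpec = proxy.get_job(1234567)  # JobSpec with its input FileSpecs attached, or None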
# get all jobs (fetch entire jobTable)
def get_jobs(self):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='get_jobs')
tmpLog.debug('start')
# sql to get job
sql = "SELECT {0} FROM {1} ".format(JobSpec.column_names(), jobTableName)
sql += "WHERE PandaID IS NOT NULL"
# get jobs
varMap = None
self.execute(sql, varMap)
resJobs = self.cur.fetchall()
if resJobs is None:
return None
jobSpecList=[]
# make jobs list
for resJ in resJobs:
jobSpec = JobSpec()
jobSpec.pack(resJ)
jobSpecList.append(jobSpec)
tmpLog.debug('done')
# return
return jobSpecList
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return None
# update job
def update_job(self, jobspec, criteria=None, update_in_file=False):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'PandaID={0} subStatus={1}'.format(jobspec.PandaID,
jobspec.subStatus),
method_name='update_job')
tmpLog.debug('start')
if criteria is None:
criteria = {}
# sql to update job
sql = "UPDATE {0} SET {1} ".format(jobTableName, jobspec.bind_update_changes_expression())
sql += "WHERE PandaID=:PandaID "
# update job
varMap = jobspec.values_map(only_changed=True)
for tmpKey, tmpVal in iteritems(criteria):
mapKey = ':{0}_cr'.format(tmpKey)
sql += "AND {0}={1} ".format(tmpKey, mapKey)
varMap[mapKey] = tmpVal
varMap[':PandaID'] = jobspec.PandaID
self.execute(sql, varMap)
nRow = self.cur.rowcount
if nRow > 0:
# update events
for eventSpec in jobspec.events:
varMap = eventSpec.values_map(only_changed=True)
if varMap != {}:
sqlE = "UPDATE {0} SET {1} ".format(eventTableName, eventSpec.bind_update_changes_expression())
sqlE += "WHERE eventRangeID=:eventRangeID "
varMap[':eventRangeID'] = eventSpec.eventRangeID
self.execute(sqlE, varMap)
# update input file
if update_in_file:
for fileSpec in jobspec.inFiles:
varMap = fileSpec.values_map(only_changed=True)
if varMap != {}:
sqlF = "UPDATE {0} SET {1} ".format(fileTableName,
fileSpec.bind_update_changes_expression())
sqlF += "WHERE fileID=:fileID "
varMap[':fileID'] = fileSpec.fileID
self.execute(sqlF, varMap)
else:
# set file status to done if jobs are done
if jobspec.is_final_status():
varMap = dict()
varMap[':PandaID'] = jobspec.PandaID
varMap[':type'] = 'input'
varMap[':status'] = 'done'
sqlF = "UPDATE {0} SET status=:status ".format(fileTableName)
sqlF += "WHERE PandaID=:PandaID AND fileType=:type "
self.execute(sqlF, varMap)
# set to_delete flag
if jobspec.subStatus == 'done':
sqlD = "UPDATE {0} SET todelete=:to_delete ".format(fileTableName)
sqlD += "WHERE PandaID=:PandaID "
varMap = dict()
varMap[':PandaID'] = jobspec.PandaID
varMap[':to_delete'] = 1
self.execute(sqlD, varMap)
# commit
self.commit()
tmpLog.debug('done with {0}'.format(nRow))
# return
return nRow
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return None
# insert output files into database
def insert_files(self,jobspec_list):
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='insert_files')
tmpLog.debug('{0} jobs'.format(len(jobspec_list)))
try:
# sql to insert a file
sqlF = "INSERT INTO {0} ({1}) ".format(fileTableName, FileSpec.column_names())
sqlF += FileSpec.bind_values_expression()
# loop over all jobs
varMapsF = []
for jobSpec in jobspec_list:
for fileSpec in jobSpec.outFiles:
varMap = fileSpec.values_list()
varMapsF.append(varMap)
# insert
self.executemany(sqlF, varMapsF)
# commit
self.commit()
# return
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(tmpLog)
# return
return False
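    # Usage sketch (assumed caller behaviour): a caller appends FileSpec objects to
    # jobSpec.outFiles and then calls insert_files([jobSpec]); only the outFiles of
    # each job are inserted here.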
# update worker
def update_worker(self, workspec, criteria=None):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'workerID={0}'.format(workspec.workerID),
method_name='update_worker')
tmpLog.debug('start')
if criteria is None:
criteria = {}
# sql to update job
sql = "UPDATE {0} SET {1} ".format(workTableName, workspec.bind_update_changes_expression())
sql += "WHERE workerID=:workerID "
# update worker
varMap = workspec.values_map(only_changed=True)
if len(varMap) > 0:
for tmpKey, tmpVal in iteritems(criteria):
mapKey = ':{0}_cr'.format(tmpKey)
sql += "AND {0}={1} ".format(tmpKey, mapKey)
varMap[mapKey] = tmpVal
varMap[':workerID'] = workspec.workerID
self.execute(sql, varMap)
nRow = self.cur.rowcount
# commit
self.commit()
tmpLog.debug('done with {0}'.format(nRow))
else:
nRow = None
tmpLog.debug('skip since no updated attributes')
# return
return nRow
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return None
# fill panda queue table
def fill_panda_queue_table(self, panda_queue_list, queue_config_mapper):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='fill_panda_queue_table')
tmpLog.debug('start')
# get existing queues
sqlE = "SELECT queueName FROM {0} ".format(pandaQueueTableName)
varMap = dict()
self.execute(sqlE, varMap)
resE = self.cur.fetchall()
for queueName, in resE:
# delete if not listed in cfg
if queueName not in panda_queue_list:
sqlD = "DELETE FROM {0} ".format(pandaQueueTableName)
sqlD += "WHERE queueName=:queueName "
varMap = dict()
varMap[':queueName'] = queueName
self.execute(sqlD, varMap)
# commit
self.commit()
# loop over queues
for queueName in panda_queue_list:
queueConfig = queue_config_mapper.get_queue(queueName)
if queueConfig is not None:
# check if already exist
sqlC = "SELECT * FROM {0} ".format(pandaQueueTableName)
sqlC += "WHERE queueName=:queueName "
varMap = dict()
varMap[':queueName'] = queueName
self.execute(sqlC, varMap)
resC = self.cur.fetchone()
if resC is not None:
# update limits just in case
varMap = dict()
sqlU = "UPDATE {0} SET ".format(pandaQueueTableName)
for qAttr in ['nQueueLimitJob', 'nQueueLimitWorker', 'maxWorkers',
'nQueueLimitJobRatio', 'nQueueLimitJobMax', 'nQueueLimitJobMin',
'nQueueLimitWorkerRatio', 'nQueueLimitWorkerMax', 'nQueueLimitWorkerMin']:
if hasattr(queueConfig, qAttr):
sqlU += '{0}=:{0},'.format(qAttr)
varMap[':{0}'.format(qAttr)] = getattr(queueConfig, qAttr)
if len(varMap) == 0:
continue
sqlU = sqlU[:-1]
sqlU += " WHERE queueName=:queueName "
varMap[':queueName'] = queueName
self.execute(sqlU, varMap)
else:
# insert queue
varMap = dict()
varMap[':queueName'] = queueName
attrName_list = []
tmpKey_list = []
for attrName in PandaQueueSpec.column_names().split(','):
if hasattr(queueConfig, attrName):
tmpKey = ':{0}'.format(attrName)
attrName_list.append(attrName)
tmpKey_list.append(tmpKey)
varMap[tmpKey] = getattr(queueConfig, attrName)
sqlP = "INSERT IGNORE INTO {0} ({1}) ".format(pandaQueueTableName, ','.join(attrName_list))
sqlS = "VALUES ({0}) ".format(','.join(tmpKey_list))
self.execute(sqlP + sqlS, varMap)
# commit
self.commit()
tmpLog.debug('done')
# return
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return False
# get number of jobs to fetch
def get_num_jobs_to_fetch(self, n_queues, interval):
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='get_num_jobs_to_fetch')
try:
tmpLog.debug('start')
retMap = {}
# sql to get queues
sqlQ = "SELECT queueName,nQueueLimitJob,nQueueLimitJobRatio,nQueueLimitJobMax,nQueueLimitJobMin "
sqlQ += "FROM {0} ".format(pandaQueueTableName)
sqlQ += "WHERE jobFetchTime IS NULL OR jobFetchTime<:timeLimit "
sqlQ += "ORDER BY jobFetchTime "
# sql to count nQueue
sqlN = "SELECT COUNT(*) cnt,status FROM {0} ".format(jobTableName)
sqlN += "WHERE computingSite=:computingSite AND status IN (:status1,:status2) "
sqlN += "GROUP BY status "
# sql to update timestamp
sqlU = "UPDATE {0} SET jobFetchTime=:jobFetchTime ".format(pandaQueueTableName)
sqlU += "WHERE queueName=:queueName "
sqlU += "AND (jobFetchTime IS NULL OR jobFetchTime<:timeLimit) "
# get queues
timeNow = datetime.datetime.utcnow()
varMap = dict()
varMap[':timeLimit'] = timeNow - datetime.timedelta(seconds=interval)
self.execute(sqlQ, varMap)
resQ = self.cur.fetchall()
iQueues = 0
for queueName, nQueueLimitJob, nQueueLimitJobRatio, \
nQueueLimitJobMax, nQueueLimitJobMin in resQ:
# update timestamp to lock the queue
varMap = dict()
varMap[':queueName'] = queueName
varMap[':jobFetchTime'] = timeNow
varMap[':timeLimit'] = timeNow - datetime.timedelta(seconds=interval)
self.execute(sqlU, varMap)
nRow = self.cur.rowcount
# commit
self.commit()
# skip if not locked
if nRow == 0:
continue
# count nQueue
varMap = dict()
varMap[':computingSite'] = queueName
varMap[':status1'] = 'starting'
varMap[':status2'] = 'running'
self.execute(sqlN, varMap)
resN = self.cur.fetchall()
nsMap = dict()
for tmpN, tmpStatus in resN:
nsMap[tmpStatus] = tmpN
# get num of queued jobs
try:
nQueue = nsMap['starting']
except Exception:
nQueue = 0
# dynamic nQueueLimitJob
if nQueueLimitJobRatio is not None and nQueueLimitJobRatio > 0:
try:
nRunning = nsMap['running']
except Exception:
nRunning = 0
nQueueLimitJob = int(nRunning * nQueueLimitJobRatio / 100)
if nQueueLimitJobMin is None:
nQueueLimitJobMin = 1
nQueueLimitJob = max(nQueueLimitJob, nQueueLimitJobMin)
if nQueueLimitJobMax is not None:
nQueueLimitJob = min(nQueueLimitJob, nQueueLimitJobMax)
# more jobs need to be queued
if nQueueLimitJob is not None and nQueue < nQueueLimitJob:
retMap[queueName] = nQueueLimitJob - nQueue
# enough queues
iQueues += 1
if iQueues >= n_queues:
break
tmpLog.debug('got {0}'.format(str(retMap)))
return retMap
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(tmpLog)
# return
return {}
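    # The returned mapping is queueName -> number of additional jobs to fetch,
    # e.g. (illustrative values) {'SITE_A': 5, 'SITE_B': 2}; queues that already
    # have enough queued jobs are omitted.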
# get jobs to propagate checkpoints
def get_jobs_to_propagate(self, max_jobs, lock_interval, update_interval, locked_by):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'thr={0}'.format(locked_by),
method_name='get_jobs_to_propagate')
tmpLog.debug('start')
# sql to get jobs
sql = "SELECT PandaID FROM {0} ".format(jobTableName)
sql += "WHERE propagatorTime IS NOT NULL "
sql += "AND ((propagatorTime<:lockTimeLimit AND propagatorLock IS NOT NULL) "
sql += "OR (propagatorTime<:updateTimeLimit AND propagatorLock IS NULL)) "
sql += "ORDER BY propagatorTime LIMIT {0} ".format(max_jobs)
# sql to get jobs
sqlJ = "SELECT {0} FROM {1} ".format(JobSpec.column_names(), jobTableName)
sqlJ += "WHERE PandaID=:PandaID "
# sql to lock job
sqlL = "UPDATE {0} SET propagatorTime=:timeNow,propagatorLock=:lockedBy ".format(jobTableName)
sqlL += "WHERE PandaID=:PandaID "
sqlL += "AND ((propagatorTime<:lockTimeLimit AND propagatorLock IS NOT NULL) "
sqlL += "OR (propagatorTime<:updateTimeLimit AND propagatorLock IS NULL)) "
# sql to get events
sqlE = "SELECT {0} FROM {1} ".format(EventSpec.column_names(), eventTableName)
sqlE += "WHERE PandaID=:PandaID AND subStatus IN (:statusFinished,:statusFailed) "
# sql to get file
sqlF = "SELECT DISTINCT {0} FROM {1} f, {2} e, {1} f2 ".format(FileSpec.column_names('f2'),
fileTableName,
eventTableName)
sqlF += "WHERE e.PandaID=:PandaID AND e.fileID=f.fileID "
sqlF += "AND e.subStatus IN (:statusFinished,:statusFailed) "
sqlF += "AND f2.fileID=f.zipFileID "
# sql to get fileID of zip
sqlZ = "SELECT e.fileID,f.zipFileID FROM {0} f, {1} e ".format(fileTableName, eventTableName)
sqlZ += "WHERE e.PandaID=:PandaID AND e.fileID=f.fileID "
sqlZ += "AND e.subStatus IN (:statusFinished,:statusFailed) "
# get jobs
timeNow = datetime.datetime.utcnow()
lockTimeLimit = timeNow - datetime.timedelta(seconds=lock_interval)
updateTimeLimit = timeNow - datetime.timedelta(seconds=update_interval)
varMap = dict()
varMap[':lockTimeLimit'] = lockTimeLimit
varMap[':updateTimeLimit'] = updateTimeLimit
self.execute(sql, varMap)
resList = self.cur.fetchall()
pandaIDs = []
for pandaID, in resList:
pandaIDs.append(pandaID)
# partially randomise to increase success rate for lock
nJobs = int(max_jobs * 0.2)
subPandaIDs = list(pandaIDs[nJobs:])
random.shuffle(subPandaIDs)
pandaIDs = pandaIDs[:nJobs] + subPandaIDs
pandaIDs = pandaIDs[:max_jobs]
jobSpecList = []
iEvents = 0
for pandaID in pandaIDs:
# avoid a bulk update for many jobs with too many events
if iEvents > 10000:
break
# lock job
varMap = dict()
varMap[':PandaID'] = pandaID
varMap[':timeNow'] = timeNow
varMap[':lockedBy'] = locked_by
varMap[':lockTimeLimit'] = lockTimeLimit
varMap[':updateTimeLimit'] = updateTimeLimit
self.execute(sqlL, varMap)
nRow = self.cur.rowcount
# commit
self.commit()
if nRow > 0:
# read job
varMap = dict()
varMap[':PandaID'] = pandaID
self.execute(sqlJ, varMap)
res = self.cur.fetchone()
# make job
jobSpec = JobSpec()
jobSpec.pack(res)
jobSpec.propagatorLock = locked_by
zipFiles = {}
zipIdMap = dict()
# get zipIDs
varMap = dict()
varMap[':PandaID'] = jobSpec.PandaID
varMap[':statusFinished'] = 'finished'
varMap[':statusFailed'] = 'failed'
self.execute(sqlZ, varMap)
resZ = self.cur.fetchall()
for tmpFileID, tmpZipFileID in resZ:
zipIdMap[tmpFileID] = tmpZipFileID
# get zip files
varMap = dict()
varMap[':PandaID'] = jobSpec.PandaID
varMap[':statusFinished'] = 'finished'
varMap[':statusFailed'] = 'failed'
self.execute(sqlF, varMap)
resFs = self.cur.fetchall()
for resF in resFs:
fileSpec = FileSpec()
fileSpec.pack(resF)
zipFiles[fileSpec.fileID] = fileSpec
# read events
varMap = dict()
varMap[':PandaID'] = jobSpec.PandaID
varMap[':statusFinished'] = 'finished'
varMap[':statusFailed'] = 'failed'
self.execute(sqlE, varMap)
resEs = self.cur.fetchall()
for resE in resEs:
eventSpec = EventSpec()
eventSpec.pack(resE)
zipFileSpec = None
# get associated zip file if any
if eventSpec.fileID is not None:
if eventSpec.fileID not in zipIdMap:
continue
zipFileID = zipIdMap[eventSpec.fileID]
if zipFileID is not None:
zipFileSpec = zipFiles[zipFileID]
jobSpec.add_event(eventSpec, zipFileSpec)
iEvents += 1
jobSpecList.append(jobSpec)
tmpLog.debug('got {0} jobs'.format(len(jobSpecList)))
return jobSpecList
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return []
# get jobs in sub status
def get_jobs_in_sub_status(self, sub_status, max_jobs, time_column=None, lock_column=None,
interval_without_lock=None, interval_with_lock=None,
locked_by=None, new_sub_status=None):
try:
# get logger
if locked_by is None:
msgPfx = None
else:
msgPfx = 'id={0}'.format(locked_by)
tmpLog = core_utils.make_logger(_logger, msgPfx, method_name='get_jobs_in_sub_status')
tmpLog.debug('start subStatus={0} timeColumn={1}'.format(sub_status, time_column))
timeNow = datetime.datetime.utcnow()
# sql to count jobs being processed
sqlC = "SELECT COUNT(*) cnt FROM {0} ".format(jobTableName)
sqlC += "WHERE ({0} IS NOT NULL AND subStatus=:subStatus ".format(lock_column)
if time_column is not None and interval_with_lock is not None:
sqlC += "AND ({0} IS NOT NULL AND {0}>:lockTimeLimit) ".format(time_column)
sqlC += ") OR subStatus=:newSubStatus "
# count jobs
if max_jobs > 0 and new_sub_status is not None:
varMap = dict()
varMap[':subStatus'] = sub_status
varMap[':newSubStatus'] = new_sub_status
if time_column is not None and interval_with_lock is not None:
varMap[':lockTimeLimit'] = timeNow - datetime.timedelta(seconds=interval_with_lock)
self.execute(sqlC, varMap)
nProcessing, = self.cur.fetchone()
if nProcessing >= max_jobs:
# commit
self.commit()
tmpLog.debug('enough jobs {0} are being processed in {1} state'.format(nProcessing,
new_sub_status))
return []
max_jobs -= nProcessing
# sql to get job IDs
sql = "SELECT PandaID FROM {0} ".format(jobTableName)
sql += "WHERE subStatus=:subStatus "
if time_column is not None:
sql += "AND ({0} IS NULL ".format(time_column)
if interval_with_lock is not None:
sql += "OR ({0}<:lockTimeLimit AND {1} IS NOT NULL) ".format(time_column, lock_column)
if interval_without_lock is not None:
sql += "OR ({0}<:updateTimeLimit AND {1} IS NULL) ".format(time_column, lock_column)
sql += ') '
sql += "ORDER BY {0} ".format(time_column)
# sql to lock job
sqlL = "UPDATE {0} SET {1}=:timeNow,{2}=:lockedBy ".format(jobTableName, time_column, lock_column)
sqlL += "WHERE PandaID=:PandaID AND subStatus=:subStatus "
if time_column is not None:
sqlL += "AND ({0} IS NULL ".format(time_column)
if interval_with_lock is not None:
sqlL += "OR ({0}<:lockTimeLimit AND {1} IS NOT NULL) ".format(time_column, lock_column)
if interval_without_lock is not None:
sqlL += "OR ({0}<:updateTimeLimit AND {1} IS NULL) ".format(time_column, lock_column)
sqlL += ') '
# sql to get jobs
sqlGJ = "SELECT {0} FROM {1} ".format(JobSpec.column_names(), jobTableName)
sqlGJ += "WHERE PandaID=:PandaID "
# sql to get file
sqlGF = "SELECT {0} FROM {1} ".format(FileSpec.column_names(), fileTableName)
sqlGF += "WHERE PandaID=:PandaID AND fileType=:type "
# get jobs
varMap = dict()
varMap[':subStatus'] = sub_status
if interval_with_lock is not None:
varMap[':lockTimeLimit'] = timeNow - datetime.timedelta(seconds=interval_with_lock)
if interval_without_lock is not None:
varMap[':updateTimeLimit'] = timeNow - datetime.timedelta(seconds=interval_without_lock)
self.execute(sql, varMap)
resList = self.cur.fetchall()
pandaIDs = []
for pandaID, in resList:
pandaIDs.append(pandaID)
# partially randomise to increase success rate for lock
nJobs = int(max_jobs * 0.2)
subPandaIDs = list(pandaIDs[nJobs:])
random.shuffle(subPandaIDs)
pandaIDs = pandaIDs[:nJobs] + subPandaIDs
pandaIDs = pandaIDs[:max_jobs]
jobSpecList = []
for pandaID in pandaIDs:
# lock job
if locked_by is not None:
varMap = dict()
varMap[':PandaID'] = pandaID
varMap[':timeNow'] = timeNow
varMap[':lockedBy'] = locked_by
varMap[':subStatus'] = sub_status
if interval_with_lock is not None:
varMap[':lockTimeLimit'] = timeNow - datetime.timedelta(seconds=interval_with_lock)
if interval_without_lock is not None:
varMap[':updateTimeLimit'] = timeNow - datetime.timedelta(seconds=interval_without_lock)
self.execute(sqlL, varMap)
nRow = self.cur.rowcount
# commit
self.commit()
else:
nRow = 1
if nRow > 0:
# get job
varMap = dict()
varMap[':PandaID'] = pandaID
self.execute(sqlGJ, varMap)
resGJ = self.cur.fetchone()
# make job
jobSpec = JobSpec()
jobSpec.pack(resGJ)
if locked_by is not None:
jobSpec.lockedBy = locked_by
setattr(jobSpec, time_column, timeNow)
# get files
varMap = dict()
varMap[':PandaID'] = jobSpec.PandaID
varMap[':type'] = 'input'
self.execute(sqlGF, varMap)
resGF = self.cur.fetchall()
for resFile in resGF:
fileSpec = FileSpec()
fileSpec.pack(resFile)
jobSpec.add_in_file(fileSpec)
# append
jobSpecList.append(jobSpec)
tmpLog.debug('got {0} jobs'.format(len(jobSpecList)))
return jobSpecList
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return []
# register a worker
def register_worker(self, workspec, jobspec_list, locked_by):
tmpLog = core_utils.make_logger(_logger, 'batchID={0}'.format(workspec.batchID),
method_name='register_worker')
try:
tmpLog.debug('start')
# sql to check if exists
sqlE = "SELECT 1 c FROM {0} WHERE workerID=:workerID ".format(workTableName)
# sql to insert job and worker relationship
sqlR = "INSERT INTO {0} ({1}) ".format(jobWorkerTableName, JobWorkerRelationSpec.column_names())
sqlR += JobWorkerRelationSpec.bind_values_expression()
# sql to get number of workers
sqlNW = "SELECT DISTINCT t.workerID FROM {0} t, {1} w ".format(jobWorkerTableName, workTableName)
sqlNW += "WHERE t.PandaID=:pandaID AND w.workerID=t.workerID "
sqlNW += "AND w.status IN (:st_submitted,:st_running,:st_idle) "
# sql to decrement nNewWorkers
sqlDN = "UPDATE {0} ".format(pandaQueueTableName)
sqlDN += "SET nNewWorkers=nNewWorkers-1 "
sqlDN += "WHERE queueName=:queueName AND nNewWorkers IS NOT NULL AND nNewWorkers>0 "
# insert worker if new
isNew = False
if workspec.isNew:
varMap = dict()
varMap[':workerID'] = workspec.workerID
self.execute(sqlE, varMap)
resE = self.cur.fetchone()
if resE is None:
isNew = True
if isNew:
# insert a worker
sqlI = "INSERT INTO {0} ({1}) ".format(workTableName, WorkSpec.column_names())
sqlI += WorkSpec.bind_values_expression()
varMap = workspec.values_list()
self.execute(sqlI, varMap)
# decrement nNewWorkers
varMap = dict()
varMap[':queueName'] = workspec.computingSite
self.execute(sqlDN, varMap)
else:
                # do not update workerID
workspec.force_not_update('workerID')
# update a worker
sqlU = "UPDATE {0} SET {1} ".format(workTableName, workspec.bind_update_changes_expression())
sqlU += "WHERE workerID=:workerID "
varMap = workspec.values_map(only_changed=True)
varMap[':workerID'] = workspec.workerID
self.execute(sqlU, varMap)
# collect values to update jobs or insert job/worker mapping
varMapsR = []
if jobspec_list is not None:
for jobSpec in jobspec_list:
# get number of workers for the job
varMap = dict()
varMap[':pandaID'] = jobSpec.PandaID
varMap[':st_submitted'] = WorkSpec.ST_submitted
varMap[':st_running'] = WorkSpec.ST_running
varMap[':st_idle'] = WorkSpec.ST_idle
self.execute(sqlNW, varMap)
resNW = self.cur.fetchall()
workerIDs = set()
workerIDs.add(workspec.workerID)
for tmpWorkerID, in resNW:
workerIDs.add(tmpWorkerID)
# update attributes
if jobSpec.subStatus in ['submitted', 'running']:
jobSpec.nWorkers = len(workerIDs)
try:
jobSpec.nWorkersInTotal += 1
except Exception:
jobSpec.nWorkersInTotal = jobSpec.nWorkers
elif workspec.hasJob == 1:
if workspec.status == WorkSpec.ST_missed:
                        # do not update if other workers are active
if len(workerIDs) > 1:
continue
core_utils.update_job_attributes_with_workers(workspec.mapType, [jobSpec],
[workspec], {}, {})
jobSpec.trigger_propagation()
else:
jobSpec.subStatus = 'submitted'
jobSpec.nWorkers = len(workerIDs)
try:
jobSpec.nWorkersInTotal += 1
except Exception:
jobSpec.nWorkersInTotal = jobSpec.nWorkers
else:
if workspec.status == WorkSpec.ST_missed:
                        # do not update if other workers are active
if len(workerIDs) > 1:
continue
core_utils.update_job_attributes_with_workers(workspec.mapType, [jobSpec],
[workspec], {}, {})
jobSpec.trigger_propagation()
else:
jobSpec.subStatus = 'queued'
# sql to update job
if len(jobSpec.values_map(only_changed=True)) > 0:
sqlJ = "UPDATE {0} SET {1} ".format(jobTableName, jobSpec.bind_update_changes_expression())
sqlJ += "WHERE PandaID=:cr_PandaID AND lockedBy=:cr_lockedBy "
# update job
varMap = jobSpec.values_map(only_changed=True)
varMap[':cr_PandaID'] = jobSpec.PandaID
varMap[':cr_lockedBy'] = locked_by
self.execute(sqlJ, varMap)
if jobSpec.subStatus in ['submitted', 'running']:
# values for job/worker mapping
jwRelation = JobWorkerRelationSpec()
jwRelation.PandaID = jobSpec.PandaID
jwRelation.workerID = workspec.workerID
varMap = jwRelation.values_list()
varMapsR.append(varMap)
# insert job/worker mapping
if len(varMapsR) > 0:
self.executemany(sqlR, varMapsR)
# commit
self.commit()
# return
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(tmpLog)
# return
return False
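    # Summary (sketch): register_worker inserts or updates the worker row, adjusts
    # nWorkers/subStatus of the associated jobs, and records job-worker relations in
    # jw_table for jobs that end up submitted or running.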
# insert workers
def insert_workers(self, workspec_list, locked_by):
tmpLog = core_utils.make_logger(_logger, 'locked_by={0}'.format(locked_by),
method_name='insert_workers')
try:
tmpLog.debug('start')
timeNow = datetime.datetime.utcnow()
# sql to insert a worker
sqlI = "INSERT INTO {0} ({1}) ".format(workTableName, WorkSpec.column_names())
sqlI += WorkSpec.bind_values_expression()
for workSpec in workspec_list:
tmpWorkSpec = copy.copy(workSpec)
# insert worker if new
if not tmpWorkSpec.isNew:
continue
tmpWorkSpec.modificationTime = timeNow
tmpWorkSpec.status = WorkSpec.ST_pending
varMap = tmpWorkSpec.values_list()
self.execute(sqlI, varMap)
# commit
self.commit()
# return
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(tmpLog)
# return
return False
# get queues to submit workers
def get_queues_to_submit(self, n_queues, lookup_interval, lock_interval, locked_by, queue_lock_interval):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='get_queues_to_submit')
tmpLog.debug('start')
retMap = dict()
siteName = None
resourceMap = dict()
# sql to get a site
sqlS = "SELECT siteName FROM {0} ".format(pandaQueueTableName)
sqlS += "WHERE submitTime IS NULL "
sqlS += "OR (submitTime<:lockTimeLimit AND lockedBy IS NOT NULL) "
sqlS += "OR (submitTime<:lookupTimeLimit AND lockedBy IS NULL) "
sqlS += "ORDER BY submitTime "
# sql to get queues
sqlQ = "SELECT queueName,resourceType,nNewWorkers FROM {0} ".format(pandaQueueTableName)
sqlQ += "WHERE siteName=:siteName "
# sql to get orphaned workers
sqlO = "SELECT workerID FROM {0} ".format(workTableName)
sqlO += "WHERE computingSite=:computingSite "
sqlO += "AND status=:status AND modificationTime<:timeLimit "
            # sql to delete orphaned workers. Do not use bulk delete, to avoid deadlocks with 0-record deletions
sqlD = "DELETE FROM {0} ".format(workTableName)
sqlD += "WHERE workerID=:workerID "
# sql to count nQueue
sqlN = "SELECT status,COUNT(*) cnt FROM {0} ".format(workTableName)
sqlN += "WHERE computingSite=:computingSite "
# sql to count re-fillers
sqlR = "SELECT COUNT(*) cnt FROM {0} ".format(workTableName)
sqlR += "WHERE computingSite=:computingSite AND status=:status "
sqlR += "AND nJobsToReFill IS NOT NULL AND nJobsToReFill>0 "
# sql to update timestamp and lock site
sqlU = "UPDATE {0} SET submitTime=:submitTime,lockedBy=:lockedBy ".format(pandaQueueTableName)
sqlU += "WHERE siteName=:siteName "
sqlU += "AND (submitTime IS NULL OR submitTime<:timeLimit) "
# get sites
timeNow = datetime.datetime.utcnow()
varMap = dict()
varMap[':lockTimeLimit'] = timeNow - datetime.timedelta(seconds=queue_lock_interval)
varMap[':lookupTimeLimit'] = timeNow - datetime.timedelta(seconds=lookup_interval)
self.execute(sqlS, varMap)
resS = self.cur.fetchall()
for siteName, in resS:
# update timestamp to lock the site
varMap = dict()
varMap[':siteName'] = siteName
varMap[':submitTime'] = timeNow
varMap[':lockedBy'] = locked_by
varMap[':timeLimit'] = timeNow - datetime.timedelta(seconds=lookup_interval)
self.execute(sqlU, varMap)
nRow = self.cur.rowcount
# commit
self.commit()
# skip if not locked
if nRow == 0:
continue
# get queues
varMap = dict()
varMap[':siteName'] = siteName
self.execute(sqlQ, varMap)
resQ = self.cur.fetchall()
for queueName, resourceType, nNewWorkers in resQ:
# delete orphaned workers
varMap = dict()
varMap[':computingSite'] = queueName
varMap[':status'] = WorkSpec.ST_pending
varMap[':timeLimit'] = timeNow - datetime.timedelta(seconds=lock_interval)
sqlO_tmp = sqlO
if resourceType != 'ANY':
varMap[':resourceType'] = resourceType
sqlO_tmp += "AND resourceType=:resourceType "
self.execute(sqlO_tmp, varMap)
resO = self.cur.fetchall()
for tmpWorkerID, in resO:
varMap = dict()
varMap[':workerID'] = tmpWorkerID
self.execute(sqlD, varMap)
# commit
self.commit()
# count nQueue
varMap = dict()
varMap[':computingSite'] = queueName
varMap[':resourceType'] = resourceType
sqlN_tmp = sqlN
if resourceType != 'ANY':
varMap[':resourceType'] = resourceType
sqlN_tmp += "AND resourceType=:resourceType "
sqlN_tmp += "GROUP BY status "
self.execute(sqlN_tmp, varMap)
nQueue = 0
nReady = 0
nRunning = 0
for workerStatus, tmpNum in self.cur.fetchall():
if workerStatus in [WorkSpec.ST_submitted, WorkSpec.ST_pending, WorkSpec.ST_idle]:
nQueue += tmpNum
elif workerStatus in [WorkSpec.ST_ready]:
nReady += tmpNum
elif workerStatus in [WorkSpec.ST_running]:
nRunning += tmpNum
                    # count re-fillers
varMap = dict()
varMap[':computingSite'] = queueName
varMap[':status'] = WorkSpec.ST_running
sqlR_tmp = sqlR
if resourceType != 'ANY':
varMap[':resourceType'] = resourceType
sqlR_tmp += "AND resourceType=:resourceType "
self.execute(sqlR_tmp, varMap)
nReFill, = self.cur.fetchone()
nReady += nReFill
# add
retMap.setdefault(queueName, {})
retMap[queueName][resourceType] = {'nReady': nReady,
'nRunning': nRunning,
'nQueue': nQueue,
'nNewWorkers': nNewWorkers}
resourceMap[resourceType] = queueName
# enough queues
if len(retMap) >= 0:
break
tmpLog.debug('got retMap {0}'.format(str(retMap)))
tmpLog.debug('got siteName {0}'.format(str(siteName)))
tmpLog.debug('got resourceMap {0}'.format(str(resourceMap)))
return retMap, siteName, resourceMap
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return {}, None, {}
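    # Return shape (illustrative): a tuple of
    #   ({queueName: {resourceType: {'nReady': .., 'nRunning': .., 'nQueue': .., 'nNewWorkers': ..}}},
    #    siteName, {resourceType: queueName})
    # describing worker counts per resource type for the one site that was locked.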
# get job chunks to make workers
def get_job_chunks_for_workers(self, queue_name, n_workers, n_ready, n_jobs_per_worker, n_workers_per_job,
use_job_late_binding, check_interval, lock_interval, locked_by,
allow_job_mixture=False, max_workers_per_job_in_total=None,
max_workers_per_job_per_cycle=None):
toCommit = False
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'queue={0}'.format(queue_name),
method_name='get_job_chunks_for_workers')
tmpLog.debug('start')
# define maxJobs
if n_jobs_per_worker is not None:
maxJobs = (n_workers + n_ready) * n_jobs_per_worker
else:
maxJobs = -(-(n_workers + n_ready) // n_workers_per_job)
# core part of sql
# submitted and running are for multi-workers
sqlCore = "WHERE (subStatus IN (:subStat1,:subStat2) OR (subStatus IN (:subStat3,:subStat4) "
sqlCore += "AND nWorkers IS NOT NULL AND nWorkersLimit IS NOT NULL AND nWorkers<nWorkersLimit "
sqlCore += "AND moreWorkers IS NULL AND (maxWorkersInTotal IS NULL OR nWorkersInTotal IS NULL "
sqlCore += "OR nWorkersInTotal<maxWorkersInTotal))) "
sqlCore += "AND (submitterTime IS NULL "
sqlCore += "OR (submitterTime<:lockTimeLimit AND lockedBy IS NOT NULL) "
sqlCore += "OR (submitterTime<:checkTimeLimit AND lockedBy IS NULL)) "
sqlCore += "AND computingSite=:queueName "
# sql to get job IDs
sqlP = "SELECT PandaID FROM {0} ".format(jobTableName)
sqlP += sqlCore
sqlP += "ORDER BY currentPriority DESC,taskID,PandaID "
# sql to get job
sqlJ = "SELECT {0} FROM {1} ".format(JobSpec.column_names(), jobTableName)
sqlJ += "WHERE PandaID=:PandaID "
# sql to lock job
sqlL = "UPDATE {0} SET submitterTime=:timeNow,lockedBy=:lockedBy ".format(jobTableName)
sqlL += sqlCore
sqlL += "AND PandaID=:PandaID "
timeNow = datetime.datetime.utcnow()
lockTimeLimit = timeNow - datetime.timedelta(seconds=lock_interval)
checkTimeLimit = timeNow - datetime.timedelta(seconds=check_interval)
# sql to get file
sqlGF = "SELECT {0} FROM {1} ".format(FileSpec.column_names(), fileTableName)
sqlGF += "WHERE PandaID=:PandaID AND fileType=:type "
jobChunkList = []
# count jobs for nJobsPerWorker>1
nAvailableJobs = None
if n_jobs_per_worker is not None and n_jobs_per_worker > 1:
toCommit = True
# sql to count jobs
sqlC = "SELECT COUNT(*) cnt FROM {0} ".format(jobTableName)
sqlC += sqlCore
# count jobs
varMap = dict()
varMap[':subStat1'] = 'prepared'
varMap[':subStat2'] = 'queued'
varMap[':subStat3'] = 'submitted'
varMap[':subStat4'] = 'running'
varMap[':queueName'] = queue_name
varMap[':lockTimeLimit'] = lockTimeLimit
varMap[':checkTimeLimit'] = checkTimeLimit
self.execute(sqlC, varMap)
nAvailableJobs, = self.cur.fetchone()
maxJobs = int(min(maxJobs, nAvailableJobs) / n_jobs_per_worker) * n_jobs_per_worker
tmpStr = 'n_workers={0} n_ready={1} '.format(n_workers, n_ready)
tmpStr += 'n_jobs_per_worker={0} n_workers_per_job={1} '.format(n_jobs_per_worker, n_workers_per_job)
tmpStr += 'n_ava_jobs={0}'.format(nAvailableJobs)
tmpLog.debug(tmpStr)
if maxJobs == 0:
tmpStr = 'skip due to maxJobs=0'
tmpLog.debug(tmpStr)
else:
# get job IDs
varMap = dict()
varMap[':subStat1'] = 'prepared'
varMap[':subStat2'] = 'queued'
varMap[':subStat3'] = 'submitted'
varMap[':subStat4'] = 'running'
varMap[':queueName'] = queue_name
varMap[':lockTimeLimit'] = lockTimeLimit
varMap[':checkTimeLimit'] = checkTimeLimit
self.execute(sqlP, varMap)
resP = self.cur.fetchall()
tmpStr = 'fetched {0} jobs'.format(len(resP))
tmpLog.debug(tmpStr)
jobChunk = []
iJobs = 0
for pandaID, in resP:
toCommit = True
toEscape = False
# lock job
varMap = dict()
varMap[':subStat1'] = 'prepared'
varMap[':subStat2'] = 'queued'
varMap[':subStat3'] = 'submitted'
varMap[':subStat4'] = 'running'
varMap[':queueName'] = queue_name
varMap[':lockTimeLimit'] = lockTimeLimit
varMap[':checkTimeLimit'] = checkTimeLimit
varMap[':PandaID'] = pandaID
varMap[':timeNow'] = timeNow
varMap[':lockedBy'] = locked_by
self.execute(sqlL, varMap)
nRow = self.cur.rowcount
if nRow > 0:
iJobs += 1
# get job
varMap = dict()
varMap[':PandaID'] = pandaID
self.execute(sqlJ, varMap)
resJ = self.cur.fetchone()
# make job
jobSpec = JobSpec()
jobSpec.pack(resJ)
jobSpec.lockedBy = locked_by
# get files
varMap = dict()
varMap[':PandaID'] = pandaID
varMap[':type'] = 'input'
self.execute(sqlGF, varMap)
resGF = self.cur.fetchall()
for resFile in resGF:
fileSpec = FileSpec()
fileSpec.pack(resFile)
jobSpec.add_in_file(fileSpec)
# new chunk
if len(jobChunk) > 0 and jobChunk[0].taskID != jobSpec.taskID and not allow_job_mixture:
tmpLog.debug('new chunk with {0} jobs due to taskID change'.format(len(jobChunk)))
jobChunkList.append(jobChunk)
jobChunk = []
# only prepared for new worker
if len(jobChunkList) >= n_ready and jobSpec.subStatus == 'queued':
toCommit = False
else:
jobChunk.append(jobSpec)
# enough jobs in chunk
if n_jobs_per_worker is not None and len(jobChunk) >= n_jobs_per_worker:
tmpLog.debug('new chunk with {0} jobs due to n_jobs_per_worker'.format(len(jobChunk)))
jobChunkList.append(jobChunk)
jobChunk = []
# one job per multiple workers
elif n_workers_per_job is not None:
if jobSpec.nWorkersLimit is None:
jobSpec.nWorkersLimit = n_workers_per_job
if max_workers_per_job_in_total is not None:
jobSpec.maxWorkersInTotal = max_workers_per_job_in_total
nMultiWorkers = min(jobSpec.nWorkersLimit - jobSpec.nWorkers,
n_workers - len(jobChunkList))
if jobSpec.maxWorkersInTotal is not None and jobSpec.nWorkersInTotal is not None:
nMultiWorkers = min(nMultiWorkers,
jobSpec.maxWorkersInTotal - jobSpec.nWorkersInTotal)
if max_workers_per_job_per_cycle is not None:
nMultiWorkers = min(nMultiWorkers, max_workers_per_job_per_cycle)
if nMultiWorkers < 0:
nMultiWorkers = 0
tmpLog.debug(
'new {0} chunks with {1} jobs due to n_workers_per_job'.format(nMultiWorkers,
len(jobChunk)))
for i in range(nMultiWorkers):
jobChunkList.append(jobChunk)
jobChunk = []
# enough job chunks
if len(jobChunkList) >= n_workers:
toEscape = True
if toCommit:
self.commit()
else:
self.rollback()
if toEscape or iJobs >= maxJobs:
break
tmpLog.debug('got {0} job chunks'.format(len(jobChunkList)))
return jobChunkList
except Exception:
# roll back
if toCommit:
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return []
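    # Return shape (illustrative): a list of job chunks, each chunk being the list of
    # JobSpec objects to be served by one new worker, e.g. [[job1, job2], [job3, job4]]
    # when n_jobs_per_worker is 2.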
# get workers to monitor
def get_workers_to_update(self, max_workers, check_interval, lock_interval, locked_by):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='get_workers_to_update')
tmpLog.debug('start')
# sql to get workers
sqlW = "SELECT workerID,configID,mapType FROM {0} ".format(workTableName)
sqlW += "WHERE status IN (:st_submitted,:st_running,:st_idle) "
sqlW += "AND ((modificationTime<:lockTimeLimit AND lockedBy IS NOT NULL) "
sqlW += "OR (modificationTime<:checkTimeLimit AND lockedBy IS NULL)) "
sqlW += "ORDER BY modificationTime LIMIT {0} ".format(max_workers)
# sql to lock worker without time check
sqlL = "UPDATE {0} SET modificationTime=:timeNow,lockedBy=:lockedBy ".format(workTableName)
sqlL += "WHERE workerID=:workerID "
# sql to update modificationTime
sqlLM = "UPDATE {0} SET modificationTime=:timeNow ".format(workTableName)
sqlLM += "WHERE workerID=:workerID "
# sql to lock worker with time check
sqlLT = "UPDATE {0} SET modificationTime=:timeNow,lockedBy=:lockedBy ".format(workTableName)
sqlLT += "WHERE workerID=:workerID "
sqlLT += "AND status IN (:st_submitted,:st_running,:st_idle) "
sqlLT += "AND ((modificationTime<:lockTimeLimit AND lockedBy IS NOT NULL) "
sqlLT += "OR (modificationTime<:checkTimeLimit AND lockedBy IS NULL)) "
# sql to get associated workerIDs
sqlA = "SELECT t.workerID FROM {0} t, {0} s, {1} w ".format(jobWorkerTableName, workTableName)
sqlA += "WHERE s.PandaID=t.PandaID AND s.workerID=:workerID "
sqlA += "AND w.workerID=t.workerID AND w.status IN (:st_submitted,:st_running,:st_idle) "
# sql to get associated workers
sqlG = "SELECT {0} FROM {1} ".format(WorkSpec.column_names(), workTableName)
sqlG += "WHERE workerID=:workerID "
# sql to get associated PandaIDs
sqlP = "SELECT PandaID FROM {0} ".format(jobWorkerTableName)
sqlP += "WHERE workerID=:workerID "
# get workerIDs
timeNow = datetime.datetime.utcnow()
lockTimeLimit = timeNow - datetime.timedelta(seconds=lock_interval)
checkTimeLimit = timeNow - datetime.timedelta(seconds=check_interval)
varMap = dict()
varMap[':st_submitted'] = WorkSpec.ST_submitted
varMap[':st_running'] = WorkSpec.ST_running
varMap[':st_idle'] = WorkSpec.ST_idle
varMap[':lockTimeLimit'] = lockTimeLimit
varMap[':checkTimeLimit'] = checkTimeLimit
self.execute(sqlW, varMap)
resW = self.cur.fetchall()
tmpWorkers = set()
for workerID, configID, mapType in resW:
# ignore configID
if not core_utils.dynamic_plugin_change():
configID = None
tmpWorkers.add((workerID, configID, mapType))
checkedIDs = set()
retVal = {}
for workerID, configID, mapType in tmpWorkers:
# skip
if workerID in checkedIDs:
continue
# get associated workerIDs
varMap = dict()
varMap[':workerID'] = workerID
varMap[':st_submitted'] = WorkSpec.ST_submitted
varMap[':st_running'] = WorkSpec.ST_running
varMap[':st_idle'] = WorkSpec.ST_idle
self.execute(sqlA, varMap)
resA = self.cur.fetchall()
workerIDtoScan = set()
for tmpWorkID, in resA:
workerIDtoScan.add(tmpWorkID)
                # add the original ID as well, since there is no relation entry when the job is not yet bound
workerIDtoScan.add(workerID)
                # use only the worker with the smallest ID to avoid updating the same worker set concurrently
if mapType == WorkSpec.MT_MultiWorkers:
if workerID != min(workerIDtoScan):
# update modification time
varMap = dict()
varMap[':workerID'] = workerID
varMap[':timeNow'] = timeNow
self.execute(sqlLM, varMap)
# commit
self.commit()
continue
# lock worker
varMap = dict()
varMap[':workerID'] = workerID
varMap[':lockedBy'] = locked_by
varMap[':timeNow'] = timeNow
varMap[':st_submitted'] = WorkSpec.ST_submitted
varMap[':st_running'] = WorkSpec.ST_running
varMap[':st_idle'] = WorkSpec.ST_idle
varMap[':lockTimeLimit'] = lockTimeLimit
varMap[':checkTimeLimit'] = checkTimeLimit
self.execute(sqlLT, varMap)
nRow = self.cur.rowcount
# commit
self.commit()
# skip if not locked
if nRow == 0:
continue
# get workers
queueName = None
workersList = []
for tmpWorkID in workerIDtoScan:
checkedIDs.add(tmpWorkID)
# get worker
varMap = dict()
varMap[':workerID'] = tmpWorkID
self.execute(sqlG, varMap)
resG = self.cur.fetchone()
workSpec = WorkSpec()
workSpec.pack(resG)
if queueName is None:
queueName = workSpec.computingSite
workersList.append(workSpec)
# get associated PandaIDs
varMap = dict()
varMap[':workerID'] = tmpWorkID
self.execute(sqlP, varMap)
resP = self.cur.fetchall()
workSpec.pandaid_list = []
for tmpPandaID, in resP:
workSpec.pandaid_list.append(tmpPandaID)
if len(workSpec.pandaid_list) > 0:
workSpec.nJobs = len(workSpec.pandaid_list)
# lock worker
if tmpWorkID != workerID:
varMap = dict()
varMap[':workerID'] = tmpWorkID
varMap[':lockedBy'] = locked_by
varMap[':timeNow'] = timeNow
self.execute(sqlL, varMap)
workSpec.lockedBy = locked_by
workSpec.force_not_update('lockedBy')
# commit
self.commit()
# add
if queueName is not None:
retVal.setdefault(queueName, dict())
retVal[queueName].setdefault(configID, [])
retVal[queueName][configID].append(workersList)
tmpLog.debug('got {0}'.format(str(retVal)))
return retVal
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return {}
# get workers to propagate
def get_workers_to_propagate(self, max_workers, check_interval):
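        """
        Get workers whose lastUpdate is older than check_interval so that their latest
        status can be propagated. The candidate list is partially randomized to
        increase the hit rate.
        """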
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='get_workers_to_propagate')
tmpLog.debug('start')
# sql to get worker IDs
sqlW = "SELECT workerID FROM {0} ".format(workTableName)
sqlW += "WHERE lastUpdate IS NOT NULL AND lastUpdate<:checkTimeLimit "
sqlW += "ORDER BY lastUpdate "
# sql to lock worker
sqlL = "UPDATE {0} SET lastUpdate=:timeNow ".format(workTableName)
sqlL += "WHERE lastUpdate IS NOT NULL AND lastUpdate<:checkTimeLimit "
sqlL += "AND workerID=:workerID "
# sql to get associated PandaIDs
sqlA = "SELECT PandaID FROM {0} ".format(jobWorkerTableName)
sqlA += "WHERE workerID=:workerID "
# sql to get workers
sqlG = "SELECT {0} FROM {1} ".format(WorkSpec.column_names(), workTableName)
sqlG += "WHERE workerID=:workerID "
timeNow = datetime.datetime.utcnow()
timeLimit = timeNow - datetime.timedelta(seconds=check_interval)
# get workerIDs
varMap = dict()
varMap[':checkTimeLimit'] = timeLimit
self.execute(sqlW, varMap)
resW = self.cur.fetchall()
tmpWorkers = []
for workerID, in resW:
tmpWorkers.append(workerID)
# partially randomize to increase hit rate
nWorkers = int(max_workers * 0.2)
subTmpWorkers = list(tmpWorkers[nWorkers:])
random.shuffle(subTmpWorkers)
tmpWorkers = tmpWorkers[:nWorkers] + subTmpWorkers
tmpWorkers = tmpWorkers[:max_workers]
retVal = []
for workerID in tmpWorkers:
# lock worker
varMap = dict()
varMap[':workerID'] = workerID
varMap[':timeNow'] = timeNow
varMap[':checkTimeLimit'] = timeLimit
self.execute(sqlL, varMap)
nRow = self.cur.rowcount
if nRow > 0:
# get worker
varMap = dict()
varMap[':workerID'] = workerID
self.execute(sqlG, varMap)
resG = self.cur.fetchone()
workSpec = WorkSpec()
workSpec.pack(resG)
retVal.append(workSpec)
# get associated PandaIDs
varMap = dict()
varMap[':workerID'] = workerID
self.execute(sqlA, varMap)
resA = self.cur.fetchall()
workSpec.pandaid_list = []
for pandaID, in resA:
workSpec.pandaid_list.append(pandaID)
# commit
self.commit()
tmpLog.debug('got {0} workers'.format(len(retVal)))
return retVal
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
            return []
# get workers to feed events
def get_workers_to_feed_events(self, max_workers, lock_interval, locked_by):
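        """
        Get submitted/running workers that requested events and whose event feed lock
        is missing or expired, lock them with locked_by, and return them keyed by
        computingSite.
        """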
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='get_workers_to_feed_events')
tmpLog.debug('start')
# sql to get workers
sqlW = "SELECT workerID, status FROM {0} ".format(workTableName)
sqlW += "WHERE eventsRequest=:eventsRequest AND status IN (:status1,:status2) "
sqlW += "AND (eventFeedTime IS NULL OR eventFeedTime<:lockTimeLimit) "
sqlW += "ORDER BY eventFeedTime LIMIT {0} ".format(max_workers)
# sql to lock worker
sqlL = "UPDATE {0} SET eventFeedTime=:timeNow,eventFeedLock=:lockedBy ".format(workTableName)
sqlL += "WHERE eventsRequest=:eventsRequest AND status=:status "
sqlL += "AND (eventFeedTime IS NULL OR eventFeedTime<:lockTimeLimit) "
sqlL += "AND workerID=:workerID "
# sql to get associated workers
sqlG = "SELECT {0} FROM {1} ".format(WorkSpec.column_names(), workTableName)
sqlG += "WHERE workerID=:workerID "
# get workerIDs
timeNow = datetime.datetime.utcnow()
lockTimeLimit = timeNow - datetime.timedelta(seconds=lock_interval)
varMap = dict()
varMap[':status1'] = WorkSpec.ST_running
varMap[':status2'] = WorkSpec.ST_submitted
varMap[':eventsRequest'] = WorkSpec.EV_requestEvents
varMap[':lockTimeLimit'] = lockTimeLimit
self.execute(sqlW, varMap)
resW = self.cur.fetchall()
tmpWorkers = dict()
for tmpWorkerID, tmpWorkStatus in resW:
tmpWorkers[tmpWorkerID] = tmpWorkStatus
retVal = {}
for workerID, workStatus in iteritems(tmpWorkers):
# lock worker
varMap = dict()
varMap[':workerID'] = workerID
varMap[':timeNow'] = timeNow
varMap[':status'] = workStatus
varMap[':eventsRequest'] = WorkSpec.EV_requestEvents
varMap[':lockTimeLimit'] = lockTimeLimit
varMap[':lockedBy'] = locked_by
self.execute(sqlL, varMap)
nRow = self.cur.rowcount
# commit
self.commit()
# skip if not locked
if nRow == 0:
continue
# get worker
varMap = dict()
varMap[':workerID'] = workerID
self.execute(sqlG, varMap)
resG = self.cur.fetchone()
workSpec = WorkSpec()
workSpec.pack(resG)
if workSpec.computingSite not in retVal:
retVal[workSpec.computingSite] = []
retVal[workSpec.computingSite].append(workSpec)
tmpLog.debug('got {0} workers'.format(len(retVal)))
return retVal
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return {}
# update jobs and workers
def update_jobs_workers(self, jobspec_list, workspec_list, locked_by, panda_ids_list=None):
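        """
        Update jobs and workers in one pass: insert or update output files and events
        for each job, zip pending files when zipPerMB is set, update the job and worker
        records, and insert job-worker relations when needed. Returns True only when
        all workers were updated.
        """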
try:
timeNow = datetime.datetime.utcnow()
# sql to check job
sqlCJ = "SELECT status FROM {0} WHERE PandaID=:PandaID FOR UPDATE ".format(jobTableName)
# sql to check file
sqlFC = "SELECT {0} FROM {1} ".format(FileSpec.column_names(), fileTableName)
sqlFC += "WHERE PandaID=:PandaID AND lfn=:lfn "
# sql to get all LFNs
sqlFL = "SELECT lfn FROM {0} ".format(fileTableName)
sqlFL += "WHERE PandaID=:PandaID AND fileType<>:type "
# sql to check file with eventRangeID
sqlFE = "SELECT 1 c FROM {0} ".format(fileTableName)
sqlFE += "WHERE PandaID=:PandaID AND lfn=:lfn AND eventRangeID=:eventRangeID ".format(fileTableName)
# sql to insert file
sqlFI = "INSERT INTO {0} ({1}) ".format(fileTableName, FileSpec.column_names())
sqlFI += FileSpec.bind_values_expression()
# sql to get pending files
sqlFP = "SELECT fileID,fsize,lfn FROM {0} ".format(fileTableName)
sqlFP += "WHERE PandaID=:PandaID AND status=:status AND fileType<>:type "
# sql to get provenanceID,workerID for pending files
sqlPW = "SELECT SUM(fsize),provenanceID,workerID FROM {0} ".format(fileTableName)
sqlPW += "WHERE PandaID=:PandaID AND status=:status AND fileType<>:type "
sqlPW += "GROUP BY provenanceID,workerID "
# sql to update pending files
sqlFU = "UPDATE {0} ".format(fileTableName)
sqlFU += "SET status=:status,zipFileID=:zipFileID "
sqlFU += "WHERE fileID=:fileID "
# sql to check event
sqlEC = "SELECT eventRangeID,eventStatus FROM {0} ".format(eventTableName)
sqlEC += "WHERE PandaID=:PandaID AND eventRangeID IS NOT NULL "
# sql to check associated file
sqlEF = "SELECT eventRangeID,status FROM {0} ".format(fileTableName)
sqlEF += "WHERE PandaID=:PandaID AND eventRangeID IS NOT NULL "
# sql to insert event
sqlEI = "INSERT INTO {0} ({1}) ".format(eventTableName, EventSpec.column_names())
sqlEI += EventSpec.bind_values_expression()
# sql to update event
sqlEU = "UPDATE {0} ".format(eventTableName)
sqlEU += "SET eventStatus=:eventStatus,subStatus=:subStatus "
sqlEU += "WHERE PandaID=:PandaID AND eventRangeID=:eventRangeID "
# sql to check if relationship is already available
sqlCR = "SELECT 1 c FROM {0} WHERE PandaID=:PandaID AND workerID=:workerID ".format(jobWorkerTableName)
# sql to insert job and worker relationship
sqlIR = "INSERT INTO {0} ({1}) ".format(jobWorkerTableName, JobWorkerRelationSpec.column_names())
sqlIR += JobWorkerRelationSpec.bind_values_expression()
# count number of workers
sqlNW = "SELECT DISTINCT t.workerID FROM {0} t, {1} w ".format(jobWorkerTableName, workTableName)
sqlNW += "WHERE t.PandaID=:PandaID AND w.workerID=t.workerID "
sqlNW += "AND w.status IN (:st_submitted,:st_running,:st_idle) "
# update job
if jobspec_list is not None:
if len(workspec_list) > 0 and workspec_list[0].mapType == WorkSpec.MT_MultiWorkers:
isMultiWorkers = True
else:
isMultiWorkers = False
for jobSpec in jobspec_list:
tmpLog = core_utils.make_logger(_logger, 'PandaID={0} by {1}'.format(jobSpec.PandaID, locked_by),
method_name='update_jobs_workers')
# check job
varMap = dict()
varMap[':PandaID'] = jobSpec.PandaID
self.execute(sqlCJ, varMap)
resCJ = self.cur.fetchone()
tmpJobStatus, = resCJ
# don't update cancelled jobs
                    if tmpJobStatus in ['cancelled']:
pass
else:
# get nWorkers
tmpLog.debug('start')
activeWorkers = set()
if isMultiWorkers:
varMap = dict()
varMap[':PandaID'] = jobSpec.PandaID
varMap[':st_submitted'] = WorkSpec.ST_submitted
varMap[':st_running'] = WorkSpec.ST_running
varMap[':st_idle'] = WorkSpec.ST_idle
self.execute(sqlNW, varMap)
resNW = self.cur.fetchall()
for tmpWorkerID, in resNW:
activeWorkers.add(tmpWorkerID)
jobSpec.nWorkers = len(activeWorkers)
# get all LFNs
allLFNs = set()
varMap = dict()
varMap[':PandaID'] = jobSpec.PandaID
varMap[':type'] = 'input'
self.execute(sqlFL, varMap)
resFL = self.cur.fetchall()
for tmpLFN, in resFL:
allLFNs.add(tmpLFN)
# insert files
nFiles = 0
fileIdMap = {}
zipFileRes = dict()
for fileSpec in jobSpec.outFiles:
# insert file
if fileSpec.lfn not in allLFNs:
if jobSpec.zipPerMB is None or fileSpec.isZip in [0, 1]:
fileSpec.status = 'defined'
jobSpec.hasOutFile = JobSpec.HO_hasOutput
else:
fileSpec.status = 'pending'
varMap = fileSpec.values_list()
self.execute(sqlFI, varMap)
fileSpec.fileID = self.cur.lastrowid
nFiles += 1
# mapping between event range ID and file ID
if fileSpec.eventRangeID is not None:
fileIdMap[fileSpec.eventRangeID] = fileSpec.fileID
# associate to itself
if fileSpec.isZip == 1:
varMap = dict()
varMap[':status'] = fileSpec.status
varMap[':fileID'] = fileSpec.fileID
varMap[':zipFileID'] = fileSpec.fileID
self.execute(sqlFU, varMap)
elif fileSpec.isZip == 1 and fileSpec.eventRangeID is not None:
# add a fake file with eventRangeID which has the same lfn/zipFileID as zip file
varMap = dict()
varMap[':PandaID'] = fileSpec.PandaID
varMap[':lfn'] = fileSpec.lfn
varMap[':eventRangeID'] = fileSpec.eventRangeID
self.execute(sqlFE, varMap)
resFE = self.cur.fetchone()
if resFE is None:
if fileSpec.lfn not in zipFileRes:
# get file
varMap = dict()
varMap[':PandaID'] = fileSpec.PandaID
varMap[':lfn'] = fileSpec.lfn
self.execute(sqlFC, varMap)
resFC = self.cur.fetchone()
zipFileRes[fileSpec.lfn] = resFC
# associate to existing zip
resFC = zipFileRes[fileSpec.lfn]
zipFileSpec = FileSpec()
zipFileSpec.pack(resFC)
fileSpec.status = 'zipped'
fileSpec.zipFileID = zipFileSpec.zipFileID
varMap = fileSpec.values_list()
self.execute(sqlFI, varMap)
nFiles += 1
# mapping between event range ID and file ID
fileIdMap[fileSpec.eventRangeID] = self.cur.lastrowid
if nFiles > 0:
tmpLog.debug('inserted {0} files'.format(nFiles))
# check pending files
if jobSpec.zipPerMB is not None and \
not (jobSpec.zipPerMB == 0 and jobSpec.subStatus != 'to_transfer'):
# get workerID and provenanceID of pending files
zippedFileIDs = []
varMap = dict()
varMap[':PandaID'] = jobSpec.PandaID
varMap[':status'] = 'pending'
varMap[':type'] = 'input'
self.execute(sqlPW, varMap)
resPW = self.cur.fetchall()
for subTotalSize, tmpProvenanceID, tmpWorkerID in resPW:
if jobSpec.subStatus == 'to_transfer' \
or (jobSpec.zipPerMB > 0 and subTotalSize > jobSpec.zipPerMB * 1024 * 1024) \
or (tmpWorkerID is not None and tmpWorkerID not in activeWorkers):
sqlFPx = sqlFP
varMap = dict()
varMap[':PandaID'] = jobSpec.PandaID
varMap[':status'] = 'pending'
varMap[':type'] = 'input'
if tmpProvenanceID is None:
sqlFPx += 'AND provenanceID IS NULL '
else:
varMap[':provenanceID'] = tmpProvenanceID
sqlFPx += 'AND provenanceID=:provenanceID '
if tmpWorkerID is None:
sqlFPx += 'AND workerID IS NULL '
else:
varMap[':workerID'] = tmpWorkerID
sqlFPx += 'AND workerID=:workerID'
# get pending files
self.execute(sqlFPx, varMap)
resFP = self.cur.fetchall()
tmpLog.debug('got {0} pending files for workerID={1} provenanceID={2}'.format(
len(resFP),
tmpWorkerID,
tmpProvenanceID))
# make subsets
subTotalSize = 0
subFileIDs = []
for tmpFileID, tmpFsize, tmpLFN in resFP:
if jobSpec.zipPerMB > 0 and subTotalSize > 0 \
and (subTotalSize + tmpFsize > jobSpec.zipPerMB * 1024 * 1024):
zippedFileIDs.append(subFileIDs)
subFileIDs = []
subTotalSize = 0
subTotalSize += tmpFsize
subFileIDs.append((tmpFileID, tmpLFN))
if (jobSpec.subStatus == 'to_transfer'
or (jobSpec.zipPerMB > 0 and subTotalSize > jobSpec.zipPerMB * 1024 * 1024)
or (tmpWorkerID is not None and tmpWorkerID not in activeWorkers)) \
and len(subFileIDs) > 0:
zippedFileIDs.append(subFileIDs)
# make zip files
for subFileIDs in zippedFileIDs:
# insert zip file
fileSpec = FileSpec()
fileSpec.status = 'zipping'
fileSpec.lfn = 'panda.' + subFileIDs[0][-1] + '.zip'
fileSpec.scope = 'panda'
fileSpec.fileType = 'zip_output'
fileSpec.PandaID = jobSpec.PandaID
fileSpec.taskID = jobSpec.taskID
fileSpec.isZip = 1
varMap = fileSpec.values_list()
self.execute(sqlFI, varMap)
# update pending files
varMaps = []
for tmpFileID, tmpLFN in subFileIDs:
varMap = dict()
varMap[':status'] = 'zipped'
varMap[':fileID'] = tmpFileID
varMap[':zipFileID'] = self.cur.lastrowid
varMaps.append(varMap)
self.executemany(sqlFU, varMaps)
# set zip output flag
if len(zippedFileIDs) > 0:
jobSpec.hasOutFile = JobSpec.HO_hasZipOutput
# get event ranges and file stat
eventFileStat = dict()
eventRangesSet = set()
doneEventRangesSet = set()
if len(jobSpec.events) > 0:
# get event ranges
varMap = dict()
varMap[':PandaID'] = jobSpec.PandaID
self.execute(sqlEC, varMap)
resEC = self.cur.fetchall()
for tmpEventRangeID, tmpEventStatus in resEC:
if tmpEventStatus in ['running']:
eventRangesSet.add(tmpEventRangeID)
else:
doneEventRangesSet.add(tmpEventRangeID)
# check associated file
varMap = dict()
varMap[':PandaID'] = jobSpec.PandaID
self.execute(sqlEF, varMap)
resEF = self.cur.fetchall()
for tmpEventRangeID, tmpStat in resEF:
eventFileStat[tmpEventRangeID] = tmpStat
# insert or update events
varMapsEI = []
varMapsEU = []
for eventSpec in jobSpec.events:
# already done
if eventSpec.eventRangeID in doneEventRangesSet:
continue
# set subStatus
if eventSpec.eventStatus == 'finished':
# check associated file
if eventSpec.eventRangeID not in eventFileStat or \
eventFileStat[eventSpec.eventRangeID] == 'finished':
eventSpec.subStatus = 'finished'
elif eventFileStat[eventSpec.eventRangeID] == 'failed':
eventSpec.eventStatus = 'failed'
eventSpec.subStatus = 'failed'
else:
eventSpec.subStatus = 'transferring'
else:
eventSpec.subStatus = eventSpec.eventStatus
# set fileID
if eventSpec.eventRangeID in fileIdMap:
eventSpec.fileID = fileIdMap[eventSpec.eventRangeID]
# insert or update event
if eventSpec.eventRangeID not in eventRangesSet:
varMap = eventSpec.values_list()
varMapsEI.append(varMap)
else:
varMap = dict()
varMap[':PandaID'] = jobSpec.PandaID
varMap[':eventRangeID'] = eventSpec.eventRangeID
varMap[':eventStatus'] = eventSpec.eventStatus
varMap[':subStatus'] = eventSpec.subStatus
varMapsEU.append(varMap)
if len(varMapsEI) > 0:
self.executemany(sqlEI, varMapsEI)
                                tmpLog.debug('inserted {0} events'.format(len(varMapsEI)))
if len(varMapsEU) > 0:
self.executemany(sqlEU, varMapsEU)
                                tmpLog.debug('updated {0} events'.format(len(varMapsEU)))
# update job
varMap = jobSpec.values_map(only_changed=True)
if len(varMap) > 0:
tmpLog.debug('update job')
# sql to update job
sqlJ = "UPDATE {0} SET {1} ".format(jobTableName, jobSpec.bind_update_changes_expression())
sqlJ += "WHERE PandaID=:PandaID "
jobSpec.lockedBy = None
jobSpec.modificationTime = timeNow
varMap = jobSpec.values_map(only_changed=True)
varMap[':PandaID'] = jobSpec.PandaID
self.execute(sqlJ, varMap)
nRow = self.cur.rowcount
tmpLog.debug('done with {0}'.format(nRow))
tmpLog.debug('all done for job')
# commit
self.commit()
# update worker
retVal = True
for idxW, workSpec in enumerate(workspec_list):
tmpLog = core_utils.make_logger(_logger, 'workerID={0}'.format(workSpec.workerID),
method_name='update_jobs_workers')
tmpLog.debug('update worker')
workSpec.lockedBy = None
if workSpec.status == WorkSpec.ST_running and workSpec.startTime is None:
workSpec.startTime = timeNow
elif workSpec.is_final_status():
if workSpec.startTime is None:
workSpec.startTime = timeNow
if workSpec.endTime is None:
workSpec.endTime = timeNow
if not workSpec.nextLookup:
if workSpec.has_updated_attributes():
workSpec.modificationTime = timeNow
else:
workSpec.nextLookup = False
# sql to update worker
sqlW = "UPDATE {0} SET {1} ".format(workTableName, workSpec.bind_update_changes_expression())
sqlW += "WHERE workerID=:workerID AND lockedBy=:cr_lockedBy "
sqlW += "AND (status NOT IN (:st1,:st2,:st3,:st4)) "
varMap = workSpec.values_map(only_changed=True)
if len(varMap) > 0:
varMap[':workerID'] = workSpec.workerID
varMap[':cr_lockedBy'] = locked_by
varMap[':st1'] = WorkSpec.ST_cancelled
varMap[':st2'] = WorkSpec.ST_finished
varMap[':st3'] = WorkSpec.ST_failed
varMap[':st4'] = WorkSpec.ST_missed
self.execute(sqlW, varMap)
nRow = self.cur.rowcount
tmpLog.debug('done with {0}'.format(nRow))
if nRow == 0:
retVal = False
# insert relationship if necessary
if panda_ids_list is not None and len(panda_ids_list) > idxW:
varMapsIR = []
for pandaID in panda_ids_list[idxW]:
varMap = dict()
varMap[':PandaID'] = pandaID
varMap[':workerID'] = workSpec.workerID
self.execute(sqlCR, varMap)
resCR = self.cur.fetchone()
if resCR is None:
jwRelation = JobWorkerRelationSpec()
jwRelation.PandaID = pandaID
jwRelation.workerID = workSpec.workerID
varMap = jwRelation.values_list()
varMapsIR.append(varMap)
if len(varMapsIR) > 0:
self.executemany(sqlIR, varMapsIR)
tmpLog.debug('all done for worker')
# commit
self.commit()
# return
return retVal
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return False
# get jobs with workerID
def get_jobs_with_worker_id(self, worker_id, locked_by, with_file=False, only_running=False, slim=False):
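        """
        Get jobs associated with a worker, optionally locking them with locked_by,
        restricting the result to active subStatus values when only_running is set,
        and attaching files that do not belong to a zip when with_file is True.
        """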
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'workerID={0}'.format(worker_id),
method_name='get_jobs_with_worker_id')
tmpLog.debug('start')
# sql to get PandaIDs
sqlP = "SELECT PandaID FROM {0} ".format(jobWorkerTableName)
sqlP += "WHERE workerID=:workerID "
# sql to get jobs
sqlJ = "SELECT {0} FROM {1} ".format(JobSpec.column_names(slim=slim), jobTableName)
sqlJ += "WHERE PandaID=:PandaID "
# sql to get job parameters
sqlJJ = "SELECT jobParams FROM {0} ".format(jobTableName)
sqlJJ += "WHERE PandaID=:PandaID "
# sql to lock job
sqlL = "UPDATE {0} SET modificationTime=:timeNow,lockedBy=:lockedBy ".format(jobTableName)
sqlL += "WHERE PandaID=:PandaID "
# sql to get files
sqlF = "SELECT {0} FROM {1} ".format(FileSpec.column_names(), fileTableName)
sqlF += "WHERE PandaID=:PandaID AND zipFileID IS NULL "
# get jobs
jobChunkList = []
timeNow = datetime.datetime.utcnow()
varMap = dict()
varMap[':workerID'] = worker_id
self.execute(sqlP, varMap)
resW = self.cur.fetchall()
for pandaID, in resW:
# get job
varMap = dict()
varMap[':PandaID'] = pandaID
self.execute(sqlJ, varMap)
resJ = self.cur.fetchone()
# make job
jobSpec = JobSpec()
jobSpec.pack(resJ, slim=slim)
if only_running and jobSpec.subStatus not in ['running', 'submitted', 'queued', 'idle']:
continue
jobSpec.lockedBy = locked_by
# for old jobs without extractions
if jobSpec.jobParamsExtForLog is None:
varMap = dict()
varMap[':PandaID'] = pandaID
self.execute(sqlJJ, varMap)
resJJ = self.cur.fetchone()
jobSpec.set_blob_attribute('jobParams', resJJ[0])
jobSpec.get_output_file_attributes()
jobSpec.get_logfile_info()
# lock job
if locked_by is not None:
varMap = dict()
varMap[':PandaID'] = pandaID
varMap[':lockedBy'] = locked_by
varMap[':timeNow'] = timeNow
self.execute(sqlL, varMap)
# get files
if with_file:
varMap = dict()
varMap[':PandaID'] = pandaID
self.execute(sqlF, varMap)
resFileList = self.cur.fetchall()
for resFile in resFileList:
fileSpec = FileSpec()
fileSpec.pack(resFile)
jobSpec.add_file(fileSpec)
# append
jobChunkList.append(jobSpec)
# commit
self.commit()
tmpLog.debug('got {0} job chunks'.format(len(jobChunkList)))
return jobChunkList
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return []
# get ready workers
def get_ready_workers(self, queue_name, n_ready):
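        """
        Get up to n_ready workers in ready state (or running workers with jobs to
        refill) for a queue, with the number of associated jobs attached.
        """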
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'queue={0}'.format(queue_name),
method_name='get_ready_workers')
tmpLog.debug('start')
# sql to get workers
sqlG = "SELECT {0} FROM {1} ".format(WorkSpec.column_names(), workTableName)
sqlG += "WHERE computingSite=:queueName AND (status=:status_ready OR (status=:status_running "
sqlG += "AND nJobsToReFill IS NOT NULL AND nJobsToReFill>0)) "
sqlG += "ORDER BY modificationTime LIMIT {0} ".format(n_ready)
# sql to get associated PandaIDs
sqlP = "SELECT COUNT(*) cnt FROM {0} ".format(jobWorkerTableName)
sqlP += "WHERE workerID=:workerID "
# get workers
varMap = dict()
varMap[':status_ready'] = WorkSpec.ST_ready
varMap[':status_running'] = WorkSpec.ST_running
varMap[':queueName'] = queue_name
self.execute(sqlG, varMap)
resList = self.cur.fetchall()
retVal = []
for res in resList:
workSpec = WorkSpec()
workSpec.pack(res)
# get number of jobs
varMap = dict()
varMap[':workerID'] = workSpec.workerID
self.execute(sqlP, varMap)
resP = self.cur.fetchone()
if resP is not None and resP[0] > 0:
workSpec.nJobs = resP[0]
retVal.append(workSpec)
# commit
self.commit()
tmpLog.debug('got {0}'.format(str(retVal)))
return retVal
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return []
# get a worker
def get_worker_with_id(self, worker_id):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'workerID={0}'.format(worker_id),
method_name='get_worker_with_id')
tmpLog.debug('start')
# sql to get a worker
sqlG = "SELECT {0} FROM {1} ".format(WorkSpec.column_names(), workTableName)
sqlG += "WHERE workerID=:workerID "
# get a worker
varMap = dict()
varMap[':workerID'] = worker_id
self.execute(sqlG, varMap)
res = self.cur.fetchone()
workSpec = WorkSpec()
workSpec.pack(res)
# commit
self.commit()
tmpLog.debug('got')
return workSpec
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return None
# get jobs to trigger or check output transfer or zip output
def get_jobs_for_stage_out(self, max_jobs, interval_without_lock, interval_with_lock, locked_by,
sub_status, has_out_file_flag, bad_has_out_file_flag=None,
max_files_per_job=None):
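        """
        Get jobs whose output needs to be zipped, transferred, or checked, lock them
        with the stager lock, and return them with the relevant output files and
        associated workers attached.
        """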
try:
# get logger
msgPfx = 'thr={0}'.format(locked_by)
tmpLog = core_utils.make_logger(_logger, msgPfx, method_name='get_jobs_for_stage_out')
tmpLog.debug('start')
            # sql to get PandaIDs; FOR UPDATE is not used since it can cause deadlocks in MariaDB
sql = "SELECT PandaID FROM {0} ".format(jobTableName)
sql += "WHERE "
sql += "(subStatus=:subStatus OR hasOutFile=:hasOutFile) "
if bad_has_out_file_flag is not None:
sql += "AND (hasOutFile IS NULL OR hasOutFile<>:badHasOutFile) "
sql += "AND (stagerTime IS NULL "
sql += "OR (stagerTime<:lockTimeLimit AND stagerLock IS NOT NULL) "
sql += "OR (stagerTime<:updateTimeLimit AND stagerLock IS NULL) "
sql += ") "
sql += "ORDER BY stagerTime "
sql += "LIMIT {0} ".format(max_jobs)
# sql to lock job
sqlL = "UPDATE {0} SET stagerTime=:timeNow,stagerLock=:lockedBy ".format(jobTableName)
sqlL += "WHERE PandaID=:PandaID AND "
sqlL += "(subStatus=:subStatus OR hasOutFile=:hasOutFile) "
if bad_has_out_file_flag is not None:
sqlL += "AND (hasOutFile IS NULL OR hasOutFile<>:badHasOutFile) "
sqlL += "AND (stagerTime IS NULL "
sqlL += "OR (stagerTime<:lockTimeLimit AND stagerLock IS NOT NULL) "
sqlL += "OR (stagerTime<:updateTimeLimit AND stagerLock IS NULL) "
sqlL += ") "
# sql to get job
sqlJ = "SELECT {0} FROM {1} ".format(JobSpec.column_names(slim=True), jobTableName)
sqlJ += "WHERE PandaID=:PandaID "
# sql to get job parameters
sqlJJ = "SELECT jobParams FROM {0} ".format(jobTableName)
sqlJJ += "WHERE PandaID=:PandaID "
# sql to get files
sqlF = "SELECT {0} FROM {1} ".format(FileSpec.column_names(), fileTableName)
sqlF += "WHERE PandaID=:PandaID AND status=:status AND fileType<>:type "
if max_files_per_job is not None and max_files_per_job > 0:
sqlF += "LIMIT {0} ".format(max_files_per_job)
# sql to get associated files
sqlAF = "SELECT {0} FROM {1} ".format(FileSpec.column_names(), fileTableName)
sqlAF += "WHERE PandaID=:PandaID AND zipFileID=:zipFileID AND fileType<>:type "
# sql to increment attempt number
sqlFU = "UPDATE {0} SET attemptNr=attemptNr+1 WHERE fileID=:fileID ".format(fileTableName)
# get jobs
timeNow = datetime.datetime.utcnow()
lockTimeLimit = timeNow - datetime.timedelta(seconds=interval_with_lock)
updateTimeLimit = timeNow - datetime.timedelta(seconds=interval_without_lock)
varMap = dict()
varMap[':subStatus'] = sub_status
varMap[':hasOutFile'] = has_out_file_flag
if bad_has_out_file_flag is not None:
varMap[':badHasOutFile'] = bad_has_out_file_flag
varMap[':lockTimeLimit'] = lockTimeLimit
varMap[':updateTimeLimit'] = updateTimeLimit
self.execute(sql, varMap)
resList = self.cur.fetchall()
jobSpecList = []
for pandaID, in resList:
# lock job
varMap = dict()
varMap[':PandaID'] = pandaID
varMap[':timeNow'] = timeNow
varMap[':lockedBy'] = locked_by
varMap[':lockTimeLimit'] = lockTimeLimit
varMap[':updateTimeLimit'] = updateTimeLimit
varMap[':subStatus'] = sub_status
varMap[':hasOutFile'] = has_out_file_flag
if bad_has_out_file_flag is not None:
varMap[':badHasOutFile'] = bad_has_out_file_flag
self.execute(sqlL, varMap)
nRow = self.cur.rowcount
# commit
self.commit()
if nRow > 0:
# get job
varMap = dict()
varMap[':PandaID'] = pandaID
self.execute(sqlJ, varMap)
resJ = self.cur.fetchone()
# make job
jobSpec = JobSpec()
jobSpec.pack(resJ, slim=True)
jobSpec.stagerLock = locked_by
jobSpec.stagerTime = timeNow
# for old jobs without extractions
if jobSpec.jobParamsExtForLog is None:
varMap = dict()
varMap[':PandaID'] = pandaID
self.execute(sqlJJ, varMap)
resJJ = self.cur.fetchone()
jobSpec.set_blob_attribute('jobParams', resJJ[0])
jobSpec.get_output_file_attributes()
jobSpec.get_logfile_info()
# get files
varMap = dict()
varMap[':PandaID'] = jobSpec.PandaID
varMap[':type'] = 'input'
if has_out_file_flag == JobSpec.HO_hasOutput:
varMap[':status'] = 'defined'
elif has_out_file_flag == JobSpec.HO_hasZipOutput:
varMap[':status'] = 'zipping'
else:
varMap[':status'] = 'transferring'
self.execute(sqlF, varMap)
resFileList = self.cur.fetchall()
for resFile in resFileList:
fileSpec = FileSpec()
fileSpec.pack(resFile)
fileSpec.attemptNr += 1
jobSpec.add_out_file(fileSpec)
# increment attempt number
varMap = dict()
varMap[':fileID'] = fileSpec.fileID
self.execute(sqlFU, varMap)
jobSpecList.append(jobSpec)
# commit
if len(resFileList) > 0:
self.commit()
# get associated files
if has_out_file_flag == JobSpec.HO_hasZipOutput:
for fileSpec in jobSpec.outFiles:
varMap = dict()
varMap[':PandaID'] = fileSpec.PandaID
varMap[':zipFileID'] = fileSpec.fileID
varMap[':type'] = 'input'
self.execute(sqlAF, varMap)
resAFs = self.cur.fetchall()
for resAF in resAFs:
assFileSpec = FileSpec()
assFileSpec.pack(resAF)
fileSpec.add_associated_file(assFileSpec)
# get associated workers
tmpWorkers = self.get_workers_with_job_id(jobSpec.PandaID, use_commit=False)
jobSpec.add_workspec_list(tmpWorkers)
tmpLog.debug('got {0} jobs'.format(len(jobSpecList)))
return jobSpecList
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return []
# update job for stage-out
def update_job_for_stage_out(self, jobspec, update_event_status, locked_by):
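        """
        Update a job after a stage-out step: update its output files and, optionally,
        the corresponding event statuses, then recompute hasOutFile and subStatus from
        the remaining file states. Returns the new subStatus, or None when the job is
        locked by another thread.
        """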
try:
# get logger
tmpLog = core_utils.make_logger(_logger,
'PandaID={0} subStatus={1} thr={2}'.format(jobspec.PandaID,
jobspec.subStatus,
locked_by),
method_name='update_job_for_stage_out')
tmpLog.debug('start')
# sql to update event
sqlEU = "UPDATE {0} ".format(eventTableName)
sqlEU += "SET eventStatus=:eventStatus,subStatus=:subStatus "
sqlEU += "WHERE eventRangeID=:eventRangeID "
sqlEU += "AND eventStatus<>:statusFailed AND subStatus<>:statusDone "
# sql to update associated events
sqlAE = "UPDATE {0} ".format(eventTableName)
sqlAE += "SET eventStatus=:eventStatus,subStatus=:subStatus "
sqlAE += "WHERE eventRangeID IN "
sqlAE += "(SELECT eventRangeID FROM {0} ".format(fileTableName)
sqlAE += "WHERE PandaID=:PandaID AND zipFileID=:zipFileID) "
sqlAE += "AND eventStatus<>:statusFailed AND subStatus<>:statusDone "
# sql to lock job again
sqlLJ = "UPDATE {0} SET stagerTime=:timeNow ".format(jobTableName)
sqlLJ += "WHERE PandaID=:PandaID AND stagerLock=:lockedBy "
# sql to check lock
sqlLC = "SELECT stagerLock FROM {0} ".format(jobTableName)
sqlLC += "WHERE PandaID=:PandaID "
# lock
varMap = dict()
varMap[':PandaID'] = jobspec.PandaID
varMap[':lockedBy'] = locked_by
varMap[':timeNow'] = datetime.datetime.utcnow()
self.execute(sqlLJ, varMap)
nRow = self.cur.rowcount
# check just in case since nRow can be 0 if two lock actions are too close in time
if nRow == 0:
varMap = dict()
varMap[':PandaID'] = jobspec.PandaID
self.execute(sqlLC, varMap)
resLC = self.cur.fetchone()
if resLC is not None and resLC[0] == locked_by:
nRow = 1
# commit
self.commit()
if nRow == 0:
tmpLog.debug('skip since locked by another')
return None
# update files
tmpLog.debug('update {0} files'.format(len(jobspec.outFiles)))
for fileSpec in jobspec.outFiles:
# sql to update file
sqlF = "UPDATE {0} SET {1} ".format(fileTableName, fileSpec.bind_update_changes_expression())
sqlF += "WHERE PandaID=:PandaID AND fileID=:fileID "
varMap = fileSpec.values_map(only_changed=True)
updated = False
if len(varMap) > 0:
varMap[':PandaID'] = fileSpec.PandaID
varMap[':fileID'] = fileSpec.fileID
self.execute(sqlF, varMap)
updated = True
# update event status
if update_event_status:
if fileSpec.eventRangeID is not None:
varMap = dict()
varMap[':eventRangeID'] = fileSpec.eventRangeID
varMap[':eventStatus'] = fileSpec.status
varMap[':subStatus'] = fileSpec.status
varMap[':statusFailed'] = 'failed'
varMap[':statusDone'] = 'done'
self.execute(sqlEU, varMap)
updated = True
if fileSpec.isZip == 1:
# update files associated with zip file
varMap = dict()
varMap[':PandaID'] = fileSpec.PandaID
varMap[':zipFileID'] = fileSpec.fileID
varMap[':eventStatus'] = fileSpec.status
varMap[':subStatus'] = fileSpec.status
varMap[':statusFailed'] = 'failed'
varMap[':statusDone'] = 'done'
self.execute(sqlAE, varMap)
updated = True
nRow = self.cur.rowcount
tmpLog.debug('updated {0} events'.format(nRow))
if updated:
# lock job again
varMap = dict()
varMap[':PandaID'] = jobspec.PandaID
varMap[':lockedBy'] = locked_by
varMap[':timeNow'] = datetime.datetime.utcnow()
self.execute(sqlLJ, varMap)
# commit
self.commit()
nRow = self.cur.rowcount
if nRow == 0:
tmpLog.debug('skip since locked by another')
return None
# count files
sqlC = "SELECT COUNT(*) cnt,status FROM {0} ".format(fileTableName)
sqlC += "WHERE PandaID=:PandaID GROUP BY status "
varMap = dict()
varMap[':PandaID'] = jobspec.PandaID
self.execute(sqlC, varMap)
resC = self.cur.fetchall()
cntMap = {}
for cnt, fileStatus in resC:
cntMap[fileStatus] = cnt
# set job attributes
jobspec.stagerLock = None
if 'zipping' in cntMap:
jobspec.hasOutFile = JobSpec.HO_hasZipOutput
elif 'defined' in cntMap:
jobspec.hasOutFile = JobSpec.HO_hasOutput
elif 'transferring' in cntMap:
jobspec.hasOutFile = JobSpec.HO_hasTransfer
else:
jobspec.hasOutFile = JobSpec.HO_noOutput
if jobspec.subStatus == 'to_transfer':
# change subStatus when no more files to trigger transfer
if jobspec.hasOutFile not in [JobSpec.HO_hasOutput, JobSpec.HO_hasZipOutput]:
jobspec.subStatus = 'transferring'
jobspec.stagerTime = None
elif jobspec.subStatus == 'transferring':
# all done
if jobspec.hasOutFile == JobSpec.HO_noOutput:
jobspec.trigger_propagation()
if 'failed' in cntMap:
jobspec.status = 'failed'
jobspec.subStatus = 'failed_to_stage_out'
else:
jobspec.subStatus = 'staged'
# get finished files
jobspec.reset_out_file()
sqlFF = "SELECT {0} FROM {1} ".format(FileSpec.column_names(), fileTableName)
sqlFF += "WHERE PandaID=:PandaID AND status=:status AND fileType IN (:type1,:type2) "
varMap = dict()
varMap[':PandaID'] = jobspec.PandaID
varMap[':status'] = 'finished'
varMap[':type1'] = 'output'
varMap[':type2'] = 'log'
self.execute(sqlFF, varMap)
resFileList = self.cur.fetchall()
for resFile in resFileList:
fileSpec = FileSpec()
fileSpec.pack(resFile)
jobspec.add_out_file(fileSpec)
# make file report
jobspec.outputFilesToReport = core_utils.get_output_file_report(jobspec)
# sql to update job
sqlJ = "UPDATE {0} SET {1} ".format(jobTableName, jobspec.bind_update_changes_expression())
sqlJ += "WHERE PandaID=:PandaID AND stagerLock=:lockedBy "
# update job
varMap = jobspec.values_map(only_changed=True)
varMap[':PandaID'] = jobspec.PandaID
varMap[':lockedBy'] = locked_by
self.execute(sqlJ, varMap)
# commit
self.commit()
tmpLog.debug('done')
# return
return jobspec.subStatus
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return None
# add a seq number
def add_seq_number(self, number_name, init_value):
try:
# check if already there
sqlC = "SELECT curVal FROM {0} WHERE numberName=:numberName ".format(seqNumberTableName)
varMap = dict()
varMap[':numberName'] = number_name
self.execute(sqlC, varMap)
res = self.cur.fetchone()
# insert if missing
if res is None:
# make spec
seqNumberSpec = SeqNumberSpec()
seqNumberSpec.numberName = number_name
seqNumberSpec.curVal = init_value
# insert
sqlI = "INSERT INTO {0} ({1}) ".format(seqNumberTableName, SeqNumberSpec.column_names())
sqlI += SeqNumberSpec.bind_values_expression()
varMap = seqNumberSpec.values_list()
self.execute(sqlI, varMap)
# commit
self.commit()
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return False
# get next value for a seq number
def get_next_seq_number(self, number_name):
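        """
        Increment a named sequence number and return the new value.
        """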
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'name={0}'.format(number_name),
method_name='get_next_seq_number')
# increment
sqlU = "UPDATE {0} SET curVal=curVal+1 WHERE numberName=:numberName ".format(seqNumberTableName)
varMap = dict()
varMap[':numberName'] = number_name
self.execute(sqlU, varMap)
# get
sqlG = "SELECT curVal FROM {0} WHERE numberName=:numberName ".format(seqNumberTableName)
varMap = dict()
varMap[':numberName'] = number_name
self.execute(sqlG, varMap)
retVal, = self.cur.fetchone()
# commit
self.commit()
tmpLog.debug('got {0}'.format(retVal))
return retVal
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return None
# get last update time for a cached info
def get_cache_last_update_time(self, main_key, sub_key):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'mainKey={0} subKey={1}'.format(main_key, sub_key),
method_name='get_cache_last_update_time')
# get
varMap = dict()
varMap[":mainKey"] = main_key
sqlU = "SELECT lastUpdate FROM {0} WHERE mainKey=:mainKey ".format(cacheTableName)
if sub_key is not None:
sqlU += "AND subKey=:subKey "
varMap[":subKey"] = sub_key
self.execute(sqlU, varMap)
retVal = self.cur.fetchone()
if retVal is not None:
retVal, = retVal
# commit
self.commit()
tmpLog.debug('got {0}'.format(retVal))
return retVal
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return None
# refresh a cached info
def refresh_cache(self, main_key, sub_key, new_info):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'mainKey={0} subKey={1}'.format(main_key, sub_key),
method_name='refresh_cache')
# make spec
cacheSpec = CacheSpec()
cacheSpec.lastUpdate = datetime.datetime.utcnow()
cacheSpec.data = new_info
# check if already there
varMap = dict()
varMap[":mainKey"] = main_key
sqlC = "SELECT lastUpdate FROM {0} WHERE mainKey=:mainKey ".format(cacheTableName)
if sub_key is not None:
sqlC += "AND subKey=:subKey "
varMap[":subKey"] = sub_key
self.execute(sqlC, varMap)
retC = self.cur.fetchone()
if retC is None:
# insert if missing
cacheSpec.mainKey = main_key
cacheSpec.subKey = sub_key
sqlU = "INSERT INTO {0} ({1}) ".format(cacheTableName, CacheSpec.column_names())
sqlU += CacheSpec.bind_values_expression()
varMap = cacheSpec.values_list()
else:
# update
sqlU = "UPDATE {0} SET {1} ".format(cacheTableName, cacheSpec.bind_update_changes_expression())
sqlU += "WHERE mainKey=:mainKey "
varMap = cacheSpec.values_map(only_changed=True)
varMap[":mainKey"] = main_key
if sub_key is not None:
sqlU += "AND subKey=:subKey "
varMap[":subKey"] = sub_key
self.execute(sqlU, varMap)
# commit
self.commit()
# put into global dict
cacheKey = 'cache|{0}|{1}'.format(main_key, sub_key)
globalDict = core_utils.get_global_dict()
globalDict.acquire()
globalDict[cacheKey] = cacheSpec.data
globalDict.release()
tmpLog.debug('refreshed')
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return False
# get a cached info
def get_cache(self, main_key, sub_key=None):
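        """
        Get a cached info object, reading from the in-memory global dictionary when
        available and falling back to the database, whose result is then put into
        the global dictionary.
        """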
useDB = False
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'mainKey={0} subKey={1}'.format(main_key, sub_key),
method_name='get_cache')
tmpLog.debug('start')
# get from global dict
cacheKey = 'cache|{0}|{1}'.format(main_key, sub_key)
globalDict = core_utils.get_global_dict()
# lock dict
globalDict.acquire()
# found
if cacheKey in globalDict:
# release dict
globalDict.release()
# make spec
cacheSpec = CacheSpec()
cacheSpec.data = globalDict[cacheKey]
else:
# read from database
useDB = True
sql = "SELECT {0} FROM {1} ".format(CacheSpec.column_names(), cacheTableName)
sql += "WHERE mainKey=:mainKey "
varMap = dict()
varMap[":mainKey"] = main_key
if sub_key is not None:
sql += "AND subKey=:subKey "
varMap[":subKey"] = sub_key
self.execute(sql, varMap)
resJ = self.cur.fetchone()
# commit
self.commit()
if resJ is None:
# release dict
globalDict.release()
return None
# make spec
cacheSpec = CacheSpec()
cacheSpec.pack(resJ)
# put into global dict
globalDict[cacheKey] = cacheSpec.data
# release dict
globalDict.release()
tmpLog.debug('done')
# return
return cacheSpec
except Exception:
if useDB:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return None
# store commands
def store_commands(self, command_specs):
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='store_commands')
tmpLog.debug('{0} commands'.format(len(command_specs)))
if not command_specs:
return True
try:
# sql to insert a command
sql = "INSERT INTO {0} ({1}) ".format(commandTableName, CommandSpec.column_names())
sql += CommandSpec.bind_values_expression()
# loop over all commands
var_maps = []
for command_spec in command_specs:
var_map = command_spec.values_list()
var_maps.append(var_map)
# insert
self.executemany(sql, var_maps)
# commit
self.commit()
# return
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(tmpLog)
# return
return False
# get commands for a receiver
def get_commands_for_receiver(self, receiver, command_pattern=None):
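        """
        Get unprocessed commands for a receiver, optionally filtered by an exact
        command string or a LIKE pattern, and mark them as processed.
        """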
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='get_commands_for_receiver')
tmpLog.debug('start')
# sql to get commands
varMap = dict()
varMap[':receiver'] = receiver
varMap[':processed'] = 0
sqlG = "SELECT {0} FROM {1} ".format(CommandSpec.column_names(), commandTableName)
sqlG += "WHERE receiver=:receiver AND processed=:processed "
if command_pattern is not None:
varMap[':command'] = command_pattern
if '%' in command_pattern:
sqlG += "AND command LIKE :command "
else:
sqlG += "AND command=:command "
sqlG += "FOR UPDATE "
# sql to lock command
sqlL = "UPDATE {0} SET processed=:processed WHERE command_id=:command_id ".format(commandTableName)
self.execute(sqlG, varMap)
commandSpecList = []
for res in self.cur.fetchall():
# make command
commandSpec = CommandSpec()
commandSpec.pack(res)
# lock
varMap = dict()
varMap[':command_id'] = commandSpec.command_id
varMap[':processed'] = 1
self.execute(sqlL, varMap)
# append
commandSpecList.append(commandSpec)
# commit
self.commit()
tmpLog.debug('got {0} commands'.format(len(commandSpecList)))
return commandSpecList
except Exception:
# dump error
core_utils.dump_error_message(_logger)
# return
return []
# get command ids that have been processed and need to be acknowledged to panda server
def get_commands_ack(self):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='get_commands_ack')
tmpLog.debug('start')
# sql to get commands that have been processed and need acknowledgement
sql = """
SELECT command_id FROM {0}
WHERE ack_requested=1
AND processed=1
""".format(commandTableName)
self.execute(sql)
command_ids = [row[0] for row in self.cur.fetchall()]
tmpLog.debug('command_ids {0}'.format(command_ids))
return command_ids
except Exception:
# dump error
core_utils.dump_error_message(_logger)
# return
return []
def clean_commands_by_id(self, commands_ids):
"""
Deletes the commands specified in a list of IDs
"""
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='clean_commands_by_id')
try:
# sql to delete a specific command
sql = """
DELETE FROM {0}
WHERE command_id=:command_id""".format(commandTableName)
for command_id in commands_ids:
var_map = {':command_id': command_id}
self.execute(sql, var_map)
self.commit()
return True
except Exception:
self.rollback()
core_utils.dump_error_message(tmpLog)
return False
def clean_processed_commands(self):
"""
Deletes the commands that have been processed and do not need acknowledgement
"""
tmpLog = core_utils.make_logger(_logger, method_name='clean_processed_commands')
try:
# sql to delete all processed commands that do not need an ACK
sql = """
DELETE FROM {0}
WHERE (ack_requested=0 AND processed=1)
""".format(commandTableName)
self.execute(sql)
self.commit()
return True
except Exception:
self.rollback()
core_utils.dump_error_message(tmpLog)
return False
# get workers to kill
def get_workers_to_kill(self, max_workers, check_interval):
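        """
        Get workers whose killTime has passed, grouped by queue name and configID.
        Workers already in a final state are released by clearing killTime.
        """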
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='get_workers_to_kill')
tmpLog.debug('start')
# sql to get worker IDs
sqlW = "SELECT workerID,status,configID FROM {0} ".format(workTableName)
sqlW += "WHERE killTime IS NOT NULL AND killTime<:checkTimeLimit "
sqlW += "ORDER BY killTime LIMIT {0} ".format(max_workers)
# sql to lock or release worker
sqlL = "UPDATE {0} SET killTime=:setTime ".format(workTableName)
sqlL += "WHERE workerID=:workerID "
sqlL += "AND killTime IS NOT NULL AND killTime<:checkTimeLimit "
# sql to get workers
sqlG = "SELECT {0} FROM {1} ".format(WorkSpec.column_names(), workTableName)
sqlG += "WHERE workerID=:workerID "
timeNow = datetime.datetime.utcnow()
timeLimit = timeNow - datetime.timedelta(seconds=check_interval)
# get workerIDs
varMap = dict()
varMap[':checkTimeLimit'] = timeLimit
self.execute(sqlW, varMap)
resW = self.cur.fetchall()
retVal = dict()
for workerID, workerStatus, configID in resW:
# ignore configID
if not core_utils.dynamic_plugin_change():
configID = None
# lock or release worker
varMap = dict()
varMap[':workerID'] = workerID
varMap[':checkTimeLimit'] = timeLimit
if workerStatus in (WorkSpec.ST_cancelled, WorkSpec.ST_failed, WorkSpec.ST_finished):
# release
varMap[':setTime'] = None
else:
# lock
varMap[':setTime'] = timeNow
self.execute(sqlL, varMap)
# get worker
nRow = self.cur.rowcount
if nRow == 1 and varMap[':setTime'] is not None:
varMap = dict()
varMap[':workerID'] = workerID
self.execute(sqlG, varMap)
resG = self.cur.fetchone()
workSpec = WorkSpec()
workSpec.pack(resG)
queueName = workSpec.computingSite
retVal.setdefault(queueName, dict())
retVal[queueName].setdefault(configID, [])
retVal[queueName][configID].append(workSpec)
# commit
self.commit()
tmpLog.debug('got {0} workers'.format(len(retVal)))
return retVal
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return {}
# get worker stats
def get_worker_stats(self, site_name):
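        """
        Get running/submitted worker counts per resource type for a site, together
        with the number of new workers to submit taken from the queue table.
        """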
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='get_worker_stats')
tmpLog.debug('start')
# sql to get nQueueLimit
sqlQ = "SELECT queueName,resourceType,nNewWorkers FROM {0} ".format(pandaQueueTableName)
sqlQ += "WHERE siteName=:siteName "
# get nQueueLimit
varMap = dict()
varMap[':siteName'] = site_name
self.execute(sqlQ, varMap)
resQ = self.cur.fetchall()
retMap = dict()
for computingSite, resourceType, nNewWorkers in resQ:
if resourceType not in retMap:
retMap[resourceType] = {
'running': 0,
'submitted': 0,
'to_submit': nNewWorkers
}
# get worker stats
sqlW = "SELECT wt.status, wt.computingSite, pq.resourceType, COUNT(*) cnt "
sqlW += "FROM {0} wt, {1} pq ".format(workTableName, pandaQueueTableName)
sqlW += "WHERE pq.siteName=:siteName AND wt.computingSite=pq.queueName AND wt.status IN (:st1,:st2) "
sqlW += "GROUP BY wt.status, wt.computingSite, pq.resourceType "
# get worker stats
varMap = dict()
varMap[':siteName'] = site_name
varMap[':st1'] = 'running'
varMap[':st2'] = 'submitted'
self.execute(sqlW, varMap)
resW = self.cur.fetchall()
for workerStatus, computingSite, resourceType, cnt in resW:
if resourceType not in retMap:
retMap[resourceType] = {
'running': 0,
'submitted': 0,
'to_submit': 0
}
retMap[resourceType][workerStatus] = cnt
# commit
self.commit()
tmpLog.debug('got {0}'.format(str(retMap)))
return retMap
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return {}
# get worker stats
def get_worker_stats_bulk(self, active_ups_queues):
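        """
        Get running/submitted worker counts per queue and resource type for all queues,
        initializing empty entries for active UPS queues so that pilot streaming starts
        processing them.
        """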
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='get_worker_stats_bulk')
tmpLog.debug('start')
# sql to get nQueueLimit
sqlQ = "SELECT queueName, resourceType, nNewWorkers FROM {0} ".format(pandaQueueTableName)
# get nQueueLimit
self.execute(sqlQ)
resQ = self.cur.fetchall()
retMap = dict()
for computingSite, resourceType, nNewWorkers in resQ:
retMap.setdefault(computingSite, {})
if resourceType and resourceType != 'ANY' and resourceType not in retMap[computingSite]:
retMap[computingSite][resourceType] = {'running': 0, 'submitted': 0, 'to_submit': nNewWorkers}
# get worker stats
sqlW = "SELECT wt.status, wt.computingSite, wt.resourceType, COUNT(*) cnt "
sqlW += "FROM {0} wt ".format(workTableName)
sqlW += "WHERE wt.status IN (:st1,:st2) "
sqlW += "GROUP BY wt.status,wt.computingSite, wt.resourceType "
# get worker stats
varMap = dict()
varMap[':st1'] = 'running'
varMap[':st2'] = 'submitted'
self.execute(sqlW, varMap)
resW = self.cur.fetchall()
for workerStatus, computingSite, resourceType, cnt in resW:
if resourceType and resourceType != 'ANY':
retMap.setdefault(computingSite, {})
retMap[computingSite].setdefault(resourceType, {'running': 0, 'submitted': 0, 'to_submit': 0})
retMap[computingSite][resourceType][workerStatus] = cnt
# if there are no jobs for an active UPS queue, it needs to be initialized so that the pilot streaming
# on panda server starts processing the queue
if active_ups_queues:
for ups_queue in active_ups_queues:
if ups_queue not in retMap or not retMap[ups_queue]:
retMap[ups_queue] = {'SCORE': {'running': 0, 'submitted': 0, 'to_submit': 0}}
# commit
self.commit()
tmpLog.debug('got {0}'.format(str(retMap)))
return retMap
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return {}
# send kill command to workers associated to a job
def kill_workers_with_job(self, panda_id):
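        """
        Request killing of all non-final workers associated with a PanDA job by setting
        their killTime in the past so that the sweeper picks them up.
        """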
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'PandaID={0}'.format(panda_id),
method_name='kill_workers_with_job')
tmpLog.debug('start')
# sql to set killTime
sqlL = "UPDATE {0} SET killTime=:setTime ".format(workTableName)
sqlL += "WHERE workerID=:workerID AND killTime IS NULL AND NOT status IN (:st1,:st2,:st3) "
# sql to get associated workers
sqlA = "SELECT workerID FROM {0} ".format(jobWorkerTableName)
sqlA += "WHERE PandaID=:pandaID "
# set an older time to trigger sweeper
setTime = datetime.datetime.utcnow() - datetime.timedelta(hours=6)
# get workers
varMap = dict()
varMap[':pandaID'] = panda_id
self.execute(sqlA, varMap)
resA = self.cur.fetchall()
nRow = 0
for workerID, in resA:
# set killTime
varMap = dict()
varMap[':workerID'] = workerID
varMap[':setTime'] = setTime
varMap[':st1'] = WorkSpec.ST_finished
varMap[':st2'] = WorkSpec.ST_failed
varMap[':st3'] = WorkSpec.ST_cancelled
self.execute(sqlL, varMap)
nRow += self.cur.rowcount
# commit
self.commit()
tmpLog.debug('set killTime to {0} workers'.format(nRow))
return nRow
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return None
# send kill command to a worker
def kill_worker(self, worker_id):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'workerID={0}'.format(worker_id),
method_name='kill_worker')
tmpLog.debug('start')
# sql to set killTime
sqlL = "UPDATE {0} SET killTime=:setTime ".format(workTableName)
sqlL += "WHERE workerID=:workerID AND killTime IS NULL AND NOT status IN (:st1,:st2,:st3) "
# set an older time to trigger sweeper
setTime = datetime.datetime.utcnow() - datetime.timedelta(hours=6)
# set killTime
varMap = dict()
varMap[':workerID'] = worker_id
varMap[':setTime'] = setTime
varMap[':st1'] = WorkSpec.ST_finished
varMap[':st2'] = WorkSpec.ST_failed
varMap[':st3'] = WorkSpec.ST_cancelled
self.execute(sqlL, varMap)
nRow = self.cur.rowcount
# commit
self.commit()
tmpLog.debug('set killTime with {0}'.format(nRow))
return nRow
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return None
# get workers for cleanup
def get_workers_for_cleanup(self, max_workers, status_timeout_map):
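        """
        Get workers that exceeded the per-status timeout and have no jobs still waiting
        to be propagated, together with their jobs and the files that can be deleted,
        grouped by queue name and configID.
        """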
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='get_workers_for_cleanup')
tmpLog.debug('start')
# sql to get worker IDs
timeNow = datetime.datetime.utcnow()
modTimeLimit = timeNow - datetime.timedelta(minutes=60)
varMap = dict()
varMap[':timeLimit'] = modTimeLimit
sqlW = "SELECT workerID, configID FROM {0} ".format(workTableName)
sqlW += "WHERE lastUpdate IS NULL AND ("
for tmpStatus, tmpTimeout in iteritems(status_timeout_map):
tmpStatusKey = ':status_{0}'.format(tmpStatus)
tmpTimeoutKey = ':timeLimit_{0}'.format(tmpStatus)
sqlW += '(status={0} AND endTime<={1}) OR '.format(tmpStatusKey, tmpTimeoutKey)
varMap[tmpStatusKey] = tmpStatus
varMap[tmpTimeoutKey] = timeNow - datetime.timedelta(hours=tmpTimeout)
sqlW = sqlW[:-4]
sqlW += ') '
sqlW += 'AND modificationTime<:timeLimit '
sqlW += "ORDER BY modificationTime LIMIT {0} ".format(max_workers)
# sql to lock or release worker
sqlL = "UPDATE {0} SET modificationTime=:setTime ".format(workTableName)
sqlL += "WHERE workerID=:workerID AND modificationTime<:timeLimit "
# sql to check associated jobs
sqlA = "SELECT COUNT(*) cnt FROM {0} j, {1} r ".format(jobTableName, jobWorkerTableName)
sqlA += "WHERE j.PandaID=r.PandaID AND r.workerID=:workerID "
sqlA += "AND propagatorTime IS NOT NULL "
# sql to get workers
sqlG = "SELECT {0} FROM {1} ".format(WorkSpec.column_names(), workTableName)
sqlG += "WHERE workerID=:workerID "
# sql to get PandaIDs
sqlP = "SELECT j.PandaID FROM {0} j, {1} r ".format(jobTableName, jobWorkerTableName)
sqlP += "WHERE j.PandaID=r.PandaID AND r.workerID=:workerID "
# sql to get jobs
sqlJ = "SELECT {0} FROM {1} ".format(JobSpec.column_names(), jobTableName)
sqlJ += "WHERE PandaID=:PandaID "
# sql to get files
sqlF = "SELECT {0} FROM {1} ".format(FileSpec.column_names(), fileTableName)
sqlF += "WHERE PandaID=:PandaID "
            # sql to get files not to be deleted; the todelete flag is checked in Python so that the index on b.lfn is used
sqlD = "SELECT b.lfn,b.todelete FROM {0} a, {0} b ".format(fileTableName)
sqlD += "WHERE a.PandaID=:PandaID AND a.fileType=:fileType AND b.lfn=a.lfn "
# get workerIDs
timeNow = datetime.datetime.utcnow()
self.execute(sqlW, varMap)
resW = self.cur.fetchall()
retVal = dict()
iWorkers = 0
for workerID, configID in resW:
# lock worker
varMap = dict()
varMap[':workerID'] = workerID
varMap[':setTime'] = timeNow
varMap[':timeLimit'] = modTimeLimit
self.execute(sqlL, varMap)
# commit
self.commit()
if self.cur.rowcount == 0:
continue
# ignore configID
if not core_utils.dynamic_plugin_change():
configID = None
# check associated jobs
varMap = dict()
varMap[':workerID'] = workerID
self.execute(sqlA, varMap)
nActJobs, = self.cur.fetchone()
# cleanup when there is no active job
if nActJobs == 0:
# get worker
varMap = dict()
varMap[':workerID'] = workerID
self.execute(sqlG, varMap)
resG = self.cur.fetchone()
workSpec = WorkSpec()
workSpec.pack(resG)
queueName = workSpec.computingSite
retVal.setdefault(queueName, dict())
retVal[queueName].setdefault(configID, [])
retVal[queueName][configID].append(workSpec)
# get jobs
jobSpecs = []
checkedLFNs = set()
keepLFNs = set()
varMap = dict()
varMap[':workerID'] = workerID
self.execute(sqlP, varMap)
resP = self.cur.fetchall()
for pandaID, in resP:
varMap = dict()
varMap[':PandaID'] = pandaID
self.execute(sqlJ, varMap)
resJ = self.cur.fetchone()
jobSpec = JobSpec()
jobSpec.pack(resJ)
jobSpecs.append(jobSpec)
# get LFNs not to be deleted
varMap = dict()
varMap[':PandaID'] = pandaID
varMap[':fileType'] = 'input'
self.execute(sqlD, varMap)
resDs = self.cur.fetchall()
for tmpLFN, tmpTodelete in resDs:
if tmpTodelete == 0:
keepLFNs.add(tmpLFN)
# get files to be deleted
varMap = dict()
varMap[':PandaID'] = jobSpec.PandaID
self.execute(sqlF, varMap)
resFs = self.cur.fetchall()
for resF in resFs:
fileSpec = FileSpec()
fileSpec.pack(resF)
# skip if already checked
if fileSpec.lfn in checkedLFNs:
continue
checkedLFNs.add(fileSpec.lfn)
# check if it is ready to delete
if fileSpec.lfn not in keepLFNs:
jobSpec.add_file(fileSpec)
workSpec.set_jobspec_list(jobSpecs)
iWorkers += 1
tmpLog.debug('got {0} workers'.format(iWorkers))
return retVal
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return {}
# delete a worker
def delete_worker(self, worker_id):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'workerID={0}'.format(worker_id),
method_name='delete_worker')
tmpLog.debug('start')
# sql to get jobs
sqlJ = "SELECT PandaID FROM {0} ".format(jobWorkerTableName)
sqlJ += "WHERE workerID=:workerID "
# sql to delete job
sqlDJ = "DELETE FROM {0} ".format(jobTableName)
sqlDJ += "WHERE PandaID=:PandaID "
# sql to delete files
sqlDF = "DELETE FROM {0} ".format(fileTableName)
sqlDF += "WHERE PandaID=:PandaID "
# sql to delete events
sqlDE = "DELETE FROM {0} ".format(eventTableName)
sqlDE += "WHERE PandaID=:PandaID "
# sql to delete relations
sqlDR = "DELETE FROM {0} ".format(jobWorkerTableName)
sqlDR += "WHERE PandaID=:PandaID "
# sql to delete worker
sqlDW = "DELETE FROM {0} ".format(workTableName)
sqlDW += "WHERE workerID=:workerID "
# get jobs
varMap = dict()
varMap[':workerID'] = worker_id
self.execute(sqlJ, varMap)
resJ = self.cur.fetchall()
for pandaID, in resJ:
varMap = dict()
varMap[':PandaID'] = pandaID
# delete job
self.execute(sqlDJ, varMap)
# delete files
self.execute(sqlDF, varMap)
# delete events
self.execute(sqlDE, varMap)
# delete relations
self.execute(sqlDR, varMap)
# delete worker
varMap = dict()
varMap[':workerID'] = worker_id
self.execute(sqlDW, varMap)
# commit
self.commit()
tmpLog.debug('done')
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return False
# release jobs
def release_jobs(self, panda_ids, locked_by):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='release_jobs')
tmpLog.debug('start for {0} jobs'.format(len(panda_ids)))
# sql to release job
sql = "UPDATE {0} SET lockedBy=NULL ".format(jobTableName)
sql += "WHERE PandaID=:pandaID AND lockedBy=:lockedBy "
nJobs = 0
for pandaID in panda_ids:
varMap = dict()
varMap[':pandaID'] = pandaID
varMap[':lockedBy'] = locked_by
self.execute(sql, varMap)
if self.cur.rowcount > 0:
nJobs += 1
# commit
self.commit()
tmpLog.debug('released {0} jobs'.format(nJobs))
# return
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return False
# clone queue
def clone_queue_with_new_resource_type(self, site_name, queue_name, resource_type, new_workers):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'site_name={0} queue_name={1}'.format(site_name, queue_name),
method_name='clone_queue_with_new_resource_type')
tmpLog.debug('start')
# get the values from one of the existing queues
sql_select_queue = "SELECT {0} FROM {1} ".format(PandaQueueSpec.column_names(), pandaQueueTableName)
sql_select_queue += "WHERE siteName=:siteName "
var_map = dict()
var_map[':siteName'] = site_name
self.execute(sql_select_queue, var_map)
queue = self.cur.fetchone()
if queue: # a queue to clone was found
var_map = {}
attribute_list = []
attr_binding_list = []
for attribute, value in zip(PandaQueueSpec.column_names().split(','), queue):
attr_binding = ':{0}'.format(attribute)
if attribute == 'resourceType':
var_map[attr_binding] = resource_type
elif attribute == 'nNewWorkers':
var_map[attr_binding] = new_workers
elif attribute == 'uniqueName':
var_map[attr_binding] = core_utils.get_unique_queue_name(queue_name, resource_type)
else:
var_map[attr_binding] = value
attribute_list.append(attribute)
attr_binding_list.append(attr_binding)
sql_insert = "INSERT IGNORE INTO {0} ({1}) ".format(pandaQueueTableName, ','.join(attribute_list))
sql_values = "VALUES ({0}) ".format(','.join(attr_binding_list))
self.execute(sql_insert + sql_values, var_map)
else:
tmpLog.debug("Failed to clone the queue")
self.commit()
return True
except Exception:
self.rollback()
core_utils.dump_error_message(_logger)
return False
# set queue limit
def set_queue_limit(self, site_name, params):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'siteName={0}'.format(site_name), method_name='set_queue_limit')
tmpLog.debug('start')
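            # params is expected to map a resource type to its new worker count,
            # e.g. {'SCORE': 10} (illustrative values only)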
            # sql to reset queue limits before setting new values, so that stale values are not applied over and over
sql_reset = "UPDATE {0} ".format(pandaQueueTableName)
sql_reset += "SET nNewWorkers=:zero WHERE siteName=:siteName "
# sql to get resource types
sql_get_resource = "SELECT resourceType FROM {0} ".format(pandaQueueTableName)
sql_get_resource += "WHERE siteName=:siteName "
sql_get_resource += "FOR UPDATE "
# sql to update nQueueLimit
sql_update_queue = "UPDATE {0} ".format(pandaQueueTableName)
sql_update_queue += "SET nNewWorkers=:nQueue WHERE siteName=:siteName AND resourceType=:resourceType "
# sql to get num of submitted workers
sql_count_workers = "SELECT COUNT(*) cnt "
sql_count_workers += "FROM {0} wt, {1} pq ".format(workTableName, pandaQueueTableName)
sql_count_workers += "WHERE pq.siteName=:siteName AND wt.computingSite=pq.queueName AND wt.status=:status "
            sql_count_workers += "AND pq.resourceType=:resourceType "
            # reset nNewWorkers for all resource types
varMap = dict()
varMap[':zero'] = 0
varMap[':siteName'] = site_name
self.execute(sql_reset, varMap)
# get resource types
varMap = dict()
varMap[':siteName'] = site_name
self.execute(sql_get_resource, varMap)
resRes = self.cur.fetchall()
resource_type_list = set()
for tmpRes, in resRes:
resource_type_list.add(tmpRes)
# set all queues
nUp = 0
retMap = dict()
queue_name = site_name
for resource_type, value in iteritems(params):
tmpLog.debug('Processing rt {0} -> {1}'.format(resource_type, value))
# get num of submitted workers
varMap = dict()
varMap[':siteName'] = site_name
varMap[':resourceType'] = resource_type
varMap[':status'] = 'submitted'
self.execute(sql_count_workers, varMap)
res = self.cur.fetchone()
tmpLog.debug('{0} has {1} submitted workers'.format(resource_type, res))
nSubmittedWorkers = 0
if res is not None:
nSubmittedWorkers, = res
# set new value
# value = max(value - nSubmittedWorkers, 0)
if value is None:
value = 0
varMap = dict()
varMap[':nQueue'] = value
varMap[':siteName'] = site_name
varMap[':resourceType'] = resource_type
self.execute(sql_update_queue, varMap)
iUp = self.cur.rowcount
# iUp is 0 when nQueue is not changed
if iUp > 0 or resource_type in resource_type_list:
# a queue was updated, add the values to the map
retMap[resource_type] = value
else:
# no queue was updated, we need to create a new one for the resource type
cloned = self.clone_queue_with_new_resource_type(site_name, queue_name, resource_type, value)
if cloned:
retMap[resource_type] = value
iUp = 1
nUp += iUp
tmpLog.debug('set nNewWorkers={0} to {1}:{2} with {3}'.format(value, queue_name, resource_type, iUp))
# commit
self.commit()
tmpLog.debug('updated {0} queues'.format(nUp))
return retMap
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return {}
    # get the number of missed workers
def get_num_missed_workers(self, queue_name, criteria):
try:
# get logger
            tmpLog = core_utils.make_logger(_logger, "queue={0}".format(queue_name),
method_name='get_num_missed_workers')
tmpLog.debug('start')
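            # criteria may include 'timeLimit', 'siteName', 'computingSite' or 'computingElement';
            # see the branches below for how each key is translated into the query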
# get worker stats
sqlW = "SELECT COUNT(*) cnt "
sqlW += "FROM {0} wt, {1} pq ".format(workTableName, pandaQueueTableName)
sqlW += "WHERE wt.computingSite=pq.queueName AND wt.status=:status "
# get worker stats
varMap = dict()
for attr, val in iteritems(criteria):
if attr == 'timeLimit':
sqlW += "AND wt.submitTime>:timeLimit "
varMap[':timeLimit'] = val
elif attr in ['siteName']:
sqlW += "AND pq.{0}=:{0} ".format(attr)
varMap[':{0}'.format(attr)] = val
elif attr in ['computingSite', 'computingElement']:
sqlW += "AND wt.{0}=:{0} ".format(attr)
varMap[':{0}'.format(attr)] = val
varMap[':status'] = 'missed'
self.execute(sqlW, varMap)
resW = self.cur.fetchone()
if resW is None:
nMissed = 0
else:
nMissed, = resW
# commit
self.commit()
tmpLog.debug('got nMissed={0} for {1}'.format(nMissed, str(criteria)))
return nMissed
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return 0
    # get workers associated with a job
def get_workers_with_job_id(self, panda_id, use_commit=True):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'pandaID={0}'.format(panda_id),
method_name='get_workers_with_job_id')
tmpLog.debug('start')
# sql to get workerIDs
sqlW = "SELECT workerID FROM {0} WHERE PandaID=:PandaID ".format(jobWorkerTableName)
sqlW += "ORDER BY workerID "
# sql to get a worker
sqlG = "SELECT {0} FROM {1} ".format(WorkSpec.column_names(slim=True), workTableName)
sqlG += "WHERE workerID=:workerID "
# get workerIDs
varMap = dict()
varMap[':PandaID'] = panda_id
self.execute(sqlW, varMap)
retList = []
for worker_id, in self.cur.fetchall():
# get a worker
varMap = dict()
varMap[':workerID'] = worker_id
self.execute(sqlG, varMap)
res = self.cur.fetchone()
workSpec = WorkSpec()
workSpec.pack(res, slim=True)
retList.append(workSpec)
# commit
if use_commit:
self.commit()
tmpLog.debug('got {0} workers'.format(len(retList)))
return retList
except Exception:
# roll back
if use_commit:
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return []
    # delete all process locks
def clean_process_locks(self):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='clean_process_locks')
tmpLog.debug('start')
# delete locks
sqlW = "DELETE FROM {0} ".format(processLockTableName)
            # execute deletion
self.execute(sqlW)
# commit
self.commit()
tmpLog.debug('done')
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return False
# get a process lock
def get_process_lock(self, process_name, locked_by, lock_interval):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, "proc={0} by={1}".format(process_name, locked_by),
method_name='get_process_lock')
tmpLog.debug('start')
# delete old lock
sqlD = "DELETE FROM {0} ".format(processLockTableName)
sqlD += "WHERE lockTime<:timeLimit "
varMap = dict()
varMap[':timeLimit'] = datetime.datetime.utcnow() - datetime.timedelta(hours=6)
self.execute(sqlD, varMap)
# commit
self.commit()
# check lock
sqlC = "SELECT lockTime FROM {0} ".format(processLockTableName)
sqlC += "WHERE processName=:processName "
varMap = dict()
varMap[':processName'] = process_name
self.execute(sqlC, varMap)
resC = self.cur.fetchone()
retVal = False
timeNow = datetime.datetime.utcnow()
if resC is None:
# insert lock if missing
sqlI = "INSERT INTO {0} ({1}) ".format(processLockTableName, ProcessLockSpec.column_names())
sqlI += ProcessLockSpec.bind_values_expression()
processLockSpec = ProcessLockSpec()
processLockSpec.processName = process_name
processLockSpec.lockedBy = locked_by
processLockSpec.lockTime = timeNow
varMap = processLockSpec.values_list()
self.execute(sqlI, varMap)
retVal = True
else:
oldLockTime, = resC
timeLimit = timeNow - datetime.timedelta(seconds=lock_interval)
if oldLockTime <= timeLimit:
# update lock if old
sqlU = "UPDATE {0} SET lockedBy=:lockedBy,lockTime=:timeNow ".format(processLockTableName)
sqlU += "WHERE processName=:processName AND lockTime<=:timeLimit "
varMap = dict()
varMap[':processName'] = process_name
varMap[':lockedBy'] = locked_by
varMap[':timeLimit'] = timeLimit
varMap[':timeNow'] = timeNow
self.execute(sqlU, varMap)
if self.cur.rowcount > 0:
retVal = True
# commit
self.commit()
tmpLog.debug('done with {0}'.format(retVal))
return retVal
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return False
# release a process lock
def release_process_lock(self, process_name, locked_by):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, "proc={0} by={1}".format(process_name, locked_by),
method_name='release_process_lock')
tmpLog.debug('start')
# delete old lock
sqlC = "DELETE FROM {0} ".format(processLockTableName)
sqlC += "WHERE processName=:processName AND lockedBy=:lockedBy "
varMap = dict()
varMap[':processName'] = process_name
varMap[':lockedBy'] = locked_by
self.execute(sqlC, varMap)
# commit
self.commit()
tmpLog.debug('done')
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return False
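    # A minimal usage sketch for the two methods above (assumed caller pattern; the
    # process name and interval are illustrative, not defined by this module):
    #   if proxy.get_process_lock('sweeper', locked_by, 300):
    #       ...  # do the exclusive work
    #       proxy.release_process_lock('sweeper', locked_by)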
# get file status
def get_file_status(self, lfn, file_type, endpoint, job_status):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'lfn={0} endpoint={1}'.format(lfn, endpoint),
method_name='get_file_status')
tmpLog.debug('start')
# sql to get files
sqlF = "SELECT f.status, COUNT(*) cnt FROM {0} f, {1} j ".format(fileTableName, jobTableName)
sqlF += "WHERE j.PandaID=f.PandaID AND j.status=:jobStatus "
sqlF += "AND f.lfn=:lfn AND f.fileType=:type "
if endpoint is not None:
sqlF += "AND f.endpoint=:endpoint "
sqlF += "GROUP BY f.status "
# get files
varMap = dict()
varMap[':lfn'] = lfn
varMap[':type'] = file_type
varMap[':jobStatus'] = job_status
if endpoint is not None:
varMap[':endpoint'] = endpoint
self.execute(sqlF, varMap)
retMap = dict()
for status, cnt in self.cur.fetchall():
retMap[status] = cnt
# commit
self.commit()
tmpLog.debug('got {0}'.format(str(retMap)))
return retMap
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return {}
# change file status
def change_file_status(self, panda_id, data, locked_by):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'PandaID={0}'.format(panda_id), method_name='change_file_status')
tmpLog.debug('start lockedBy={0}'.format(locked_by))
# sql to check lock of job
sqlJ = "SELECT lockedBy FROM {0} ".format(jobTableName)
sqlJ += "WHERE PandaID=:PandaID FOR UPDATE "
# sql to update files
sqlF = "UPDATE {0} ".format(fileTableName)
sqlF += "SET status=:status WHERE fileID=:fileID "
# check lock
varMap = dict()
varMap[':PandaID'] = panda_id
self.execute(sqlJ, varMap)
resJ = self.cur.fetchone()
if resJ is None:
tmpLog.debug('skip since job not found')
else:
lockedBy, = resJ
if lockedBy != locked_by:
tmpLog.debug('skip since lockedBy is inconsistent in DB {0}'.format(lockedBy))
else:
# update files
for tmpFileID, tmpLFN, newStatus in data:
varMap = dict()
varMap[':fileID'] = tmpFileID
varMap[':status'] = newStatus
self.execute(sqlF, varMap)
tmpLog.debug('set new status {0} to {1}'.format(newStatus, tmpLFN))
# commit
self.commit()
tmpLog.debug('done')
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return False
# get group for a file
def get_group_for_file(self, lfn, file_type, endpoint):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'lfn={0} endpoint={1}'.format(lfn, endpoint),
method_name='get_group_for_file')
tmpLog.debug('start')
# sql to get group with the latest update
sqlF = "SELECT * FROM ("
sqlF += "SELECT groupID,groupStatus,groupUpdateTime FROM {0} ".format(fileTableName)
sqlF += "WHERE lfn=:lfn AND fileType=:type "
sqlF += "AND groupID IS NOT NULL AND groupStatus<>:ngStatus "
if endpoint is not None:
sqlF += "AND endpoint=:endpoint "
sqlF += "ORDER BY groupUpdateTime DESC "
sqlF += ") AS TMP LIMIT 1 "
# get group
varMap = dict()
varMap[':lfn'] = lfn
varMap[':type'] = file_type
varMap[':ngStatus'] = 'failed'
if endpoint is not None:
varMap[':endpoint'] = endpoint
self.execute(sqlF, varMap)
resF = self.cur.fetchone()
if resF is None:
retVal = None
else:
groupID, groupStatus, groupUpdateTime = resF
retVal = {'groupID': groupID, 'groupStatus': groupStatus, 'groupUpdateTime': groupUpdateTime}
# commit
self.commit()
tmpLog.debug('got {0}'.format(str(retVal)))
return retVal
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return None
# get files with a group ID
def get_files_with_group_id(self, group_id):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'groupID={0}'.format(group_id),
method_name='get_files_with_group_id')
tmpLog.debug('start')
# sql to get files
sqlF = "SELECT {0} FROM {1} ".format(FileSpec.column_names(), fileTableName)
sqlF += "WHERE groupID=:groupID "
# get files
varMap = dict()
varMap[':groupID'] = group_id
retList = []
self.execute(sqlF, varMap)
for resFile in self.cur.fetchall():
fileSpec = FileSpec()
fileSpec.pack(resFile)
retList.append(fileSpec)
# commit
self.commit()
tmpLog.debug('got {0} files'.format(len(retList)))
return retList
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return []
# update group status
def update_file_group_status(self, group_id, status_string):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'groupID={0}'.format(group_id),
method_name='update_file_group_status')
tmpLog.debug('start')
            # sql to update group status
sqlF = "UPDATE {0} set groupStatus=:groupStatus ".format(fileTableName)
sqlF += "WHERE groupID=:groupID "
            # update group status
varMap = dict()
varMap[':groupID'] = group_id
varMap[':groupStatus'] = status_string
self.execute(sqlF, varMap)
nRow = self.cur.rowcount
# commit
self.commit()
tmpLog.debug('updated {0} files'.format(nRow))
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return False
# get file group status
def get_file_group_status(self, group_id):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'groupID={0}'.format(group_id),
method_name='get_file_group_status')
tmpLog.debug('start')
            # sql to get group status
sqlF = "SELECT DISTINCT groupStatus FROM {0} ".format(fileTableName)
sqlF += "WHERE groupID=:groupID "
            # get group status
varMap = dict()
varMap[':groupID'] = group_id
self.execute(sqlF, varMap)
res = self.cur.fetchall()
retVal = set()
for groupStatus, in res:
retVal.add(groupStatus)
# commit
self.commit()
            tmpLog.debug('got {0}'.format(str(retVal)))
return retVal
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return []
# lock job again
def lock_job_again(self, panda_id, time_column, lock_column, locked_by):
try:
tmpLog = core_utils.make_logger(_logger, 'PandaID={0}'.format(panda_id), method_name='lock_job_again')
tmpLog.debug('start column={0} id={1}'.format(lock_column, locked_by))
# check lock
sqlC = "SELECT {0},{1} FROM {2} ".format(lock_column, time_column, jobTableName)
sqlC += "WHERE PandaID=:pandaID "
sqlC += "FOR UPDATE "
varMap = dict()
varMap[':pandaID'] = panda_id
self.execute(sqlC, varMap)
resC = self.cur.fetchone()
if resC is None:
retVal = False
tmpLog.debug('not found')
else:
oldLockedBy, oldLockedTime = resC
if oldLockedBy != locked_by:
tmpLog.debug('locked by another {0} at {1}'.format(oldLockedBy, oldLockedTime))
retVal = False
else:
# update locked time
sqlU = "UPDATE {0} SET {1}=:timeNow WHERE pandaID=:pandaID ".format(jobTableName, time_column)
varMap = dict()
varMap[':pandaID'] = panda_id
varMap[':timeNow'] = datetime.datetime.utcnow()
self.execute(sqlU, varMap)
retVal = True
# commit
self.commit()
tmpLog.debug('done with {0}'.format(retVal))
# return
return retVal
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return False
# set file group
def set_file_group(self, file_specs, group_id, status_string):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'groupID={0}'.format(group_id),
method_name='set_file_group')
tmpLog.debug('start')
timeNow = datetime.datetime.utcnow()
# sql to update files
sqlF = "UPDATE {0} ".format(fileTableName)
sqlF += "SET groupID=:groupID,groupStatus=:groupStatus,groupUpdateTime=:groupUpdateTime "
sqlF += "WHERE lfn=:lfn "
# update files
for fileSpec in file_specs:
varMap = dict()
varMap[':groupID'] = group_id
varMap[':groupStatus'] = status_string
varMap[':groupUpdateTime'] = timeNow
varMap[':lfn'] = fileSpec.lfn
self.execute(sqlF, varMap)
# commit
self.commit()
tmpLog.debug('done')
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return False
# refresh file group info
def refresh_file_group_info(self, job_spec):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'pandaID={0}'.format(job_spec.PandaID),
method_name='refresh_file_group_info')
tmpLog.debug('start')
# sql to get info
sqlF = "SELECT groupID,groupStatus,groupUpdateTime FROM {0} ".format(fileTableName)
sqlF += "WHERE lfn=:lfn "
# get info
for fileSpec in job_spec.inFiles.union(job_spec.outFiles):
varMap = dict()
varMap[':lfn'] = fileSpec.lfn
self.execute(sqlF, varMap)
resF = self.cur.fetchone()
if resF is None:
continue
groupID, groupStatus, groupUpdateTime = resF
fileSpec.groupID = groupID
fileSpec.groupStatus = groupStatus
fileSpec.groupUpdateTime = groupUpdateTime
# commit
self.commit()
tmpLog.debug('done')
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return False
# increment submission attempt
def increment_submission_attempt(self, panda_id, new_number):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'pandaID={0}'.format(panda_id),
method_name='increment_submission_attempt')
tmpLog.debug('start with newNum={0}'.format(new_number))
# sql to update attempt number
sqlL = "UPDATE {0} SET submissionAttempts=:newNum ".format(jobTableName)
sqlL += "WHERE PandaID=:PandaID "
varMap = dict()
varMap[':PandaID'] = panda_id
varMap[':newNum'] = new_number
self.execute(sqlL, varMap)
# commit
self.commit()
tmpLog.debug('done')
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return False
    # get worker limits for a queue
def get_worker_limits(self, site_name):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='get_worker_limits')
tmpLog.debug('start')
# sql to get
sqlQ = "SELECT maxWorkers,nQueueLimitWorker,nQueueLimitWorkerRatio,"
sqlQ += "nQueueLimitWorkerMax,nQueueLimitWorkerMin FROM {0} ".format(pandaQueueTableName)
sqlQ += "WHERE siteName=:siteName AND resourceType='ANY'"
# sql to count resource types
sqlNT = "SELECT COUNT(*) cnt FROM {0} ".format(pandaQueueTableName)
sqlNT += "WHERE siteName=:siteName AND resourceType!='ANY'"
# sql to count running workers
sqlNR = "SELECT COUNT(*) cnt FROM {0} ".format(workTableName)
sqlNR += "WHERE computingSite=:computingSite AND status IN (:status1)"
# get
varMap = dict()
varMap[':siteName'] = site_name
self.execute(sqlQ, varMap)
resQ = self.cur.fetchall()
# count resource types
varMap = dict()
varMap[':computingSite'] = site_name
varMap[':siteName'] = site_name
self.execute(sqlNT, varMap)
resNT = self.cur.fetchall()
# count running workers
varMap = dict()
varMap[':computingSite'] = site_name
varMap[':status1'] = 'running'
self.execute(sqlNR, varMap)
resNR = self.cur.fetchall()
# dynamic nQueueLimitWorker
retMap = dict()
nRunning = 0
nRT = 1
for cnt, in resNR:
nRunning = cnt
for cnt, in resNT:
nRT = max(nRT, cnt)
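            # derive the limit on queued workers: nQueueLimitWorkerRatio gives a percentage of the
            # currently running workers, bounded below by nQueueLimitWorkerMin (scaled by the number
            # of resource types for the site-wide value) and capped by nQueueLimitWorkerMax and maxWorkers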
for maxWorkers, nQueueLimitWorker_orig, nQueueLimitWorkerRatio, \
nQueueLimitWorkerMax, nQueueLimitWorkerMin_orig in resQ:
if nQueueLimitWorkerRatio is not None and nQueueLimitWorkerRatio > 0:
nQueueLimitWorkerByRatio = int(nRunning * nQueueLimitWorkerRatio / 100)
nQueueLimitWorkerMin = 1
if nQueueLimitWorkerMin_orig is not None:
nQueueLimitWorkerMin = nQueueLimitWorkerMin_orig
nQueueLimitWorkerMinAllRTs = nQueueLimitWorkerMin * nRT
nQueueLimitWorker = max(nQueueLimitWorkerByRatio, nQueueLimitWorkerMinAllRTs)
nQueueLimitWorkerPerRT = max(nQueueLimitWorkerByRatio, nQueueLimitWorkerMin)
if nQueueLimitWorkerMax is not None:
nQueueLimitWorker = min(nQueueLimitWorker, nQueueLimitWorkerMax)
nQueueLimitWorkerPerRT = min(nQueueLimitWorkerPerRT, nQueueLimitWorkerMax)
elif nQueueLimitWorker_orig is not None:
nQueueLimitWorker = nQueueLimitWorker_orig
nQueueLimitWorkerPerRT = nQueueLimitWorker
else:
nQueueLimitWorker = maxWorkers
nQueueLimitWorkerPerRT = nQueueLimitWorker
nQueueLimitWorker = min(nQueueLimitWorker, maxWorkers)
retMap.update({
'maxWorkers': maxWorkers,
'nQueueLimitWorker': nQueueLimitWorker,
'nQueueLimitWorkerPerRT': nQueueLimitWorkerPerRT,
})
# commit
self.commit()
tmpLog.debug('got {0}'.format(str(retMap)))
return retMap
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return {}
# get worker CE stats
def get_worker_ce_stats(self, site_name):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='get_worker_ce_stats')
tmpLog.debug('start')
# get worker CE stats
sqlW = "SELECT wt.status,wt.computingSite,wt.computingElement,COUNT(*) cnt "
sqlW += "FROM {0} wt ".format(workTableName)
sqlW += "WHERE wt.computingSite=:siteName AND wt.status IN (:st1,:st2) "
sqlW += "GROUP BY wt.status,wt.computingElement "
# get worker CE stats
varMap = dict()
varMap[':siteName'] = site_name
varMap[':st1'] = 'running'
varMap[':st2'] = 'submitted'
self.execute(sqlW, varMap)
resW = self.cur.fetchall()
retMap = dict()
for workerStatus, computingSite, computingElement, cnt in resW:
if computingElement not in retMap:
retMap[computingElement] = {
'running': 0,
'submitted': 0,
}
retMap[computingElement][workerStatus] = cnt
# commit
self.commit()
tmpLog.debug('got {0}'.format(str(retMap)))
return retMap
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return {}
# get worker CE backend throughput
def get_worker_ce_backend_throughput(self, site_name, time_window):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='get_worker_ce_backend_throughput')
tmpLog.debug('start')
# get worker CE throughput
sqlW = "SELECT wt.computingElement,wt.status,COUNT(*) cnt "
sqlW += "FROM {0} wt ".format(workTableName)
sqlW += "WHERE wt.computingSite=:siteName "
sqlW += "AND wt.status IN (:st1,:st2,:st3) "
sqlW += "AND wt.creationtime < :timeWindowMiddle "
sqlW += "AND (wt.starttime is NULL OR "
sqlW += "(wt.starttime >= :timeWindowStart AND wt.starttime < :timeWindowEnd) ) "
sqlW += "GROUP BY wt.status,wt.computingElement "
# time window start and end
timeWindowEnd = datetime.datetime.utcnow()
timeWindowStart = timeWindowEnd - datetime.timedelta(seconds=time_window)
timeWindowMiddle = timeWindowEnd - datetime.timedelta(seconds=time_window/2)
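            # count only workers created before the middle of the window which either have not
            # started yet or started within [timeWindowStart, timeWindowEnd)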
# get worker CE throughput
varMap = dict()
varMap[':siteName'] = site_name
varMap[':st1'] = 'submitted'
varMap[':st2'] = 'running'
varMap[':st3'] = 'finished'
varMap[':timeWindowStart'] = timeWindowStart
varMap[':timeWindowEnd'] = timeWindowEnd
varMap[':timeWindowMiddle'] = timeWindowMiddle
self.execute(sqlW, varMap)
resW = self.cur.fetchall()
retMap = dict()
for computingElement, workerStatus, cnt in resW:
if computingElement not in retMap:
retMap[computingElement] = {
'submitted': 0,
'running': 0,
'finished': 0,
}
retMap[computingElement][workerStatus] = cnt
# commit
self.commit()
tmpLog.debug('got {0} with time_window={1} for site {2}'.format(
str(retMap), time_window, site_name))
return retMap
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return {}
# add dialog message
def add_dialog_message(self, message, level, module_name, identifier=None):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='add_dialog_message')
tmpLog.debug('start')
# delete old messages
sqlS = "SELECT diagID FROM {0} ".format(diagTableName)
sqlS += "WHERE creationTime<:timeLimit "
varMap = dict()
varMap[':timeLimit'] = datetime.datetime.utcnow() - datetime.timedelta(minutes=60)
self.execute(sqlS, varMap)
resS = self.cur.fetchall()
sqlD = "DELETE FROM {0} ".format(diagTableName)
sqlD += "WHERE diagID=:diagID "
for diagID, in resS:
varMap = dict()
varMap[':diagID'] = diagID
self.execute(sqlD, varMap)
# commit
self.commit()
# make spec
diagSpec = DiagSpec()
diagSpec.moduleName = module_name
diagSpec.creationTime = datetime.datetime.utcnow()
diagSpec.messageLevel = level
try:
diagSpec.identifier = identifier[:100]
except Exception:
pass
diagSpec.diagMessage = message[:500]
# insert
sqlI = "INSERT INTO {0} ({1}) ".format(diagTableName, DiagSpec.column_names())
sqlI += DiagSpec.bind_values_expression()
varMap = diagSpec.values_list()
self.execute(sqlI, varMap)
# commit
self.commit()
tmpLog.debug('done')
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return False
# get dialog messages to send
def get_dialog_messages_to_send(self, n_messages, lock_interval):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='get_dialog_messages_to_send')
tmpLog.debug('start')
# sql to select messages
sqlD = "SELECT diagID FROM {0} ".format(diagTableName)
sqlD += "WHERE (lockTime IS NULL OR lockTime<:timeLimit) "
sqlD += "ORDER BY diagID LIMIT {0} ".format(n_messages)
# sql to lock message
sqlL = "UPDATE {0} SET lockTime=:timeNow ".format(diagTableName)
sqlL += "WHERE diagID=:diagID "
sqlL += "AND (lockTime IS NULL OR lockTime<:timeLimit) "
# sql to get message
sqlM = "SELECT {0} FROM {1} ".format(DiagSpec.column_names(), diagTableName)
sqlM += "WHERE diagID=:diagID "
# select messages
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(seconds=lock_interval)
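            # a message is handed out only if it is unlocked or its previous lock is older than lock_interval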
varMap = dict()
varMap[':timeLimit'] = timeLimit
self.execute(sqlD, varMap)
resD = self.cur.fetchall()
diagList = []
for diagID, in resD:
# lock
varMap = dict()
varMap[':diagID'] = diagID
varMap[':timeLimit'] = timeLimit
varMap[':timeNow'] = datetime.datetime.utcnow()
self.execute(sqlL, varMap)
nRow = self.cur.rowcount
if nRow == 1:
# get
varMap = dict()
varMap[':diagID'] = diagID
self.execute(sqlM, varMap)
resM = self.cur.fetchone()
# make spec
diagSpec = DiagSpec()
diagSpec.pack(resM)
diagList.append(diagSpec)
# commit
self.commit()
tmpLog.debug('got {0} messages'.format(len(diagList)))
return diagList
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return []
# delete dialog messages
def delete_dialog_messages(self, ids):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='delete_dialog_messages')
tmpLog.debug('start')
# sql to delete message
sqlM = "DELETE FROM {0} ".format(diagTableName)
sqlM += "WHERE diagID=:diagID "
for diagID in ids:
# lock
varMap = dict()
varMap[':diagID'] = diagID
self.execute(sqlM, varMap)
# commit
self.commit()
tmpLog.debug('done')
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return False
# delete old jobs
def delete_old_jobs(self, timeout):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'timeout={0}'.format(timeout),
method_name='delete_old_jobs')
tmpLog.debug('start')
# sql to get old jobs to be deleted
sqlGJ = "SELECT PandaID FROM {0} ".format(jobTableName)
sqlGJ += "WHERE subStatus=:subStatus AND propagatorTime IS NULL "
sqlGJ += "AND ((modificationTime IS NOT NULL AND modificationTime<:timeLimit1) "
sqlGJ += "OR (modificationTime IS NULL AND creationTime<:timeLimit2)) "
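            # i.e. jobs in subStatus 'done' with propagatorTime unset which were last modified before
            # timeLimit1, or never modified and created before timeLimit2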
# sql to delete job
sqlDJ = "DELETE FROM {0} ".format(jobTableName)
sqlDJ += "WHERE PandaID=:PandaID "
# sql to delete files
sqlDF = "DELETE FROM {0} ".format(fileTableName)
sqlDF += "WHERE PandaID=:PandaID "
# sql to delete events
sqlDE = "DELETE FROM {0} ".format(eventTableName)
sqlDE += "WHERE PandaID=:PandaID "
# sql to delete relations
sqlDR = "DELETE FROM {0} ".format(jobWorkerTableName)
sqlDR += "WHERE PandaID=:PandaID "
# get jobs
varMap = dict()
varMap[':subStatus'] = 'done'
varMap[':timeLimit1'] = datetime.datetime.utcnow() - datetime.timedelta(hours=timeout)
varMap[':timeLimit2'] = datetime.datetime.utcnow() - datetime.timedelta(hours=timeout*2)
self.execute(sqlGJ, varMap)
resGJ = self.cur.fetchall()
nDel = 0
for pandaID, in resGJ:
varMap = dict()
varMap[':PandaID'] = pandaID
# delete job
self.execute(sqlDJ, varMap)
iDel = self.cur.rowcount
if iDel > 0:
nDel += iDel
# delete files
self.execute(sqlDF, varMap)
# delete events
self.execute(sqlDE, varMap)
# delete relations
self.execute(sqlDR, varMap)
# commit
self.commit()
tmpLog.debug('deleted {0} jobs'.format(nDel))
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return False
# get iterator of active workers to monitor fifo
def get_active_workers(self, n_workers, seconds_ago=0):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='get_active_workers')
tmpLog.debug('start')
# sql to get workers
sqlW = "SELECT {0} FROM {1} ".format(WorkSpec.column_names(), workTableName)
sqlW += "WHERE status IN (:st_submitted,:st_running,:st_idle) "
sqlW += "AND modificationTime<:timeLimit "
sqlW += "ORDER BY modificationTime,computingSite LIMIT {0} ".format(n_workers)
varMap = dict()
varMap[':timeLimit'] = datetime.datetime.utcnow() - datetime.timedelta(seconds=seconds_ago)
varMap[':st_submitted'] = WorkSpec.ST_submitted
varMap[':st_running'] = WorkSpec.ST_running
varMap[':st_idle'] = WorkSpec.ST_idle
self.execute(sqlW, varMap)
resW = self.cur.fetchall()
def _get_workspec_from_record(rec):
workspec = WorkSpec()
workspec.pack(rec)
workspec.pandaid_list = []
return workspec
retVal = map(_get_workspec_from_record, resW)
tmpLog.debug('got {0} workers'.format(len(resW)))
return retVal
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return {}
# lock workers for specific thread
def lock_workers(self, worker_id_list, lock_interval):
try:
timeNow = datetime.datetime.utcnow()
lockTimeLimit = timeNow - datetime.timedelta(seconds=lock_interval)
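            # a worker can be locked only if it has no lockedBy, or its previous lock has expired
            # (modificationTime older than lockTimeLimit), and it is not in a final state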
retVal = True
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='lock_worker')
tmpLog.debug('start')
# loop
for worker_id, attrs in iteritems(worker_id_list):
varMap = dict()
varMap[':workerID'] = worker_id
varMap[':timeNow'] = timeNow
varMap[':lockTimeLimit'] = lockTimeLimit
varMap[':st1'] = WorkSpec.ST_cancelled
varMap[':st2'] = WorkSpec.ST_finished
varMap[':st3'] = WorkSpec.ST_failed
varMap[':st4'] = WorkSpec.ST_missed
# extract lockedBy
varMap[':lockedBy'] = attrs['lockedBy']
if attrs['lockedBy'] is None:
del attrs['lockedBy']
# sql to lock worker
sqlL = "UPDATE {0} SET modificationTime=:timeNow".format(workTableName)
for attrKey, attrVal in iteritems(attrs):
sqlL += ',{0}=:{0}'.format(attrKey)
varMap[':{0}'.format(attrKey)] = attrVal
sqlL += " WHERE workerID=:workerID AND (lockedBy IS NULL "
sqlL += "OR (modificationTime<:lockTimeLimit AND lockedBy IS NOT NULL)) "
sqlL += "AND (status NOT IN (:st1,:st2,:st3,:st4)) "
# lock worker
self.execute(sqlL, varMap)
nRow = self.cur.rowcount
tmpLog.debug('done with {0}'.format(nRow))
# false if failed to lock
if nRow == 0:
retVal = False
# commit
self.commit()
# return
return retVal
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return False
# get queue config dumps
def get_queue_config_dumps(self):
try:
retVal = dict()
configIDs = set()
# time limit
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=24)
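            # dumps referenced by any job or worker, or created within the last 24 hours, are kept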
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='get_queue_config_dumps')
tmpLog.debug('start')
# sql to get used IDs
sqlIJ = "SELECT DISTINCT configID FROM {0} ".format(jobTableName)
self.execute(sqlIJ)
resIJ = self.cur.fetchall()
for tmpID, in resIJ:
configIDs.add(tmpID)
sqlIW = "SELECT DISTINCT configID FROM {0} ".format(workTableName)
self.execute(sqlIW)
resIW = self.cur.fetchall()
for tmpID, in resIW:
configIDs.add(tmpID)
# sql to delete
sqlD = "DELETE FROM {0} WHERE configID=:configID ".format(queueConfigDumpTableName)
# sql to get config
sqlQ = "SELECT {0} FROM {1} ".format(QueueConfigDumpSpec.column_names(), queueConfigDumpTableName)
sqlQ += "FOR UPDATE "
self.execute(sqlQ)
resQs = self.cur.fetchall()
iDump = 0
iDel = 0
for resQ in resQs:
dumpSpec = QueueConfigDumpSpec()
dumpSpec.pack(resQ)
# delete if unused and too old
if dumpSpec.configID not in configIDs and dumpSpec.creationTime < timeLimit:
varMap = dict()
varMap[':configID'] = dumpSpec.configID
self.execute(sqlD, varMap)
iDel += 1
else:
retVal[dumpSpec.dumpUniqueName] = dumpSpec
iDump += 1
# commit
self.commit()
tmpLog.debug('got {0} dumps and delete {1} dumps'.format(iDump, iDel))
# return
return retVal
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(tmpLog)
# return
return {}
# add queue config dump
def add_queue_config_dump(self, dump_spec):
try:
# sql to insert a job
sqlJ = "INSERT INTO {0} ({1}) ".format(queueConfigDumpTableName, QueueConfigDumpSpec.column_names())
sqlJ += QueueConfigDumpSpec.bind_values_expression()
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='add_queue_config_dumps')
tmpLog.debug('start for {0}'.format(dump_spec.dumpUniqueName))
varMap = dump_spec.values_list()
# insert
self.execute(sqlJ, varMap)
# commit
self.commit()
tmpLog.debug('done')
# return
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(tmpLog)
# return
return False
# get configID for queue config dump
def get_config_id_dump(self, dump_spec):
try:
# sql to get configID
sqlJ = "SELECT configID FROM {0} ".format(queueConfigDumpTableName)
sqlJ += "WHERE queueName=:queueName AND dumpUniqueName=:dumpUniqueName "
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='get_config_id_for_dump')
tmpLog.debug('start for {0}:{1}'.format(dump_spec.queueName, dump_spec.dumpUniqueName))
# get
varMap = dict()
varMap[':queueName'] = dump_spec.queueName
varMap[':dumpUniqueName'] = dump_spec.dumpUniqueName
self.execute(sqlJ, varMap)
resJ = self.cur.fetchone()
if resJ is not None:
configID, = resJ
else:
configID = None
tmpLog.debug('got configID={0}'.format(configID))
# return
return configID
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(tmpLog)
# return
return None
# purge a panda queue
def purge_pq(self, queue_name):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'queueName={0}'.format(queue_name),
method_name='purge_pq')
tmpLog.debug('start')
# sql to get jobs
sqlJ = "SELECT PandaID FROM {0} ".format(jobTableName)
sqlJ += "WHERE computingSite=:computingSite "
# sql to get workers
sqlW = "SELECT workerID FROM {0} ".format(workTableName)
sqlW += "WHERE computingSite=:computingSite "
# sql to get queue configs
sqlQ = "SELECT configID FROM {0} ".format(queueConfigDumpTableName)
sqlQ += "WHERE queueName=:queueName "
# sql to delete job
sqlDJ = "DELETE FROM {0} ".format(jobTableName)
sqlDJ += "WHERE PandaID=:PandaID "
# sql to delete files
sqlDF = "DELETE FROM {0} ".format(fileTableName)
sqlDF += "WHERE PandaID=:PandaID "
# sql to delete events
sqlDE = "DELETE FROM {0} ".format(eventTableName)
sqlDE += "WHERE PandaID=:PandaID "
# sql to delete relations by job
sqlDRJ = "DELETE FROM {0} ".format(jobWorkerTableName)
sqlDRJ += "WHERE PandaID=:PandaID "
# sql to delete worker
sqlDW = "DELETE FROM {0} ".format(workTableName)
sqlDW += "WHERE workerID=:workerID "
# sql to delete relations by worker
sqlDRW = "DELETE FROM {0} ".format(jobWorkerTableName)
sqlDRW += "WHERE workerID=:workerID "
# sql to delete queue config
sqlDQ = "DELETE FROM {0} ".format(queueConfigDumpTableName)
sqlDQ += "WHERE configID=:configID "
# sql to delete panda queue
sqlDP = "DELETE FROM {0} ".format(pandaQueueTableName)
sqlDP += "WHERE queueName=:queueName "
# get jobs
varMap = dict()
varMap[':computingSite'] = queue_name
self.execute(sqlJ, varMap)
resJ = self.cur.fetchall()
for pandaID, in resJ:
varMap = dict()
varMap[':PandaID'] = pandaID
# delete job
self.execute(sqlDJ, varMap)
# delete files
self.execute(sqlDF, varMap)
# delete events
self.execute(sqlDE, varMap)
# delete relations
self.execute(sqlDRJ, varMap)
# get workers
varMap = dict()
varMap[':computingSite'] = queue_name
self.execute(sqlW, varMap)
resW = self.cur.fetchall()
for workerID, in resW:
varMap = dict()
varMap[':workerID'] = workerID
# delete workers
self.execute(sqlDW, varMap)
# delete relations
self.execute(sqlDRW, varMap)
# get queue configs
varMap = dict()
varMap[':queueName'] = queue_name
self.execute(sqlQ, varMap)
resQ = self.cur.fetchall()
for configID, in resQ:
varMap = dict()
varMap[':configID'] = configID
# delete queue configs
self.execute(sqlDQ, varMap)
# delete panda queue
varMap = dict()
varMap[':queueName'] = queue_name
self.execute(sqlDP, varMap)
# commit
self.commit()
tmpLog.debug('done')
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return False
# disable multi workers
def disable_multi_workers(self, panda_id):
tmpLog = None
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'PandaID={0}'.format(panda_id),
method_name='disable_multi_workers')
tmpLog.debug('start')
# sql to update flag
sqlJ = "UPDATE {0} SET moreWorkers=0 ".format(jobTableName)
sqlJ += "WHERE PandaID=:pandaID AND nWorkers IS NOT NULL AND nWorkersLimit IS NOT NULL "
sqlJ += "AND nWorkers>0 "
# set flag
varMap = dict()
varMap[':pandaID'] = panda_id
self.execute(sqlJ, varMap)
nRow = self.cur.rowcount
# commit
self.commit()
tmpLog.debug('done with {0}'.format(nRow))
# return
return nRow
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(tmpLog)
# return
return None
# update PQ table
def update_panda_queue_attribute(self, key, value, site_name=None, queue_name=None):
tmpLog = None
try:
# get logger
tmpLog = core_utils.make_logger(_logger, 'site={0} queue={1}'.format(site_name, queue_name),
method_name='update_panda_queue')
tmpLog.debug('start key={0}'.format(key))
# sql to update
sqlJ = "UPDATE {0} SET {1}=:{1} ".format(pandaQueueTableName, key)
sqlJ += "WHERE "
varMap = dict()
varMap[':{0}'.format(key)] = value
if site_name is not None:
sqlJ += "siteName=:siteName "
varMap[':siteName'] = site_name
else:
sqlJ += "queueName=:queueName "
varMap[':queueName'] = queue_name
# update
self.execute(sqlJ, varMap)
nRow = self.cur.rowcount
# commit
self.commit()
tmpLog.debug('done with {0}'.format(nRow))
# return
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(tmpLog)
# return
return False
# delete orphaned job info
def delete_orphaned_job_info(self):
try:
# get logger
tmpLog = core_utils.make_logger(_logger,
method_name='delete_orphaned_job_info')
tmpLog.debug('start')
# sql to get job info to be deleted
sqlGJ = "SELECT PandaID FROM {0} "
sqlGJ += "WHERE PandaID NOT IN ("
sqlGJ += "SELECT PandaID FROM {1}) "
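            # {0} is the table being cleaned and {1} is the job table; both are filled in per table below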
# sql to delete job info
sqlDJ = "DELETE FROM {0} "
sqlDJ += "WHERE PandaID=:PandaID "
# sql to delete files
sqlDF = "DELETE FROM {0} ".format(fileTableName)
sqlDF += "WHERE PandaID=:PandaID "
# sql to delete events
sqlDE = "DELETE FROM {0} ".format(eventTableName)
sqlDE += "WHERE PandaID=:PandaID "
# sql to delete relations
sqlDR = "DELETE FROM {0} ".format(jobWorkerTableName)
sqlDR += "WHERE PandaID=:PandaID "
# loop over all tables
for tableName in [fileTableName, eventTableName, jobWorkerTableName]:
# get job info
self.execute(sqlGJ.format(tableName, jobTableName))
resGJ = self.cur.fetchall()
nDel = 0
for pandaID, in resGJ:
# delete
varMap = dict()
varMap[':PandaID'] = pandaID
self.execute(sqlDJ.format(tableName), varMap)
iDel = self.cur.rowcount
if iDel > 0:
nDel += iDel
# commit
self.commit()
tmpLog.debug('deleted {0} records from {1}'.format(nDel, tableName))
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return False
# lock worker again to feed events
def lock_worker_again_to_feed_events(self, worker_id, locked_by):
try:
tmpLog = core_utils.make_logger(_logger, 'workerID={0}'.format(worker_id),
method_name='lock_worker_again_to_feed_events')
tmpLog.debug('start id={0}'.format(locked_by))
# check lock
sqlC = "SELECT eventFeedLock,eventFeedTime FROM {0} ".format(workTableName)
sqlC += "WHERE workerID=:workerID "
sqlC += "FOR UPDATE "
varMap = dict()
varMap[':workerID'] = worker_id
self.execute(sqlC, varMap)
resC = self.cur.fetchone()
if resC is None:
retVal = False
tmpLog.debug('not found')
else:
oldLockedBy, oldLockedTime = resC
if oldLockedBy != locked_by:
tmpLog.debug('locked by another {0} at {1}'.format(oldLockedBy, oldLockedTime))
retVal = False
else:
# update locked time
sqlU = "UPDATE {0} SET eventFeedTime=:timeNow WHERE workerID=:workerID ".format(workTableName)
varMap = dict()
varMap[':workerID'] = worker_id
varMap[':timeNow'] = datetime.datetime.utcnow()
self.execute(sqlU, varMap)
retVal = True
# commit
self.commit()
tmpLog.debug('done with {0}'.format(retVal))
# return
return retVal
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return False
# insert service metrics
def insert_service_metrics(self, service_metric_spec):
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='insert_service_metrics')
tmpLog.debug('start')
try:
sql = "INSERT INTO {0} ({1}) ".format(serviceMetricsTableName, ServiceMetricSpec.column_names())
sql += ServiceMetricSpec.bind_values_expression()
var_map = service_metric_spec.values_list()
self.execute(sql, var_map)
self.commit()
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(tmpLog)
# return
return False
# get service metrics
def get_service_metrics(self, last_update):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='get_service_metrics')
tmpLog.debug('start with last_update: {0}'.format(last_update))
sql = "SELECT creationTime, hostName, metrics FROM {0} ".format(serviceMetricsTableName)
sql += "WHERE creationTime>=:last_update "
var_map = {':last_update': last_update}
self.execute(sql, var_map)
res = self.cur.fetchall()
# change datetime objects to strings for json serialization later
res_corrected = []
for entry in res:
try:
res_corrected.append([entry[0].strftime('%Y-%m-%d %H:%M:%S.%f'), entry[1], entry[2]])
except Exception:
pass
# commit
self.commit()
tmpLog.debug('got {0}'.format(str(res)))
return res_corrected
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return {}
# release a site
def release_site(self, site_name, locked_by):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='release_site')
tmpLog.debug('start')
# sql to release site
sql = "UPDATE {0} SET lockedBy=NULL ".format(pandaQueueTableName)
sql += "WHERE siteName=:siteName AND lockedBy=:lockedBy "
# release site
varMap = dict()
varMap[':siteName'] = site_name
varMap[':lockedBy'] = locked_by
self.execute(sql, varMap)
n_done = self.cur.rowcount > 0
# commit
self.commit()
if n_done >= 1:
tmpLog.debug('released {0}'.format(site_name))
else:
                tmpLog.debug('found nothing to release for {0}. Skipped'.format(site_name))
# return
return True
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return False
# get workers via workerID
def get_workers_from_ids(self, ids):
try:
# get logger
tmpLog = core_utils.make_logger(_logger, method_name='get_workers_from_ids')
tmpLog.debug('start')
# sql to get workers
sqlW = (
"SELECT workerID,configID,mapType FROM {workTableName} "
"WHERE workerID IN ({ids_str}) "
"AND status IN (:st_submitted,:st_running,:st_idle) "
).format(workTableName=workTableName, ids_str=','.join([ str(_) for _ in ids]))
# sql to get associated workerIDs
sqlA = (
"SELECT t.workerID FROM {jobWorkerTableName} t, {jobWorkerTableName} s, {workTableName} w "
"WHERE s.PandaID=t.PandaID AND s.workerID=:workerID "
"AND w.workerID=t.workerID AND w.status IN (:st_submitted,:st_running,:st_idle) "
).format(jobWorkerTableName=jobWorkerTableName, workTableName=workTableName)
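            # sqlA finds all active workers that share at least one PandaID with the given worker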
# sql to get associated workers
sqlG = (
"SELECT {0} FROM {1} "
"WHERE workerID=:workerID "
).format(WorkSpec.column_names(), workTableName)
# sql to get associated PandaIDs
sqlP = (
"SELECT PandaID FROM {0} "
"WHERE workerID=:workerID "
).format(jobWorkerTableName)
# get workerIDs
timeNow = datetime.datetime.utcnow()
varMap = dict()
varMap[':st_submitted'] = WorkSpec.ST_submitted
varMap[':st_running'] = WorkSpec.ST_running
varMap[':st_idle'] = WorkSpec.ST_idle
self.execute(sqlW, varMap)
resW = self.cur.fetchall()
tmpWorkers = set()
for workerID, configID, mapType in resW:
# ignore configID
if not core_utils.dynamic_plugin_change():
configID = None
tmpWorkers.add((workerID, configID, mapType))
checkedIDs = set()
retVal = {}
for workerID, configID, mapType in tmpWorkers:
# skip
if workerID in checkedIDs:
continue
# get associated workerIDs
varMap = dict()
varMap[':workerID'] = workerID
varMap[':st_submitted'] = WorkSpec.ST_submitted
varMap[':st_running'] = WorkSpec.ST_running
varMap[':st_idle'] = WorkSpec.ST_idle
self.execute(sqlA, varMap)
resA = self.cur.fetchall()
workerIDtoScan = set()
for tmpWorkID, in resA:
workerIDtoScan.add(tmpWorkID)
                # add the original ID as well, since there is no relation entry when the job is not yet bound to a worker
workerIDtoScan.add(workerID)
                # use only one representative worker (the smallest workerID) to avoid updating the same worker set concurrently
if mapType == WorkSpec.MT_MultiWorkers:
if workerID != min(workerIDtoScan):
continue
# get workers
queueName = None
workersList = []
for tmpWorkID in workerIDtoScan:
checkedIDs.add(tmpWorkID)
# get worker
varMap = dict()
varMap[':workerID'] = tmpWorkID
self.execute(sqlG, varMap)
resG = self.cur.fetchone()
workSpec = WorkSpec()
workSpec.pack(resG)
if queueName is None:
queueName = workSpec.computingSite
workersList.append(workSpec)
# get associated PandaIDs
varMap = dict()
varMap[':workerID'] = tmpWorkID
self.execute(sqlP, varMap)
resP = self.cur.fetchall()
workSpec.pandaid_list = []
for tmpPandaID, in resP:
workSpec.pandaid_list.append(tmpPandaID)
if len(workSpec.pandaid_list) > 0:
workSpec.nJobs = len(workSpec.pandaid_list)
# commit
self.commit()
# add
if queueName is not None:
retVal.setdefault(queueName, dict())
retVal[queueName].setdefault(configID, [])
retVal[queueName][configID].append(workersList)
tmpLog.debug('got {0}'.format(str(retVal)))
return retVal
except Exception:
# roll back
self.rollback()
# dump error
core_utils.dump_error_message(_logger)
# return
return {}
| apache-2.0 | 825,545,955,905,895,200 | 44.390634 | 119 | 0.493884 | false |
CZAlmon/MangaMine | MangaMineBot_Folder/MangaBotDownloader.py | 1 | 57509 | #Ver. 0.0.7
#Author: Zach Almon
import urllib.request
import re
import os
import platform
import sys
import string
import html
import time
platformType = platform.system()
def Batoto(link_to_manga_site):
success = False
currentDirectory = os.getcwd()
if platformType == 'Windows':
MASTERdirectoryName = currentDirectory + "\\Batoto"
else:
MASTERdirectoryName = currentDirectory + "/Batoto"
try:
os.makedirs(MASTERdirectoryName)
except OSError:
if not os.path.isdir(MASTERdirectoryName):
raise
    #MASTERdirectoryName is the variable that keeps the program downloading
    #different manga into the same Batoto folder
os.chdir(MASTERdirectoryName)
type_one_manga = False
type_two_manga = False
try:
urllibHTML = urllib.request.urlopen(link_to_manga_site).read()
except:
print('Request 1 Failed. Trying again in 30 seconds.')
time.sleep(30)
try:
urllibHTML = urllib.request.urlopen(link_to_manga_site).read()
except:
print('Request 2 Failed. Trying again in 30 seconds.')
time.sleep(30)
try:
urllibHTML = urllib.request.urlopen(link_to_manga_site).read()
except:
print('Request 3 Failed. Trying again in 30 seconds.')
time.sleep(30)
try:
urllibHTML = urllib.request.urlopen(link_to_manga_site).read()
except:
print('Request 4 Failed. Moving onto the Next manga.')
print('This was the First Main Request that Failed.')
return
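    # The four-attempt request pattern above is repeated throughout this script; a helper
    # such as the commented sketch below could replace it (hypothetical, not used here):
    #   def fetch_with_retries(url, attempts=4, wait=30):
    #       for attempt in range(attempts):
    #           try:
    #               return urllib.request.urlopen(url).read()
    #           except:
    #               if attempt < attempts - 1:
    #                   time.sleep(wait)
    #       return None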
Manga_Title = re.findall(r'<title>+(.*?)- Scanlations', str(urllibHTML))
if len(Manga_Title) == 0:
print("Title not found. URL or HTML Error.")
return
Manga_Title_string = Manga_Title[0]
Manga_Title_string = Manga_Title_string[:-1]
Manga_Title_string = re.sub(r'\\x\w{2}', r' ', Manga_Title_string)
    #Python 3.4 converts HTML entities such as '&amp;' to their string equivalents.
Manga_Title_string = html.unescape(Manga_Title_string)
#Get rid of Non-Functioning characters for Filenames
directorySafeName = Manga_Title_string
directorySafeName = directorySafeName.replace("/", " over ")
directorySafeName = directorySafeName.replace(":", "")
directorySafeName = directorySafeName.replace("?", "")
directorySafeName = directorySafeName.replace("+", " plus ")
directorySafeName = directorySafeName.replace("\"","'")
directorySafeName = directorySafeName.replace("%", " Percent ")
directorySafeName = directorySafeName.replace("<", "")
directorySafeName = directorySafeName.replace(">", "")
Manga_Title_string = directorySafeName
print("Downloading", Manga_Title_string)
    #For any other language on Bato.to, change lang_English to whatever matches the language you desire.
    #Then this file *SHOULD* work with your language. It is untested with anything other than English.
allENGLISHChaps = re.findall(r'lang_English+(.*?)\ title="+', str(urllibHTML))
if len(allENGLISHChaps) == 0:
print("Manga has no English Chapters or there was an error reading the HTML!")
return
else:
First_chapter_string = allENGLISHChaps[-1]
First_chapter_address = re.findall(r'href=\"+(.*?)\"', First_chapter_string)
First_chapter_address_string = First_chapter_address[0]
try:
First_chapter_html = urllib.request.urlopen(First_chapter_address_string).read()
except:
print('Request 1 Failed. Trying again in 30 seconds.')
time.sleep(30)
try:
First_chapter_html = urllib.request.urlopen(First_chapter_address_string).read()
except:
print('Request 2 Failed. Trying again in 30 seconds.')
time.sleep(30)
try:
First_chapter_html = urllib.request.urlopen(First_chapter_address_string).read()
except:
print('Request 3 Failed. Trying again in 30 seconds.')
time.sleep(30)
try:
First_chapter_html = urllib.request.urlopen(First_chapter_address_string).read()
except:
print('Request 4 Failed. Moving onto the Next manga.')
print('This was the Second Main Request that Failed.')
return
type_one_padding_right = re.search("<div style=\"text-align:center;\">", str(First_chapter_html))
type_two_comic_page = re.search("comic_page", str(First_chapter_html))
#Type one is All images on One Page
if type_one_padding_right != None:
type_one_manga = True
    #Type two is all images on separate pages
elif type_two_comic_page != None:
type_two_manga = True
else:
print("There was an error with the Manga Type!")
return
    #This will get the chapter links from the select options on the chapter's first page.
    #There are 2 select options (one at the top and one at the bottom).
    #They are the same, so it's arbitrary which one you pick. [0] is selected here.
get_Chapters = re.findall(r'250px;">+(.*?)</select>', str(First_chapter_html))
chapter_master_string = get_Chapters[0]
list_of_Chapter_Links = []
#Get all chapter links. Last thing in list is an unneeded "selected" string. Pop that off.
list_of_Chapter_Links = re.findall(r'\"+(.*?)\"', chapter_master_string)
#In this list there may be a "selected". It may or may not be at the end. The loop solves it.
#I am 95% sure there will only ever be 1 "selected" per list.
#list_of_Chapter_Links.pop(-1)
for i in range(len(list_of_Chapter_Links)):
if list_of_Chapter_Links[i] == "selected":
list_of_Chapter_Links.pop(i)
break
#Get Numbers of the chapters. Will be "Matched" up to the list_of_Chapter_Links.
list_of_Chapter_Numbers_raw = re.findall(r'Ch\.+(.*?)<', chapter_master_string)
list_of_chapter_names_refined = []
#Some chapters may be like "230: Title of Chapter" Some may be "145"
for i in range(len(list_of_Chapter_Numbers_raw)):
temp_list = re.findall('^(.*?):', list_of_Chapter_Numbers_raw[i])
if len(temp_list) == 0:
list_of_chapter_names_refined.append(list_of_Chapter_Numbers_raw[i])
elif len(temp_list) == 1:
list_of_chapter_names_refined.append(temp_list[0])
else:
print("Manga Chapter Name Error!")
return
list_of_Chapter_Links_Final = list_of_Chapter_Links
list_of_Chapter_Numbers_Final = list_of_chapter_names_refined
list_of_Chapter_Links_Final.reverse()
list_of_Chapter_Numbers_Final.reverse()
fullDownload = True
    #Because there may be duplicate chapter numbers, append a v2/v3/... suffix to any repeats
temp_name = []
temp_name_str = ''
for i in range(len(list_of_Chapter_Numbers_Final)):
if list_of_Chapter_Numbers_Final[i] in temp_name:
#At this point there are duplicates. The chapters may not be in order.
#This is the only method I can come up with to deal with duplicates
# that may be out of order.
temp_name_str = list_of_Chapter_Numbers_Final[i] + ' v2'
if temp_name_str in temp_name:
temp_name_str = list_of_Chapter_Numbers_Final[i] + ' v3'
if temp_name_str in temp_name:
temp_name_str = list_of_Chapter_Numbers_Final[i] + ' v4'
if temp_name_str in temp_name:
temp_name_str = list_of_Chapter_Numbers_Final[i] + ' v5'
if temp_name_str in temp_name:
temp_name_str = list_of_Chapter_Numbers_Final[i] + ' v6'
if temp_name_str in temp_name:
temp_name_str = list_of_Chapter_Numbers_Final[i] + ' v7'
if temp_name_str in temp_name:
temp_name_str = list_of_Chapter_Numbers_Final[i] + ' v8'
if temp_name_str in temp_name:
temp_name_str = list_of_Chapter_Numbers_Final[i] + ' v9'
if temp_name_str in temp_name:
temp_name_str = list_of_Chapter_Numbers_Final[i] + ' v10'
                #If there are more than 10 duplicates I can't help you
temp_name.append(temp_name_str)
else:
temp_name.append(list_of_Chapter_Numbers_Final[i])
list_of_Chapter_Numbers_Final = temp_name
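    # A more compact equivalent of the duplicate handling above, sketched for reference
    # (commented out, not used by this script):
    #   temp_name = []
    #   for name in list_of_Chapter_Numbers_Final:
    #       candidate = name
    #       version = 2
    #       while candidate in temp_name:
    #           candidate = name + ' v' + str(version)
    #           version += 1
    #       temp_name.append(candidate)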
currentDirectory = MASTERdirectoryName
if platformType == 'Windows':
manga_directory_name = currentDirectory + "\\" + Manga_Title_string
else:
manga_directory_name = currentDirectory + "/" + Manga_Title_string
try:
os.makedirs(manga_directory_name)
except OSError:
if not os.path.isdir(manga_directory_name):
raise
os.chdir(manga_directory_name)
#Main Loop for Downloading Images.
for i in range(len(list_of_Chapter_Numbers_Final)):
first_page_of_each_chapter = True
chapter_number = list_of_Chapter_Numbers_Final[i]
chapter_link = list_of_Chapter_Links_Final[i]
if platformType == 'Windows':
chapDirectoryName = manga_directory_name + "\\Chapter " + chapter_number
else:
chapDirectoryName = manga_directory_name + "/Chapter " + chapter_number
try:
os.makedirs(chapDirectoryName)
except OSError:
if not os.path.isdir(chapDirectoryName):
raise
os.chdir(chapDirectoryName)
print("Downloading Chapter", chapter_number)
try:
urllibHTML = urllib.request.urlopen(list_of_Chapter_Links_Final[i]).read()
except:
print('Request 1 Failed. Trying again in 30 seconds.')
time.sleep(30)
try:
urllibHTML = urllib.request.urlopen(list_of_Chapter_Links_Final[i]).read()
except:
print('Request 2 Failed. Trying again in 30 seconds.')
time.sleep(30)
try:
urllibHTML = urllib.request.urlopen(list_of_Chapter_Links_Final[i]).read()
except:
print('Request 3 Failed. Trying again in 30 seconds.')
time.sleep(30)
try:
urllibHTML = urllib.request.urlopen(list_of_Chapter_Links_Final[i]).read()
except:
print('Request 4 Failed. Moving onto the Next manga.')
print('This was the Chapter Request that Failed.')
return
if type_one_manga == True:
get_images = re.findall(r'text-align:center;">+(.*?)</div><div', str(urllibHTML))
get_images_master_string = get_images[0]
image_file_name_list = re.findall(r"<img src=\\'(.*?)\\'", str(get_images_master_string))
Amount_of_pages = len(image_file_name_list)
for j in range(len(image_file_name_list)):
if first_page_of_each_chapter == True:
first_page_of_each_chapter = False
numOfFileInCWD = len([name for name in os.listdir('.') if os.path.isfile(name)])
if numOfFileInCWD == Amount_of_pages:
break
image_file_name = image_file_name_list[j]
image_file_extension_list = re.findall(r'(\.\D[^\.]+)', image_file_name)
image_file_extension = image_file_extension_list[-1]
imageName = "Page " + str(j+1) + image_file_extension
print("Downloading Page %d" % (j+1), end="", flush=True)
print("\r", end="", flush=True)
fileExists = os.path.isfile(imageName)
#If file does not already exist, opens a file, writes image binary data to it and closes
if fileExists == False:
image_worked = True
try:
rawImage = urllib.request.urlopen(image_file_name).read()
except:
print('Request 1 Failed. Trying again in 30 seconds.')
time.sleep(30)
try:
rawImage = urllib.request.urlopen(image_file_name).read()
except:
print('Request 2 Failed. Trying again in 30 seconds.')
time.sleep(30)
try:
rawImage = urllib.request.urlopen(image_file_name).read()
except:
print('Request 3 Failed. Trying again in 30 seconds.')
time.sleep(30)
try:
rawImage = urllib.request.urlopen(image_file_name).read()
except:
print('Request 4 Failed. Moving onto the Next image.')
image_worked = False
if image_worked:
fout = open(imageName, 'wb')
fout.write(rawImage)
fout.close()
                # I will leave this here in case you feel the need to slow down your requests
                # to the website/server just in case something bad could happen. Remove the '#'
                # from the time.sleep line below and the program will sleep for 2 seconds after
                # each page is downloaded. You can add more time if you wish.
                #
                #time.sleep(2)
elif type_two_manga == True:
#Get the pages between "<id..." and "</se..."
get_Pages = re.findall(r'id="page_select" onchange="window.location=this.value;">+(.*?)</select></li>', str(urllibHTML))
#There will be Two found
Pages_master_string = get_Pages[0]
            #Get all page links. The list also contains an unneeded "selected" string; the loop below removes it.
list_of_page_Links = re.findall(r'\"+(.*?)\"', Pages_master_string)
list_of_page_links_final = []
            #Loop to skip the "selected" entry while building the final list of links
for j in range(len(list_of_page_Links)):
if list_of_page_Links[j] != "selected":
list_of_page_links_final.append(list_of_page_Links[j])
Amount_of_pages = len(list_of_page_links_final)
for j in range(len(list_of_page_links_final)):
print("Downloading Page %d" % (j+1), end="", flush=True)
print("\r", end="", flush=True)
#Check for First page. Checks to see if anything is already downloaded
if first_page_of_each_chapter == True:
first_page_of_each_chapter = False
numOfFileInCWD = len([name for name in os.listdir('.') if os.path.isfile(name)])
if numOfFileInCWD == Amount_of_pages:
break
                    #At this point there is something left to download.
                    #Since we already have the HTML for the first page of each chapter,
                    #we don't need to fetch it again, so reuse it here.
page_urllibHTML = urllibHTML
else:
try:
page_urllibHTML = urllib.request.urlopen(list_of_page_links_final[j]).read()
except:
print('Request 1 Failed. Trying again in 30 seconds.')
time.sleep(30)
try:
page_urllibHTML = urllib.request.urlopen(list_of_page_links_final[j]).read()
except:
print('Request 2 Failed. Trying again in 30 seconds.')
time.sleep(30)
try:
page_urllibHTML = urllib.request.urlopen(list_of_page_links_final[j]).read()
except:
print('Request 3 Failed. Trying again in 30 seconds.')
time.sleep(30)
try:
page_urllibHTML = urllib.request.urlopen(list_of_page_links_final[j]).read()
except:
print('Request 4 Failed. Moving onto the Next manga.')
print('This was the Page Request that Failed.')
return
#Get Image URL
image_file_name_list = re.findall(r'comic_page" style="max-width: 100%;" src="(.*?)"', str(page_urllibHTML))
image_file_name = image_file_name_list[0]
#CHECK EXTENSION. Bato.to Could use .png or .jpg or .jpeg
image_file_extension_list = re.findall(r'(\.\D[^\.]+)', image_file_name)
image_file_extension = image_file_extension_list[-1]
imageName = "Page " + str(j+1) + image_file_extension
fileExists = os.path.isfile(imageName)
#If file does not already exist, opens a file, writes image binary data to it and closes
if fileExists == False:
image_worked = True
try:
rawImage = urllib.request.urlopen(image_file_name).read()
except:
print('Request 1 Failed. Trying again in 30 seconds.')
time.sleep(30)
try:
rawImage = urllib.request.urlopen(image_file_name).read()
except:
print('Request 2 Failed. Trying again in 30 seconds.')
time.sleep(30)
try:
rawImage = urllib.request.urlopen(image_file_name).read()
except:
print('Request 3 Failed. Trying again in 30 seconds.')
time.sleep(30)
try:
rawImage = urllib.request.urlopen(image_file_name).read()
except:
print('Request 4 Failed. Moving onto the Next image.')
image_worked = False
if image_worked:
fout = open(imageName, 'wb')
fout.write(rawImage)
fout.close()
                # I will leave this here in case you feel the need to slow down your requests
                # to the website/server just in case something bad could happen. Remove the '#'
                # from the time.sleep line below and the program will sleep for 2 seconds after
                # each page is downloaded. You can add more time if you wish.
                #
                #time.sleep(2)
else:
print("Manga Type Error!")
return
return
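
# The site functions above and below repeat the same four-attempt urllib retry
# pattern inline for every chapter, page, and image request. A helper along
# these lines could replace those nested try/except blocks; it is only a
# sketch, nothing in this script calls it, and the name and parameters are
# made up for illustration. It relies on the urllib.request and time imports
# this module already uses.
def fetch_with_retries(url, attempts=4, wait=30):
    """Return the raw bytes at url, retrying a few times before giving up."""
    for attempt in range(1, attempts + 1):
        try:
            return urllib.request.urlopen(url).read()
        except Exception:
            if attempt == attempts:
                print('Request %d Failed. Giving up.' % attempt)
                raise
            print('Request %d Failed. Trying again in %d seconds.' % (attempt, wait))
            time.sleep(wait)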
def MangaPanda(link_to_manga_site):
success = False
currentDirectory = os.getcwd()
downloadMangaListOnce = False
does_it_have_dot_html = re.findall(r'(\.html)', link_to_manga_site)
if len(does_it_have_dot_html) == 0:
pass
else:
link_to_manga_site = link_to_manga_site[:-5]
try:
urllibHTML = urllib.request.urlopen(link_to_manga_site).read()
except:
print('Request 1 Failed. Trying again in 30 seconds.')
time.sleep(30)
try:
urllibHTML = urllib.request.urlopen(link_to_manga_site).read()
except:
print('Request 2 Failed. Trying again in 30 seconds.')
time.sleep(30)
try:
urllibHTML = urllib.request.urlopen(link_to_manga_site).read()
except:
print('Request 3 Failed. Trying again in 30 seconds.')
time.sleep(30)
try:
urllibHTML = urllib.request.urlopen(link_to_manga_site).read()
except:
print('Request 4 Failed. Moving onto the Next manga.')
print('This was the First Main Request that Failed.')
return
allChaps = re.findall(r'<div class="chico_manga"></div>\\n<a href="+(.*?)\">+', str(urllibHTML))
numOfChapLinks = len(allChaps)
    #However, the 6 most recent chapters are also listed under the 'chico_manga'
    #class, so those duplicates must be popped off. If the manga has fewer than
    #6 chapters in total, there are special cases to handle.
    if numOfChapLinks < 12:
        if numOfChapLinks > 0 and numOfChapLinks % 2 == 0:
            #Fewer than 6 real chapters: the duplicated "recent" links make up
            #exactly half of the list, so drop the first half.
            for i in range(numOfChapLinks // 2):
                allChaps.pop(0)
        else:
            print('There was an error parsing the HTML!')
    else:
        for i in range(6):
            allChaps.pop(0)
#Rather conveniently, there is a class called 'aname' which contains the name of the manga
grabName = re.findall(r'<h2 class="aname">+(.*?)\</h2>+', str(urllibHTML))
if len(grabName) == 0:
print("Title not found. URL or HTML Error.")
return
#some mangas contained characters in aname which cannot be used in windows directories
#these statements attempt to make said strings directory friendly
directorySafeName = grabName[0]
directorySafeName = directorySafeName.replace("/", " over ")
directorySafeName = directorySafeName.replace(":", "")
directorySafeName = directorySafeName.replace("?", "")
directorySafeName = directorySafeName.replace("+", "")
directorySafeName = directorySafeName.replace("\"","'")
directorySafeName = directorySafeName.replace("%", " Percent")
directorySafeName = directorySafeName.replace("<", "")
directorySafeName = directorySafeName.replace(">", "")
print("Downloading", directorySafeName)
#since Windows and UNIX platforms use different directory syntax we need to know the platform
#and adjust accordingly
if platformType == 'Windows':
directoryName = currentDirectory + "\\MangaPanda\\" + str(directorySafeName)
else:
directoryName = currentDirectory + "/MangaPanda/" + str(directorySafeName)
try:
os.makedirs(directoryName)
except OSError:
if not os.path.isdir(directoryName):
raise
os.chdir(directoryName)
#loops chapter URLs to determine chapter number for both types of URLs
chapterNames = []
for i in range(len(allChaps)):
chapterNum = re.findall('((?:\d)+)', allChaps[i])
chapterNames.append(chapterNum[-1])
for i in range(len(allChaps)):
if platformType == 'Windows':
chapDirectoryName = directoryName + "\\Chapter " + str(chapterNames[i])
else:
chapDirectoryName = directoryName + "/Chapter " + str(chapterNames[i])
try:
os.makedirs(chapDirectoryName)
except OSError:
if not os.path.isdir(chapDirectoryName):
raise
os.chdir(chapDirectoryName)
#There are some special cases associated with the first loop through the chapter
isFirstLoopPage = True
chapURL = "http://www.mangapanda.com" + allChaps[i]
print("Downloading Chapter", str(chapterNames[i]))
imageLocation = 0
while 1:
imageLocation += 1
#Looks at page URLs for any and all sequences of numbers
nextChapDetermine = re.findall('((?:\d)+)', chapURL)
try:
urllibHTML = urllib.request.urlopen(chapURL).read()
except:
print('Request 1 Failed. Trying again in 30 seconds.')
time.sleep(30)
try:
urllibHTML = urllib.request.urlopen(chapURL).read()
except:
print('Request 2 Failed. Trying again in 30 seconds.')
time.sleep(30)
try:
urllibHTML = urllib.request.urlopen(chapURL).read()
except:
print('Request 3 Failed. Trying again in 30 seconds.')
time.sleep(30)
try:
urllibHTML = urllib.request.urlopen(chapURL).read()
except:
print('Request 4 Failed. Moving onto the Next manga.')
print('This was the Chapter Request that Failed.')
return
if isFirstLoopPage == True:
determineAmountOfPages = re.findall('<option value="+(.*?)\</option>', str(urllibHTML))
if len(determineAmountOfPages) == imageLocation - 1:
break
#Checks the number of files in directory in comparison to the number of images in the chapter
#If the number is the same the assumption is made that all images have been downloaded
if isFirstLoopPage == True:
isFirstLoopPage = False
numOfFileInCWD = len([name for name in os.listdir('.') if os.path.isfile(name)])
if numOfFileInCWD == len(determineAmountOfPages):
break
            #Waiting until the next request. MangaPanda doesn't like a lot of requests in a short time period.
time.sleep(1)
#grabs both the next page URL and the URL for the image on the current page
URLandIMG = re.findall(r'<div id="imgholder">+(.*?)\" name=+', str(urllibHTML))
nextPageURL = re.findall(r'<a href="+(.*?)\">', URLandIMG[0])
imageURL = re.findall(r'src="+(.*?)\"', URLandIMG[0])
extensionForIMG = re.findall('\.\D[^\.]+', imageURL[0])
imageName = "Page " + str(imageLocation) + extensionForIMG[-1]
fileExists = os.path.isfile(imageName)
            #Old code that printed each page currently downloading on its own line
            #print("Downloading Page", imageLocation)
            #New code overwrites each "Downloading Page #" with the next page
            #and is eventually overwritten by the "Downloading Chapter #"
print("Downloading Page %d" % imageLocation, end="", flush=True)
print("\r", end="", flush=True)
#If file does not already exist, opens a file, writes image binary data to it and closes
if fileExists == False:
image_worked = True
user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'
url = imageURL[0]
headers={'User-Agent':user_agent,}
request = urllib.request.Request(url,None,headers)
try:
rawImage = urllib.request.urlopen(request).read()
except:
print('Request 1 Failed. Trying again in 10 seconds.')
time.sleep(10)
try:
rawImage = urllib.request.urlopen(request).read()
except:
print('Request 2 Failed. Trying again in 10 seconds.')
time.sleep(10)
try:
rawImage = urllib.request.urlopen(request).read()
except:
print('Request 3 Failed. Trying again in 10 seconds.')
time.sleep(10)
try:
rawImage = urllib.request.urlopen(request).read()
except:
print('Request 4 Failed. Moving onto the Next image.')
image_worked = False
if image_worked:
fout = open(imageName, 'wb')
fout.write(rawImage)
fout.close()
chapURL = "http://www.mangapanda.com" + nextPageURL[0]
            # I will leave this here in case you feel the need to slow down your requests
            # to the website/server just in case something bad could happen. Remove the '#'
            # from the time.sleep line below and the program will sleep for 2 seconds after
            # each page is downloaded. You can add more time if you wish.
            #
            #time.sleep(2)
#Time between chapters as well
#time.sleep(1)
return
def MangaHere(link_to_manga_site):
success = False
Search_feature = False
currentDirectory = os.getcwd()
if platformType == 'Windows':
directoryName = currentDirectory + "\\MangaHere"
else:
directoryName = currentDirectory + "/MangaHere"
try:
os.makedirs(directoryName)
except OSError:
if not os.path.isdir(directoryName):
raise
os.chdir(directoryName)
#downloadMangaListOnce = False
downloadManga = False
try:
urllibHTML = urllib.request.urlopen(link_to_manga_site).read()
except:
print('Request 1 Failed. Trying again in 30 seconds.')
time.sleep(30)
try:
urllibHTML = urllib.request.urlopen(link_to_manga_site).read()
except:
print('Request 2 Failed. Trying again in 30 seconds.')
time.sleep(30)
try:
urllibHTML = urllib.request.urlopen(link_to_manga_site).read()
except:
print('Request 3 Failed. Trying again in 30 seconds.')
time.sleep(30)
try:
urllibHTML = urllib.request.urlopen(link_to_manga_site).read()
except:
print('Request 4 Failed. Moving onto the Next manga.')
print('This was the First Main Request that Failed.')
return
allChaps = re.findall(r' <a class="color_0077" href="(.*?)"', str(urllibHTML))
allChaps.reverse()
numOfChapLinks = len(allChaps)
mangaName = re.findall(r' <h1 class="title"><span class="title_icon"></span>(.*?)</h1>', str(urllibHTML))
try:
directorySafeName = mangaName[0]
except:
print('Invalid URL!')
return
    #Python 3.4's html.unescape converts HTML entities like '&' to their plain-text equivalents.
directorySafeName = html.unescape(directorySafeName)
#Get rid of Non-Functioning characters for Filenames
directorySafeName = directorySafeName.replace("/", " over ")
directorySafeName = directorySafeName.replace(":", "")
directorySafeName = directorySafeName.replace("?", "")
directorySafeName = directorySafeName.replace("+", " plus ")
directorySafeName = directorySafeName.replace("\"","'")
directorySafeName = directorySafeName.replace("%", " Percent ")
directorySafeName = directorySafeName.replace("<", "")
directorySafeName = directorySafeName.replace(">", "")
directorySafeName = re.sub(r'\\x\w{2}', r' ', directorySafeName)
directorySafeName = re.sub(r"\\'", r"'", directorySafeName)
directorySafeName = directorySafeName.title()
print("Downloading", directorySafeName)
if platformType == 'Windows':
directoryName = directoryName + "\\" + directorySafeName
else:
directoryName = directoryName + "/" + directorySafeName
try:
os.makedirs(directoryName)
except OSError:
if not os.path.isdir(directoryName):
raise
os.chdir(directoryName)
for i in allChaps:
skipBool1 = False
skipBool2 = False
firstLoop = True
currentPage = 0
volChapDirectoryString = ""
findVolume = re.findall(r'v\d{2}.\d+' , i)
findChap = re.findall(r'c\d{3}.\d+' , i)
if len(findVolume) == 0:
findVolume = re.findall(r'v\d{2}', i)
try:
volTempString = re.findall(r'\d{2}', findVolume[0])
except:
skipBool1 = True
if skipBool1 == False:
volTempString = str(int(volTempString[0]))
volChapDirectoryString = volChapDirectoryString + 'Vol. ' + volTempString + ' '
else:
volTempString = re.findall(r'\d{2}.\d+', findVolume[-1])
volTempString = str(float(volTempString[0]))
volChapDirectoryString = volChapDirectoryString + 'Vol. ' + volTempString + ' '
if len(findChap) == 0:
findChap = re.findall(r'c\d{3}', i)
try:
chapTempString = re.findall(r'\d{3}', findChap[0])
except:
skipBool2 = True
if skipBool2 == False:
chapTempString = str(int(chapTempString[0]))
volChapDirectoryString = volChapDirectoryString + 'Chap. ' + chapTempString
else:
chapTempString = re.findall(r'\d{3}.\d+', findChap[-1])
chapTempString = str(float(chapTempString[0]))
volChapDirectoryString = volChapDirectoryString + 'Chap. ' + chapTempString
if volChapDirectoryString == "":
            print('An error has occurred getting the chapter or volume number!')
return 1
print('Downloading', volChapDirectoryString)
if platformType == 'Windows':
volChapDirectoryName = directoryName + "\\" + volChapDirectoryString
else:
volChapDirectoryName = directoryName + "/" + volChapDirectoryString
try:
os.makedirs(volChapDirectoryName)
except OSError:
if not os.path.isdir(volChapDirectoryName):
raise
os.chdir(volChapDirectoryName)
try:
urllibIMG = str(urllib.request.urlopen(i).read())
except:
print('Request 1 Failed. Trying again in 30 seconds.')
time.sleep(30)
try:
urllibIMG = str(urllib.request.urlopen(i).read())
except:
print('Request 2 Failed. Trying again in 30 seconds.')
time.sleep(30)
try:
urllibIMG = str(urllib.request.urlopen(i).read())
except:
print('Request 3 Failed. Trying again in 30 seconds.')
time.sleep(30)
try:
urllibIMG = str(urllib.request.urlopen(i).read())
except:
print('Request 4 Failed. Moving onto the Next manga.')
print('This was the Chapter Request that Failed.')
return
trimHTML = re.findall('<select id="top_chapter_list"(.*?)read_img', urllibIMG)
try:
allPageURLs = re.findall('<option value="(.*?)" ', trimHTML[-1])
except:
print('Something went wrong when trying to find the page URL\'s!')
print('This manga cannot be downloaded at this time.')
return
for k in allPageURLs:
currentPage += 1
skipPage = False
if firstLoop == False:
#urllibReq = urllib.request.Request(k, None, {}, None, True,'POST')
urllibReq = urllib.request.Request(k)
urllibReq.method = 'POST'
try:
urllibIMG = str(urllib.request.urlopen(urllibReq).read())
except:
print('Request 1 Failed. Trying again in 30 seconds.')
time.sleep(30)
try:
urllibIMG = str(urllib.request.urlopen(urllibReq).read())
except:
print('Request 2 Failed. Trying again in 30 seconds.')
time.sleep(30)
try:
urllibIMG = str(urllib.request.urlopen(urllibReq).read())
except:
print('Request 3 Failed. Trying again in 30 seconds.')
time.sleep(30)
try:
urllibIMG = str(urllib.request.urlopen(urllibReq).read())
except:
print('Request 4 Failed. Moving onto the Next manga.')
print('This was the Page Request that Failed.')
return
if firstLoop == True:
firstLoop = False
numOfFileInCWD = len([name for name in os.listdir('.') if os.path.isfile(name)])
if numOfFileInCWD == len(allPageURLs):
break
print("Downloading Page %d" % currentPage, end="", flush=True)
print("\r", end="", flush=True)
#textFile = open("HTMLFile " + str(currentPage) + ".HTML", "w")
#textFile.write(urllibIMG)
#textFile.close()
imageURL = re.findall('<img src="(.*?)" onerror="', urllibIMG)
try:
extensionForIMG = re.findall('\.[a-z]{3}', imageURL[0])
except:
print('Page ' + str(currentPage) + ' could not be downloaded!')
skipPage = True
if skipPage == False:
imageName = "Page " + str(currentPage) + extensionForIMG[-1]
fileExists = os.path.isfile(imageName)
if fileExists == False:
image_worked = True
try:
rawImage = urllib.request.urlopen(imageURL[0]).read()
except:
print('Request 1 Failed. Trying again in 30 seconds.')
time.sleep(30)
try:
rawImage = urllib.request.urlopen(imageURL[0]).read()
except:
print('Request 2 Failed. Trying again in 30 seconds.')
time.sleep(30)
try:
rawImage = urllib.request.urlopen(imageURL[0]).read()
except:
print('Request 3 Failed. Trying again in 30 seconds.')
time.sleep(30)
try:
rawImage = urllib.request.urlopen(imageURL[0]).read()
except:
print('Request 4 Failed. Moving onto the Next image.')
image_worked = False
if image_worked:
fout = open(imageName, 'wb')
fout.write(rawImage)
fout.close()
                # I will leave this here in case you feel the need to slow down your requests
                # to the website/server just in case something bad could happen. Remove the '#'
                # from the time.sleep line below and the program will sleep for 2 seconds after
                # each page is downloaded. You can add more time if you wish.
                #
                #time.sleep(2)
return
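
# Every site function builds its own chain of str.replace calls to turn a
# title into a directory-safe name. A shared helper like this sketch could
# replace those chains; nothing below calls it, and the substitution set
# simply mirrors the MangaHere/MangaStream versions in this file.
def make_directory_safe(name):
    """Strip or substitute characters that are not allowed in directory names."""
    replacements = [
        ("/", " over "), (":", ""), ("?", ""), ("+", " plus "),
        ("\"", "'"), ("%", " Percent "), ("<", ""), (">", ""),
    ]
    for old, new in replacements:
        name = name.replace(old, new)
    return name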
def MangaStream(link_to_manga_site):
success = False
currentDirectory = os.getcwd()
if platformType == 'Windows':
MASTERdirectoryName = currentDirectory + "\\MangaStream"
else:
        MASTERdirectoryName = currentDirectory + "/MangaStream"
try:
os.makedirs(MASTERdirectoryName)
except OSError:
if not os.path.isdir(MASTERdirectoryName):
raise
#MASTERdirectoryName is the Variable that will keep the program downloading
#Different Manga to the same Mangastream Folder
os.chdir(MASTERdirectoryName)
try:
urllibHTML = urllib.request.urlopen(link_to_manga_site).read()
except:
print('Request 1 Failed. Trying again in 30 seconds.')
time.sleep(30)
try:
urllibHTML = urllib.request.urlopen(link_to_manga_site).read()
except:
print('Request 2 Failed. Trying again in 30 seconds.')
time.sleep(30)
try:
urllibHTML = urllib.request.urlopen(link_to_manga_site).read()
except:
print('Request 3 Failed. Trying again in 30 seconds.')
time.sleep(30)
try:
urllibHTML = urllib.request.urlopen(link_to_manga_site).read()
except:
print('Request 4 Failed. Moving onto the Next manga.')
print('This was the First Main Request that Failed.')
return
Manga_Title = re.findall(r'<title>(.*?) Manga', str(urllibHTML))
if len(Manga_Title) == 0:
print("Title not found. URL or HTML Error.")
return
Manga_Title_string = Manga_Title[0]
Manga_Title_string = re.sub(r'\\x\w{2}', r' ', Manga_Title_string)
    #Python 3.4's html.unescape converts HTML entities like '&' to their plain-text equivalents.
Manga_Title_string = html.unescape(Manga_Title_string)
#Get rid of Non-Functioning characters for Filenames
directorySafeName = Manga_Title_string
directorySafeName = directorySafeName.replace("/", " over ")
directorySafeName = directorySafeName.replace(":", "")
directorySafeName = directorySafeName.replace("?", "")
directorySafeName = directorySafeName.replace("+", " plus ")
directorySafeName = directorySafeName.replace("\"", "'")
directorySafeName = directorySafeName.replace("\'", "'")
directorySafeName = directorySafeName.replace("\\'", "'")
directorySafeName = directorySafeName.replace("\\", "")
directorySafeName = directorySafeName.replace("%", " Percent ")
directorySafeName = directorySafeName.replace("<", "")
directorySafeName = directorySafeName.replace(">", "")
Manga_Title_string = directorySafeName
print("Downloading", Manga_Title_string)
all_chaps_list = re.findall('<th style="width: 70%">Chapter<\/th>\\\\n<th style="width: 30%">Released<\/th>\\\\n<\/tr>\\\\n<tr>\\\\n(.*?)<\/table>', str(urllibHTML), re.DOTALL)
all_chaps_str = all_chaps_list[0]
chapter_list_tuples = re.findall(r'href="(.*?)">(.*?)</a>', str(all_chaps_str))
chapter_names = []
chapter_links = []
for i in range(len(chapter_list_tuples)):
chapter_links.append(chapter_list_tuples[i][0])
chapter_names.append(chapter_list_tuples[i][1])
#Start Manga Downloading
currentDirectory = MASTERdirectoryName
if platformType == 'Windows':
manga_directory_name = currentDirectory + "\\" + Manga_Title_string
else:
manga_directory_name = currentDirectory + "/" + Manga_Title_string
try:
os.makedirs(manga_directory_name)
except OSError:
if not os.path.isdir(manga_directory_name):
raise
os.chdir(manga_directory_name)
for i in range(len(chapter_names)):
first_chapter_bool = True
chapter_link_string = chapter_links[i]
chapter_name_string = chapter_names[i]
chapDirectoryName = ''
chapter_name_string = re.sub(r'\\x\w{2}', r' ', chapter_name_string)
        #Python 3.4's html.unescape converts HTML entities like '&' to their plain-text equivalents.
#chapter_name_string = html.unescape(chapter_name_string)
#Get rid of Non-Functioning characters for Filenames
directorySafeName = chapter_name_string
directorySafeName = directorySafeName.replace("/", " over ")
directorySafeName = directorySafeName.replace(":", "")
directorySafeName = directorySafeName.replace("?", "")
directorySafeName = directorySafeName.replace("+", " plus ")
directorySafeName = directorySafeName.replace("\"", "'")
directorySafeName = directorySafeName.replace("\'", "'")
directorySafeName = directorySafeName.replace("\\'", "'")
directorySafeName = directorySafeName.replace("\\", "")
directorySafeName = directorySafeName.replace("%", " Percent ")
directorySafeName = directorySafeName.replace("<", "")
directorySafeName = directorySafeName.replace(">", "")
chapter_name_string = directorySafeName
if platformType == 'Windows':
chapDirectoryName = manga_directory_name + "\\Chapter " + chapter_name_string
else:
chapDirectoryName = manga_directory_name + "/Chapter " + chapter_name_string
try:
os.makedirs(chapDirectoryName)
except OSError:
if not os.path.isdir(chapDirectoryName):
raise
os.chdir(chapDirectoryName)
print("Downloading Chapter", chapter_name_string)
try:
urllibHTML = urllib.request.urlopen(chapter_link_string).read()
except:
print('Request 1 Failed. Trying again in 30 seconds.')
time.sleep(30)
try:
urllibHTML = urllib.request.urlopen(chapter_link_string).read()
except:
print('Request 2 Failed. Trying again in 30 seconds.')
time.sleep(30)
try:
urllibHTML = urllib.request.urlopen(chapter_link_string).read()
except:
print('Request 3 Failed. Trying again in 30 seconds.')
time.sleep(30)
try:
urllibHTML = urllib.request.urlopen(chapter_link_string).read()
except:
print('Request 4 Failed. Moving onto the Next manga.')
print('This was the Chapter Request that Failed.')
return
page_list_raw = re.findall(r'<ul class="dropdown-menu">(.*?)</ul>', str(urllibHTML), re.DOTALL)
page_list_string = page_list_raw[-1]
list_of_some_of_the_pages = re.findall(r'href="(.*?)">', str(page_list_string))
final_page = list_of_some_of_the_pages[-1]
number_of_pages_list = re.findall(r'http://readms.com/r/.*?/.*?/\d+/(\d+)', final_page)
number_of_pages = int(number_of_pages_list[0])
chapter_url_list = re.findall(r'(http://readms.com/r/.*?/.*?/\d+/)\d+', final_page)
chapter_url = chapter_url_list[0]
for j in range(number_of_pages):
if j == 0:
numOfFileInCWD = len([name for name in os.listdir('.') if os.path.isfile(name)])
if numOfFileInCWD == number_of_pages:
break
print("Downloading Page %d" % (j+1), end="", flush=True)
print("\r", end="", flush=True)
if first_chapter_bool:
first_chapter_bool = False
page_urllibHTML = urllibHTML
else:
try:
page_urllibHTML = urllib.request.urlopen(chapter_url + str(j+1)).read()
except:
print('Request 1 Failed. Trying again in 30 seconds.')
time.sleep(30)
try:
page_urllibHTML = urllib.request.urlopen(chapter_url + str(j+1)).read()
except:
print('Request 2 Failed. Trying again in 30 seconds.')
time.sleep(30)
try:
page_urllibHTML = urllib.request.urlopen(chapter_url + str(j+1)).read()
except:
print('Request 3 Failed. Trying again in 30 seconds.')
time.sleep(30)
try:
page_urllibHTML = urllib.request.urlopen(chapter_url + str(j+1)).read()
except:
print('Request 4 Failed. Moving onto the Next manga.')
print('This was the Page Request that Failed.')
return
image_file_name_list = re.findall(r'<img id="manga-page" src="(.*?)"/></a>', str(page_urllibHTML))
image_file_name = image_file_name_list[0]
#CHECK EXTENSION. Mangastream Could use .png or .jpg or .jpeg
image_file_extension_list = re.findall(r'(\.\D[^\.]+)', image_file_name)
image_file_extension = image_file_extension_list[-1]
imageName = "Page " + str(j+1) + image_file_extension
fileExists = os.path.isfile(imageName)
#If file does not already exist, opens a file, writes image binary data to it and closes
if fileExists == False:
image_worked = True
try:
rawImage = urllib.request.urlopen(image_file_name).read()
except:
print('Request 1 Failed. Trying again in 30 seconds.')
time.sleep(30)
try:
rawImage = urllib.request.urlopen(image_file_name).read()
except:
print('Request 2 Failed. Trying again in 30 seconds.')
time.sleep(30)
try:
rawImage = urllib.request.urlopen(image_file_name).read()
except:
print('Request 3 Failed. Trying again in 30 seconds.')
time.sleep(30)
try:
rawImage = urllib.request.urlopen(image_file_name).read()
except:
print('Request 4 Failed. Moving onto the Next image.')
image_worked = False
if image_worked:
fout = open(imageName, 'wb')
fout.write(rawImage)
fout.close()
            #
            # A potential problem: after each page download you may want to artificially slow
            # the program down so you don't anger the website/server hosts with too many
            # requests. A quick test on a good connection downloaded 100 pages (around 4
            # chapters) in a minute, which means over 200 urllib requests to mangastream's
            # website in under a minute. Remove the '#' from the time.sleep line below and
            # the program will sleep for 2 seconds after each page is downloaded. You can add
            # more time if you wish.
            #
            #time.sleep(2)
return
#FULL DOWNLOAD. NO OPTIONS. THIS IS A BOT TO RUN 24/7 TO CHECK FOR UPDATES
def main():
    #Run 5 times over a ten-hour period, once every 2 hours, then wait a week
    #and repeat. 2 hours = 7200 seconds; 1 week = 604800 seconds.
currentDirectory = os.getcwd()
if platformType == 'Windows':
Main_Directory = currentDirectory + "\\Manga_Bot_Folder"
else:
Main_Directory = currentDirectory + "/Manga_Bot_Folder"
try:
os.makedirs(Main_Directory)
except OSError:
if not os.path.isdir(Main_Directory):
raise
os.chdir(Main_Directory)
Main_Directory = os.getcwd()
counter = 0
#To add more items to any list
# '', '', '', '', '', '', '', ''
#
# Lists must Look like this:
#
# batoto_manga = ['http://bato.to/comic/_/comics/one-piece-r39']
#
    # with commas between each link. Links can all be on one line or, to make it neater, each link on its own line (see the test list)
#
# Links must be to the manga's top page of each website. Examples:
# Bato: http://bato.to/comic/_/comics/one-piece-r39
# MangaPanda: http://www.mangapanda.com/one-piece
# MangaStream: http://mangastream.com/manga/one_piece
# MangaHere: http://www.mangahere.co/manga/one_piece/
#This is a List to test things/manga/url or anything else
#tests_list = ['',
# '']
batoto_manga = []
mangahere_manga = []
mangapanda_manga = []
mangastream_manga = []
while True:
#This is a loop to test things/manga/url or anything else
#print("Downloading Manga From TEST:\n")
#for i in range(len(tests_list)):
# os.chdir(Main_Directory)
# #Change this call to whatever mangasite you are testing
# MangaHere(tests_list[i])
# #Batoto()
# #MangaPanda()
# #MangaStream()
# print('\n')
### PLEASE READ ###
#Batoto has implemented anti-bot crawling measures. I would recommend you find the desired manga on
# MangaHere or MangaPanda. I will leave this here, but I would recommend leaving the list blank/empty.
print("Downloading Manga From Batoto:\n")
for i in range(len(batoto_manga)):
os.chdir(Main_Directory)
Batoto(batoto_manga[i])
print('\n')
print("Downloading Manga From MangaHere:\n")
for i in range(len(mangahere_manga)):
os.chdir(Main_Directory)
MangaHere(mangahere_manga[i])
print('\n')
print("Downloading Manga From MangaPanda:\n")
for i in range(len(mangapanda_manga)):
os.chdir(Main_Directory)
MangaPanda(mangapanda_manga[i])
print('\n')
print("Downloading Manga From MangaStream:\n")
for i in range(len(mangastream_manga)):
os.chdir(Main_Directory)
MangaStream(mangastream_manga[i])
print('\n')
counter += 1
if counter < 5:
print('\n\nSleeping for 2 Hours.\n')
time.sleep(7200)
else:
counter = 0
print('\n\nSleeping for 1 Week.\n')
time.sleep(604800)
main()
#To See any error/error code wait before the program exits completely
time.sleep(15)
| gpl-2.0 | -3,809,125,284,167,767,600 | 37.161248 | 180 | 0.519571 | false |
jmlong1027/multiscanner | storage/file.py | 1 | 2965 | import codecs
import gzip
import json
import storage
class File(storage.Storage):
DEFAULTCONF = {
'ENABLED': True,
'path': 'report.json',
'gzip': False,
'files-per-line': 1
}
def setup(self):
if self.config['gzip'] is True:
self.file_handle = gzip.open(self.config['path'], 'a')
else:
self.file_handle = codecs.open(self.config['path'], 'ab', 'utf-8')
return True
def store(self, results):
if self.config['files-per-line'] and self.config['files-per-line'] > 0:
writedata = {}
metadata = None
            # results may arrive wrapped as {'Files': {...}, 'Metadata': {...}};
            # unwrap it and remember the metadata so it can be re-attached to
            # every written batch.
            if set(results.keys()) == {'Files', 'Metadata'}:
                metadata = results['Metadata']
                results = results['Files']
i = 0
for filename in results:
writedata[filename] = results[filename]
i += 1
if i >= self.config['files-per-line']:
if metadata:
writedata = {'Files': writedata, 'Metadata': metadata}
if self.config['gzip'] is True:
self.file_handle.write(
json.dumps(writedata, sort_keys=True, separators=(',', ':'),
ensure_ascii=False).encode('utf8', 'replace'))
self.file_handle.write(b'\n')
else:
self.file_handle.write(
json.dumps(writedata, sort_keys=True, separators=(',', ':'),
ensure_ascii=False))
self.file_handle.write('\n')
i = 0
writedata = {}
if writedata:
if metadata:
writedata = {'Files': writedata, 'Metadata': metadata}
if self.config['gzip'] is True:
self.file_handle.write(
json.dumps(writedata, sort_keys=True, separators=(',', ':'),
ensure_ascii=False).encode('utf8', 'replace'))
self.file_handle.write(b'\n')
else:
self.file_handle.write(
json.dumps(writedata, sort_keys=True, separators=(',', ':'),
ensure_ascii=False))
self.file_handle.write('\n')
else:
if self.config['gzip'] is True:
self.file_handle.write(
json.dumps(results, sort_keys=True, separators=(',', ':'),
ensure_ascii=False).encode('utf8', 'replace'))
self.file_handle.write(b'\n')
else:
self.file_handle.write(
json.dumps(results, sort_keys=True, separators=(',', ':'),
ensure_ascii=False))
self.file_handle.write('\n')
def teardown(self):
self.file_handle.close()
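
# A minimal usage sketch (not part of the original module), assuming the
# Storage base class accepts a config dict in its constructor the way
# DEFAULTCONF above suggests. The path and the sample result are made up
# for illustration.
#
#   conf = dict(File.DEFAULTCONF)
#   conf['path'] = 'example-report.json'
#   writer = File(conf)
#   writer.setup()
#   writer.store({'/tmp/sample.bin': {'MD5': 'd41d8cd98f00b204e9800998ecf8427e'}})
#   writer.teardown()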
| mpl-2.0 | 6,322,562,406,190,201,000 | 38.533333 | 88 | 0.455312 | false |
forkbong/qutebrowser | qutebrowser/completion/models/miscmodels.py | 1 | 10499 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2021 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Functions that return miscellaneous completion models."""
import datetime
from typing import List, Sequence, Tuple
from qutebrowser.config import config, configdata
from qutebrowser.utils import objreg, log, utils
from qutebrowser.completion.models import completionmodel, listcategory, util
from qutebrowser.browser import inspector
def command(*, info):
"""A CompletionModel filled with non-hidden commands and descriptions."""
model = completionmodel.CompletionModel(column_widths=(20, 60, 20))
cmdlist = util.get_cmd_completions(info, include_aliases=True,
include_hidden=False)
model.add_category(listcategory.ListCategory("Commands", cmdlist))
return model
def helptopic(*, info):
"""A CompletionModel filled with help topics."""
model = completionmodel.CompletionModel(column_widths=(20, 70, 10))
cmdlist = util.get_cmd_completions(info, include_aliases=False,
include_hidden=True, prefix=':')
settings = ((opt.name, opt.description, info.config.get_str(opt.name))
for opt in configdata.DATA.values())
model.add_category(listcategory.ListCategory("Commands", cmdlist))
model.add_category(listcategory.ListCategory("Settings", settings))
return model
def quickmark(*, info=None):
"""A CompletionModel filled with all quickmarks."""
def delete(data: Sequence[str]) -> None:
"""Delete a quickmark from the completion menu."""
name = data[0]
quickmark_manager = objreg.get('quickmark-manager')
log.completion.debug('Deleting quickmark {}'.format(name))
quickmark_manager.delete(name)
utils.unused(info)
model = completionmodel.CompletionModel(column_widths=(30, 70, 0))
marks = objreg.get('quickmark-manager').marks.items()
model.add_category(listcategory.ListCategory('Quickmarks', marks,
delete_func=delete,
sort=False))
return model
def bookmark(*, info=None):
"""A CompletionModel filled with all bookmarks."""
def delete(data: Sequence[str]) -> None:
"""Delete a bookmark from the completion menu."""
urlstr = data[0]
log.completion.debug('Deleting bookmark {}'.format(urlstr))
bookmark_manager = objreg.get('bookmark-manager')
bookmark_manager.delete(urlstr)
utils.unused(info)
model = completionmodel.CompletionModel(column_widths=(30, 70, 0))
marks = objreg.get('bookmark-manager').marks.items()
model.add_category(listcategory.ListCategory('Bookmarks', marks,
delete_func=delete,
sort=False))
return model
def session(*, info=None):
"""A CompletionModel filled with session names."""
from qutebrowser.misc import sessions
utils.unused(info)
model = completionmodel.CompletionModel()
try:
sess = ((name,) for name
in sessions.session_manager.list_sessions()
if not name.startswith('_'))
model.add_category(listcategory.ListCategory("Sessions", sess))
except OSError:
log.completion.exception("Failed to list sessions!")
return model
def _tabs(*, win_id_filter=lambda _win_id: True, add_win_id=True):
"""Helper to get the completion model for tabs/other_tabs.
Args:
win_id_filter: A filter function for window IDs to include.
Should return True for all included windows.
add_win_id: Whether to add the window ID to the completion items.
"""
def delete_tab(data):
"""Close the selected tab."""
win_id, tab_index = data[0].split('/')
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=int(win_id))
tabbed_browser.on_tab_close_requested(int(tab_index) - 1)
model = completionmodel.CompletionModel(column_widths=(6, 40, 46, 8))
tabs_are_windows = config.val.tabs.tabs_are_windows
# list storing all single-tabbed windows when tabs_are_windows
windows: List[Tuple[str, str, str, str]] = []
for win_id in objreg.window_registry:
if not win_id_filter(win_id):
continue
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=win_id)
if tabbed_browser.is_shutting_down:
continue
tab_entries: List[Tuple[str, str, str, str]] = []
for idx in range(tabbed_browser.widget.count()):
tab = tabbed_browser.widget.widget(idx)
tab_str = ("{}/{}".format(win_id, idx + 1) if add_win_id
else str(idx + 1))
pid = tab.renderer_process_pid()
tab_entries.append((
tab_str,
tab.url().toDisplayString(),
tabbed_browser.widget.page_title(idx),
"" if pid is None else f"PID {pid}",
))
if tabs_are_windows:
windows += tab_entries
else:
title = str(win_id) if add_win_id else "Tabs"
cat = listcategory.ListCategory(
title, tab_entries, delete_func=delete_tab, sort=False)
model.add_category(cat)
if tabs_are_windows:
win = listcategory.ListCategory(
"Windows", windows, delete_func=delete_tab, sort=False)
model.add_category(win)
return model
def tabs(*, info=None):
"""A model to complete on open tabs across all windows.
Used for the tab-select command (and others).
"""
utils.unused(info)
return _tabs()
def other_tabs(*, info):
"""A model to complete on open tabs across all windows except the current.
Used for the tab-take command.
"""
return _tabs(win_id_filter=lambda win_id: win_id != info.win_id)
def tab_focus(*, info):
"""A model to complete on open tabs in the current window."""
model = _tabs(win_id_filter=lambda win_id: win_id == info.win_id,
add_win_id=False)
special = [
("last", "Focus the last-focused tab"),
("stack-next", "Go forward through a stack of focused tabs"),
("stack-prev", "Go backward through a stack of focused tabs"),
]
model.add_category(listcategory.ListCategory("Special", special))
return model
def window(*, info):
"""A model to complete on all open windows."""
model = completionmodel.CompletionModel(column_widths=(6, 30, 64))
windows = []
for win_id in objreg.window_registry:
if win_id == info.win_id:
continue
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=win_id)
tab_titles = (tab.title() for tab in tabbed_browser.widgets())
windows.append(("{}".format(win_id),
objreg.window_registry[win_id].windowTitle(),
", ".join(tab_titles)))
model.add_category(listcategory.ListCategory("Windows", windows))
return model
def inspector_position(*, info):
"""A model for possible inspector positions."""
utils.unused(info)
model = completionmodel.CompletionModel(column_widths=(100, 0, 0))
positions = [(e.name,) for e in inspector.Position]
category = listcategory.ListCategory("Position (optional)", positions)
model.add_category(category)
return model
def _qdatetime_to_completion_format(qdate):
if not qdate.isValid():
ts = 0
else:
ts = qdate.toMSecsSinceEpoch()
if ts < 0:
ts = 0
pydate = datetime.datetime.fromtimestamp(ts / 1000)
return pydate.strftime(config.val.completion.timestamp_format)
def _back_forward(info, go_forward):
history = info.cur_tab.history
current_idx = history.current_idx()
model = completionmodel.CompletionModel(column_widths=(5, 36, 50, 9))
if go_forward:
start = current_idx + 1
items = history.forward_items()
else:
start = 0
items = history.back_items()
entries = [
(
str(idx),
entry.url().toDisplayString(),
entry.title(),
_qdatetime_to_completion_format(entry.lastVisited())
)
for idx, entry in enumerate(items, start)
]
if not go_forward:
# make sure the most recent is at the top for :back
entries.reverse()
cat = listcategory.ListCategory("History", entries, sort=False)
model.add_category(cat)
return model
def forward(*, info):
"""A model to complete on history of the current tab.
Used for the :forward command.
"""
return _back_forward(info, go_forward=True)
def back(*, info):
"""A model to complete on history of the current tab.
Used for the :back command.
"""
return _back_forward(info, go_forward=False)
def undo(*, info):
"""A model to complete undo entries."""
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=info.win_id)
model = completionmodel.CompletionModel(column_widths=(6, 84, 10))
timestamp_format = config.val.completion.timestamp_format
entries = [
(
str(idx),
', '.join(entry.url.toDisplayString() for entry in group),
group[-1].created_at.strftime(timestamp_format)
)
for idx, group in
enumerate(reversed(tabbed_browser.undo_stack), start=1)
]
cat = listcategory.ListCategory("Closed tabs", entries, sort=False)
model.add_category(cat)
return model
| gpl-3.0 | 8,726,302,838,719,023,000 | 33.536184 | 78 | 0.622059 | false |
gbd-consult/CartoCSS-Export | CartoCSSExport/ce/cartocss.py | 1 | 66477 | """CartoCSS properties."""
# extracted from https://raw.githubusercontent.com/mapbox/carto/master/docs/latest.md
Properties = {
"background-color": {
"default": None,
"description": "Map Background color.",
"type": "color"
},
"background-image": {
"default": "",
"description": "An image that is repeated below all features on a map as a background. Accepted formats: svg, jpg, png, tiff, and webp.",
"type": "uri"
},
"background-image-comp-op": {
"default": "src-over",
"description": "Set the compositing operation used to blend the image into the background.",
"type": "keyword",
"values": [
"clear",
"src",
"dst",
"src-over",
"dst-over",
"src-in",
"dst-in",
"src-out",
"dst-out",
"src-atop",
"dst-atop",
"xor",
"plus",
"minus",
"multiply",
"divide",
"screen",
"overlay",
"darken",
"lighten",
"color-dodge",
"color-burn",
"linear-dodge",
"linear-burn",
"hard-light",
"soft-light",
"difference",
"exclusion",
"contrast",
"invert",
"invert-rgb",
"grain-merge",
"grain-extract",
"hue",
"saturation",
"color",
"value"
]
},
"background-image-opacity": {
"default": 1,
"description": "Set the opacity of the image.",
"type": "float"
},
"base": {
"default": "",
"description": "Any relative paths used to reference files will be understood as relative to this directory path if the map is loaded from an in memory object rather than from the filesystem. If the map is loaded from the filesystem and this option is not provided it will be set to the directory of the stylesheet.",
"type": "string"
},
"buffer-size": {
"default": 0,
"description": "Extra tolerance around the map (in pixels) used to ensure labels crossing tile boundaries are equally rendered in each tile (e.g. cut in each tile). Not intended to be used in combination with \"avoid-edges\".",
"type": "float"
},
"building-fill": {
"default": "The color gray will be used for fill.",
"description": "The color of the buildings fill. Note: 0.8 will be used to multiply each color component to auto-generate a darkened wall color.",
"type": "color"
},
"building-fill-opacity": {
"default": 1,
"description": "The opacity of the building as a whole, including all walls.",
"type": "float"
},
"building-height": {
"default": 0,
"description": "The height of the building in pixels.",
"type": "float"
},
"comp-op": {
"default": "src-over",
"description": "Composite operation. This defines how this layer should behave relative to layers atop or below it.",
"type": "keyword",
"values": [
"clear",
"src",
"dst",
"src-over",
"dst-over",
"src-in",
"dst-in",
"src-out",
"dst-out",
"src-atop",
"dst-atop",
"xor",
"plus",
"minus",
"multiply",
"divide",
"screen",
"overlay",
"darken",
"lighten",
"color-dodge",
"color-burn",
"linear-dodge",
"linear-burn",
"hard-light",
"soft-light",
"difference",
"exclusion",
"contrast",
"invert",
"invert-rgb",
"grain-merge",
"grain-extract",
"hue",
"saturation",
"color",
"value"
]
},
"debug-mode": {
"default": "collision",
"description": "The mode for debug rendering.",
"type": "string"
},
"direct-image-filters": {
"default": None,
"description": "A list of image filters to apply to the main canvas (see the image-filters doc for how they work on a separate canvas).",
"type": "functions",
"values": [
"agg-stack-blur",
"emboss",
"blur",
"gray",
"sobel",
"edge-detect",
"x-gradient",
"y-gradient",
"invert",
"sharpen",
"color-blind-protanope",
"color-blind-deuteranope",
"color-blind-tritanope",
"colorize-alpha",
"color-to-alpha",
"scale-hsla"
]
},
"dot-comp-op": {
"default": "src-over",
"description": "Composite operation. This defines how this layer should behave relative to layers atop or below it.",
"type": "keyword",
"values": [
"clear",
"src",
"dst",
"src-over",
"dst-over",
"src-in",
"dst-in",
"src-out",
"dst-out",
"src-atop",
"dst-atop",
"xor",
"plus",
"minus",
"multiply",
"divide",
"screen",
"overlay",
"darken",
"lighten",
"color-dodge",
"color-burn",
"linear-dodge",
"linear-burn",
"hard-light",
"soft-light",
"difference",
"exclusion",
"contrast",
"invert",
"invert-rgb",
"grain-merge",
"grain-extract",
"hue",
"saturation",
"color",
"value"
]
},
"dot-fill": {
"default": "gray",
"description": "The color of the area of the dot.",
"type": "color"
},
"dot-height": {
"default": 1,
"description": "The height of the dot in pixels.",
"type": "float"
},
"dot-opacity": {
"default": 1,
"description": "The overall opacity of the dot.",
"type": "float"
},
"dot-width": {
"default": 1,
"description": "The width of the dot in pixels.",
"type": "float"
},
"font-directory": {
"default": None,
"description": "Path to a directory which holds fonts which should be registered when the Map is loaded (in addition to any fonts that may be automatically registered).",
"type": "uri"
},
"image-filters": {
"default": None,
"description": "A list of image filters that will be applied to the active rendering canvas for a given style. The presence of one more image-filters will trigger a new canvas to be created before starting to render a style and then this canvas will be composited back into the main canvas after rendering all features and after all image-filters have been applied. See direct-image-filters if you want to apply a filter directly to the main canvas.",
"type": "functions",
"values": [
"agg-stack-blur",
"emboss",
"blur",
"gray",
"sobel",
"edge-detect",
"x-gradient",
"y-gradient",
"invert",
"sharpen",
"color-blind-protanope",
"color-blind-deuteranope",
"color-blind-tritanope",
"colorize-alpha",
"color-to-alpha",
"scale-hsla"
]
},
"image-filters-inflate": {
"default": False,
"description": "A property that can be set to True to enable using an inflated image internally for seamless blurring across tiles (requires buffered data).",
"type": "boolean"
},
"line-cap": {
"default": "butt",
"description": "The display of line endings.",
"type": "keyword",
"values": [
"butt",
"round",
"square"
]
},
"line-clip": {
"default": False,
"description": "Turning on clipping can help performance in the case that the boundaries of the geometry extent outside of tile extents. But clipping can result in undesirable rendering artifacts in rare cases.",
"type": "boolean"
},
"line-color": {
"default": "black",
"description": "The color of a drawn line.",
"type": "color"
},
"line-comp-op": {
"default": "src-over",
"description": "Composite operation. This defines how this symbolizer should behave relative to symbolizers atop or below it.",
"type": "keyword",
"values": [
"clear",
"src",
"dst",
"src-over",
"dst-over",
"src-in",
"dst-in",
"src-out",
"dst-out",
"src-atop",
"dst-atop",
"xor",
"plus",
"minus",
"multiply",
"divide",
"screen",
"overlay",
"darken",
"lighten",
"color-dodge",
"color-burn",
"linear-dodge",
"linear-burn",
"hard-light",
"soft-light",
"difference",
"exclusion",
"contrast",
"invert",
"invert-rgb",
"grain-merge",
"grain-extract",
"hue",
"saturation",
"color",
"value"
]
},
"line-dash-offset": {
"default": None,
"description": "Valid parameter but not currently used in renderers (only exists for experimental svg support in Mapnik which is not yet enabled).",
"type": "numbers"
},
"line-dasharray": {
"default": None,
"description": "A pair of length values [a,b], where (a) is the dash length and (b) is the gap length respectively. More than two values are supported for more complex patterns.",
"type": "numbers"
},
"line-gamma": {
"default": 1,
"description": "Level of antialiasing of stroke line.",
"type": "float"
},
"line-gamma-method": {
"default": "power",
"description": "An Antigrain Geometry specific rendering hint to control the quality of antialiasing. Under the hood in Mapnik this method is used in combination with the 'gamma' value (which defaults to 1). The methods are in the AGG source at https://github.com/mapnik/mapnik/blob/master/deps/agg/include/agg_gamma_functions.",
"type": "keyword",
"values": [
"power",
"linear",
"none",
"threshold",
"multiply"
]
},
"line-geometry-transform": {
"default": None,
"description": "Transform line geometry with specified function.",
"type": "functions",
"values": [
"matrix",
"translate",
"scale",
"rotate",
"skewX",
"skewY"
]
},
"line-join": {
"default": "miter",
"description": "The behavior of lines when joining.",
"type": "keyword",
"values": [
"miter",
"miter-revert",
"round",
"bevel"
]
},
"line-miterlimit": {
"default": 4,
"description": "The limit on the ratio of the miter length to the stroke-width. Used to automatically convert miter joins to bevel joins for sharp angles to avoid the miter extending beyond the thickness of the stroking path. Normally will not need to be set, but a larger value can sometimes help avoid jaggy artifacts.",
"type": "float"
},
"line-offset": {
"default": 0,
"description": "Offsets a line a number of pixels parallel to its actual path. Positive values move the line left, negative values move it right (relative to the directionality of the line).",
"type": "float"
},
"line-opacity": {
"default": 1,
"description": "The opacity of a line.",
"type": "float"
},
"line-pattern-clip": {
"default": False,
"description": "Turning on clipping can help performance in the case that the boundaries of the geometry extent outside of tile extents. But clipping can result in undesirable rendering artifacts in rare cases.",
"type": "boolean"
},
"line-pattern-comp-op": {
"default": "src-over",
"description": "Composite operation. This defines how this symbolizer should behave relative to symbolizers atop or below it.",
"type": "keyword",
"values": [
"clear",
"src",
"dst",
"src-over",
"dst-over",
"src-in",
"dst-in",
"src-out",
"dst-out",
"src-atop",
"dst-atop",
"xor",
"plus",
"minus",
"multiply",
"divide",
"screen",
"overlay",
"darken",
"lighten",
"color-dodge",
"color-burn",
"linear-dodge",
"linear-burn",
"hard-light",
"soft-light",
"difference",
"exclusion",
"contrast",
"invert",
"invert-rgb",
"grain-merge",
"grain-extract",
"hue",
"saturation",
"color",
"value"
]
},
"line-pattern-file": {
"default": None,
"description": "An image file to be repeated and warped along a line. Accepted formats: svg, jpg, png, tiff, and webp.",
"type": "uri"
},
"line-pattern-geometry-transform": {
"default": None,
"description": "Transform line geometry with specified function and apply pattern to transformed geometry.",
"type": "functions",
"values": [
"matrix",
"translate",
"scale",
"rotate",
"skewX",
"skewY"
]
},
"line-pattern-offset": {
"default": 0,
"description": "Offsets a line a number of pixels parallel to its actual path. Positive values move the line left, negative values move it right (relative to the directionality of the line).",
"type": "float"
},
"line-pattern-opacity": {
"default": 1,
"description": "Apply an opacity level to the image used for the pattern.",
"type": "float"
},
"line-pattern-simplify": {
"default": 0,
"description": "geometries are simplified by the given tolerance.",
"type": "float"
},
"line-pattern-simplify-algorithm": {
"default": "radial-distance",
"description": "geometries are simplified by the given algorithm.",
"type": "keyword",
"values": [
"radial-distance",
"zhao-saalfeld",
"visvalingam-whyatt"
]
},
"line-pattern-smooth": {
"default": 0,
"description": "Smooths out geometry angles. 0 is no smoothing, 1 is fully smoothed. Values greater than 1 will produce wild, looping geometries.",
"type": "float"
},
"line-pattern-transform": {
"default": None,
"description": "Transform line pattern instance with specified function.",
"type": "functions",
"values": [
"matrix",
"translate",
"scale",
"rotate",
"skewX",
"skewY"
]
},
"line-rasterizer": {
"default": "full",
"description": "Exposes an alternate AGG rendering method that sacrifices some accuracy for speed.",
"type": "keyword",
"values": [
"full",
"fast"
]
},
"line-simplify": {
"default": 0,
"description": "Simplify geometries by the given tolerance.",
"type": "float"
},
"line-simplify-algorithm": {
"default": "radial-distance",
"description": "Simplify geometries by the given algorithm.",
"type": "keyword",
"values": [
"radial-distance",
"zhao-saalfeld",
"visvalingam-whyatt"
]
},
"line-smooth": {
"default": 0,
"description": "Smooths out geometry angles. 0 is no smoothing, 1 is fully smoothed. Values greater than 1 will produce wild, looping geometries.",
"type": "float"
},
"line-width": {
"default": 1,
"description": "The width of a line in pixels.",
"type": "float"
},
"marker-allow-overlap": {
"default": False,
"description": "Control whether overlapping markers are shown or hidden.",
"type": "boolean"
},
"marker-avoid-edges": {
"default": False,
"description": "Avoid placing markers that intersect with tile boundaries.",
"type": "boolean"
},
"marker-clip": {
"default": False,
"description": "Turning on clipping can help performance in the case that the boundaries of the geometry extent outside of tile extents. But clipping can result in undesirable rendering artifacts in rare cases.",
"type": "boolean"
},
"marker-comp-op": {
"default": "src-over",
"description": "Composite operation. This defines how this symbolizer should behave relative to symbolizers atop or below it.",
"type": "keyword",
"values": [
"clear",
"src",
"dst",
"src-over",
"dst-over",
"src-in",
"dst-in",
"src-out",
"dst-out",
"src-atop",
"dst-atop",
"xor",
"plus",
"minus",
"multiply",
"divide",
"screen",
"overlay",
"darken",
"lighten",
"color-dodge",
"color-burn",
"linear-dodge",
"linear-burn",
"hard-light",
"soft-light",
"difference",
"exclusion",
"contrast",
"invert",
"invert-rgb",
"grain-merge",
"grain-extract",
"hue",
"saturation",
"color",
"value"
]
},
"marker-direction": {
"default": "right",
"description": "How markers should be placed along lines. With the \"auto\" setting when marker is upside down the marker is automatically rotated by 180 degrees to keep it upright. The \"auto-down\" value places marker in the opposite orientation to \"auto\". The \"left\" or \"right\" settings can be used to force marker to always be placed along a line in a given direction and therefore disables rotating if marker appears upside down. The \"left-only\" or \"right-only\" properties also force a given direction but will discard upside down markers rather than trying to flip it. The \"up\" and \"down\" settings don't adjust marker's orientation to the line direction.",
"type": "keyword",
"values": [
"auto",
"auto-down",
"left",
"right",
"left-only",
"right-only",
"up",
"down"
]
},
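    # Illustrative note: a CartoCSS rule exercising this option might read
    #   #roads { marker-placement: line; marker-direction: auto; }
    # where the layer name and companion property are only placeholders.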
"marker-file": {
"default": None,
"description": "A file that this marker shows at each placement. If no file is given, the marker will show an ellipse. Accepted formats: svg, jpg, png, tiff, and webp.",
"type": "uri"
},
"marker-fill": {
"default": "blue",
"description": "The color of the area of the marker. This property will also set the fill of elements in an SVG loaded from a file.",
"type": "color"
},
"marker-fill-opacity": {
"default": 1,
"description": "The fill opacity of the marker. This property will also set the fill-opacity of elements in an SVG loaded from a file.",
"type": "float"
},
"marker-geometry-transform": {
"default": None,
"description": "Transform marker geometry with specified function.",
"type": "functions",
"values": [
"matrix",
"translate",
"scale",
"rotate",
"skewX",
"skewY"
]
},
"marker-height": {
"default": 10,
"description": "The height of the marker, if using one of the default types.",
"type": "float"
},
"marker-ignore-placement": {
"default": False,
"description": "Value to control whether the placement of the feature will prevent the placement of other features.",
"type": "boolean"
},
"marker-line-color": {
"default": "black",
"description": "The color of the stroke around the marker. This property will also set the stroke of elements in an SVG loaded from a file.",
"type": "color"
},
"marker-line-opacity": {
"default": 1,
"description": "The opacity of a line.",
"type": "float"
},
"marker-line-width": {
"default": 0.5,
"description": "The width of the stroke around the marker, in pixels. This is positioned on the boundary, so high values can cover the area itself. This property will also set the stroke-width of elements in an SVG loaded from a file.",
"type": "float"
},
"marker-max-error": {
"default": 0.2,
"description": "N/A: not intended to be changed.",
"type": "float"
},
"marker-multi-policy": {
"default": "each",
"description": "A special setting to allow the user to control rendering behavior for 'multi-geometries' (when a feature contains multiple geometries). This setting does not apply to markers placed along lines. The 'each' policy is default and means all geometries will get a marker. The 'whole' policy means that the aggregate centroid between all geometries will be used. The 'largest' policy means that only the largest (by bounding box areas) feature will get a rendered marker (this is how text labeling behaves by default).",
"type": "keyword",
"values": [
"each",
"whole",
"largest"
]
},
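    # Illustrative note: for multipolygon features, a stylesheet could keep a
    # single marker per feature with something like
    #   #islands { marker-multi-policy: largest; }
    # where the layer name is only a placeholder.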
"marker-offset": {
"default": 0,
"description": "Offsets a marker from a line a number of pixels parallel to its actual path. Positive values move the marker left, negative values move it right (relative to the directionality of the line).",
"type": "float"
},
"marker-opacity": {
"default": 1,
"description": "The overall opacity of the marker, if set, overrides both the opacity of the fill and the opacity of the stroke.",
"type": "float"
},
"marker-placement": {
"default": "point",
"description": "Attempt to place markers on a point, in the center of a polygon, or if markers-placement:line, then multiple times along a line. 'interior' placement can be used to ensure that points placed on polygons are forced to be inside the polygon interior. The 'vertex-first' and 'vertex-last' options can be used to place markers at the first or last vertex of lines or polygons.",
"type": "keyword",
"values": [
"point",
"line",
"interior",
"vertex-first",
"vertex-last"
]
},
"marker-simplify": {
"default": 0,
"description": "geometries are simplified by the given tolerance.",
"type": "float"
},
"marker-simplify-algorithm": {
"default": "radial-distance",
"description": "geometries are simplified by the given algorithm.",
"type": "keyword",
"values": [
"radial-distance",
"zhao-saalfeld",
"visvalingam-whyatt"
]
},
"marker-smooth": {
"default": 0,
"description": "Smooths out geometry angles. 0 is no smoothing, 1 is fully smoothed. Values greater than 1 will produce wild, looping geometries.",
"type": "float"
},
"marker-spacing": {
"default": 100,
"description": "Space between repeated markers in pixels. If the spacing is less than the marker size or larger than the line segment length then no marker will be placed. Any value less than 1 will be ignored and the default will be used instead.",
"type": "float"
},
"marker-transform": {
"default": None,
"description": "Transform marker instance with specified function. Ignores map scale factor.",
"type": "functions",
"values": [
"matrix",
"translate",
"scale",
"rotate",
"skewX",
"skewY"
]
},
"marker-type": {
"default": "ellipse",
"description": "The default marker-type. If a SVG file is not given as the marker-file parameter, the renderer provides either an arrow or an ellipse (a circle if height is equal to width).",
"type": "keyword",
"values": [
"arrow",
"ellipse"
]
},
"marker-width": {
"default": 10,
"description": "The width of the marker, if using one of the default types.",
"type": "float"
},
"maximum-extent": {
"default": "-20037508.34,-20037508.34,20037508.34,20037508.34",
"description": "An extent to be used to limit the bounds used to query all layers during rendering. Should be minx, miny, maxx, maxy in the coordinates of the Map.",
"type": "string"
},
"opacity": {
"default": 1,
"description": "An alpha value for the style (which means an alpha applied to all features in separate buffer and then composited back to main buffer).",
"type": "float"
},
"point-allow-overlap": {
"default": False,
"description": "Control whether overlapping points are shown or hidden.",
"type": "boolean"
},
"point-comp-op": {
"default": "src-over",
"description": "Composite operation. This defines how this symbolizer should behave relative to symbolizers atop or below it.",
"type": "keyword",
"values": [
"clear",
"src",
"dst",
"src-over",
"dst-over",
"src-in",
"dst-in",
"src-out",
"dst-out",
"src-atop",
"dst-atop",
"xor",
"plus",
"minus",
"multiply",
"divide",
"screen",
"overlay",
"darken",
"lighten",
"color-dodge",
"color-burn",
"linear-dodge",
"linear-burn",
"hard-light",
"soft-light",
"difference",
"exclusion",
"contrast",
"invert",
"invert-rgb",
"grain-merge",
"grain-extract",
"hue",
"saturation",
"color",
"value"
]
},
"point-file": {
"default": None,
"description": "Image file to represent a point. Accepted formats: svg, jpg, png, tiff, and webp.",
"type": "uri"
},
"point-ignore-placement": {
"default": False,
"description": "Control whether the placement of the feature will prevent the placement of other features.",
"type": "boolean"
},
"point-opacity": {
"default": 1,
"description": "A value from 0 to 1 to control the opacity of the point.",
"type": "float"
},
"point-placement": {
"default": "centroid",
"description": "Control how this point should be placed. Centroid calculates the geometric center of a polygon, which can be outside of it, while interior always places inside of a polygon.",
"type": "keyword",
"values": [
"centroid",
"interior"
]
},
"point-transform": {
"default": None,
"description": "Transform point instance with specified function. Ignores map scale factor.",
"type": "functions",
"values": [
"matrix",
"translate",
"scale",
"rotate",
"skewX",
"skewY"
]
},
"polygon-clip": {
"default": False,
"description": "Turning on clipping can help performance in the case that the boundaries of the geometry extend outside of tile extents. But clipping can result in undesirable rendering artifacts in rare cases.",
"type": "boolean"
},
"polygon-comp-op": {
"default": "src-over",
"description": "Composite operation. This defines how this symbolizer should behave relative to symbolizers atop or below it.",
"type": "keyword",
"values": [
"clear",
"src",
"dst",
"src-over",
"dst-over",
"src-in",
"dst-in",
"src-out",
"dst-out",
"src-atop",
"dst-atop",
"xor",
"plus",
"minus",
"multiply",
"divide",
"screen",
"overlay",
"darken",
"lighten",
"color-dodge",
"color-burn",
"linear-dodge",
"linear-burn",
"hard-light",
"soft-light",
"difference",
"exclusion",
"contrast",
"invert",
"invert-rgb",
"grain-merge",
"grain-extract",
"hue",
"saturation",
"color",
"value"
]
},
"polygon-fill": {
"default": "The color gray will be used for fill.",
"description": "Fill color to assign to a polygon.",
"type": "color"
},
"polygon-gamma": {
"default": 1,
"description": "Level of antialiasing of polygon edges.",
"type": "float"
},
"polygon-gamma-method": {
"default": "power",
"description": "An Antigrain Geometry specific rendering hint to control the quality of antialiasing. Under the hood in Mapnik this method is used in combination with the 'gamma' value (which defaults to 1). The methods are in the AGG source at https://github.com/mapnik/mapnik/blob/master/deps/agg/include/agg_gamma_functions.",
"type": "keyword",
"values": [
"power",
"linear",
"none",
"threshold",
"multiply"
]
},
"polygon-geometry-transform": {
"default": None,
"description": "Transform polygon geometry with specified function.",
"type": "functions",
"values": [
"matrix",
"translate",
"scale",
"rotate",
"skewX",
"skewY"
]
},
"polygon-opacity": {
"default": 1,
"description": "The opacity of the polygon.",
"type": "float"
},
"polygon-pattern-alignment": {
"default": "global",
"description": "Specify whether to align pattern fills to the layer's geometry (local) or to the map (global).",
"type": "keyword",
"values": [
"global",
"local"
]
},
"polygon-pattern-clip": {
"default": False,
"description": "Turning on clipping can help performance in the case that the boundaries of the geometry extent outside of tile extents. But clipping can result in undesirable rendering artifacts in rare cases.",
"type": "boolean"
},
"polygon-pattern-comp-op": {
"default": "src-over",
"description": "Composite operation. This defines how this symbolizer should behave relative to symbolizers atop or below it.",
"type": "keyword",
"values": [
"clear",
"src",
"dst",
"src-over",
"dst-over",
"src-in",
"dst-in",
"src-out",
"dst-out",
"src-atop",
"dst-atop",
"xor",
"plus",
"minus",
"multiply",
"divide",
"screen",
"overlay",
"darken",
"lighten",
"color-dodge",
"color-burn",
"linear-dodge",
"linear-burn",
"hard-light",
"soft-light",
"difference",
"exclusion",
"contrast",
"invert",
"invert-rgb",
"grain-merge",
"grain-extract",
"hue",
"saturation",
"color",
"value"
]
},
"polygon-pattern-file": {
"default": None,
"description": "Image to use as a repeated pattern fill within a polygon. Accepted formats: svg, jpg, png, tiff, and webp.",
"type": "uri"
},
"polygon-pattern-gamma": {
"default": 1,
"description": "Level of antialiasing of polygon pattern edges.",
"type": "float"
},
"polygon-pattern-geometry-transform": {
"default": None,
"description": "Transform polygon geometry with specified function and apply pattern to transformed geometry.",
"type": "functions",
"values": [
"matrix",
"translate",
"scale",
"rotate",
"skewX",
"skewY"
]
},
"polygon-pattern-opacity": {
"default": 1,
"description": "Apply an opacity level to the image used for the pattern.",
"type": "float"
},
"polygon-pattern-simplify": {
"default": 0,
"description": "geometries are simplified by the given tolerance.",
"type": "float"
},
"polygon-pattern-simplify-algorithm": {
"default": "radial-distance",
"description": "geometries are simplified by the given algorithm.",
"type": "keyword",
"values": [
"radial-distance",
"zhao-saalfeld",
"visvalingam-whyatt"
]
},
"polygon-pattern-smooth": {
"default": 0,
"description": "Smooths out geometry angles. 0 is no smoothing, 1 is fully smoothed. Values greater than 1 will produce wild, looping geometries.",
"type": "float"
},
"polygon-pattern-transform": {
"default": None,
"description": "Transform polygon pattern instance with specified function.",
"type": "functions",
"values": [
"matrix",
"translate",
"scale",
"rotate",
"skewX",
"skewY"
]
},
"polygon-simplify": {
"default": 0,
"description": "Simplify geometries by the given tolerance.",
"type": "float"
},
"polygon-simplify-algorithm": {
"default": "radial-distance",
"description": "Simplify geometries by the given algorithm.",
"type": "keyword",
"values": [
"radial-distance",
"zhao-saalfeld",
"visvalingam-whyatt"
]
},
"polygon-smooth": {
"default": 0,
"description": "Smooths out geometry angles. 0 is no smoothing, 1 is fully smoothed. Values greater than 1 will produce wild, looping geometries.",
"type": "float"
},
"raster-colorizer-default-color": {
"default": "transparent",
"description": "This can be any color. Sets the color that is applied to all values outside of the range of the colorizer-stops. If not supplied pixels will be fully transparent.",
"type": "color"
},
"raster-colorizer-default-mode": {
"default": "linear",
"description": "This can be either discrete, linear or exact. If it is not specified then the default is linear.",
"type": "keyword",
"values": [
"discrete",
"linear",
"exact"
]
},
"raster-colorizer-epsilon": {
"default": 1.1920928955078125e-07,
"description": "This can be any positive floating point value and will be used as a tolerance in floating point comparisions. The higher the value the more likely a stop will match and color data.",
"type": "float"
},
"raster-colorizer-stops": {
"default": "",
"description": "Assigns raster data values to colors. Stops must be listed in ascending order, and contain at a minimum the value and the associated color. You can also include the color-mode as a third argument, like stop(100,#fff,exact).",
"type": "tags"
},
"raster-comp-op": {
"default": "src-over",
"description": "Composite operation. This defines how this symbolizer should behave relative to symbolizers atop or below it.",
"type": "keyword",
"values": [
"clear",
"src",
"dst",
"src-over",
"dst-over",
"src-in",
"dst-in",
"src-out",
"dst-out",
"src-atop",
"dst-atop",
"xor",
"plus",
"minus",
"multiply",
"divide",
"screen",
"overlay",
"darken",
"lighten",
"color-dodge",
"color-burn",
"linear-dodge",
"linear-burn",
"hard-light",
"soft-light",
"difference",
"exclusion",
"contrast",
"invert",
"invert-rgb",
"grain-merge",
"grain-extract",
"hue",
"saturation",
"color",
"value"
]
},
"raster-filter-factor": {
"default": -1,
"description": "This is used by the Raster or Gdal datasources to pre-downscale images using overviews. Higher numbers can sometimes cause much better scaled image output, at the cost of speed.",
"type": "float"
},
"raster-mesh-size": {
"default": 16,
"description": "A reduced resolution mesh is used for raster reprojection, and the total image size is divided by the mesh-size to determine the quality of that mesh. Values for mesh-size larger than the default will result in faster reprojection but might lead to distortion.",
"type": "unsigned"
},
"raster-opacity": {
"default": 1,
"description": "The opacity of the raster symbolizer on top of other symbolizers.",
"type": "float"
},
"raster-scaling": {
"default": "near",
"description": "The scaling algorithm used to making different resolution versions of this raster layer. Bilinear is a good compromise between speed and accuracy, while lanczos gives the highest quality.",
"type": "keyword",
"values": [
"near",
"fast",
"bilinear",
"bicubic",
"spline16",
"spline36",
"hanning",
"hamming",
"hermite",
"kaiser",
"quadric",
"catrom",
"gaussian",
"bessel",
"mitchell",
"sinc",
"lanczos",
"blackman"
]
},
"shield-allow-overlap": {
"default": False,
"description": "Control whether overlapping shields are shown or hidden.",
"type": "boolean"
},
"shield-avoid-edges": {
"default": False,
"description": "Avoid placing shields that intersect with tile boundaries.",
"type": "boolean"
},
"shield-character-spacing": {
"default": 0,
"description": "Horizontal spacing between characters (in pixels). Currently works for point placement only, not line placement.",
"type": "unsigned"
},
"shield-clip": {
"default": False,
"description": "Turning on clipping can help performance in the case that the boundaries of the geometry extent outside of tile extents. But clipping can result in undesirable rendering artifacts in rare cases.",
"type": "boolean"
},
"shield-comp-op": {
"default": "src-over",
"description": "Composite operation. This defines how this symbolizer should behave relative to symbolizers atop or below it.",
"type": "keyword",
"values": [
"clear",
"src",
"dst",
"src-over",
"dst-over",
"src-in",
"dst-in",
"src-out",
"dst-out",
"src-atop",
"dst-atop",
"xor",
"plus",
"minus",
"multiply",
"divide",
"screen",
"overlay",
"darken",
"lighten",
"color-dodge",
"color-burn",
"linear-dodge",
"linear-burn",
"hard-light",
"soft-light",
"difference",
"exclusion",
"contrast",
"invert",
"invert-rgb",
"grain-merge",
"grain-extract",
"hue",
"saturation",
"color",
"value"
]
},
"shield-dx": {
"default": 0,
"description": "Displace shield by fixed amount, in pixels, +/- along the X axis. A positive value will shift the text right.",
"type": "float"
},
"shield-dy": {
"default": 0,
"description": "Displace shield by fixed amount, in pixels, +/- along the Y axis. A positive value will shift the text down.",
"type": "float"
},
"shield-face-name": {
"default": None,
"description": "Font name and style to use for the shield text.",
"type": "string"
},
"shield-file": {
"default": None,
"description": "Image file to render behind the shield text. Accepted formats: svg, jpg, png, tiff, and webp.",
"type": "uri"
},
"shield-fill": {
"default": "black",
"description": "The color of the shield text.",
"type": "color"
},
"shield-halo-comp-op": {
"default": "src-over",
"description": "Composite operation. This defines how this symbolizer should behave relative to symbolizers atop or below it.",
"type": "keyword",
"values": [
"clear",
"src",
"dst",
"src-over",
"dst-over",
"src-in",
"dst-in",
"src-out",
"dst-out",
"src-atop",
"dst-atop",
"xor",
"plus",
"minus",
"multiply",
"screen",
"overlay",
"darken",
"lighten",
"color-dodge",
"color-burn",
"hard-light",
"soft-light",
"difference",
"exclusion",
"contrast",
"invert",
"invert-rgb",
"grain-merge",
"grain-extract",
"hue",
"saturation",
"color",
"value"
]
},
"shield-halo-fill": {
"default": "white",
"description": "Specifies the color of the halo around the text.",
"type": "color"
},
"shield-halo-opacity": {
"default": 1,
"description": "A number from 0 to 1 specifying the opacity for the text halo.",
"type": "float"
},
"shield-halo-radius": {
"default": 0,
"description": "Specify the radius of the halo in pixels.",
"type": "float"
},
"shield-halo-rasterizer": {
"default": "full",
"description": "Exposes an alternate text halo rendering method that sacrifices quality for speed.",
"type": "keyword",
"values": [
"full",
"fast"
]
},
"shield-halo-transform": {
"default": "",
"description": "Transform shield halo relative to the actual text with specified function. Allows for shadow or embossed effects. Ignores map scale factor.",
"type": "functions",
"values": [
"matrix",
"translate",
"scale",
"rotate",
"skewX",
"skewY"
]
},
"shield-horizontal-alignment": {
"default": "auto",
"description": "The shield's horizontal alignment from its centerpoint.",
"type": "keyword",
"values": [
"left",
"middle",
"right",
"auto"
]
},
"shield-justify-alignment": {
"default": "auto",
"description": "Define how text in a shield's label is justified.",
"type": "keyword",
"values": [
"left",
"center",
"right",
"auto"
]
},
"shield-label-position-tolerance": {
"default": "shield-spacing/2.0",
"description": "Allows the shield to be displaced from its ideal position by a number of pixels (only works with placement:line).",
"type": "float"
},
"shield-line-spacing": {
"default": 0,
"description": "Vertical spacing between lines of multiline labels (in pixels).",
"type": "float"
},
"shield-margin": {
"default": 0,
"description": "Minimum distance that a shield can be placed from any other text, shield, or marker.",
"type": "float"
},
"shield-min-distance": {
"default": 0,
"description": "Minimum distance to the next shield with the same text. Only works for line placement.",
"type": "float"
},
"shield-min-padding": {
"default": 0,
"description": "Minimum distance a shield will be placed from the edge of a tile. This option is similar to shield-avoid-edges:True except that the extra margin is used to discard cases where the shield+margin are not fully inside the tile.",
"type": "float"
},
"shield-name": {
"default": "",
"description": "Value to use for a shield\"s text label. Data columns are specified using brackets like [column_name].",
"type": "string"
},
"shield-opacity": {
"default": 1,
"description": "The opacity of the image used for the shield.",
"type": "float"
},
"shield-placement": {
"default": "point",
"description": "How this shield should be placed. Point placement places one shield on top of a point geometry and at the centroid of a polygon or the middle point of a line, line places along lines multiple times per feature, vertex places on the vertexes of polygons, and interior attempts to place inside of a polygon.",
"type": "keyword",
"values": [
"point",
"line",
"vertex",
"interior"
]
},
"shield-placement-type": {
"default": "dummy",
"description": "Re-position and/or re-size shield to avoid overlaps. \"simple\" for basic algorithm (using shield-placements string,) \"dummy\" to turn this feature off.",
"type": "keyword",
"values": [
"dummy",
"simple",
"list"
]
},
"shield-placements": {
"default": "",
"description": "If \"placement-type\" is set to \"simple\", use this \"POSITIONS,[SIZES]\" string. An example is shield-placements: \"E,NE,SE,W,NW,SW\";.",
"type": "string"
},
"shield-repeat-distance": {
"default": 0,
"description": "Minimum distance between repeated shields. If set this will prevent shields being rendered nearby each other that contain the same text. Similar to shield-min-distance with the difference that it works the same no matter what placement strategy is used.",
"type": "float"
},
"shield-simplify": {
"default": 0,
"description": "Simplify the geometries used for shield placement by the given tolerance.",
"type": "float"
},
"shield-simplify-algorithm": {
"default": "radial-distance",
"description": "Simplify the geometries used for shield placement by the given algorithm.",
"type": "keyword",
"values": [
"radial-distance",
"zhao-saalfeld",
"visvalingam-whyatt"
]
},
"shield-size": {
"default": 10,
"description": "The size of the shield text in pixels.",
"type": "float"
},
"shield-smooth": {
"default": 0,
"description": "Smooths out the angles of the geometry used for shield placement. 0 is no smoothing, 1 is fully smoothed. Values greater than 1 will produce wild, looping geometries.",
"type": "float"
},
"shield-spacing": {
"default": 0,
"description": "Distance the renderer should use to try to place repeated shields on a line.",
"type": "float"
},
"shield-text-dx": {
"default": 0,
"description": "Displace text within shield by fixed amount, in pixels, +/- along the X axis. A positive value will shift the shield right.",
"type": "float"
},
"shield-text-dy": {
"default": 0,
"description": "Displace text within shield by fixed amount, in pixels, +/- along the Y axis. A positive value will shift the shield down.",
"type": "float"
},
"shield-text-opacity": {
"default": 1,
"description": "The opacity of the text placed on top of the shield.",
"type": "float"
},
"shield-text-transform": {
"default": None,
"description": "Transform the case of the characters.",
"type": "keyword",
"values": [
"none",
"uppercase",
"lowercase",
"capitalize",
"reverse"
]
},
"shield-transform": {
"default": None,
"description": "Transform shield instance with specified function. Ignores map scale factor.",
"type": "functions",
"values": [
"matrix",
"translate",
"scale",
"rotate",
"skewX",
"skewY"
]
},
"shield-unlock-image": {
"default": False,
"description": "This parameter should be set to True if you are trying to position text beside rather than on top of the shield image.",
"type": "boolean"
},
"shield-vertical-alignment": {
"default": "middle",
"description": "The shield's vertical alignment from its centerpoint.",
"type": "keyword",
"values": [
"top",
"middle",
"bottom",
"auto"
]
},
"shield-wrap-before": {
"default": False,
"description": "Wrap text before wrap-width is reached.",
"type": "boolean"
},
"shield-wrap-character": {
"default": None,
"description": "Use this character instead of a space to wrap long names.",
"type": "string"
},
"shield-wrap-width": {
"default": 0,
"description": "Length of a chunk of text in pixels before wrapping text. If set to zero, text doesn't wrap.",
"type": "unsigned"
},
"srs": {
"default": "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs",
"description": "Map spatial reference (proj4 string).",
"type": "string"
},
"text-align": {
"default": "auto",
"description": "Define how text is justified.",
"type": "keyword",
"values": [
"left",
"right",
"center",
"auto"
]
},
"text-allow-overlap": {
"default": False,
"description": "Control whether overlapping text is shown or hidden.",
"type": "boolean"
},
"text-avoid-edges": {
"default": False,
"description": "Avoid placing labels that intersect with tile boundaries.",
"type": "boolean"
},
"text-character-spacing": {
"default": 0,
"description": "Horizontal spacing adjustment between characters in pixels. This value is ignored when horizontal-alignment is set to adjust. Typographic ligatures are turned off when this value is greater than zero.",
"type": "float"
},
"text-clip": {
"default": False,
"description": "Turning on clipping can help performance in the case that the boundaries of the geometry extent outside of tile extents. But clipping can result in undesirable rendering artifacts in rare cases.",
"type": "boolean"
},
"text-comp-op": {
"default": "src-over",
"description": "Composite operation. This defines how this symbolizer should behave relative to symbolizers atop or below it.",
"type": "keyword",
"values": [
"clear",
"src",
"dst",
"src-over",
"dst-over",
"src-in",
"dst-in",
"src-out",
"dst-out",
"src-atop",
"dst-atop",
"xor",
"plus",
"minus",
"multiply",
"divide",
"screen",
"overlay",
"darken",
"lighten",
"color-dodge",
"color-burn",
"linear-dodge",
"linear-burn",
"hard-light",
"soft-light",
"difference",
"exclusion",
"contrast",
"invert",
"invert-rgb",
"grain-merge",
"grain-extract",
"hue",
"saturation",
"color",
"value"
]
},
"text-dx": {
"default": 0,
"description": "Displace text by fixed amount, in pixels, +/- along the X axis. With \"dummy\" placement-type, a positive value displaces to the right. With \"simple\" placement-type, it is either left, right or unchanged, depending on the placement selected. Any non-zero value implies \"horizontal-alignment\" changes to \"left\" by default. Has no effect with 'line' text-placement-type.",
"type": "float"
},
"text-dy": {
"default": 0,
"description": "Displace text by fixed amount, in pixels, +/- along the Y axis. With \"dummy\" placement-type, a positive value displaces downwards. With \"simple\" placement-type, it is either up, down or unchanged, depending on the placement selected. With \"line\" placement-type, a positive value displaces above the path.",
"type": "float"
},
"text-face-name": {
"default": None,
"description": "Font name and style to render a label in.",
"type": "string"
},
"text-fill": {
"default": "black",
"description": "Specifies the color for the text.",
"type": "color"
},
"text-font-feature-settings": {
"default": "",
"description": "Comma separated list of OpenType typographic features. The syntax and semantics conforms to font-feature-settings from W3C CSS.",
"type": "string"
},
"text-halo-comp-op": {
"default": "src-over",
"description": "Composite operation. This defines how this symbolizer should behave relative to symbolizers atop or below it.",
"type": "keyword",
"values": [
"clear",
"src",
"dst",
"src-over",
"dst-over",
"src-in",
"dst-in",
"src-out",
"dst-out",
"src-atop",
"dst-atop",
"xor",
"plus",
"minus",
"multiply",
"screen",
"overlay",
"darken",
"lighten",
"color-dodge",
"color-burn",
"hard-light",
"soft-light",
"difference",
"exclusion",
"contrast",
"invert",
"invert-rgb",
"grain-merge",
"grain-extract",
"hue",
"saturation",
"color",
"value"
]
},
"text-halo-fill": {
"default": "white",
"description": "Specifies the color of the halo around the text.",
"type": "color"
},
"text-halo-opacity": {
"default": 1,
"description": "A number from 0 to 1 specifying the opacity for the text halo.",
"type": "float"
},
"text-halo-radius": {
"default": 0,
"description": "Specify the radius of the halo in pixels.",
"type": "float"
},
"text-halo-rasterizer": {
"default": "full",
"description": "Exposes an alternate text halo rendering method that sacrifices quality for speed.",
"type": "keyword",
"values": [
"full",
"fast"
]
},
"text-halo-transform": {
"default": "",
"description": "Transform text halo relative to the actual text with specified function. Allows for shadow or embossed effects. Ignores map scale factor.",
"type": "functions",
"values": [
"matrix",
"translate",
"scale",
"rotate",
"skewX",
"skewY"
]
},
"text-horizontal-alignment": {
"default": "auto",
"description": "The text's horizontal alignment from it's centerpoint. If placement is set to line, then adjust can be set to auto-fit the text to the length of the path by dynamically calculating character-spacing.",
"type": "keyword",
"values": [
"left",
"middle",
"right",
"auto",
"adjust"
]
},
"text-label-position-tolerance": {
"default": "text-spacing/2.0",
"description": "Allows the label to be displaced from its ideal position by a number of pixels (only works with placement:line).",
"type": "float"
},
"text-largest-bbox-only": {
"default": True,
"description": "Controls default labeling behavior on multipolygons. The default is True and means that only the polygon with largest bbox is labeled.",
"type": "boolean"
},
"text-line-spacing": {
"default": 0,
"description": "Vertical spacing adjustment between lines in pixels.",
"type": "float"
},
"text-margin": {
"default": 0,
"description": "Minimum distance that a label can be placed from any other text, shield, or marker.",
"type": "float"
},
"text-max-char-angle-delta": {
"default": 22.5,
"description": "The maximum angle change, in degrees, allowed between adjacent characters in a label. This value internally is converted to radians to the default is 22.5*math.pi/180.0. The higher the value the fewer labels will be placed around around sharp corners.",
"type": "float"
},
"text-min-distance": {
"default": 0,
"description": "Minimum distance to the next label with the same text. Only works for line placement. Deprecated: replaced by text-repeat-distance and text-margin",
"type": "float"
},
"text-min-padding": {
"default": 0,
"description": "Minimum distance a text label will be placed from the edge of a tile. This option is similar to shield-avoid-edges:True except that the extra margin is used to discard cases where the shield+margin are not fully inside the tile.",
"type": "float"
},
"text-min-path-length": {
"default": 0,
"description": "Place labels only on polygons and lines with a bounding width longer than this value (in pixels).",
"type": "float"
},
"text-name": {
"default": None,
"description": "Value to use for a text label. Data columns are specified using brackets like [column_name].",
"type": "string"
},
"text-opacity": {
"default": 1,
"description": "A number from 0 to 1 specifying the opacity for the text.",
"type": "float"
},
"text-orientation": {
"default": 0,
"description": "Rotate the text. (only works with text-placement:point).",
"type": "float"
},
"text-placement": {
"default": "point",
"description": "How this label should be placed. Point placement places one label on top of a point geometry and at the centroid of a polygon or the middle point of a line, line places along lines multiple times per feature, vertex places on the vertexes of polygons, and interior attempts to place inside of a polygon.",
"type": "keyword",
"values": [
"point",
"line",
"vertex",
"interior"
]
},
"text-placement-type": {
"default": "dummy",
"description": "Re-position and/or re-size text to avoid overlaps. \"simple\" for basic algorithm (using text-placements string,) \"dummy\" to turn this feature off.",
"type": "keyword",
"values": [
"dummy",
"simple",
"list"
]
},
"text-placements": {
"default": "",
"description": "If \"placement-type\" is set to \"simple\", use this \"POSITIONS,[SIZES]\" string. An example is text-placements: \"E,NE,SE,W,NW,SW\";.",
"type": "string"
},
"text-ratio": {
"default": 0,
"description": "Define the amount of text (of the total) present on successive lines when wrapping occurs.",
"type": "unsigned"
},
"text-repeat-distance": {
"default": 0,
"description": "Minimum distance between repeated text. If set this will prevent text labels being rendered nearby each other that contain the same text. Similar to text-min-distance with the difference that it works the same no matter what placement strategy is used.",
"type": "float"
},
"text-repeat-wrap-character": {
"default": False,
"description": "Keep the character used to wrap a line instead of removing it, and repeat it on the new line.",
"type": "boolean"
},
"text-rotate-displacement": {
"default": False,
"description": "Rotates the displacement around the placement origin by the angle given by \"orientation\".",
"type": "boolean"
},
"text-simplify": {
"default": 0,
"description": "Simplify the geometries used for text placement by the given tolerance.",
"type": "float"
},
"text-simplify-algorithm": {
"default": "radial-distance",
"description": "Simplify the geometries used for text placement by the given algorithm.",
"type": "keyword",
"values": [
"radial-distance",
"zhao-saalfeld",
"visvalingam-whyatt"
]
},
"text-size": {
"default": 10,
"description": "Text size in pixels.",
"type": "float"
},
"text-smooth": {
"default": 0,
"description": "Smooths out the angles of the geometry used for text placement. 0 is no smoothing, 1 is fully smoothed. Values greater than 1 will produce wild, looping geometries.",
"type": "float"
},
"text-spacing": {
"default": 0,
"description": "Distance the renderer should use to try to place repeated text labels on a line.",
"type": "unsigned"
},
"text-transform": {
"default": None,
"description": "Transform the case of the characters.",
"type": "keyword",
"values": [
"none",
"uppercase",
"lowercase",
"capitalize",
"reverse"
]
},
"text-upright": {
"default": "auto",
"description": "How this label should be placed along lines. By default when more than half of a label's characters are upside down the label is automatically flipped to keep it upright. By changing this parameter you can prevent this \"auto-upright\" behavior. The \"auto-down\" value places text in the opposite orientation to \"auto\". The \"left\" or \"right\" settings can be used to force text to always be placed along a line in a given direction and therefore disables flipping if text appears upside down. The \"left-only\" or \"right-only\" properties also force a given direction but will discard upside down text rather than trying to flip it.",
"type": "keyword",
"values": [
"auto",
"auto-down",
"left",
"right",
"left-only",
"right-only"
]
},
"text-vertical-alignment": {
"default": "auto",
"description": "Position of label relative to point position.",
"type": "keyword",
"values": [
"top",
"middle",
"bottom",
"auto"
]
},
"text-wrap-before": {
"default": False,
"description": "Wrap text before wrap-width is reached.",
"type": "boolean"
},
"text-wrap-character": {
"default": None,
"description": "Use this character instead of a space to wrap long text.",
"type": "string"
},
"text-wrap-width": {
"default": 0,
"description": "Length of a chunk of text in pixels before wrapping text. If set to zero, text doesn't wrap.",
"type": "unsigned"
}
}
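# Illustrative consumer sketch: assuming the mapping above is bound to a
# module-level name (the name `_reference` below is only a placeholder), a
# caller could look up the allowed values of a property like so:
#
#   def allowed_values(prop, reference=_reference):
#       return reference.get(prop, {}).get('values', [])
#
#   # e.g. allowed_values('marker-placement')
#   # -> ['point', 'line', 'interior', 'vertex-first', 'vertex-last']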
| gpl-2.0 | 491,658,651,293,921,540 | 33.713838 | 684 | 0.521985 | false |
point97/hapifis | server/apps/survey/migrations/0058_auto__add_field_question_skip_condition.py | 1 | 10477 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Question.skip_condition'
db.add_column(u'survey_question', 'skip_condition',
self.gf('django.db.models.fields.CharField')(max_length=254, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Question.skip_condition'
db.delete_column(u'survey_question', 'skip_condition')
models = {
u'survey.location': {
'Meta': {'object_name': 'Location'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lat': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '7'}),
'lng': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '7'}),
'respondant': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Respondant']", 'null': 'True', 'blank': 'True'}),
'response': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Response']"})
},
u'survey.locationanswer': {
'Meta': {'object_name': 'LocationAnswer'},
'answer': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Location']"})
},
u'survey.multianswer': {
'Meta': {'object_name': 'MultiAnswer'},
'answer_label': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'answer_text': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'response': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Response']"})
},
u'survey.option': {
'Meta': {'object_name': 'Option'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.SlugField', [], {'max_length': '64'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'rows': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'integer'", 'max_length': '20'})
},
u'survey.page': {
'Meta': {'ordering': "['survey', 'question__order']", 'object_name': 'Page'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']", 'null': 'True', 'blank': 'True'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Survey']", 'null': 'True', 'blank': 'True'})
},
u'survey.question': {
'Meta': {'ordering': "['order']", 'object_name': 'Question'},
'allow_other': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'cols': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'filterBy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'filter_questions': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'filter_questions_rel_+'", 'null': 'True', 'to': u"orm['survey.Question']"}),
'foreach_question': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'foreach'", 'null': 'True', 'to': u"orm['survey.Question']"}),
'grid_cols': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'grid_cols'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['survey.Option']"}),
'hoist_answers': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'hoisted'", 'null': 'True', 'to': u"orm['survey.Question']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'info': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'integer_max': ('django.db.models.fields.IntegerField', [], {'default': '365', 'null': 'True', 'blank': 'True'}),
'integer_min': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
'lat': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'lng': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'min_zoom': ('django.db.models.fields.IntegerField', [], {'default': '10', 'null': 'True', 'blank': 'True'}),
'modalQuestion': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'modal_question'", 'null': 'True', 'to': u"orm['survey.Question']"}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['survey.Option']", 'null': 'True', 'blank': 'True'}),
'options_from_previous_answer': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'options_json': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'randomize_groups': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'report_type': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '20', 'null': 'True'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'rows': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'skip_condition': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '64'}),
'term_condition': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.TextField', [], {}),
'type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '20'}),
'visualize': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'zoom': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'survey.respondant': {
'Meta': {'object_name': 'Respondant'},
'complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'county': ('django.db.models.fields.CharField', [], {'max_length': '240', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'default': 'None', 'max_length': '254', 'null': 'True', 'blank': 'True'}),
'last_question': ('django.db.models.fields.CharField', [], {'max_length': '240', 'null': 'True', 'blank': 'True'}),
'locations': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'responses': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'responses'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['survey.Response']"}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '240', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '20', 'null': 'True', 'blank': 'True'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Survey']"}),
'ts': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 7, 2, 0, 0)'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'01496547-962e-4773-a38d-bd6dacdc25ca'", 'max_length': '36', 'primary_key': 'True'})
},
u'survey.response': {
'Meta': {'object_name': 'Response'},
'answer': ('django.db.models.fields.TextField', [], {}),
'answer_raw': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']"}),
'respondant': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Respondant']", 'null': 'True', 'blank': 'True'}),
'ts': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 7, 2, 0, 0)'})
},
u'survey.survey': {
'Meta': {'object_name': 'Survey'},
'anon': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
'offline': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'questions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['survey.Question']", 'null': 'True', 'through': u"orm['survey.Page']", 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '254'}),
'states': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['survey'] | gpl-3.0 | -4,969,207,650,817,675,000 | 80.224806 | 207 | 0.549203 | false |
ceph/ceph-ansible | tests/functional/tests/rbd-mirror/test_rbd_mirror.py | 1 | 1848 | import pytest
import json
class TestRbdMirrors(object):
@pytest.mark.no_docker
def test_rbd_mirror_is_installed(self, node, host):
assert host.package("rbd-mirror").is_installed
def test_rbd_mirror_service_enabled_and_running(self, node, host):
service_name = "ceph-rbd-mirror@rbd-mirror.{hostname}".format(
hostname=node["vars"]["inventory_hostname"]
)
s = host.service(service_name)
assert s.is_enabled
assert s.is_running
def test_rbd_mirror_is_up(self, node, host, setup):
hostname = node["vars"]["inventory_hostname"]
cluster = setup["cluster_name"]
container_binary = setup["container_binary"]
daemons = []
if node['docker']:
container_exec_cmd = '{container_binary} exec ceph-rbd-mirror-{hostname}'.format( # noqa E501
hostname=hostname, container_binary=container_binary)
else:
container_exec_cmd = ''
hostname = node["vars"]["inventory_hostname"]
cluster = setup['cluster_name']
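        # Query the cluster status as JSON with the bootstrap-rbd-mirror keyring and
        # collect the hostname of every rbd-mirror daemon listed in the servicemap.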
cmd = "sudo {container_exec_cmd} ceph --name client.bootstrap-rbd-mirror --keyring /var/lib/ceph/bootstrap-rbd-mirror/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format( # noqa E501
container_exec_cmd=container_exec_cmd,
hostname=hostname,
cluster=cluster
)
output = host.check_output(cmd)
status = json.loads(output)
daemon_ids = [i for i in status["servicemap"]["services"]
["rbd-mirror"]["daemons"].keys() if i != "summary"]
for daemon_id in daemon_ids:
daemons.append(status["servicemap"]["services"]["rbd-mirror"]
["daemons"][daemon_id]["metadata"]["hostname"])
assert hostname in daemons
| apache-2.0 | 4,185,165,945,069,044,000 | 41.976744 | 216 | 0.605519 | false |
mwalercz/virus-total-helper | server/dispatcher.py | 1 | 1180 | import inspect
from server.customhttp import HTTPResponse
class NoSuchUrl(Exception):
def __init__(self, url):
self.url = url
class Dispatcher:
def __init__(self, urls, scheduler, deque):
self.deque = deque
self.urls = urls
self.scheduler = scheduler
def dispatch(self, request):
fun = self._pick_handler_function(request.command, request.path)
return self._execute_handler_function(request, fun)
def _pick_handler_function(self, command, path):
key = command + path
if key in self.urls:
return self.urls[key]
else:
raise NoSuchUrl(key)
def _execute_handler_function(self, request, fun):
parameter_number = len(inspect.signature(fun).parameters)
if parameter_number == 2:
request.scheduler = self.scheduler
request.deque = self.deque
return fun(request, HTTPResponse())
else:
raise ArgumentLookupError(fun)
class ArgumentLookupError(Exception):
def __init__(self, fun):
self.fun = fun
def __str__(self):
        return repr('cant find proper params in ' + str(self.fun))
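# Illustrative usage sketch; the handler and URL key below are hypothetical and
# only follow the conventions visible above (keys are "<COMMAND><path>" strings,
# handlers take exactly two parameters and receive a fresh HTTPResponse):
#
#   def get_status(request, response):
#       # populate `response` as needed, then return it
#       return response
#
#   dispatcher = Dispatcher({'GET/status': get_status}, scheduler, deque)
#   # dispatcher.dispatch(request) returns the handler's result, or raises
#   # NoSuchUrl when no handler is registered for request.command + request.path.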
| bsd-3-clause | 8,067,938,445,552,639,000 | 26.44186 | 72 | 0.616949 | false |
crossroadchurch/paul | openlp/plugins/alerts/__init__.py | 1 | 1670 | # -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2015 OpenLP Developers #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
The :mod:`alerts` module provides the Alerts plugin for producing impromptu on-screen announcements during a service.
"""
| gpl-2.0 | -5,792,317,587,671,682,000 | 68.583333 | 117 | 0.443713 | false |
AravindK95/ee106b | project4/src/grasper_plan/src/transformations.py | 2 | 66033 | # -*- coding: utf-8 -*-
# transformations.py
# Copyright (c) 2006-2015, Christoph Gohlke
# Copyright (c) 2006-2015, The Regents of the University of California
# Produced at the Laboratory for Fluorescence Dynamics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Homogeneous Transformation Matrices and Quaternions.
A library for calculating 4x4 matrices for translating, rotating, reflecting,
scaling, shearing, projecting, orthogonalizing, and superimposing arrays of
3D homogeneous coordinates as well as for converting between rotation matrices,
Euler angles, and quaternions. Also includes an Arcball control object and
functions to decompose transformation matrices.
:Author:
`Christoph Gohlke <http://www.lfd.uci.edu/~gohlke/>`_
:Organization:
Laboratory for Fluorescence Dynamics, University of California, Irvine
:Version: 2015.07.18
Requirements
------------
* `CPython 2.7 or 3.4 <http://www.python.org>`_
* `Numpy 1.9 <http://www.numpy.org>`_
* `Transformations.c 2015.07.18 <http://www.lfd.uci.edu/~gohlke/>`_
(recommended for speedup of some functions)
Notes
-----
The API is not stable yet and is expected to change between revisions.
This Python code is not optimized for speed. Refer to the transformations.c
module for a faster implementation of some functions.
Documentation in HTML format can be generated with epydoc.
Matrices (M) can be inverted using numpy.linalg.inv(M), be concatenated using
numpy.dot(M0, M1), or transform homogeneous coordinate arrays (v) using
numpy.dot(M, v) for shape (4, \*) column vectors, respectively
numpy.dot(v, M.T) for shape (\*, 4) row vectors ("array of points").
This module follows the "column vectors on the right" and "row major storage"
(C contiguous) conventions. The translation components are in the right column
of the transformation matrix, i.e. M[:3, 3].
The transpose of the transformation matrices may have to be used to interface
with other graphics systems, e.g. with OpenGL's glMultMatrixd(). See also [16].
Calculations are carried out with numpy.float64 precision.
Vector, point, quaternion, and matrix function arguments are expected to be
"array like", i.e. tuple, list, or numpy arrays.
Return types are numpy arrays unless specified otherwise.
Angles are in radians unless specified otherwise.
Quaternions w+ix+jy+kz are represented as [w, x, y, z].
A triple of Euler angles can be applied/interpreted in 24 ways, which can
be specified using a 4 character string or encoded 4-tuple:
*Axes 4-string*: e.g. 'sxyz' or 'ryxy'
- first character : rotations are applied to 's'tatic or 'r'otating frame
- remaining characters : successive rotation axis 'x', 'y', or 'z'
*Axes 4-tuple*: e.g. (0, 0, 0, 0) or (1, 1, 1, 1)
- inner axis: code of axis ('x':0, 'y':1, 'z':2) of rightmost matrix.
- parity : even (0) if inner axis 'x' is followed by 'y', 'y' is followed
by 'z', or 'z' is followed by 'x'. Otherwise odd (1).
- repetition : first and last axis are same (1) or different (0).
- frame : rotations are applied to static (0) or rotating (1) frame.
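For example, using only the mappings described above, the axes string 'sxyz'
and the encoded tuple (0, 0, 0, 0) should select the same convention:
>>> numpy.allclose(euler_matrix(1, 2, 3, 'sxyz'), euler_matrix(1, 2, 3, (0, 0, 0, 0)))
True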
Other Python packages and modules for 3D transformations and quaternions:
* `Transforms3d <https://pypi.python.org/pypi/transforms3d>`_
includes most code of this module.
* `Blender.mathutils <http://www.blender.org/api/blender_python_api>`_
* `numpy-dtypes <https://github.com/numpy/numpy-dtypes>`_
References
----------
(1) Matrices and transformations. Ronald Goldman.
In "Graphics Gems I", pp 472-475. Morgan Kaufmann, 1990.
(2) More matrices and transformations: shear and pseudo-perspective.
Ronald Goldman. In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991.
(3) Decomposing a matrix into simple transformations. Spencer Thomas.
In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991.
(4) Recovering the data from the transformation matrix. Ronald Goldman.
In "Graphics Gems II", pp 324-331. Morgan Kaufmann, 1991.
(5) Euler angle conversion. Ken Shoemake.
In "Graphics Gems IV", pp 222-229. Morgan Kaufmann, 1994.
(6) Arcball rotation control. Ken Shoemake.
In "Graphics Gems IV", pp 175-192. Morgan Kaufmann, 1994.
(7) Representing attitude: Euler angles, unit quaternions, and rotation
vectors. James Diebel. 2006.
(8) A discussion of the solution for the best rotation to relate two sets
of vectors. W Kabsch. Acta Cryst. 1978. A34, 827-828.
(9) Closed-form solution of absolute orientation using unit quaternions.
BKP Horn. J Opt Soc Am A. 1987. 4(4):629-642.
(10) Quaternions. Ken Shoemake.
http://www.sfu.ca/~jwa3/cmpt461/files/quatut.pdf
(11) From quaternion to matrix and back. JMP van Waveren. 2005.
http://www.intel.com/cd/ids/developer/asmo-na/eng/293748.htm
(12) Uniform random rotations. Ken Shoemake.
In "Graphics Gems III", pp 124-132. Morgan Kaufmann, 1992.
(13) Quaternion in molecular modeling. CFF Karney.
J Mol Graph Mod, 25(5):595-604
(14) New method for extracting the quaternion from a rotation matrix.
Itzhack Y Bar-Itzhack, J Guid Contr Dynam. 2000. 23(6): 1085-1087.
(15) Multiple View Geometry in Computer Vision. Hartley and Zissermann.
Cambridge University Press; 2nd Ed. 2004. Chapter 4, Algorithm 4.7, p 130.
(16) Column Vectors vs. Row Vectors.
http://steve.hollasch.net/cgindex/math/matrix/column-vec.html
Examples
--------
>>> alpha, beta, gamma = 0.123, -1.234, 2.345
>>> origin, xaxis, yaxis, zaxis = [0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]
>>> I = identity_matrix()
>>> Rx = rotation_matrix(alpha, xaxis)
>>> Ry = rotation_matrix(beta, yaxis)
>>> Rz = rotation_matrix(gamma, zaxis)
>>> R = concatenate_matrices(Rx, Ry, Rz)
>>> euler = euler_from_matrix(R, 'rxyz')
>>> numpy.allclose([alpha, beta, gamma], euler)
True
>>> Re = euler_matrix(alpha, beta, gamma, 'rxyz')
>>> is_same_transform(R, Re)
True
>>> al, be, ga = euler_from_matrix(Re, 'rxyz')
>>> is_same_transform(Re, euler_matrix(al, be, ga, 'rxyz'))
True
>>> qx = quaternion_about_axis(alpha, xaxis)
>>> qy = quaternion_about_axis(beta, yaxis)
>>> qz = quaternion_about_axis(gamma, zaxis)
>>> q = quaternion_multiply(qx, qy)
>>> q = quaternion_multiply(q, qz)
>>> Rq = quaternion_matrix(q)
>>> is_same_transform(R, Rq)
True
>>> S = scale_matrix(1.23, origin)
>>> T = translation_matrix([1, 2, 3])
>>> Z = shear_matrix(beta, xaxis, origin, zaxis)
>>> R = random_rotation_matrix(numpy.random.rand(3))
>>> M = concatenate_matrices(T, R, Z, S)
>>> scale, shear, angles, trans, persp = decompose_matrix(M)
>>> numpy.allclose(scale, 1.23)
True
>>> numpy.allclose(trans, [1, 2, 3])
True
>>> numpy.allclose(shear, [0, math.tan(beta), 0])
True
>>> is_same_transform(R, euler_matrix(axes='sxyz', *angles))
True
>>> M1 = compose_matrix(scale, shear, angles, trans, persp)
>>> is_same_transform(M, M1)
True
>>> v0, v1 = random_vector(3), random_vector(3)
>>> M = rotation_matrix(angle_between_vectors(v0, v1), vector_product(v0, v1))
>>> v2 = numpy.dot(v0, M[:3,:3].T)
>>> numpy.allclose(unit_vector(v1), unit_vector(v2))
True
"""
from __future__ import division, print_function
import math
import numpy
__version__ = '2015.07.18'
__docformat__ = 'restructuredtext en'
__all__ = ()
def identity_matrix():
"""Return 4x4 identity/unit matrix.
>>> I = identity_matrix()
>>> numpy.allclose(I, numpy.dot(I, I))
True
>>> numpy.sum(I), numpy.trace(I)
(4.0, 4.0)
>>> numpy.allclose(I, numpy.identity(4))
True
"""
return numpy.identity(4)
def translation_matrix(direction):
"""Return matrix to translate by direction vector.
>>> v = numpy.random.random(3) - 0.5
>>> numpy.allclose(v, translation_matrix(v)[:3, 3])
True
"""
M = numpy.identity(4)
M[:3, 3] = direction[:3]
return M
def translation_from_matrix(matrix):
"""Return translation vector from translation matrix.
>>> v0 = numpy.random.random(3) - 0.5
>>> v1 = translation_from_matrix(translation_matrix(v0))
>>> numpy.allclose(v0, v1)
True
"""
return numpy.array(matrix, copy=False)[:3, 3].copy()
def reflection_matrix(point, normal):
"""Return matrix to mirror at plane defined by point and normal vector.
>>> v0 = numpy.random.random(4) - 0.5
>>> v0[3] = 1.
>>> v1 = numpy.random.random(3) - 0.5
>>> R = reflection_matrix(v0, v1)
>>> numpy.allclose(2, numpy.trace(R))
True
>>> numpy.allclose(v0, numpy.dot(R, v0))
True
>>> v2 = v0.copy()
>>> v2[:3] += v1
>>> v3 = v0.copy()
    >>> v3[:3] -= v1
>>> numpy.allclose(v2, numpy.dot(R, v3))
True
"""
normal = unit_vector(normal[:3])
M = numpy.identity(4)
M[:3, :3] -= 2.0 * numpy.outer(normal, normal)
M[:3, 3] = (2.0 * numpy.dot(point[:3], normal)) * normal
return M
def reflection_from_matrix(matrix):
"""Return mirror plane point and normal vector from reflection matrix.
>>> v0 = numpy.random.random(3) - 0.5
>>> v1 = numpy.random.random(3) - 0.5
>>> M0 = reflection_matrix(v0, v1)
>>> point, normal = reflection_from_matrix(M0)
>>> M1 = reflection_matrix(point, normal)
>>> is_same_transform(M0, M1)
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=False)
# normal: unit eigenvector corresponding to eigenvalue -1
w, V = numpy.linalg.eig(M[:3, :3])
i = numpy.where(abs(numpy.real(w) + 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue -1")
normal = numpy.real(V[:, i[0]]).squeeze()
# point: any unit eigenvector corresponding to eigenvalue 1
w, V = numpy.linalg.eig(M)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
point = numpy.real(V[:, i[-1]]).squeeze()
point /= point[3]
return point, normal
def rotation_matrix(angle, direction, point=None):
"""Return matrix to rotate about axis defined by point and direction.
>>> R = rotation_matrix(math.pi/2, [0, 0, 1], [1, 0, 0])
>>> numpy.allclose(numpy.dot(R, [0, 0, 0, 1]), [1, -1, 0, 1])
True
>>> angle = (random.random() - 0.5) * (2*math.pi)
>>> direc = numpy.random.random(3) - 0.5
>>> point = numpy.random.random(3) - 0.5
>>> R0 = rotation_matrix(angle, direc, point)
>>> R1 = rotation_matrix(angle-2*math.pi, direc, point)
>>> is_same_transform(R0, R1)
True
>>> R0 = rotation_matrix(angle, direc, point)
>>> R1 = rotation_matrix(-angle, -direc, point)
>>> is_same_transform(R0, R1)
True
>>> I = numpy.identity(4, numpy.float64)
>>> numpy.allclose(I, rotation_matrix(math.pi*2, direc))
True
>>> numpy.allclose(2, numpy.trace(rotation_matrix(math.pi/2,
... direc, point)))
True
"""
sina = math.sin(angle)
cosa = math.cos(angle)
direction = unit_vector(direction[:3])
# rotation matrix around unit vector
R = numpy.diag([cosa, cosa, cosa])
R += numpy.outer(direction, direction) * (1.0 - cosa)
direction *= sina
R += numpy.array([[ 0.0, -direction[2], direction[1]],
[ direction[2], 0.0, -direction[0]],
[-direction[1], direction[0], 0.0]])
M = numpy.identity(4)
M[:3, :3] = R
if point is not None:
# rotation not around origin
point = numpy.array(point[:3], dtype=numpy.float64, copy=False)
M[:3, 3] = point - numpy.dot(R, point)
return M
def rotation_from_matrix(matrix):
"""Return rotation angle and axis from rotation matrix.
>>> angle = (random.random() - 0.5) * (2*math.pi)
>>> direc = numpy.random.random(3) - 0.5
>>> point = numpy.random.random(3) - 0.5
>>> R0 = rotation_matrix(angle, direc, point)
>>> angle, direc, point = rotation_from_matrix(R0)
>>> R1 = rotation_matrix(angle, direc, point)
>>> is_same_transform(R0, R1)
True
"""
R = numpy.array(matrix, dtype=numpy.float64, copy=False)
R33 = R[:3, :3]
# direction: unit eigenvector of R33 corresponding to eigenvalue of 1
w, W = numpy.linalg.eig(R33.T)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
direction = numpy.real(W[:, i[-1]]).squeeze()
# point: unit eigenvector of R33 corresponding to eigenvalue of 1
w, Q = numpy.linalg.eig(R)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
point = numpy.real(Q[:, i[-1]]).squeeze()
point /= point[3]
# rotation angle depending on direction
cosa = (numpy.trace(R33) - 1.0) / 2.0
if abs(direction[2]) > 1e-8:
sina = (R[1, 0] + (cosa-1.0)*direction[0]*direction[1]) / direction[2]
elif abs(direction[1]) > 1e-8:
sina = (R[0, 2] + (cosa-1.0)*direction[0]*direction[2]) / direction[1]
else:
sina = (R[2, 1] + (cosa-1.0)*direction[1]*direction[2]) / direction[0]
angle = math.atan2(sina, cosa)
return angle, direction, point
def scale_matrix(factor, origin=None, direction=None):
"""Return matrix to scale by factor around origin in direction.
Use factor -1 for point symmetry.
>>> v = (numpy.random.rand(4, 5) - 0.5) * 20
>>> v[3] = 1
>>> S = scale_matrix(-1.234)
>>> numpy.allclose(numpy.dot(S, v)[:3], -1.234*v[:3])
True
>>> factor = random.random() * 10 - 5
>>> origin = numpy.random.random(3) - 0.5
>>> direct = numpy.random.random(3) - 0.5
>>> S = scale_matrix(factor, origin)
>>> S = scale_matrix(factor, origin, direct)
"""
if direction is None:
# uniform scaling
M = numpy.diag([factor, factor, factor, 1.0])
if origin is not None:
M[:3, 3] = origin[:3]
M[:3, 3] *= 1.0 - factor
else:
# nonuniform scaling
direction = unit_vector(direction[:3])
factor = 1.0 - factor
M = numpy.identity(4)
M[:3, :3] -= factor * numpy.outer(direction, direction)
if origin is not None:
M[:3, 3] = (factor * numpy.dot(origin[:3], direction)) * direction
return M
def scale_from_matrix(matrix):
"""Return scaling factor, origin and direction from scaling matrix.
>>> factor = random.random() * 10 - 5
>>> origin = numpy.random.random(3) - 0.5
>>> direct = numpy.random.random(3) - 0.5
>>> S0 = scale_matrix(factor, origin)
>>> factor, origin, direction = scale_from_matrix(S0)
>>> S1 = scale_matrix(factor, origin, direction)
>>> is_same_transform(S0, S1)
True
>>> S0 = scale_matrix(factor, origin, direct)
>>> factor, origin, direction = scale_from_matrix(S0)
>>> S1 = scale_matrix(factor, origin, direction)
>>> is_same_transform(S0, S1)
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=False)
M33 = M[:3, :3]
factor = numpy.trace(M33) - 2.0
try:
# direction: unit eigenvector corresponding to eigenvalue factor
w, V = numpy.linalg.eig(M33)
i = numpy.where(abs(numpy.real(w) - factor) < 1e-8)[0][0]
direction = numpy.real(V[:, i]).squeeze()
direction /= vector_norm(direction)
except IndexError:
# uniform scaling
factor = (factor + 2.0) / 3.0
direction = None
# origin: any eigenvector corresponding to eigenvalue 1
w, V = numpy.linalg.eig(M)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no eigenvector corresponding to eigenvalue 1")
origin = numpy.real(V[:, i[-1]]).squeeze()
origin /= origin[3]
return factor, origin, direction
def projection_matrix(point, normal, direction=None,
perspective=None, pseudo=False):
"""Return matrix to project onto plane defined by point and normal.
    Using either a perspective point, a projection direction, or neither.
If pseudo is True, perspective projections will preserve relative depth
such that Perspective = dot(Orthogonal, PseudoPerspective).
>>> P = projection_matrix([0, 0, 0], [1, 0, 0])
>>> numpy.allclose(P[1:, 1:], numpy.identity(4)[1:, 1:])
True
>>> point = numpy.random.random(3) - 0.5
>>> normal = numpy.random.random(3) - 0.5
>>> direct = numpy.random.random(3) - 0.5
>>> persp = numpy.random.random(3) - 0.5
>>> P0 = projection_matrix(point, normal)
>>> P1 = projection_matrix(point, normal, direction=direct)
>>> P2 = projection_matrix(point, normal, perspective=persp)
>>> P3 = projection_matrix(point, normal, perspective=persp, pseudo=True)
>>> is_same_transform(P2, numpy.dot(P0, P3))
True
>>> P = projection_matrix([3, 0, 0], [1, 1, 0], [1, 0, 0])
>>> v0 = (numpy.random.rand(4, 5) - 0.5) * 20
>>> v0[3] = 1
>>> v1 = numpy.dot(P, v0)
>>> numpy.allclose(v1[1], v0[1])
True
>>> numpy.allclose(v1[0], 3-v1[1])
True
"""
M = numpy.identity(4)
point = numpy.array(point[:3], dtype=numpy.float64, copy=False)
normal = unit_vector(normal[:3])
if perspective is not None:
# perspective projection
perspective = numpy.array(perspective[:3], dtype=numpy.float64,
copy=False)
M[0, 0] = M[1, 1] = M[2, 2] = numpy.dot(perspective-point, normal)
M[:3, :3] -= numpy.outer(perspective, normal)
if pseudo:
# preserve relative depth
M[:3, :3] -= numpy.outer(normal, normal)
M[:3, 3] = numpy.dot(point, normal) * (perspective+normal)
else:
M[:3, 3] = numpy.dot(point, normal) * perspective
M[3, :3] = -normal
M[3, 3] = numpy.dot(perspective, normal)
elif direction is not None:
# parallel projection
direction = numpy.array(direction[:3], dtype=numpy.float64, copy=False)
scale = numpy.dot(direction, normal)
M[:3, :3] -= numpy.outer(direction, normal) / scale
M[:3, 3] = direction * (numpy.dot(point, normal) / scale)
else:
# orthogonal projection
M[:3, :3] -= numpy.outer(normal, normal)
M[:3, 3] = numpy.dot(point, normal) * normal
return M
def projection_from_matrix(matrix, pseudo=False):
"""Return projection plane and perspective point from projection matrix.
Return values are same as arguments for projection_matrix function:
point, normal, direction, perspective, and pseudo.
>>> point = numpy.random.random(3) - 0.5
>>> normal = numpy.random.random(3) - 0.5
>>> direct = numpy.random.random(3) - 0.5
>>> persp = numpy.random.random(3) - 0.5
>>> P0 = projection_matrix(point, normal)
>>> result = projection_from_matrix(P0)
>>> P1 = projection_matrix(*result)
>>> is_same_transform(P0, P1)
True
>>> P0 = projection_matrix(point, normal, direct)
>>> result = projection_from_matrix(P0)
>>> P1 = projection_matrix(*result)
>>> is_same_transform(P0, P1)
True
>>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=False)
>>> result = projection_from_matrix(P0, pseudo=False)
>>> P1 = projection_matrix(*result)
>>> is_same_transform(P0, P1)
True
>>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=True)
>>> result = projection_from_matrix(P0, pseudo=True)
>>> P1 = projection_matrix(*result)
>>> is_same_transform(P0, P1)
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=False)
M33 = M[:3, :3]
w, V = numpy.linalg.eig(M)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
if not pseudo and len(i):
# point: any eigenvector corresponding to eigenvalue 1
point = numpy.real(V[:, i[-1]]).squeeze()
point /= point[3]
# direction: unit eigenvector corresponding to eigenvalue 0
w, V = numpy.linalg.eig(M33)
i = numpy.where(abs(numpy.real(w)) < 1e-8)[0]
if not len(i):
raise ValueError("no eigenvector corresponding to eigenvalue 0")
direction = numpy.real(V[:, i[0]]).squeeze()
direction /= vector_norm(direction)
# normal: unit eigenvector of M33.T corresponding to eigenvalue 0
w, V = numpy.linalg.eig(M33.T)
i = numpy.where(abs(numpy.real(w)) < 1e-8)[0]
if len(i):
# parallel projection
normal = numpy.real(V[:, i[0]]).squeeze()
normal /= vector_norm(normal)
return point, normal, direction, None, False
else:
# orthogonal projection, where normal equals direction vector
return point, direction, None, None, False
else:
# perspective projection
i = numpy.where(abs(numpy.real(w)) > 1e-8)[0]
if not len(i):
raise ValueError(
"no eigenvector not corresponding to eigenvalue 0")
point = numpy.real(V[:, i[-1]]).squeeze()
point /= point[3]
normal = - M[3, :3]
perspective = M[:3, 3] / numpy.dot(point[:3], normal)
if pseudo:
perspective -= normal
return point, normal, None, perspective, pseudo
def clip_matrix(left, right, bottom, top, near, far, perspective=False):
"""Return matrix to obtain normalized device coordinates from frustum.
The frustum bounds are axis-aligned along x (left, right),
y (bottom, top) and z (near, far).
Normalized device coordinates are in range [-1, 1] if coordinates are
inside the frustum.
If perspective is True the frustum is a truncated pyramid with the
perspective point at origin and direction along z axis, otherwise an
orthographic canonical view volume (a box).
Homogeneous coordinates transformed by the perspective clip matrix
need to be dehomogenized (divided by w coordinate).
>>> frustum = numpy.random.rand(6)
>>> frustum[1] += frustum[0]
>>> frustum[3] += frustum[2]
>>> frustum[5] += frustum[4]
>>> M = clip_matrix(perspective=False, *frustum)
>>> numpy.dot(M, [frustum[0], frustum[2], frustum[4], 1])
array([-1., -1., -1., 1.])
>>> numpy.dot(M, [frustum[1], frustum[3], frustum[5], 1])
array([ 1., 1., 1., 1.])
>>> M = clip_matrix(perspective=True, *frustum)
>>> v = numpy.dot(M, [frustum[0], frustum[2], frustum[4], 1])
>>> v / v[3]
array([-1., -1., -1., 1.])
>>> v = numpy.dot(M, [frustum[1], frustum[3], frustum[4], 1])
>>> v / v[3]
array([ 1., 1., -1., 1.])
"""
if left >= right or bottom >= top or near >= far:
raise ValueError("invalid frustum")
if perspective:
if near <= _EPS:
raise ValueError("invalid frustum: near <= 0")
t = 2.0 * near
M = [[t/(left-right), 0.0, (right+left)/(right-left), 0.0],
[0.0, t/(bottom-top), (top+bottom)/(top-bottom), 0.0],
[0.0, 0.0, (far+near)/(near-far), t*far/(far-near)],
[0.0, 0.0, -1.0, 0.0]]
else:
M = [[2.0/(right-left), 0.0, 0.0, (right+left)/(left-right)],
[0.0, 2.0/(top-bottom), 0.0, (top+bottom)/(bottom-top)],
[0.0, 0.0, 2.0/(far-near), (far+near)/(near-far)],
[0.0, 0.0, 0.0, 1.0]]
return numpy.array(M)
def shear_matrix(angle, direction, point, normal):
"""Return matrix to shear by angle along direction vector on shear plane.
The shear plane is defined by a point and normal vector. The direction
vector must be orthogonal to the plane's normal vector.
A point P is transformed by the shear matrix into P" such that
the vector P-P" is parallel to the direction vector and its extent is
given by the angle of P-P'-P", where P' is the orthogonal projection
of P onto the shear plane.
>>> angle = (random.random() - 0.5) * 4*math.pi
>>> direct = numpy.random.random(3) - 0.5
>>> point = numpy.random.random(3) - 0.5
>>> normal = numpy.cross(direct, numpy.random.random(3))
>>> S = shear_matrix(angle, direct, point, normal)
>>> numpy.allclose(1, numpy.linalg.det(S))
True
"""
normal = unit_vector(normal[:3])
direction = unit_vector(direction[:3])
if abs(numpy.dot(normal, direction)) > 1e-6:
raise ValueError("direction and normal vectors are not orthogonal")
angle = math.tan(angle)
M = numpy.identity(4)
M[:3, :3] += angle * numpy.outer(direction, normal)
M[:3, 3] = -angle * numpy.dot(point[:3], normal) * direction
return M
def shear_from_matrix(matrix):
"""Return shear angle, direction and plane from shear matrix.
>>> angle = (random.random() - 0.5) * 4*math.pi
>>> direct = numpy.random.random(3) - 0.5
>>> point = numpy.random.random(3) - 0.5
>>> normal = numpy.cross(direct, numpy.random.random(3))
>>> S0 = shear_matrix(angle, direct, point, normal)
>>> angle, direct, point, normal = shear_from_matrix(S0)
>>> S1 = shear_matrix(angle, direct, point, normal)
>>> is_same_transform(S0, S1)
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=False)
M33 = M[:3, :3]
# normal: cross independent eigenvectors corresponding to the eigenvalue 1
w, V = numpy.linalg.eig(M33)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-4)[0]
if len(i) < 2:
raise ValueError("no two linear independent eigenvectors found %s" % w)
V = numpy.real(V[:, i]).squeeze().T
lenorm = -1.0
for i0, i1 in ((0, 1), (0, 2), (1, 2)):
n = numpy.cross(V[i0], V[i1])
w = vector_norm(n)
if w > lenorm:
lenorm = w
normal = n
normal /= lenorm
# direction and angle
direction = numpy.dot(M33 - numpy.identity(3), normal)
angle = vector_norm(direction)
direction /= angle
angle = math.atan(angle)
# point: eigenvector corresponding to eigenvalue 1
w, V = numpy.linalg.eig(M)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no eigenvector corresponding to eigenvalue 1")
point = numpy.real(V[:, i[-1]]).squeeze()
point /= point[3]
return angle, direction, point, normal
def decompose_matrix(matrix):
"""Return sequence of transformations from transformation matrix.
matrix : array_like
        Non-degenerate homogeneous transformation matrix
Return tuple of:
scale : vector of 3 scaling factors
shear : list of shear factors for x-y, x-z, y-z axes
angles : list of Euler angles about static x, y, z axes
translate : translation vector along x, y, z axes
perspective : perspective partition of matrix
    Raise ValueError if matrix is of wrong type or degenerate.
>>> T0 = translation_matrix([1, 2, 3])
>>> scale, shear, angles, trans, persp = decompose_matrix(T0)
>>> T1 = translation_matrix(trans)
>>> numpy.allclose(T0, T1)
True
>>> S = scale_matrix(0.123)
>>> scale, shear, angles, trans, persp = decompose_matrix(S)
>>> scale[0]
0.123
>>> R0 = euler_matrix(1, 2, 3)
>>> scale, shear, angles, trans, persp = decompose_matrix(R0)
>>> R1 = euler_matrix(*angles)
>>> numpy.allclose(R0, R1)
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=True).T
if abs(M[3, 3]) < _EPS:
raise ValueError("M[3, 3] is zero")
M /= M[3, 3]
P = M.copy()
P[:, 3] = 0.0, 0.0, 0.0, 1.0
if not numpy.linalg.det(P):
raise ValueError("matrix is singular")
scale = numpy.zeros((3, ))
shear = [0.0, 0.0, 0.0]
angles = [0.0, 0.0, 0.0]
if any(abs(M[:3, 3]) > _EPS):
perspective = numpy.dot(M[:, 3], numpy.linalg.inv(P.T))
M[:, 3] = 0.0, 0.0, 0.0, 1.0
else:
perspective = numpy.array([0.0, 0.0, 0.0, 1.0])
translate = M[3, :3].copy()
M[3, :3] = 0.0
row = M[:3, :3].copy()
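    # Gram-Schmidt style orthogonalization of the upper 3x3 rows yields the
    # scale factors and the shear coefficients (cf. reference (3) above).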
scale[0] = vector_norm(row[0])
row[0] /= scale[0]
shear[0] = numpy.dot(row[0], row[1])
row[1] -= row[0] * shear[0]
scale[1] = vector_norm(row[1])
row[1] /= scale[1]
shear[0] /= scale[1]
shear[1] = numpy.dot(row[0], row[2])
row[2] -= row[0] * shear[1]
shear[2] = numpy.dot(row[1], row[2])
row[2] -= row[1] * shear[2]
scale[2] = vector_norm(row[2])
row[2] /= scale[2]
shear[1:] /= scale[2]
if numpy.dot(row[0], numpy.cross(row[1], row[2])) < 0:
numpy.negative(scale, scale)
numpy.negative(row, row)
angles[1] = math.asin(-row[0, 2])
if math.cos(angles[1]):
angles[0] = math.atan2(row[1, 2], row[2, 2])
angles[2] = math.atan2(row[0, 1], row[0, 0])
else:
#angles[0] = math.atan2(row[1, 0], row[1, 1])
angles[0] = math.atan2(-row[2, 1], row[1, 1])
angles[2] = 0.0
return scale, shear, angles, translate, perspective
def compose_matrix(scale=None, shear=None, angles=None, translate=None,
perspective=None):
"""Return transformation matrix from sequence of transformations.
This is the inverse of the decompose_matrix function.
Sequence of transformations:
scale : vector of 3 scaling factors
shear : list of shear factors for x-y, x-z, y-z axes
angles : list of Euler angles about static x, y, z axes
translate : translation vector along x, y, z axes
perspective : perspective partition of matrix
>>> scale = numpy.random.random(3) - 0.5
>>> shear = numpy.random.random(3) - 0.5
>>> angles = (numpy.random.random(3) - 0.5) * (2*math.pi)
>>> trans = numpy.random.random(3) - 0.5
>>> persp = numpy.random.random(4) - 0.5
>>> M0 = compose_matrix(scale, shear, angles, trans, persp)
>>> result = decompose_matrix(M0)
>>> M1 = compose_matrix(*result)
>>> is_same_transform(M0, M1)
True
"""
M = numpy.identity(4)
if perspective is not None:
P = numpy.identity(4)
P[3, :] = perspective[:4]
M = numpy.dot(M, P)
if translate is not None:
T = numpy.identity(4)
T[:3, 3] = translate[:3]
M = numpy.dot(M, T)
if angles is not None:
R = euler_matrix(angles[0], angles[1], angles[2], 'sxyz')
M = numpy.dot(M, R)
if shear is not None:
Z = numpy.identity(4)
Z[1, 2] = shear[2]
Z[0, 2] = shear[1]
Z[0, 1] = shear[0]
M = numpy.dot(M, Z)
if scale is not None:
S = numpy.identity(4)
S[0, 0] = scale[0]
S[1, 1] = scale[1]
S[2, 2] = scale[2]
M = numpy.dot(M, S)
M /= M[3, 3]
return M
def orthogonalization_matrix(lengths, angles):
"""Return orthogonalization matrix for crystallographic cell coordinates.
Angles are expected in degrees.
The de-orthogonalization matrix is the inverse.
>>> O = orthogonalization_matrix([10, 10, 10], [90, 90, 90])
>>> numpy.allclose(O[:3, :3], numpy.identity(3, float) * 10)
True
>>> O = orthogonalization_matrix([9.8, 12.0, 15.5], [87.2, 80.7, 69.7])
>>> numpy.allclose(numpy.sum(O), 43.063229)
True
"""
a, b, c = lengths
angles = numpy.radians(angles)
sina, sinb, _ = numpy.sin(angles)
cosa, cosb, cosg = numpy.cos(angles)
co = (cosa * cosb - cosg) / (sina * sinb)
return numpy.array([
[ a*sinb*math.sqrt(1.0-co*co), 0.0, 0.0, 0.0],
[-a*sinb*co, b*sina, 0.0, 0.0],
[ a*cosb, b*cosa, c, 0.0],
[ 0.0, 0.0, 0.0, 1.0]])
def affine_matrix_from_points(v0, v1, shear=True, scale=True, usesvd=True):
"""Return affine transform matrix to register two point sets.
v0 and v1 are shape (ndims, \*) arrays of at least ndims non-homogeneous
coordinates, where ndims is the dimensionality of the coordinate space.
If shear is False, a similarity transformation matrix is returned.
If also scale is False, a rigid/Euclidean transformation matrix
is returned.
By default the algorithm by Hartley and Zissermann [15] is used.
If usesvd is True, similarity and Euclidean transformation matrices
are calculated by minimizing the weighted sum of squared deviations
(RMSD) according to the algorithm by Kabsch [8].
Otherwise, and if ndims is 3, the quaternion based algorithm by Horn [9]
is used, which is slower when using this Python implementation.
The returned matrix performs rotation, translation and uniform scaling
(if specified).
>>> v0 = [[0, 1031, 1031, 0], [0, 0, 1600, 1600]]
>>> v1 = [[675, 826, 826, 677], [55, 52, 281, 277]]
>>> affine_matrix_from_points(v0, v1)
array([[ 0.14549, 0.00062, 675.50008],
[ 0.00048, 0.14094, 53.24971],
[ 0. , 0. , 1. ]])
>>> T = translation_matrix(numpy.random.random(3)-0.5)
>>> R = random_rotation_matrix(numpy.random.random(3))
>>> S = scale_matrix(random.random())
>>> M = concatenate_matrices(T, R, S)
>>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20
>>> v0[3] = 1
>>> v1 = numpy.dot(M, v0)
>>> v0[:3] += numpy.random.normal(0, 1e-8, 300).reshape(3, -1)
>>> M = affine_matrix_from_points(v0[:3], v1[:3])
>>> numpy.allclose(v1, numpy.dot(M, v0))
True
More examples in superimposition_matrix()
"""
v0 = numpy.array(v0, dtype=numpy.float64, copy=True)
v1 = numpy.array(v1, dtype=numpy.float64, copy=True)
ndims = v0.shape[0]
if ndims < 2 or v0.shape[1] < ndims or v0.shape != v1.shape:
raise ValueError("input arrays are of wrong shape or type")
# move centroids to origin
t0 = -numpy.mean(v0, axis=1)
M0 = numpy.identity(ndims+1)
M0[:ndims, ndims] = t0
v0 += t0.reshape(ndims, 1)
t1 = -numpy.mean(v1, axis=1)
M1 = numpy.identity(ndims+1)
M1[:ndims, ndims] = t1
v1 += t1.reshape(ndims, 1)
if shear:
# Affine transformation
A = numpy.concatenate((v0, v1), axis=0)
u, s, vh = numpy.linalg.svd(A.T)
vh = vh[:ndims].T
B = vh[:ndims]
C = vh[ndims:2*ndims]
t = numpy.dot(C, numpy.linalg.pinv(B))
t = numpy.concatenate((t, numpy.zeros((ndims, 1))), axis=1)
M = numpy.vstack((t, ((0.0,)*ndims) + (1.0,)))
elif usesvd or ndims != 3:
# Rigid transformation via SVD of covariance matrix
u, s, vh = numpy.linalg.svd(numpy.dot(v1, v0.T))
# rotation matrix from SVD orthonormal bases
R = numpy.dot(u, vh)
if numpy.linalg.det(R) < 0.0:
# R does not constitute right handed system
R -= numpy.outer(u[:, ndims-1], vh[ndims-1, :]*2.0)
s[-1] *= -1.0
# homogeneous transformation matrix
M = numpy.identity(ndims+1)
M[:ndims, :ndims] = R
else:
# Rigid transformation matrix via quaternion
# compute symmetric matrix N
xx, yy, zz = numpy.sum(v0 * v1, axis=1)
xy, yz, zx = numpy.sum(v0 * numpy.roll(v1, -1, axis=0), axis=1)
xz, yx, zy = numpy.sum(v0 * numpy.roll(v1, -2, axis=0), axis=1)
N = [[xx+yy+zz, 0.0, 0.0, 0.0],
[yz-zy, xx-yy-zz, 0.0, 0.0],
[zx-xz, xy+yx, yy-xx-zz, 0.0],
[xy-yx, zx+xz, yz+zy, zz-xx-yy]]
# quaternion: eigenvector corresponding to most positive eigenvalue
w, V = numpy.linalg.eigh(N)
q = V[:, numpy.argmax(w)]
q /= vector_norm(q) # unit quaternion
# homogeneous transformation matrix
M = quaternion_matrix(q)
if scale and not shear:
# Affine transformation; scale is ratio of RMS deviations from centroid
v0 *= v0
v1 *= v1
M[:ndims, :ndims] *= math.sqrt(numpy.sum(v1) / numpy.sum(v0))
# move centroids back
M = numpy.dot(numpy.linalg.inv(M1), numpy.dot(M, M0))
M /= M[ndims, ndims]
return M
def superimposition_matrix(v0, v1, scale=False, usesvd=True):
"""Return matrix to transform given 3D point set into second point set.
v0 and v1 are shape (3, \*) or (4, \*) arrays of at least 3 points.
The parameters scale and usesvd are explained in the more general
affine_matrix_from_points function.
The returned matrix is a similarity or Euclidean transformation matrix.
This function has a fast C implementation in transformations.c.
>>> v0 = numpy.random.rand(3, 10)
>>> M = superimposition_matrix(v0, v0)
>>> numpy.allclose(M, numpy.identity(4))
True
>>> R = random_rotation_matrix(numpy.random.random(3))
>>> v0 = [[1,0,0], [0,1,0], [0,0,1], [1,1,1]]
>>> v1 = numpy.dot(R, v0)
>>> M = superimposition_matrix(v0, v1)
>>> numpy.allclose(v1, numpy.dot(M, v0))
True
>>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20
>>> v0[3] = 1
>>> v1 = numpy.dot(R, v0)
>>> M = superimposition_matrix(v0, v1)
>>> numpy.allclose(v1, numpy.dot(M, v0))
True
>>> S = scale_matrix(random.random())
>>> T = translation_matrix(numpy.random.random(3)-0.5)
>>> M = concatenate_matrices(T, R, S)
>>> v1 = numpy.dot(M, v0)
>>> v0[:3] += numpy.random.normal(0, 1e-9, 300).reshape(3, -1)
>>> M = superimposition_matrix(v0, v1, scale=True)
>>> numpy.allclose(v1, numpy.dot(M, v0))
True
>>> M = superimposition_matrix(v0, v1, scale=True, usesvd=False)
>>> numpy.allclose(v1, numpy.dot(M, v0))
True
>>> v = numpy.empty((4, 100, 3))
>>> v[:, :, 0] = v0
>>> M = superimposition_matrix(v0, v1, scale=True, usesvd=False)
>>> numpy.allclose(v1, numpy.dot(M, v[:, :, 0]))
True
"""
v0 = numpy.array(v0, dtype=numpy.float64, copy=False)[:3]
v1 = numpy.array(v1, dtype=numpy.float64, copy=False)[:3]
return affine_matrix_from_points(v0, v1, shear=False,
scale=scale, usesvd=usesvd)
def euler_matrix(ai, aj, ak, axes='sxyz'):
"""Return homogeneous rotation matrix from Euler angles and axis sequence.
ai, aj, ak : Euler's roll, pitch and yaw angles
axes : One of 24 axis sequences as string or encoded tuple
>>> R = euler_matrix(1, 2, 3, 'syxz')
>>> numpy.allclose(numpy.sum(R[0]), -1.34786452)
True
>>> R = euler_matrix(1, 2, 3, (0, 1, 0, 1))
>>> numpy.allclose(numpy.sum(R[0]), -0.383436184)
True
>>> ai, aj, ak = (4*math.pi) * (numpy.random.random(3) - 0.5)
>>> for axes in _AXES2TUPLE.keys():
... R = euler_matrix(ai, aj, ak, axes)
>>> for axes in _TUPLE2AXES.keys():
... R = euler_matrix(ai, aj, ak, axes)
"""
try:
firstaxis, parity, repetition, frame = _AXES2TUPLE[axes]
except (AttributeError, KeyError):
_TUPLE2AXES[axes] # validation
firstaxis, parity, repetition, frame = axes
i = firstaxis
j = _NEXT_AXIS[i+parity]
k = _NEXT_AXIS[i-parity+1]
if frame:
ai, ak = ak, ai
if parity:
ai, aj, ak = -ai, -aj, -ak
si, sj, sk = math.sin(ai), math.sin(aj), math.sin(ak)
ci, cj, ck = math.cos(ai), math.cos(aj), math.cos(ak)
cc, cs = ci*ck, ci*sk
sc, ss = si*ck, si*sk
M = numpy.identity(4)
if repetition:
M[i, i] = cj
M[i, j] = sj*si
M[i, k] = sj*ci
M[j, i] = sj*sk
M[j, j] = -cj*ss+cc
M[j, k] = -cj*cs-sc
M[k, i] = -sj*ck
M[k, j] = cj*sc+cs
M[k, k] = cj*cc-ss
else:
M[i, i] = cj*ck
M[i, j] = sj*sc-cs
M[i, k] = sj*cc+ss
M[j, i] = cj*sk
M[j, j] = sj*ss+cc
M[j, k] = sj*cs-sc
M[k, i] = -sj
M[k, j] = cj*si
M[k, k] = cj*ci
return M
def euler_from_matrix(matrix, axes='sxyz'):
"""Return Euler angles from rotation matrix for specified axis sequence.
axes : One of 24 axis sequences as string or encoded tuple
Note that many Euler angle triplets can describe one matrix.
>>> R0 = euler_matrix(1, 2, 3, 'syxz')
>>> al, be, ga = euler_from_matrix(R0, 'syxz')
>>> R1 = euler_matrix(al, be, ga, 'syxz')
>>> numpy.allclose(R0, R1)
True
>>> angles = (4*math.pi) * (numpy.random.random(3) - 0.5)
>>> for axes in _AXES2TUPLE.keys():
... R0 = euler_matrix(axes=axes, *angles)
... R1 = euler_matrix(axes=axes, *euler_from_matrix(R0, axes))
... if not numpy.allclose(R0, R1): print(axes, "failed")
"""
try:
firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
except (AttributeError, KeyError):
_TUPLE2AXES[axes] # validation
firstaxis, parity, repetition, frame = axes
i = firstaxis
j = _NEXT_AXIS[i+parity]
k = _NEXT_AXIS[i-parity+1]
M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:3, :3]
if repetition:
sy = math.sqrt(M[i, j]*M[i, j] + M[i, k]*M[i, k])
if sy > _EPS:
ax = math.atan2( M[i, j], M[i, k])
ay = math.atan2( sy, M[i, i])
az = math.atan2( M[j, i], -M[k, i])
else:
ax = math.atan2(-M[j, k], M[j, j])
ay = math.atan2( sy, M[i, i])
az = 0.0
else:
cy = math.sqrt(M[i, i]*M[i, i] + M[j, i]*M[j, i])
if cy > _EPS:
ax = math.atan2( M[k, j], M[k, k])
ay = math.atan2(-M[k, i], cy)
az = math.atan2( M[j, i], M[i, i])
else:
ax = math.atan2(-M[j, k], M[j, j])
ay = math.atan2(-M[k, i], cy)
az = 0.0
if parity:
ax, ay, az = -ax, -ay, -az
if frame:
ax, az = az, ax
return ax, ay, az
def euler_from_quaternion(quaternion, axes='sxyz'):
"""Return Euler angles from quaternion for specified axis sequence.
>>> angles = euler_from_quaternion([0.99810947, 0.06146124, 0, 0])
>>> numpy.allclose(angles, [0.123, 0, 0])
True
"""
return euler_from_matrix(quaternion_matrix(quaternion), axes)
def quaternion_from_euler(ai, aj, ak, axes='sxyz'):
"""Return quaternion from Euler angles and axis sequence.
ai, aj, ak : Euler's roll, pitch and yaw angles
axes : One of 24 axis sequences as string or encoded tuple
>>> q = quaternion_from_euler(1, 2, 3, 'ryxz')
>>> numpy.allclose(q, [0.435953, 0.310622, -0.718287, 0.444435])
True
"""
try:
firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
except (AttributeError, KeyError):
_TUPLE2AXES[axes] # validation
firstaxis, parity, repetition, frame = axes
i = firstaxis + 1
j = _NEXT_AXIS[i+parity-1] + 1
k = _NEXT_AXIS[i-parity] + 1
if frame:
ai, ak = ak, ai
if parity:
aj = -aj
ai /= 2.0
aj /= 2.0
ak /= 2.0
ci = math.cos(ai)
si = math.sin(ai)
cj = math.cos(aj)
sj = math.sin(aj)
ck = math.cos(ak)
sk = math.sin(ak)
cc = ci*ck
cs = ci*sk
sc = si*ck
ss = si*sk
q = numpy.empty((4, ))
if repetition:
q[0] = cj*(cc - ss)
q[i] = cj*(cs + sc)
q[j] = sj*(cc + ss)
q[k] = sj*(cs - sc)
else:
q[0] = cj*cc + sj*ss
q[i] = cj*sc - sj*cs
q[j] = cj*ss + sj*cc
q[k] = cj*cs - sj*sc
if parity:
q[j] *= -1.0
return q
def quaternion_about_axis(angle, axis):
"""Return quaternion for rotation about axis.
>>> q = quaternion_about_axis(0.123, [1, 0, 0])
>>> numpy.allclose(q, [0.99810947, 0.06146124, 0, 0])
True
"""
q = numpy.array([0.0, axis[0], axis[1], axis[2]])
qlen = vector_norm(q)
if qlen > _EPS:
q *= math.sin(angle/2.0) / qlen
q[0] = math.cos(angle/2.0)
return q
def quaternion_matrix(quaternion):
"""Return homogeneous rotation matrix from quaternion.
>>> M = quaternion_matrix([0.99810947, 0.06146124, 0, 0])
>>> numpy.allclose(M, rotation_matrix(0.123, [1, 0, 0]))
True
>>> M = quaternion_matrix([1, 0, 0, 0])
>>> numpy.allclose(M, numpy.identity(4))
True
>>> M = quaternion_matrix([0, 1, 0, 0])
>>> numpy.allclose(M, numpy.diag([1, -1, -1, 1]))
True
"""
q = numpy.array(quaternion, dtype=numpy.float64, copy=True)
n = numpy.dot(q, q)
if n < _EPS:
return numpy.identity(4)
q *= math.sqrt(2.0 / n)
q = numpy.outer(q, q)
return numpy.array([
[1.0-q[2, 2]-q[3, 3], q[1, 2]-q[3, 0], q[1, 3]+q[2, 0], 0.0],
[ q[1, 2]+q[3, 0], 1.0-q[1, 1]-q[3, 3], q[2, 3]-q[1, 0], 0.0],
[ q[1, 3]-q[2, 0], q[2, 3]+q[1, 0], 1.0-q[1, 1]-q[2, 2], 0.0],
[ 0.0, 0.0, 0.0, 1.0]])
def quaternion_from_matrix(matrix, isprecise=False):
"""Return quaternion from rotation matrix.
If isprecise is True, the input matrix is assumed to be a precise rotation
matrix and a faster algorithm is used.
>>> q = quaternion_from_matrix(numpy.identity(4), True)
>>> numpy.allclose(q, [1, 0, 0, 0])
True
>>> q = quaternion_from_matrix(numpy.diag([1, -1, -1, 1]))
>>> numpy.allclose(q, [0, 1, 0, 0]) or numpy.allclose(q, [0, -1, 0, 0])
True
>>> R = rotation_matrix(0.123, (1, 2, 3))
>>> q = quaternion_from_matrix(R, True)
>>> numpy.allclose(q, [0.9981095, 0.0164262, 0.0328524, 0.0492786])
True
>>> R = [[-0.545, 0.797, 0.260, 0], [0.733, 0.603, -0.313, 0],
... [-0.407, 0.021, -0.913, 0], [0, 0, 0, 1]]
>>> q = quaternion_from_matrix(R)
>>> numpy.allclose(q, [0.19069, 0.43736, 0.87485, -0.083611])
True
>>> R = [[0.395, 0.362, 0.843, 0], [-0.626, 0.796, -0.056, 0],
... [-0.677, -0.498, 0.529, 0], [0, 0, 0, 1]]
>>> q = quaternion_from_matrix(R)
>>> numpy.allclose(q, [0.82336615, -0.13610694, 0.46344705, -0.29792603])
True
>>> R = random_rotation_matrix()
>>> q = quaternion_from_matrix(R)
>>> is_same_transform(R, quaternion_matrix(q))
True
>>> R = euler_matrix(0.0, 0.0, numpy.pi/2.0)
>>> numpy.allclose(quaternion_from_matrix(R, isprecise=False),
... quaternion_from_matrix(R, isprecise=True))
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:4, :4]
if isprecise:
q = numpy.empty((4, ))
t = numpy.trace(M)
if t > M[3, 3]:
q[0] = t
q[3] = M[1, 0] - M[0, 1]
q[2] = M[0, 2] - M[2, 0]
q[1] = M[2, 1] - M[1, 2]
else:
i, j, k = 1, 2, 3
if M[1, 1] > M[0, 0]:
i, j, k = 2, 3, 1
if M[2, 2] > M[i, i]:
i, j, k = 3, 1, 2
t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3]
q[i] = t
q[j] = M[i, j] + M[j, i]
q[k] = M[k, i] + M[i, k]
q[3] = M[k, j] - M[j, k]
q *= 0.5 / math.sqrt(t * M[3, 3])
else:
m00 = M[0, 0]
m01 = M[0, 1]
m02 = M[0, 2]
m10 = M[1, 0]
m11 = M[1, 1]
m12 = M[1, 2]
m20 = M[2, 0]
m21 = M[2, 1]
m22 = M[2, 2]
# symmetric matrix K
K = numpy.array([[m00-m11-m22, 0.0, 0.0, 0.0],
[m01+m10, m11-m00-m22, 0.0, 0.0],
[m02+m20, m12+m21, m22-m00-m11, 0.0],
[m21-m12, m02-m20, m10-m01, m00+m11+m22]])
K /= 3.0
# quaternion is eigenvector of K that corresponds to largest eigenvalue
w, V = numpy.linalg.eigh(K)
q = V[[3, 0, 1, 2], numpy.argmax(w)]
if q[0] < 0.0:
numpy.negative(q, q)
return q
def quaternion_multiply(quaternion1, quaternion0):
"""Return multiplication of two quaternions.
>>> q = quaternion_multiply([4, 1, -2, 3], [8, -5, 6, 7])
>>> numpy.allclose(q, [28, -44, -14, 48])
True
"""
w0, x0, y0, z0 = quaternion0
w1, x1, y1, z1 = quaternion1
return numpy.array([-x1*x0 - y1*y0 - z1*z0 + w1*w0,
x1*w0 + y1*z0 - z1*y0 + w1*x0,
-x1*z0 + y1*w0 + z1*x0 + w1*y0,
x1*y0 - y1*x0 + z1*w0 + w1*z0], dtype=numpy.float64)
def quaternion_conjugate(quaternion):
"""Return conjugate of quaternion.
>>> q0 = random_quaternion()
>>> q1 = quaternion_conjugate(q0)
>>> q1[0] == q0[0] and all(q1[1:] == -q0[1:])
True
"""
q = numpy.array(quaternion, dtype=numpy.float64, copy=True)
numpy.negative(q[1:], q[1:])
return q
def quaternion_inverse(quaternion):
"""Return inverse of quaternion.
>>> q0 = random_quaternion()
>>> q1 = quaternion_inverse(q0)
>>> numpy.allclose(quaternion_multiply(q0, q1), [1, 0, 0, 0])
True
"""
q = numpy.array(quaternion, dtype=numpy.float64, copy=True)
numpy.negative(q[1:], q[1:])
return q / numpy.dot(q, q)
def quaternion_real(quaternion):
"""Return real part of quaternion.
>>> quaternion_real([3, 0, 1, 2])
3.0
"""
return float(quaternion[0])
def quaternion_imag(quaternion):
"""Return imaginary part of quaternion.
>>> quaternion_imag([3, 0, 1, 2])
array([ 0., 1., 2.])
"""
return numpy.array(quaternion[1:4], dtype=numpy.float64, copy=True)
def quaternion_slerp(quat0, quat1, fraction, spin=0, shortestpath=True):
"""Return spherical linear interpolation between two quaternions.
>>> q0 = random_quaternion()
>>> q1 = random_quaternion()
>>> q = quaternion_slerp(q0, q1, 0)
>>> numpy.allclose(q, q0)
True
>>> q = quaternion_slerp(q0, q1, 1, 1)
>>> numpy.allclose(q, q1)
True
>>> q = quaternion_slerp(q0, q1, 0.5)
>>> angle = math.acos(numpy.dot(q0, q))
>>> numpy.allclose(2, math.acos(numpy.dot(q0, q1)) / angle) or \
numpy.allclose(2, math.acos(-numpy.dot(q0, q1)) / angle)
True
"""
q0 = unit_vector(quat0[:4])
q1 = unit_vector(quat1[:4])
if fraction == 0.0:
return q0
elif fraction == 1.0:
return q1
d = numpy.dot(q0, q1)
if abs(abs(d) - 1.0) < _EPS:
return q0
if shortestpath and d < 0.0:
# invert rotation
d = -d
numpy.negative(q1, q1)
angle = math.acos(d) + spin * math.pi
if abs(angle) < _EPS:
return q0
isin = 1.0 / math.sin(angle)
q0 *= math.sin((1.0 - fraction) * angle) * isin
q1 *= math.sin(fraction * angle) * isin
q0 += q1
return q0
def random_quaternion(rand=None):
"""Return uniform random unit quaternion.
rand: array like or None
Three independent random variables that are uniformly distributed
between 0 and 1.
>>> q = random_quaternion()
>>> numpy.allclose(1, vector_norm(q))
True
>>> q = random_quaternion(numpy.random.random(3))
>>> len(q.shape), q.shape[0]==4
(1, True)
"""
if rand is None:
rand = numpy.random.rand(3)
else:
assert len(rand) == 3
r1 = numpy.sqrt(1.0 - rand[0])
r2 = numpy.sqrt(rand[0])
pi2 = math.pi * 2.0
t1 = pi2 * rand[1]
t2 = pi2 * rand[2]
return numpy.array([numpy.cos(t2)*r2, numpy.sin(t1)*r1,
numpy.cos(t1)*r1, numpy.sin(t2)*r2])
def random_rotation_matrix(rand=None):
"""Return uniform random rotation matrix.
rand: array like
Three independent random variables that are uniformly distributed
between 0 and 1 for each returned quaternion.
>>> R = random_rotation_matrix()
>>> numpy.allclose(numpy.dot(R.T, R), numpy.identity(4))
True
"""
return quaternion_matrix(random_quaternion(rand))
class Arcball(object):
"""Virtual Trackball Control.
>>> ball = Arcball()
>>> ball = Arcball(initial=numpy.identity(4))
>>> ball.place([320, 320], 320)
>>> ball.down([500, 250])
>>> ball.drag([475, 275])
>>> R = ball.matrix()
>>> numpy.allclose(numpy.sum(R), 3.90583455)
True
>>> ball = Arcball(initial=[1, 0, 0, 0])
>>> ball.place([320, 320], 320)
>>> ball.setaxes([1, 1, 0], [-1, 1, 0])
>>> ball.constrain = True
>>> ball.down([400, 200])
>>> ball.drag([200, 400])
>>> R = ball.matrix()
>>> numpy.allclose(numpy.sum(R), 0.2055924)
True
>>> ball.next()
"""
def __init__(self, initial=None):
"""Initialize virtual trackball control.
initial : quaternion or rotation matrix
"""
self._axis = None
self._axes = None
self._radius = 1.0
self._center = [0.0, 0.0]
self._vdown = numpy.array([0.0, 0.0, 1.0])
self._constrain = False
if initial is None:
self._qdown = numpy.array([1.0, 0.0, 0.0, 0.0])
else:
initial = numpy.array(initial, dtype=numpy.float64)
if initial.shape == (4, 4):
self._qdown = quaternion_from_matrix(initial)
elif initial.shape == (4, ):
initial /= vector_norm(initial)
self._qdown = initial
else:
raise ValueError("initial not a quaternion or matrix")
self._qnow = self._qpre = self._qdown
def place(self, center, radius):
"""Place Arcball, e.g. when window size changes.
center : sequence[2]
Window coordinates of trackball center.
radius : float
Radius of trackball in window coordinates.
"""
self._radius = float(radius)
self._center[0] = center[0]
self._center[1] = center[1]
def setaxes(self, *axes):
"""Set axes to constrain rotations."""
if axes is None:
self._axes = None
else:
self._axes = [unit_vector(axis) for axis in axes]
@property
def constrain(self):
"""Return state of constrain to axis mode."""
return self._constrain
@constrain.setter
def constrain(self, value):
"""Set state of constrain to axis mode."""
self._constrain = bool(value)
def down(self, point):
"""Set initial cursor window coordinates and pick constrain-axis."""
self._vdown = arcball_map_to_sphere(point, self._center, self._radius)
self._qdown = self._qpre = self._qnow
if self._constrain and self._axes is not None:
self._axis = arcball_nearest_axis(self._vdown, self._axes)
self._vdown = arcball_constrain_to_axis(self._vdown, self._axis)
else:
self._axis = None
def drag(self, point):
"""Update current cursor window coordinates."""
vnow = arcball_map_to_sphere(point, self._center, self._radius)
if self._axis is not None:
vnow = arcball_constrain_to_axis(vnow, self._axis)
self._qpre = self._qnow
t = numpy.cross(self._vdown, vnow)
if numpy.dot(t, t) < _EPS:
self._qnow = self._qdown
else:
q = [numpy.dot(self._vdown, vnow), t[0], t[1], t[2]]
self._qnow = quaternion_multiply(q, self._qdown)
def next(self, acceleration=0.0):
"""Continue rotation in direction of last drag."""
q = quaternion_slerp(self._qpre, self._qnow, 2.0+acceleration, False)
self._qpre, self._qnow = self._qnow, q
def matrix(self):
"""Return homogeneous rotation matrix."""
return quaternion_matrix(self._qnow)
def arcball_map_to_sphere(point, center, radius):
"""Return unit sphere coordinates from window coordinates."""
v0 = (point[0] - center[0]) / radius
v1 = (center[1] - point[1]) / radius
n = v0*v0 + v1*v1
if n > 1.0:
# position outside of sphere
n = math.sqrt(n)
return numpy.array([v0/n, v1/n, 0.0])
else:
return numpy.array([v0, v1, math.sqrt(1.0 - n)])
def arcball_constrain_to_axis(point, axis):
"""Return sphere point perpendicular to axis."""
v = numpy.array(point, dtype=numpy.float64, copy=True)
a = numpy.array(axis, dtype=numpy.float64, copy=True)
v -= a * numpy.dot(a, v) # on plane
n = vector_norm(v)
if n > _EPS:
if v[2] < 0.0:
numpy.negative(v, v)
v /= n
return v
if a[2] == 1.0:
return numpy.array([1.0, 0.0, 0.0])
return unit_vector([-a[1], a[0], 0.0])
def arcball_nearest_axis(point, axes):
"""Return axis, which arc is nearest to point."""
point = numpy.array(point, dtype=numpy.float64, copy=False)
nearest = None
mx = -1.0
for axis in axes:
t = numpy.dot(arcball_constrain_to_axis(point, axis), point)
if t > mx:
nearest = axis
mx = t
return nearest
# epsilon for testing whether a number is close to zero
_EPS = numpy.finfo(float).eps * 4.0
# axis sequences for Euler angles
_NEXT_AXIS = [1, 2, 0, 1]
# map axes strings to/from tuples of inner axis, parity, repetition, frame
_AXES2TUPLE = {
'sxyz': (0, 0, 0, 0), 'sxyx': (0, 0, 1, 0), 'sxzy': (0, 1, 0, 0),
'sxzx': (0, 1, 1, 0), 'syzx': (1, 0, 0, 0), 'syzy': (1, 0, 1, 0),
'syxz': (1, 1, 0, 0), 'syxy': (1, 1, 1, 0), 'szxy': (2, 0, 0, 0),
'szxz': (2, 0, 1, 0), 'szyx': (2, 1, 0, 0), 'szyz': (2, 1, 1, 0),
'rzyx': (0, 0, 0, 1), 'rxyx': (0, 0, 1, 1), 'ryzx': (0, 1, 0, 1),
'rxzx': (0, 1, 1, 1), 'rxzy': (1, 0, 0, 1), 'ryzy': (1, 0, 1, 1),
'rzxy': (1, 1, 0, 1), 'ryxy': (1, 1, 1, 1), 'ryxz': (2, 0, 0, 1),
'rzxz': (2, 0, 1, 1), 'rxyz': (2, 1, 0, 1), 'rzyz': (2, 1, 1, 1)}
_TUPLE2AXES = dict((v, k) for k, v in _AXES2TUPLE.items())
def vector_norm(data, axis=None, out=None):
"""Return length, i.e. Euclidean norm, of ndarray along axis.
>>> v = numpy.random.random(3)
>>> n = vector_norm(v)
>>> numpy.allclose(n, numpy.linalg.norm(v))
True
>>> v = numpy.random.rand(6, 5, 3)
>>> n = vector_norm(v, axis=-1)
>>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=2)))
True
>>> n = vector_norm(v, axis=1)
>>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1)))
True
>>> v = numpy.random.rand(5, 4, 3)
>>> n = numpy.empty((5, 3))
>>> vector_norm(v, axis=1, out=n)
>>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1)))
True
>>> vector_norm([])
0.0
>>> vector_norm([1])
1.0
"""
data = numpy.array(data, dtype=numpy.float64, copy=True)
if out is None:
if data.ndim == 1:
return math.sqrt(numpy.dot(data, data))
data *= data
out = numpy.atleast_1d(numpy.sum(data, axis=axis))
numpy.sqrt(out, out)
return out
else:
data *= data
numpy.sum(data, axis=axis, out=out)
numpy.sqrt(out, out)
def unit_vector(data, axis=None, out=None):
"""Return ndarray normalized by length, i.e. Euclidean norm, along axis.
>>> v0 = numpy.random.random(3)
>>> v1 = unit_vector(v0)
>>> numpy.allclose(v1, v0 / numpy.linalg.norm(v0))
True
>>> v0 = numpy.random.rand(5, 4, 3)
>>> v1 = unit_vector(v0, axis=-1)
>>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=2)), 2)
>>> numpy.allclose(v1, v2)
True
>>> v1 = unit_vector(v0, axis=1)
>>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=1)), 1)
>>> numpy.allclose(v1, v2)
True
>>> v1 = numpy.empty((5, 4, 3))
>>> unit_vector(v0, axis=1, out=v1)
>>> numpy.allclose(v1, v2)
True
>>> list(unit_vector([]))
[]
>>> list(unit_vector([1]))
[1.0]
"""
if out is None:
data = numpy.array(data, dtype=numpy.float64, copy=True)
if data.ndim == 1:
data /= math.sqrt(numpy.dot(data, data))
return data
else:
if out is not data:
out[:] = numpy.array(data, copy=False)
data = out
length = numpy.atleast_1d(numpy.sum(data*data, axis))
numpy.sqrt(length, length)
if axis is not None:
length = numpy.expand_dims(length, axis)
data /= length
if out is None:
return data
def random_vector(size):
"""Return array of random doubles in the half-open interval [0.0, 1.0).
>>> v = random_vector(10000)
>>> numpy.all(v >= 0) and numpy.all(v < 1)
True
>>> v0 = random_vector(10)
>>> v1 = random_vector(10)
>>> numpy.any(v0 == v1)
False
"""
return numpy.random.random(size)
def vector_product(v0, v1, axis=0):
"""Return vector perpendicular to vectors.
>>> v = vector_product([2, 0, 0], [0, 3, 0])
>>> numpy.allclose(v, [0, 0, 6])
True
>>> v0 = [[2, 0, 0, 2], [0, 2, 0, 2], [0, 0, 2, 2]]
>>> v1 = [[3], [0], [0]]
>>> v = vector_product(v0, v1)
>>> numpy.allclose(v, [[0, 0, 0, 0], [0, 0, 6, 6], [0, -6, 0, -6]])
True
>>> v0 = [[2, 0, 0], [2, 0, 0], [0, 2, 0], [2, 0, 0]]
>>> v1 = [[0, 3, 0], [0, 0, 3], [0, 0, 3], [3, 3, 3]]
>>> v = vector_product(v0, v1, axis=1)
>>> numpy.allclose(v, [[0, 0, 6], [0, -6, 0], [6, 0, 0], [0, -6, 6]])
True
"""
return numpy.cross(v0, v1, axis=axis)
def angle_between_vectors(v0, v1, directed=True, axis=0):
"""Return angle between vectors.
If directed is False, the input vectors are interpreted as undirected axes,
i.e. the maximum angle is pi/2.
>>> a = angle_between_vectors([1, -2, 3], [-1, 2, -3])
>>> numpy.allclose(a, math.pi)
True
>>> a = angle_between_vectors([1, -2, 3], [-1, 2, -3], directed=False)
>>> numpy.allclose(a, 0)
True
>>> v0 = [[2, 0, 0, 2], [0, 2, 0, 2], [0, 0, 2, 2]]
>>> v1 = [[3], [0], [0]]
>>> a = angle_between_vectors(v0, v1)
>>> numpy.allclose(a, [0, 1.5708, 1.5708, 0.95532])
True
>>> v0 = [[2, 0, 0], [2, 0, 0], [0, 2, 0], [2, 0, 0]]
>>> v1 = [[0, 3, 0], [0, 0, 3], [0, 0, 3], [3, 3, 3]]
>>> a = angle_between_vectors(v0, v1, axis=1)
>>> numpy.allclose(a, [1.5708, 1.5708, 1.5708, 0.95532])
True
"""
v0 = numpy.array(v0, dtype=numpy.float64, copy=False)
v1 = numpy.array(v1, dtype=numpy.float64, copy=False)
dot = numpy.sum(v0 * v1, axis=axis)
dot /= vector_norm(v0, axis=axis) * vector_norm(v1, axis=axis)
return numpy.arccos(dot if directed else numpy.fabs(dot))
def inverse_matrix(matrix):
"""Return inverse of square transformation matrix.
>>> M0 = random_rotation_matrix()
>>> M1 = inverse_matrix(M0.T)
>>> numpy.allclose(M1, numpy.linalg.inv(M0.T))
True
>>> for size in range(1, 7):
... M0 = numpy.random.rand(size, size)
... M1 = inverse_matrix(M0)
... if not numpy.allclose(M1, numpy.linalg.inv(M0)): print(size)
"""
return numpy.linalg.inv(matrix)
def concatenate_matrices(*matrices):
"""Return concatenation of series of transformation matrices.
>>> M = numpy.random.rand(16).reshape((4, 4)) - 0.5
>>> numpy.allclose(M, concatenate_matrices(M))
True
>>> numpy.allclose(numpy.dot(M, M.T), concatenate_matrices(M, M.T))
True
"""
M = numpy.identity(4)
for i in matrices:
M = numpy.dot(M, i)
return M
def is_same_transform(matrix0, matrix1):
"""Return True if two matrices perform same transformation.
>>> is_same_transform(numpy.identity(4), numpy.identity(4))
True
>>> is_same_transform(numpy.identity(4), random_rotation_matrix())
False
"""
matrix0 = numpy.array(matrix0, dtype=numpy.float64, copy=True)
matrix0 /= matrix0[3, 3]
matrix1 = numpy.array(matrix1, dtype=numpy.float64, copy=True)
matrix1 /= matrix1[3, 3]
return numpy.allclose(matrix0, matrix1)
def _import_module(name, package=None, warn=True, prefix='_py_', ignore='_'):
"""Try import all public attributes from module into global namespace.
Existing attributes with name clashes are renamed with prefix.
Attributes starting with underscore are ignored by default.
Return True on successful import.
"""
import warnings
from importlib import import_module
try:
if not package:
module = import_module(name)
else:
module = import_module('.' + name, package=package)
except ImportError:
if warn:
warnings.warn("failed to import module %s" % name)
else:
for attr in dir(module):
if ignore and attr.startswith(ignore):
continue
if prefix:
if attr in globals():
globals()[prefix + attr] = globals()[attr]
elif warn:
warnings.warn("no Python implementation of " + attr)
globals()[attr] = getattr(module, attr)
return True
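# Best effort: let an importable module named 'transformations' (e.g. a compiled
# accelerator) override the pure-Python functions defined above; if the import
# fails, _import_module only issues a warning and the Python versions are kept.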
_import_module('transformations')
if __name__ == "__main__":
import doctest
import random # used in doctests
numpy.set_printoptions(suppress=True, precision=5)
doctest.testmod()
| mit | -8,569,947,633,209,522,000 | 33.410109 | 79 | 0.579559 | false |
maxharp3r/archive-rotator | setup.py | 1 | 1784 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read().replace('.. :changelog:', '')
requirements = [
# TODO: put package requirements here
]
test_requirements = [
# TODO: put package test requirements here
]
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
setup(
name='archive-rotator',
version='0.2.1',
description="Flexible utility for rotating backup files.",
long_description=readme + '\n\n' + history,
author="Max Harper",
author_email='maxharp3r@gmail.com',
url='https://github.com/maxharp3r/archive-rotator',
packages=[
'archive_rotator',
],
package_dir={'archive_rotator': 'archive_rotator'},
entry_points={
'console_scripts': [
'archive-rotator = archive_rotator.cli:main',
]
},
include_package_data=True,
install_requires=requirements,
license="MIT License",
zip_safe=False,
keywords='backup rotation',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: System :: Archiving :: Backup',
],
test_suite='tests',
tests_require=test_requirements
)
| mit | -6,119,830,027,694,422,000 | 27.31746 | 63 | 0.623879 | false |
team-vigir/vigir_behaviors | vigir_flexbe_states/src/vigir_flexbe_states/read_dynamic_parameter_state.py | 1 | 2586 | #!/usr/bin/env python
from flexbe_core import EventState, Logger
import rospy
from dynamic_reconfigure.client import Client
"""
Created on 11/03/2014
@author: Philipp Schillinger
"""
class ReadDynamicParameterState(EventState):
"""
Reads a given trajectory controller parameter.
"""
LEFT_ARM_WRX = ['left_arm_traj_controller', 'l_arm_wrx']
LEFT_ARM_WRY = ['left_arm_traj_controller', 'l_arm_wry']
LEFT_ARM_ELX = ['left_arm_traj_controller', 'l_arm_elx']
LEFT_ARM_ELY = ['left_arm_traj_controller', 'l_arm_ely']
LEFT_ARM_SHX = ['left_arm_traj_controller', 'l_arm_shx']
LEFT_ARM_SHZ = ['left_arm_traj_controller', 'l_arm_shz']
RIGHT_ARM_WRX = ['right_arm_traj_controller', 'r_arm_wrx']
RIGHT_ARM_WRY = ['right_arm_traj_controller', 'r_arm_wry']
RIGHT_ARM_ELX = ['right_arm_traj_controller', 'r_arm_elx']
RIGHT_ARM_ELY = ['right_arm_traj_controller', 'r_arm_ely']
RIGHT_ARM_SHX = ['right_arm_traj_controller', 'r_arm_shx']
RIGHT_ARM_SHZ = ['right_arm_traj_controller', 'r_arm_shz']
def __init__(self, param):
"""Constructor"""
super(ReadDynamicParameterState, self).__init__(outcomes=['read', 'failed'],
input_keys=['traj_controller'],
output_keys=['parameter_value'])
self._param = param
self._failed = False
self._clients = {}
self._waiting_for_response = []
self._parameter_value_list = []
def execute(self, userdata):
if self._failed:
return 'failed'
value_offset = 0
for i in range(len(self._clients.keys())):
if self._waiting_for_response[i]:
param_dict = self._clients.values()[i].get_configuration(0.1)
if param_dict is not None:
self._waiting_for_response[i] = False
value_list = []
for j in range(len(self._param.values()[i])):
value_list.append(param_dict[self._param.values()[i][j]])
self._parameter_value_list[value_offset:value_offset+len(value_list)] = value_list
value_offset += len(self._param.values()[i])
if all(not waiting for waiting in self._waiting_for_response):
userdata.parameter_value = self._parameter_value_list
return 'read'
def on_enter(self, userdata):
self._clients = {}
self._waiting_for_response = [True] * len(self._param.keys())
self._parameter_value_list = [None] * sum(map(len, self._param.values()))
try:
for server in self._param.keys():
self._clients[server] = Client("/trajectory_controllers/" + userdata.traj_controller[0] + "/" + server + "/" + userdata.traj_controller[1])
except Exception as e:
Logger.logwarn('Was unable to reach parameter server:\n%s' % str(e))
self._failed = True
| bsd-3-clause | 9,053,889,298,215,037,000 | 30.536585 | 143 | 0.664346 | false |
JoshData/django-annotator-store | annotator/views.py | 1 | 4909 | from django.core.exceptions import PermissionDenied, ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseServerError, HttpResponseBadRequest, HttpResponseNotFound, HttpResponseForbidden
from django.views.generic import View
from django.views.generic.base import TemplateView
from django.shortcuts import get_object_or_404
from django.contrib.auth.decorators import login_required
from django.contrib.auth.decorators import permission_required
from django.conf import settings
import json, re
from annotator.models import Document, Annotation
class BaseStorageView(View):
def dispatch(self, request, *args, **kwargs):
# All PUT/POST requests must contain a JSON body. We decode that here and
# interpolate the value into the view argument list.
if request.method in ('PUT', 'POST'):
if not re.match("application/json(; charset=UTF-8)?", request.META['CONTENT_TYPE'], re.I):
return HttpResponseBadRequest("Request must have application/json content type.")
try:
body = json.loads(request.body.decode("utf8"))
except:
return HttpResponseBadRequest("Request body is not JSON.")
if not isinstance(body, dict):
return HttpResponseBadRequest("Request body is not a JSON object.")
# Interpolate the parsed JSON body into the arg list.
args = [body] + list(args)
        # All requests return JSON on success, or some other HttpResponse.
try:
ret = super(BaseStorageView, self).dispatch(request, *args, **kwargs)
if isinstance(ret, HttpResponse):
return ret
# DELETE requests, when successful, return a 204 NO CONTENT.
if request.method == 'DELETE':
return HttpResponse(status=204)
ret = json.dumps(ret)
resp = HttpResponse(ret, mimetype="application/json")
resp["Content-Length"] = len(ret)
return resp
except ValueError as e:
return HttpResponseBadRequest(str(e))
except PermissionDenied as e:
return HttpResponseForbidden(str(e))
except ObjectDoesNotExist as e:
return HttpResponseNotFound(str(e))
except Exception as e:
if settings.DEBUG: raise # when debugging, don't trap
return HttpResponseServerError(str(e))
return ret
class Root(BaseStorageView):
http_method_names = ['get']
def get(self, request):
return {
"name": "Django Annotator Store",
"version": "0.0.1",
}
class Index(BaseStorageView):
http_method_names = ['get', 'post']
def get(self, request):
# index. Returns ALL annotation objects. Seems kind of not scalable.
return Annotation.as_list()
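    # Illustrative request body for create (field names besides 'document' are
    # hypothetical; accepted keys are whatever Annotation.update_from_json allows):
    #   {"document": 1, "text": "...", "quote": "...", "ranges": [...]}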
def post(self, request, client_data):
# create. Creates an annotation object and returns a 303.
obj = Annotation()
obj.owner = request.user if request.user.is_authenticated() else None
try:
obj.document = Document.objects.get(id=client_data.get("document"))
except:
raise ValueError("Invalid or missing 'document' value passed in annotation data.")
obj.set_guid()
obj.data = "{ }"
obj.update_from_json(client_data)
obj.save()
return obj.as_json(request.user) # Spec wants redirect but warns of browser bugs, so return the object.
class Annot(BaseStorageView):
http_method_names = ['get', 'put', 'delete']
def get(self, request, guid):
# read. Returns the annotation.
obj = Annotation.objects.get(guid=guid) # exception caught by base view
return obj.as_json(request.user)
def put(self, request, client_data, guid):
# update. Updates the annotation.
obj = Annotation.objects.get(guid=guid) # exception caught by base view
if not obj.can_edit(request.user):
raise PermissionDenied("You do not have permission to modify someone else's annotation.")
obj.update_from_json(client_data)
obj.save()
return obj.as_json(request.user) # Spec wants redirect but warns of browser bugs, so return the object.
def delete(self, request, guid):
obj = Annotation.objects.get(guid=guid) # exception caught by base view
if not obj.can_edit(request.user):
raise PermissionDenied("You do not have permission to delete someone else's annotation.")
obj.delete()
return None # response handled by the base view
class Search(BaseStorageView):
http_method_names = ['get']
def get(self, request):
try:
document = Document.objects.get(id=request.GET.get("document"))
except:
raise ValueError("Invalid or missing 'document' value passed in the query string.")
qs = Annotation.objects.filter(document=document)
return {
"total": qs.count(),
"rows": Annotation.as_list(qs=qs, user=request.user)
}
class EditorView(TemplateView):
template_name = 'annotator/editor.html'
def get_context_data(self, **kwargs):
context = super(EditorView, self).get_context_data(**kwargs)
context['storage_api_base_url'] = reverse('annotator.root')[0:-1] # chop off trailing slash
context['document'] = get_object_or_404(Document, id=kwargs['doc_id'])
return context
| unlicense | -49,917,196,215,574,370 | 34.316547 | 130 | 0.727032 | false |
ami/lob-python | lob/api_requestor.py | 1 | 2714 | import requests
import lob
import json
import resource
from lob import error
from version import VERSION
def _is_file_like(obj):
"""
Checks if an object is file-like enough to be sent to requests.
In particular, file, StringIO and cStringIO objects are file-like.
Refs http://stackoverflow.com/questions/3450857/python-determining-if-an-object-is-file-like
"""
return hasattr(obj, 'read') and hasattr(obj, 'seek')
class APIRequestor(object):
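    """
    Small helper that issues authenticated requests against the Lob API,
    flattening nested POST parameters and translating JSON responses and
    error payloads into return values or lob.error exceptions.
    """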
def __init__(self, key=None):
self.api_key = key or lob.api_key
def parse_response(self, resp):
payload = json.loads(resp.content)
if resp.status_code == 200:
return payload
elif resp.status_code == 401:
raise error.AuthenticationError(payload['errors'][0]['message'],
resp.content, resp.status_code, resp)
elif resp.status_code in [404, 422]:
raise error.InvalidRequestError(payload['errors'][0]['message'],
resp.content, resp.status_code, resp)
else:
#pragma: no cover
raise error.APIError(payload['errors'][0]['message'], resp.content, resp.status_code, resp) # pragma: no cover
def request(self, method, url, params=None):
headers = {
'User-Agent': 'Lob/v1 PythonBindings/%s' % VERSION
}
if hasattr(lob, 'api_version'):
headers['Lob-Version'] = lob.api_version
if method == 'get':
return self.parse_response(
requests.get(lob.api_base + url, auth=(self.api_key, ''), params=params, headers=headers)
)
elif method == 'delete':
return self.parse_response(
requests.delete(lob.api_base + url, auth=(self.api_key, ''), headers=headers)
)
elif method == 'post':
data = {}
files = params.pop('files', {})
explodedParams = {}
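            # Flatten one level of nested dict parameters into bracketed keys,
            # e.g. (illustratively) {'to': {'name': 'Leore'}} -> {'to[name]': 'Leore'}.
            # LobObject values are later reduced to their id and file-like
            # values are sent as file uploads.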
for k,v in params.iteritems():
if isinstance(v, dict) and not isinstance(v, resource.LobObject):
for k2,v2 in v.iteritems():
explodedParams[k + '[' + k2 + ']'] = v2
else:
explodedParams[k] = v
for k,v in explodedParams.iteritems():
if _is_file_like(v):
files[k] = v
else:
if isinstance(v, resource.LobObject):
data[k] = v.id
else:
data[k] = v
return self.parse_response(
requests.post(lob.api_base + url, auth=(self.api_key, ''), data=data, files=files, headers=headers)
)
| mit | 2,569,158,319,488,001,500 | 34.710526 | 122 | 0.542373 | false |
dragonrider7225/PythonGames | main.py | 1 | 2439 | #!py -3
from solitaire import *
import sys
def main():
opt = int(input("Which game would you like to play?\n\t0: Quit program\n" +
"\t1: Klondike\n"))
if not opt:
sys.exit(0)
if opt == 1:
game = klondike
args = []
game.set_up(*args)
game.show_board()
while True:
if game.get_result() == game.VICTORY:
print("YOU WIN!")
return
if game.get_result() == game.LOSE:
print("YOU LOSE!")
return
m = input("Move: ").split()
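        # Moves are space-separated tokens, e.g. "s" (draw from stock),
        # "m w f" (waste to foundation), "m l2 l5 3" (layout pile 2 to pile 5
        # with 3 cards below the moved card) or "q" (quit); the full grammar
        # is printed by print_valid_moves below.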
if game == klondike:
if m[0][0] == "s":
game.draw()
game.show_board()
elif m[0][0] == "m":
if m[1][0] == "w":
if m[2][0] == "f":
game.m()
elif m[2][0] == "l":
game.m(m[1], int(m[2][1:]))
else:
print_valid_moves(game)
continue
elif m[1][0] == "l":
if m[2][0] == "f":
game.m(int(m[1][1:]), "f")
elif m[2][0] == "l":
if len(m) == 3:
game.m(int(m[1][1:]), int(m[2][1:]))
else:
game.m(int(m[1][1:]), int(m[3]), int(m[2][1:]))
else:
print_valid_moves(game)
elif m[1][0] == "f":
if m[2][0] == "l":
game.m("f", int(m[1][1:]), int(m[2][1:]))
else:
print_valid_moves(game)
else:
print_valid_moves(game)
elif m[0][0] == "q":
sys.exit(0)
else:
print_valid_moves(game)
def print_valid_moves(game):
game.show_board()
print("Please enter a valid move:")
if game == klondike:
print("s[tock]")
print("m[ove] w[aste] f[oundation]")
print("m[ove] w[aste] lN")
print("m[ove] lN f[oundation]")
print("m[ove] lN1 lN2 C")
print("m[ove] fM lN")
print("q[uit]")
print("0 <= N* <= 6, 0 <= M <= 3, C is the number of cards", end=" ")
print("that are below the card to move from one layout", end=" ")
print("pile to another.")
if __name__ == "__main__":
main()
| apache-2.0 | -2,390,675,310,736,939,000 | 31.959459 | 79 | 0.373514 | false |
akimtke/arke | textserver/main.py | 1 | 2812 | # -*- coding: utf-8 -*-
import pyjsonrpc
from gsmmodem.modem import GsmModem, SentSms, Sms
from gsmmodem.exceptions import TimeoutException, PinRequiredError, IncorrectPinError
def text(number, message, key):
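    """Send `message` to `number` through the GSM modem on /dev/ttyUSB0 when
    the shared key matches; returns True on success, otherwise a status string."""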
if key.strip() == '9703BB8D5A':
print "Creating modem instance"
modem = GsmModem('/dev/ttyUSB0', 9600)
try:
print "Connecting modem"
modem.connect()
except PinRequiredError:
print "Pin required"
try:
print "Waiting for Network coverage info"
modem.waitForNetworkCoverage(5)
except TimeoutException:
print "Signal strength not strong enough"
return "No signal"
else:
try:
print "Sending %s to %s" % (message, number)
sms = modem.sendSms(number, message)
except TimeoutException:
print "Failed to send message"
return 'Error encountered'
print "Closing modem"
modem.close()
return True
else:
return 'Key is not correct'
def getUnreadText(key):
if key.strip() == '9703BB8D5A':
modem = GsmModem('/dev/ttyUSB0', 9600)
try:
print "Connecting mode"
modem.connect()
except:
return "Error connecting"
try:
messages = modem.listStoredSms(status=Sms.STATUS_RECEIVED_UNREAD)
except Exception as e:
return str(e)
modem.close()
retString = ""
print "Got %d messages" % len(messages)
for message in messages:
retString = retString + "%s : %s" % (message.number, message.text)
return retString
else:
return "Incorrect key"
def getAllText(key):
if key.strip() == '9703BB8D5A':
modem = GsmModem('/dev/ttyUSB0', 9600)
try:
print "Connecting modem"
modem.connect()
except Exception as e:
return str(e)
try:
messages = modem.listStoredSms()
except Exception as e:
return str(e)
modem.close()
retString = ""
print "Got %d messages" % len(messages)
for message in messages:
retString = retString + "%s : %s" % (message.number, message.text) + "\n"
return retString
else:
return "Incorrect key"
class RequestHandler(pyjsonrpc.HttpRequestHandler):
methods = {
"text": text,
"getUnreadText": getUnreadText,
"getAllText": getAllText
}
http_server = pyjsonrpc.ThreadingHttpServer(
server_address = ('192.168.0.20', 8081),
RequestHandlerClass = RequestHandler
)
print "Starting HTTP Server..."
http_server.serve_forever()
| gpl-3.0 | 7,310,618,908,439,127,000 | 26.841584 | 85 | 0.562945 | false |
SKIRT/PTS | magic/region/region.py | 1 | 12293 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.magic.region.region Contains the (abstract) Region class.
# -----------------------------------------------------------------
# Ensure Python 3 functionality
from __future__ import absolute_import, division, print_function
# Import standard modules
import copy
import string
from abc import ABCMeta
# Import astronomical modules
from astropy.coordinates import frame_transform_graph
# Import the relevant PTS classes and modules
from ...core.tools import types
# -----------------------------------------------------------------
def make_point_template(fmt):
"""
This function ...
:param fmt:
:return:
"""
return 'point({x:' + fmt + '},{y:' + fmt + '})'
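# For example, make_point_template('.4f').format(x=1.0, y=2.0) yields the
# ds9 region string 'point(1.0000,2.0000)'.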
# -----------------------------------------------------------------
def make_line_template(fmt):
"""
    This function ...
:return:
"""
return 'line({x1:' + fmt + '},{y1:' + fmt + '},{x2:' + fmt + '},{y2:' + fmt + '}) # line=0 0'
# -----------------------------------------------------------------
def make_vector_template(fmt, radunitstr):
"""
This function ...
    :param fmt:
:param radunitstr:
:return:
"""
return '# vector({x:' + fmt + '},{y:' + fmt + '},{l:' + fmt + '}' + radunitstr + ',{ang:' + fmt + '}) vector=1'
# -----------------------------------------------------------------
def make_circle_template(fmt, radunitstr):
"""
This function ...
:param fmt:
:param radunitstr:
:return:
"""
return 'circle({x:' + fmt + '},{y:' + fmt + '},{r:' + fmt + '}' + radunitstr + ')'
# -----------------------------------------------------------------
def make_ellipse_template(fmt, radunitstr, hmsdms=False):
"""
    This function ...
    :param fmt:
:param radunitstr:
:param hmsdms:
:return:
"""
if hmsdms: return 'ellipse({x},{y},{r1:' + fmt + '}' + radunitstr + ',{r2:' + fmt + '}' + radunitstr + ',{ang:' + fmt + '})'
else: return 'ellipse({x:' + fmt + '},{y:' + fmt + '},{r1:' + fmt + '}' + radunitstr + ',{r2:' + fmt + '}' + radunitstr + ',{ang:' + fmt + '})'
# -----------------------------------------------------------------
def make_rectangle_template(fmt, radunitstr):
"""
This function ...
:param fmt:
:param radunitstr:
:return:
"""
return 'box({x:' + fmt + '},{y:' + fmt + '},{d1:' + fmt + '}' + radunitstr + ',{d2:' + fmt + '}' + radunitstr + ',{ang:' + fmt + '})'
# -----------------------------------------------------------------
def make_polygon_template():
"""
    This function ...
:return:
"""
return 'polygon({c})'
# -----------------------------------------------------------------
def make_text_template(fmt):
"""
This function ...
:param fmt:
:return:
"""
return '# text({x:' + fmt + '},{y:' + fmt + '}) text="{text:}"'
# -----------------------------------------------------------------
def make_composite_template(fmt):
"""
This function ...
:param fmt:
:return:
"""
return '# composite({x:' + fmt + '},{y:' + fmt + '},{ang:' + fmt + '}) || composite=1'
# -----------------------------------------------------------------
def add_info(string, reg):
"""
This function ...
:param string:
:param reg:
:return:
"""
start_chars = " #" if not string.startswith("#") else " "
if reg.has_info: string += start_chars
if reg.has_label: string += " text={" + reg.label + "}"
if reg.has_meta:
if "text" in reg.meta: string += " text={" + reg.meta["text"] + "}"
string += " " + " ".join(key + "=" + value for key, value in reg.meta.items() if types.is_string_type(value) and key != "text")
if reg.has_appearance: string += " " + " ".join(key + "=" + value for key, value in reg.appearance.items())
return string
# -----------------------------------------------------------------
coordinate_systems = ['fk5', 'fk4', 'icrs', 'galactic', 'wcs', 'physical', 'image', 'ecliptic']
coordinate_systems += ['wcs{0}'.format(letter) for letter in string.ascii_lowercase]
coordsys_name_mapping = dict(zip(frame_transform_graph.get_names(), frame_transform_graph.get_names()))
coordsys_name_mapping['ecliptic'] = 'geocentrictrueecliptic' # needs expert attention TODO
# -----------------------------------------------------------------
class Region(object):
"""
This class ...
"""
default_extension = "reg"
# -----------------------------------------------------------------
__metaclass__ = ABCMeta
# -----------------------------------------------------------------
def __init__(self, **kwargs):
"""
The constructor ...
:param kwargs:
"""
# Set the label
self.label = kwargs.pop("label", None)
# Set the 'include' flag
self.include = kwargs.pop("include", True)
# Set the appearance info
self.appearance = kwargs.pop("appearance", dict())
# Set the meta information
self.meta = kwargs.pop("meta", dict())
# -----------------------------------------------------------------
@property
def has_label(self):
"""
This function ...
:return:
"""
return self.label is not None
# -----------------------------------------------------------------
@property
def has_appearance(self):
"""
This function ...
:return:
"""
return len(self.appearance) > 0
# -----------------------------------------------------------------
@property
def has_meta(self):
"""
This function ...
:return:
"""
return len(self.meta) > 0
# -----------------------------------------------------------------
@property
def has_info(self):
"""
This property ...
:return:
"""
return self.has_label or self.has_appearance or self.has_meta
# -----------------------------------------------------------------
def copy(self):
"""
This function ...
:return:
"""
return copy.deepcopy(self)
# -----------------------------------------------------------------
def __str__(self):
"""
This function ...
:return:
"""
pass
# -----------------------------------------------------------------
class PixelRegion(Region):
"""
This class ...
"""
def __add__(self, other):
"""
This function ...
:param other:
:return:
"""
copy = self.copy()
copy += other
return copy
# -----------------------------------------------------------------
def __iadd__(self, other):
"""
This function ...
:param other:
:return:
"""
self.center += other
return self
# -----------------------------------------------------------------
def __sub__(self, other):
"""
This function ...
:param other:
:return:
"""
copy = self.copy()
copy -= other
return copy
# -----------------------------------------------------------------
def __isub__(self, other):
"""
This function ...
:param other:
:return:
"""
self.center -= other
return self
# -----------------------------------------------------------------
@property
def x_min(self):
"""
This property ...
:return:
"""
return self.axis1_min
# -----------------------------------------------------------------
@property
def x_min_pixel(self):
"""
This function ...
:return:
"""
return int(round(self.x_min))
# -----------------------------------------------------------------
@property
def x_max(self):
"""
This function ...
:return:
"""
return self.axis1_max
# -----------------------------------------------------------------
@property
def x_max_pixel(self):
"""
This function ...
:return:
"""
return int(round(self.x_max))
# -----------------------------------------------------------------
@property
def y_min(self):
"""
This function ...
:return:
"""
return self.axis2_min
# -----------------------------------------------------------------
@property
def y_min_pixel(self):
"""
This function ...
:return:
"""
return int(round(self.y_min))
# -----------------------------------------------------------------
@property
def y_max(self):
"""
This function ...
:return:
"""
return self.axis2_max
# -----------------------------------------------------------------
@property
def y_max_pixel(self):
"""
This function ...
:return:
"""
return int(round(self.y_max))
# -----------------------------------------------------------------
@property
def bounding_box(self):
"""
This function ...
:return:
"""
# Import the relevant PTS classes and modules
from .rectangle import PixelRectangleRegion
# Create the rectangle region and return it
return PixelRectangleRegion(self.center, self.unrotated_radius)
# -----------------------------------------------------------------
def saveto(self, path):
"""
This function ...
:param path:
:return:
"""
coordsys = 'image'
output = '# Region file format: DS9 PTS/magic/region\n'
output += '{}\n'.format(coordsys)
output += str(self)
# Write
with open(path, 'w') as fh: fh.write(output)
# -----------------------------------------------------------------
class SkyRegion(Region):
"""
This class ...
"""
@property
def ra_min(self):
return self.axis1_min
# -----------------------------------------------------------------
@property
def ra_max(self):
return self.axis1_max
# -----------------------------------------------------------------
@property
def dec_min(self):
return self.axis2_min
# -----------------------------------------------------------------
@property
def dec_max(self):
return self.axis2_max
# -----------------------------------------------------------------
@property
def bounding_box(self):
"""
This function ...
:return:
"""
# Import the relevant PTS classes and modules
from .rectangle import SkyRectangleRegion
# Create the rectangle region and return it
return SkyRectangleRegion(self.center, self.unrotated_radius)
# -----------------------------------------------------------------
def saveto(self, path):
"""
This function ...
:param path:
:return:
"""
coordsys = 'fk5'
output = '# Region file format: DS9 PTS/magic/region\n'
output += '{}\n'.format(coordsys)
output += str(self)
# Write
with open(path, 'w') as fh: fh.write(output)
# -----------------------------------------------------------------
class PhysicalRegion(Region):
"""
This class ...
"""
@property
def bounding_box(self):
"""
This function ...
:return:
"""
# Import the relevant PTS classes and modules
from .rectangle import PhysicalRectangleRegion
# Create the rectangle region and return it
return PhysicalRectangleRegion(self.center, self.radius)
# -----------------------------------------------------------------
| agpl-3.0 | 2,789,864,496,829,080,600 | 21.430657 | 147 | 0.376342 | false |
tomduck/pandoc-eqnos | setup.py | 1 | 1971 | """setup.py - install script for pandoc-eqnos."""
# Copyright 2015-2020 Thomas J. Duck.
# All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
import io
from setuptools import setup
# pylint: disable=invalid-name
DESCRIPTION = """\
A pandoc filter for numbering equations and their references
when converting markdown to other formats.
"""
# From https://stackoverflow.com/a/39671214
__version__ = re.search(
r'__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
io.open('pandoc_eqnos.py', encoding='utf_8_sig').read()
).group(1)
setup(
name='pandoc-eqnos',
version=__version__,
author='Thomas J. Duck',
author_email='tomduck@tomduck.ca',
description='Equation number filter for pandoc',
long_description=DESCRIPTION,
license='GPL',
keywords='pandoc equation numbers filter',
url='https://github.com/tomduck/pandoc-eqnos',
download_url='https://github.com/tomduck/pandoc-eqnos/tarball/' + \
__version__,
install_requires=['pandoc-xnos >= 2.5.0, < 3.0'],
py_modules=['pandoc_eqnos'],
entry_points={'console_scripts':['pandoc-eqnos = pandoc_eqnos:main']},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: End Users/Desktop',
'Environment :: Console',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Programming Language :: Python'
]
)
| gpl-3.0 | 4,561,671,213,932,528,600 | 30.790323 | 75 | 0.676814 | false |
jdstregz/sky-scraper | prototypes/prototypeAWS/prototypeAWS/settings.py | 1 | 3478 | # -*- coding: utf-8 -*-
# Scrapy settings for prototypeAWS project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'prototypeAWS'
SPIDER_MODULES = ['prototypeAWS.spiders']
NEWSPIDER_MODULE = 'prototypeAWS.spiders'
#SPLASH_URL = 'http://192.168.59.103:8050'
SPLASH_URL = 'http://localhost:8050/'
DUPEFILTER_CLASS = 'scrapy_splash.SplashAwareDupeFilter'
HTTPCACHE_STORAGE = 'scrapy_splash.SplashAwareFSCacheStorage'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'prototypeAWS (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
SPIDER_MIDDLEWARES = {
'scrapy_splash.SplashDeduplicateArgsMiddleware': 100,
}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
'scrapy_splash.SplashCookiesMiddleware': 723,
'scrapy_splash.SplashMiddleware': 725,
'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 810,
}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'prototypeAWS.pipelines.SomePipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| mit | 843,985,632,857,596,800 | 34.489796 | 109 | 0.769695 | false |
hakancelik96/coogger | core/threaded_comment/serializers.py | 1 | 1282 | from rest_framework import serializers
from .models import ThreadedComments
class ReplySerializer(serializers.ModelSerializer):
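    """
    Serializer for ThreadedComments replies: author details (username, title,
    avatar_url) come from the related user, parent linkage from get_parent,
    plus vote and view counts alongside the comment body.
    """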
username = serializers.ReadOnlyField(source="user.username")
title = serializers.ReadOnlyField(source="user.userprofile.title")
avatar_url = serializers.ReadOnlyField(source="user.githubauthuser.avatar_url")
parent_user = serializers.ReadOnlyField(source="get_parent.username")
parent_id = serializers.ReadOnlyField(source="get_parent.id")
reply_count = serializers.ReadOnlyField()
permlink = serializers.ReadOnlyField()
image_address = serializers.ReadOnlyField()
class Meta:
model = ThreadedComments
fields = [
"id",
"title",
"app_label",
"updated",
"model_name",
"content_type",
"object_id",
"username",
"reply",
"avatar_url",
"body",
"image_address",
"permlink",
"reply_count",
"depth",
"get_absolute_url",
"views",
"upvote_count",
"downvote_count",
"parent_permlink",
"parent_user",
"parent_id",
"created",
]
| mit | -7,206,508,981,116,535,000 | 29.52381 | 83 | 0.561622 | false |
richard-shepherd/monopyly | AIs/Stephen Chan/big_brick.py | 1 | 20396 | from monopyly import *
import random
class BigBrickAI(PlayerAIBase):
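    """
    A simple monopyly AI: buys most unowned squares it lands on while cash
    allows, bids a self-tuning premium at auctions, builds one house per
    property on completed sets when cash is plentiful, and cycles through a
    fixed list of favourite properties when proposing deals or mortgaging.
    """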
mClassDebug = True
def __init__(self):
self.p("__init__")
self.turn_count = 0
self.chance_cards_count=0
self.community_chest_cards_count=0
self.property_mortgage_cards_count=0
self.auction_spread=4
self.num_players=4
self.num_jail_freecards=0
self.property_set_count={}
self.property_offered_for_auction_adjustment=12
self.properties_we_like = [Square.Name.BOW_STREET,
Square.Name.MARLBOROUGH_STREET,
Square.Name.VINE_STREET,
Square.Name.STRAND,
Square.Name.FLEET_STREET,
Square.Name.TRAFALGAR_SQUARE,
Square.Name.LEICESTER_SQUARE,
Square.Name.COVENTRY_STREET,
Square.Name.PICCADILLY,
Square.Name.REGENT_STREET,
Square.Name.OXFORD_STREET,
Square.Name.BOND_STREET,
Square.Name.PARK_LANE,
Square.Name.MAYFAIR,
Square.Name.PALL_MALL,
Square.Name.WHITEHALL,
Square.Name.NORTHUMBERLAND_AVENUE,
Square.Name.THE_ANGEL_ISLINGTON,
Square.Name.EUSTON_ROAD,
Square.Name.PENTONVILLE_ROAD,
Square.Name.OLD_KENT_ROAD,
Square.Name.WHITECHAPEL_ROAD]
self.properties_we_like_current = Square.Name.BOW_STREET
def p(self, txt):
#print(txt)
pass
def get_name(self):
return "The Big Brick"
def start_of_game(self):
# self.p("Start_of_game")
self.turn_count = 0
return
def start_of_turn(self, game_state, player):
#self.p("Start of turn")
#self.p(self.turn_count)
self.turn_count = self.turn_count + 1
self.num_players = game_state.number_of_players
return
def player_landed_on_square(self, game_state, square, player):
'''
Called when a player lands on a square. All AIs receive this notification.
No response is required.
'''
pass
def landed_on_unowned_property(self, game_state, player, property):
'''
Called when the AI lands on an unowned property. Only the active
player receives this notification.
Must return either the BUY or DO_NOT_BUY action from the
PlayerAIBase.Action enum.
The default behaviour is DO_NOT_BUY.
'''
if player.state.cash > 100:
return PlayerAIBase.Action.BUY
else:
return PlayerAIBase.Action.DO_NOT_BUY
def money_will_be_taken(self, player, amount):
'''
Called shortly before money will be taken from the player.
Before the money is taken, there will be an opportunity to
make deals and/or mortgage properties. (This will be done via
subsequent callbacks.)
No response is required.
'''
pass
def money_taken(self, player, amount):
'''
Called when money has been taken from the player.
No response is required.
'''
pass
def money_given(self, player, amount):
'''
Called when money has been given to the player.
No response is required.
'''
pass
def got_get_out_of_jail_free_card(self):
'''
Called when the player has picked up a
Get Out Of Jail Free card.
No response is required.
'''
self.num_jail_freecards = self.num_jail_freecards + 1
pass
def players_birthday(self):
'''
Called when a player picks up the 'It is your birthday...'
Community Chest card.
You should return "Happy Birthday!" (with this casing and the
exclamation mark). If not, you will have to pay £100 instead of
the standard £10.
'''
return "Happy Birthday!"
def pay_ten_pounds_or_take_a_chance(self, game_state, player):
'''
Called when the player picks up the "Pay a £10 fine or take a Chance" card.
Return either:
PlayerAIBase.Action.PAY_TEN_POUND_FINE
or
PlayerAIBase.Action.TAKE_A_CHANCE
'''
if player.state.cash > 100:
return PlayerAIBase.Action.TAKE_A_CHANCE
return PlayerAIBase.Action.PAY_TEN_POUND_FINE
def property_offered_for_auction(self, game_state, player, property):
'''
Called when a property is put up for auction.
Properties are auctioned when a player lands on an unowned square but does
not want to buy it. All players take part in the auction, including the
player who landed on the square.
The property will be sold to the highest bidder using the eBay rule,
ie, for £1 more than the second-highest bid.
Return the amount you bid. To put in a bid this must be a positive integer.
Zero means that you are not bidding (it does not mean that you are bidding
zero).
The default behaviour is not to bid.
'''
adjustment = self.property_offered_for_auction_adjustment
return property.price + self.property_offered_for_auction_adjustment # random.randint(-100, 50)
def auction_result(self, status, property, player, amount_paid):
'''
Called with the result of an auction. All players receive
this notification.
status is either AUCTION_SUCCEEDED or AUCTION_FAILED.
If the auction succeeded, the property, the player who won
the auction and the amount they paid are passed to the AI.
If the auction failed, the player will be None and the
amount paid will be 0.
No response is required.
'''
# Idea is that we make a better offer for the auction next time if we fail this time, visa versa.
if player == None:
self.property_offered_for_auction_adjustment = self.property_offered_for_auction_adjustment + 1
return
# squeeze the player in auction for the best deal the next time around!
if player.name == self.get_name():
#self.p("S " + player.name + str(amount_paid))
if status == PlayerAIBase.Action.AUCTION_SUCCEEDED:
self.property_offered_for_auction_adjustment = self.property_offered_for_auction_adjustment - 1
else:
self.property_offered_for_auction_adjustment = self.property_offered_for_auction_adjustment + 1 #?
else:
#self.p("F" + player.name + str(amount_paid))
if status == PlayerAIBase.Action.AUCTION_SUCCEEDED:
self.property_offered_for_auction_adjustment = self.property_offered_for_auction_adjustment + 1
else:
self.property_offered_for_auction_adjustment = self.property_offered_for_auction_adjustment - 1 #?
pass
def build_houses(self, game_state, player):
'''
Called near the start of the player's turn to give the option of building houses.
Return a list of tuples indicating which properties you want to build houses
on and how many houses to build on each. For example:
[(park_lane, 3), (mayfair, 4)]
The properties should be Property objects.
Return an empty list if you do not want to build.
Notes:
- You must own a whole set of unmortgaged properties before you can
build houses on it.
- You can build on multiple sets in one turn. Just specify all the streets
and houses you want to build.
- Build five houses on a property to have a "hotel".
- You specify the _additional_ houses you will be building, not the
total after building. For example, if Park Lane already has 3 houses
and you specify (park_lane, 2) you will end up with 5
houses (ie, a hotel).
- Sets must end up with 'balanced' housing. No square in a set can
have more than one more house than any other. If you request an
unbalanced build, the whole transaction will be rolled back, even
if it includes balanced building on other sets as well.
- If you do not have (or cannot raise) enough money to build all the
houses specified, the whole transaction will be rolled back. Between
this function call and money being taken, you will have an opportunity
to mortgage properties or make deals.
The default behaviour is not to build.
'''
# I like Sophie's idea!
if player.state.cash < 1000:
return []
for owned_set in player.state.owned_unmortgaged_sets:
if not owned_set.can_build_houses:
continue
return [(p, 1) for p in owned_set.properties]
return []
def sell_houses(self, game_state, player):
'''
Gives the player the option to sell properties.
This is called when any debt, fine or rent has to be paid. It is
called just before mortgage_properties (below).
Notes:
- You cannot mortgage properties with houses on them, so if you
plan to mortgage, make sure you sell all the houses first.
- For each house sold you receive half the price that they were
bought for.
- Houses on a set must end up 'balanced', ie no property can have
more than one more house than any other property in the set.
Return a list of tuples of the streets and number of houses you
want to sell. For example:
[(old_kent_road, 1), (bow_street, 1)]
The streets should be Property objects.
The default is not to sell any houses.
'''
return []
def mortgage_properties(self, game_state, player):
'''
Gives the player an option to mortgage properties.
This is called before any debt is paid (house building, rent,
tax, fines from cards etc).
Notes:
- You receive half the face value of each property mortgaged.
- You cannot mortgage properties with houses on them.
(The AI will have been given the option to sell houses before this
function is called.)
Return a list of properties to mortgage, for example:
[bow_street, liverpool_street_station]
The properties should be Property objects.
Return an empty list if you do not want to mortgage anything.
The default behaviour is not to mortgage anything.
'''
if player.state.cash > 100:
return []
property_name = self.get_property_to_propose()
for aloop in range(0, len(self.properties_we_like)):
property = game_state.board.get_square_by_name(property_name)
if(property.owner is player or property.owner is None):
# The property is either not owned, or owned by us...
property_name = self.get_property_to_propose()
property = game_state.board.get_square_by_name(property_name)
#self.p(property.name)
return [property]
return []
def unmortgage_properties(self, game_state, player):
'''
Called near the start of the player's turn to give them the
opportunity to unmortgage properties.
Unmortgaging costs half the face value plus 10%. Between deciding
to unmortgage and money being taken the player will be given the
opportunity to make deals or sell other properties. If after this
they do not have enough money, the whole transaction will be aborted,
and no properties will be unmortgaged and no money taken.
Return a list of property names to unmortgage, like:
[old_kent_road, bow_street]
The properties should be Property objects.
The default is to return an empty list, ie to do nothing.
'''
return []
def get_out_of_jail(self, game_state, player):
'''
Called in the player's turn, before the dice are rolled, if the player
is in jail.
There are three possible return values:
PlayerAIBase.Action.BUY_WAY_OUT_OF_JAIL
PlayerAIBase.Action.PLAY_GET_OUT_OF_JAIL_FREE_CARD
PlayerAIBase.Action.STAY_IN_JAIL
Buying your way out of jail will cost £50.
The default action is STAY_IN_JAIL.
'''
if self.num_jail_freecards > 0:
self.num_jail_freecards = self.num_jail_freecards -1
return PlayerAIBase.Action.PLAY_GET_OUT_OF_JAIL_FREE_CARD
if player.state.cash >=50:
return PlayerAIBase.Action.BUY_WAY_OUT_OF_JAIL
return PlayerAIBase.Action.STAY_IN_JAIL
def propose_deal(self, game_state, player):
'''
Called to allow the player to propose a deal.
You return a DealProposal object.
If you do not want to make a deal, return None.
If you want to make a deal, you provide this information:
- The player number of the player you are proposing the deal to
- A list of properties offered
- A list of properties wanted
- Maximum cash offered as part of the deal
- Minimum cash wanted as part of the deal.
Properties offered and properties wanted are passed as lists of
Property objects.
If you offer money as part of the deal, set the cash wanted to zero
and vice versa.
Note that the cash limits will not be shown to the proposed-to player.
When the deal is offered to them, they set their own limits for accepting
the deal without seeing your limits. If the limits are acceptable to both
players, the deal will be done at the halfway point.
For example, Player1 proposes:
Propose to: Player2
Properties offered: Mayfair
Properties wanted: (none)
Maximum cash offered: 0
Minimum cash wanted: 500
Player2 accepts with these limits:
Maximum cash offered: 1000
Minimum cash wanted: 0
The deal will be done with Player2 receiving Mayfair and paying £750
to Player1.
The only 'negotiation' is in the managing of cash along with the deal
as discussed above. There is no negotiation about which properties are
part of the deal. If a deal is rejected because it does not contain the
right properties, another deal can be made at another time with different
lists of properties.
Example construction and return of a DealProposal object:
return DealProposal(
propose_to_player_number=2,
properties_offered=[vine_street, bow_street],
properties_wanted=[park_lane],
maximum_cash_offered=200)
The default is for no deal to be proposed.
'''
# systematically propose a deal one by one
property_name = self.get_property_to_propose()
for aloop in range(0, len(self.properties_we_like)):
property = game_state.board.get_square_by_name(property_name)
if(property.owner is player or property.owner is None):
# The property is either not owned, or owned by us...
property_name = self.get_property_to_propose()
property = game_state.board.get_square_by_name(property_name)
price_offered = property.price * 1.5
if player.state.cash > price_offered:
return DealProposal(
properties_wanted=[property],
maximum_cash_offered=price_offered,
propose_to_player=property.owner)
#self.p(property_to_propose_deal)
return None
# Rotates to the next property we want. Yes! its wacky.
def get_property_to_propose(self):
property_to_propose_deal = self.properties_we_like_current
index = self.properties_we_like.index(property_to_propose_deal)+1
if index > len(self.properties_we_like)-1:
index = 0
self.properties_we_like_current = self.properties_we_like[index]
return property_to_propose_deal
def deal_proposed(self, game_state, player, deal_proposal):
'''
Called when another player proposes a deal to you.
See propose_deal (above) for details of the DealProposal object.
Return a DealResponse object.
To reject a deal:
return DealResponse(DealResponse.Action.REJECT)
To accept a deal:
return DealResponse(DealResponse.Action.ACCEPT, maximum_cash_offered=300)
or
return DealResponse(DealResponse.Action.ACCEPT, minimum_cash_wanted=800)
The default is to reject the deal.
'''
#return DealResponse(DealResponse.Action.REJECT)
total_price = 0
for p in deal_proposal.properties_wanted:
total_price = total_price + p.price
if total_price > 1000:
return DealResponse(
action=DealResponse.Action.ACCEPT,
minimum_cash_wanted= total_price * 2.1)
return DealResponse(DealResponse.Action.REJECT)
def deal_result(self, deal_info):
'''
Called when a proposed deal has finished. The players involved in
the deal receive this notification.
deal_info is a PlayerAIBase.DealInfo 'enum' giving indicating
whether the deal succeeded, and if not why not.
No response is required.
'''
#self.p("deal = " + str(deal_info))
pass
def deal_completed(self, deal_result):
'''
Called when a deal has successfully completed to let all
players know the details of the deal which took place.
deal_result is a DealResult object.
Note that the cash_transferred_from_proposer_to_proposee in
the deal_result can be negative if cash was transferred from
the proposee to the proposer.
No response is required.
'''
pass
def player_went_bankrupt(self, player):
'''
Called when a player goes bankrupt.
All non-bankrupt players receive this notification.
player is a Player object.
No response is required.
'''
pass
def player_ran_out_of_time(self, player):
'''
Called when a player is removed from the game because
they ran out of processing time.
All non-bankrupt players receive this notification.
player is a Player object.
No response is required.
'''
pass
def game_over(self, winner, maximum_rounds_played):
'''
Called when the game is over.
All players receive this notification.
winner is the winning player (a Player object) or None if the
game was drawn.
maximum_rounds_played is True if the game went to the round-limit.
No response is required.
'''
#self.p("turns = " + str(self.turn_count))
pass
def ai_error(self, message):
'''
Called if the return value from any of the Player AI functions
was invalid. for example, if it was not of the expected type.
No response is required.
'''
pass
def eminent_domain(self, game_state, player):
'''
Called when the eminent-domain rule is being played.
This rule is invoked in 'boring' games at round 200 if no
player has built any houses. All properties are compulsorily
repurchased by the bank and then immediately auctioned.
This method is called after the repurchase, but before
the auction.
No response is necessary.
'''
pass
| mit | -4,966,430,030,353,533,000 | 33.268908 | 114 | 0.605836 | false |
opoplawski/pytest-cov | src/pytest_cov/embed.py | 1 | 1885 | """Activate coverage at python startup if appropriate.
The python site initialisation will ensure that anything we import
will be removed and not visible at the end of python startup. However
we minimise all work by putting these init actions in this separate
module and only importing what is needed when needed.
For normal python startup when coverage should not be activated the pth
file checks a single env var and does not import or call the init fn
here.
For python startup when an ancestor process has set the env indicating
that code coverage is being collected we activate coverage based on
info passed via env vars.
"""
import os
def multiprocessing_start(obj):
cov = init()
if cov:
multiprocessing.util.Finalize(None, multiprocessing_finish, args=(cov,), exitpriority=1000)
def multiprocessing_finish(cov):
cov.stop()
cov.save()
try:
import multiprocessing.util
except ImportError:
pass
else:
multiprocessing.util.register_after_fork(multiprocessing_start, multiprocessing_start)
def init():
# Only continue if ancestor process has set everything needed in
# the env.
cov_source = os.environ.get('COV_CORE_SOURCE')
cov_config = os.environ.get('COV_CORE_CONFIG')
if cov_config:
# Import what we need to activate coverage.
import coverage
# Determine all source roots.
if not cov_source:
cov_source = None
else:
cov_source = cov_source.split(os.pathsep)
# Activate coverage for this process.
cov = coverage.coverage(source=cov_source,
data_suffix=True,
config_file=cov_config,
auto_data=True)
cov.load()
cov.start()
cov._warn_no_data = False
cov._warn_unimported_source = False
return cov
| mit | 5,013,430,591,248,774,000 | 28.920635 | 99 | 0.668435 | false |
rjschwei/azure-sdk-for-python | azure-mgmt-logic/azure/mgmt/logic/models/x12_validation_override.py | 1 | 3184 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class X12ValidationOverride(Model):
"""X12ValidationOverride.
:param message_id: The message id on which the validation settings has to
be applied.
:type message_id: str
:param validate_edi_types: The value indicating whether to validate EDI
types.
:type validate_edi_types: bool
:param validate_xsd_types: The value indicating whether to validate XSD
types.
:type validate_xsd_types: bool
:param allow_leading_and_trailing_spaces_and_zeroes: The value indicating
whether to allow leading and trailing spaces and zeroes.
:type allow_leading_and_trailing_spaces_and_zeroes: bool
:param validate_character_set: The value indicating whether to validate
character Set.
:type validate_character_set: bool
:param trim_leading_and_trailing_spaces_and_zeroes: The value indicating
whether to trim leading and trailing spaces and zeroes.
:type trim_leading_and_trailing_spaces_and_zeroes: bool
:param trailing_separator_policy: The trailing separator policy. Possible
values include: 'NotSpecified', 'NotAllowed', 'Optional', 'Mandatory'
:type trailing_separator_policy: str or :class:`TrailingSeparatorPolicy
<azure.mgmt.logic.models.TrailingSeparatorPolicy>`
"""
_attribute_map = {
'message_id': {'key': 'messageId', 'type': 'str'},
'validate_edi_types': {'key': 'validateEDITypes', 'type': 'bool'},
'validate_xsd_types': {'key': 'validateXSDTypes', 'type': 'bool'},
'allow_leading_and_trailing_spaces_and_zeroes': {'key': 'allowLeadingAndTrailingSpacesAndZeroes', 'type': 'bool'},
'validate_character_set': {'key': 'validateCharacterSet', 'type': 'bool'},
'trim_leading_and_trailing_spaces_and_zeroes': {'key': 'trimLeadingAndTrailingSpacesAndZeroes', 'type': 'bool'},
'trailing_separator_policy': {'key': 'trailingSeparatorPolicy', 'type': 'TrailingSeparatorPolicy'},
}
def __init__(self, message_id=None, validate_edi_types=None, validate_xsd_types=None, allow_leading_and_trailing_spaces_and_zeroes=None, validate_character_set=None, trim_leading_and_trailing_spaces_and_zeroes=None, trailing_separator_policy=None):
self.message_id = message_id
self.validate_edi_types = validate_edi_types
self.validate_xsd_types = validate_xsd_types
self.allow_leading_and_trailing_spaces_and_zeroes = allow_leading_and_trailing_spaces_and_zeroes
self.validate_character_set = validate_character_set
self.trim_leading_and_trailing_spaces_and_zeroes = trim_leading_and_trailing_spaces_and_zeroes
self.trailing_separator_policy = trailing_separator_policy
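
# A minimal construction sketch (the field values below are illustrative only,
# not taken from the SDK):
#
#     override = X12ValidationOverride(
#         message_id='850',
#         validate_edi_types=True,
#         validate_character_set=True,
#         trailing_separator_policy='Optional')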
| mit | 9,220,224,516,508,743,000 | 52.966102 | 252 | 0.685616 | false |