repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---|
trivoldus28/pulsarch-verilog | tools/local/bas-release/bas,3.9/lib/riesling/ReadConfigDiag.py | 1 | 7933 |
# ========== Copyright Header Begin ==========================================
#
# OpenSPARC T1 Processor File: ReadConfigDiag.py
# Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES.
#
# The above named program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License version 2 as published by the Free Software Foundation.
#
# The above named program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this work; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ========== Copyright Header End ============================================
"""handle riesling frontend's '-c config' option
"""
import sys, types
OBJECT = 'OBJECT'
TYPE = 'TYPE'
OTHER = 'other'
PLI_SOCKET = 'pli-socket'
TYPE_NIAGARA = 'niagara'
TYPE_NIAGARA2 = 'niagara2'
TYPE_SYS_CONFIG = 'sys_config'
class ReadConfigDiag:
"""handle -c config option
"""
def __init__ (self):
"""
"""
# data[objType][objId][dataType]
self.data = { }
# object[objId] = data[objType][objId]
self.object = { }
self.count = { }
def __str__ (self):
"""
"""
#buffer = [ '-----ReadConfigDiag.py-----\n' ]
buffer = [ ]
klist1 = self.data.keys()
klist1.sort()
for tkey in klist1:
#for tkey in self.data.keys():
klist2 = self.data[tkey].keys()
klist2.sort()
for okey in klist2:
#for okey in self.data[tkey].keys():
buffer.append('%s %s %s %s {\n' % (OBJECT, okey, TYPE, tkey))
klist3 = self.data[tkey][okey].keys()
klist3.sort()
for ikey in klist3:
buffer.append('\t%s : %s\n' % (ikey, self.data[tkey][okey][ikey]))
buffer.append('}\n')
return ''.join(buffer)
def readConfig (self, fname):
"""
"""
try:
self.fd = open(fname, 'r')
line = self.fd.readline()
while line:
if line.startswith(OBJECT):
self.readObject(line)
else:
if line.strip():
self.data[OTHER].append(line)
# next line
line = self.fd.readline()
self.fd.close()
except:
raise
def readObject (self, line):
"""read an OBJECT description of format like:
OBJECT th00 TYPE niagara2 {
...
}
"""
tokens = line.split()
if (not tokens[0] == OBJECT) or (not tokens[2] == TYPE):
raise RuntimeError, 'ERROR: wrong format %s' % (line)
else:
key = tokens[1]
type = tokens[3]
if not self.data.has_key(type):
self.data[type] = { }
self.count[type] = 0
if self.data[type].has_key(key):
raise RuntimeError, 'ERROR: %s already defined in %s' % (key, type)
else:
self.count[type] += 1
self.data[type][key] = { }
line = self.fd.readline()
while line.strip() != '}':
if line.strip():
i = line.find(':')
if i > -1:
kword = line[:i].strip()
value = line[i+1:].strip()
self.data[type][key][kword] = value
else:
# continuation data from the previous line
self.data[type][key][kword] += ' ' + line.strip()
line = self.fd.readline()
# when done with the object, create a shortcut
self.object[key] = self.data[type][key]
def getCount (self, type):
"""
"""
if self.count.has_key(type):
return self.count[type]
else:
return 0
def getObjTypes (self):
"""return a list of object types available in the configuration
"""
return self.data.keys()
def getObjIds (self, objType, silent=0):
"""return a list of object ids of the specified type
"""
try:
return self.data[objType].keys()
except Exception, ex:
if not silent:
sys.stderr.write('WARNING: ReadConfigDiag: wrong keyword (%s), ex=%s\n' % (objType, ex))
return []
def getObjKeys (self, objType, objId, silent=0):
"""return a list of data keywords of the specified object type+id
"""
try:
return self.data[objType][objId].keys()
except Exception, ex:
if not silent:
sys.stderr.write('WARNING: ReadConfigDiag: wrong keyword(s) (%s,%s), ex=%s\n' % (objType, objId, ex))
return []
def getObjData (self, objType, objId, key, silent=0):
"""return the data field of the specified object type+id+keyword
"""
try:
data = self.data[objType][objId][key]
if data.startswith('[') and data.endswith(']'):
data = self.convertList(data[1:-1])
return data
except Exception, ex:
if not silent:
sys.stderr.write('WARNING: ReadConfigDiag: wrong keyword(s) (%s,%s,%s), ex=%s\n' % (objType, objId, key, ex))
return None
def getData (self, objId, key, silent=0):
"""return the data field of the specified object id+keyword
"""
try:
data = self.object[objId][key]
if data.startswith('[') and data.endswith(']'):
data = self.convertList(data[1:-1])
return data
except Exception, ex:
if not silent:
sys.stderr.write('WARNING: ReadConfigDiag: wrong keyword(s) (%s,%s), ex=%s\n' % (objId, key, ex))
return None
def setDataLine (self, line):
"""
@conf.mom0.setvar= "THREAD_BASED_STAT=1"
@conf.swvmem0.good_trap = get_addr('\.TRAPS\.T0_GoodTrap_0x100')
=> eval'ed value
@conf.mom0.start_cycle= 1
"""
AT_CONF = '@conf.'
SETVAR = 'setvar'
if line.startswith(AT_CONF):
#sys.stderr.write('DBX: ReadConfigDiag: @conf: %s\n' % (line))
append = 0
line = line[len(AT_CONF):]
i = line.find('.')
objId = line[:i]
j = line.find('=+', i)
if j > -1:
append = 1
else:
j = line.find('=', i)
key = line[i+1:j].strip()
if key == SETVAR:
# "key=value"
if append == 0:
expr = line[j+1:].strip()
else:
expr = line[j+2:].strip()
# strip "
expr = expr[1:-1]
k = expr.find('=')
key = expr[:k].strip()
value = expr[k+1:].strip()
else:
if append == 0:
value = line[j+1:].strip()
else:
value = line[j+2:].strip()
self.setData(objId, key, value, append=append)
else:
sys.stderr.write('WARNING: ReadConfigDiag: wrong %s syntax <%s>\n' % (AT_CONF, line))
def setData (self, objId, key, value, append=0):
"""
"""
#sys.stderr.write('DBX: ReadConfigDiag: objId=%s, key=%s, value=%s, append=%s\n' % (objId, key, value, append))
if not self.object.has_key(objId):
# OBJECT config0 TYPE sys_config {
# # a default system config to store basic system config info
# }
if not self.data.has_key(TYPE_SYS_CONFIG):
self.data[TYPE_SYS_CONFIG] = { }
self.count[TYPE_SYS_CONFIG] = 0
self.count[TYPE_SYS_CONFIG] += 1
self.data[TYPE_SYS_CONFIG][objId] = { }
self.object[objId] = self.data[TYPE_SYS_CONFIG][objId]
try:
if self.object[objId].has_key(key) and append == 0:
sys.stderr.write('WARNING: ReadConfigDiag: overwrite (%s,%s)=%s, new value=%s\n' % (objId, key, self.object[objId][key], value))
if append == 0:
self.object[objId][key] = value
else:
if not self.object[objId].has_key(key):
self.object[objId][key] = [ ]
self.object[objId][key].append(value)
except Exception, ex:
sys.stderr.write('WARNING: ReadConfigDiag: wrong keyword(s) (%s,%s), ex=%s\n' % (objId, key, ex))
def convertList(self, data):
"""convert string (of list syntax) to real list
"""
#sys.stderr.write('#DBX: data=%s\n' % (data)) #DBX
tokens = data.split(',')
datalist = []
for token in tokens:
token = token.strip().strip("'")
#sys.stderr.write('#DBX: token=%s\n' % (token)) #DBX
datalist.append(token)
#sys.stderr.write('#DBX: datalist=%s\n' % (datalist)) #DBX
return datalist
"""self-testing
"""
if __name__ == "__main__":
"""
"""
# unit test here
import sys
reader = ReadConfigDiag()
reader.readConfig(sys.argv[1])
print reader
| gpl-2.0 | -2,575,197,130,050,867,000 | 26.449827 | 130 | 0.607715 | false |
Mbrownshoes/ckanext-bcgov | ckanext/bcgov/scripts/save_orgs.py | 6 | 1656 |
# Copyright 2015, Province of British Columbia
# License: https://github.com/bcgov/ckanext-bcgov/blob/master/license
import json
import urllib2
import urllib
import pprint
from base import (site_url, api_key)
org_filename = './data/orgs_list.json'
data_string = json.dumps({'all_fields' : True})
org_list = []
try :
request = urllib2.Request(site_url + '/api/3/action/organization_list')
request.add_header('Authorization', api_key)
response = urllib2.urlopen(request, data_string)
assert response.code == 200
response_dict = json.loads(response.read())
assert response_dict['success'] is True
org_list = response_dict['result']
# pprint.pprint(user_list)
except Exception, e:
pass
#Create a dictionary of org_name : org_id
#We need this dictionary to get the id of each org when creating organizations
orgs_dict = {}
for org in org_list :
members = []
data_dict = {'id' : org['id'], 'object_type' : 'user'}
data_string = urllib.quote(json.dumps(data_dict))
try :
request = urllib2.Request(site_url + '/api/3/action/member_list')
request.add_header('Authorization', api_key)
response = urllib2.urlopen(request, data_string)
assert response.code == 200
response_dict = json.loads(response.read())
assert response_dict['success'] is True
members = response_dict['result']
# pprint.pprint(user_list)
except Exception, e:
pass
org_dict = {'id' : org['id'], 'members' : members}
orgs_dict[org['name']] = org_dict
with open(org_filename, 'w') as org_file :
org_file.write(json.dumps(orgs_dict))
| agpl-3.0 | -2,526,254,688,868,610,600 | 27.067797 | 78 | 0.657609 | false |
surbas/dtwp-sfwporn | desktop_env.py | 1 | 3193 |
from __future__ import division
import logging
import sys
import ctypes
import _winreg as winreg
logger = logging.getLogger(__name__)
class DesktopEnvironment(object):
def __init__(self):
self._ar = None
@staticmethod
def determin_desktop_env():
if sys.platform == 'win32':
return 'win32'
else:
return 'unknown'
@staticmethod
def get_current_desktop_env():
dt_env = DesktopEnvironment.determin_desktop_env()
if dt_env == 'win32':
return WindowsDesktopEnviroment()
def get_desktop_size(self):
raise NotImplementedError('Please get a supported DesktopEnviroment class by calling get_current_desktop_env()')
def get_desktop_aspect_ratio(self):
raise NotImplementedError('Please get a supported DesktopEnviroment class by calling get_current_desktop_env()')
def set_wallpaper(self, file_path, style):
#going to want to translate styles to common name.
return self._set_wallpaper(file_path, style)
class WindowsDesktopEnviroment(DesktopEnvironment):
#Consts
SPI_SETDESKWALLPAPER = 20
SPIF_UPDATEINIFILE = 1
SPIF_SENDCHANGE = 2
SM_CXSCREEN = 0
SM_CYSCREEN = 1
def __init__(self):
return super(WindowsDesktopEnviroment, self).__init__()
def get_desktop_size(self):
return ctypes.windll.user32.GetSystemMetrics(self.SM_CXSCREEN), ctypes.windll.user32.GetSystemMetrics(self.SM_CYSCREEN)
def get_desktop_aspect_ratio(self):
if self._ar is None:
size = self.get_desktop_size()
self._ar = size[0]/size[1]
return self._ar
def _set_wallpaper(self, file_path, style):
"""Modeled on http://code.msdn.microsoft.com/windowsdesktop/CSSetDesktopWallpaper-2107409c/sourcecode?fileId=21700&pathId=734742078"""
if style == 'center':
tw = '0'
wps = '0'
elif style == 'tile':
tw = '1'
wps = '0'
elif style == 'stretch':
tw = '0'
wps = '2'
elif style == 'fill':
tw = '0'
wps = '6'
elif style == 'fit':
tw = '0'
wps = '10'
else:
raise ValueError('{} is not supported!'.format(style))
k = winreg.OpenKey(winreg.HKEY_CURRENT_USER, r'Control Panel\Desktop', 0, winreg.KEY_ALL_ACCESS)
winreg.SetValueEx(k, 'TileWallpaper', 0, winreg.REG_SZ, tw)
winreg.SetValueEx(k, 'WallpaperStyle', 0, winreg.REG_SZ, wps)
winreg.CloseKey(k)
#see http://msdn.microsoft.com/en-us/library/windows/desktop/ms724947%28v=vs.85%29.aspx
rtn = ctypes.windll.user32.SystemParametersInfoA(self.SPI_SETDESKWALLPAPER, 0, file_path,
self.SPIF_UPDATEINIFILE + self.SPIF_SENDCHANGE)
if not rtn:
logger.debug("GetLastError: %s", ctypes.GetLastError())
raise ctypes.WinError()
logger.debug("rtn: %s", rtn) | bsd-3-clause | -1,233,598,089,527,114,200 | 31.927835 | 142 | 0.574695 | false |
andrewyoung1991/scons | test/scons-time/time/no-result.py | 5 | 2270 |
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify that the time subcommand's --which option doesn't fail, and prints
an appropriate error message, if a log file doesn't have its specific
requested results.
"""
import TestSCons_time
test = TestSCons_time.TestSCons_time()
header = """\
set key bottom left
plot '-' title "Startup" with lines lt 1
# Startup
"""
footer = """\
e
"""
line_fmt = "%s 11.123456\n"
lines = []
for i in range(9):
logfile_name = 'foo-%s-0.log' % i
if i == 5:
test.write(test.workpath(logfile_name), "NO RESULTS HERE!\n")
else:
test.fake_logfile(logfile_name)
lines.append(line_fmt % i)
expect = [header] + lines + [footer]
stderr = "file 'foo-5-0.log' has no results!\n"
test.run(arguments = 'time --fmt gnuplot --which total foo*.log',
stdout = ''.join(expect),
stderr = stderr)
expect = [header] + [footer]
test.run(arguments = 'time --fmt gnuplot foo-5-0.log',
stdout = ''.join(expect),
stderr = stderr)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit | -5,335,535,453,959,826,000 | 27.024691 | 73 | 0.696035 | false |
analyst-collective/dbt | test/integration/007_graph_selection_tests/test_graph_selection.py | 1 | 15454 |
from test.integration.base import DBTIntegrationTest, use_profile
import yaml
import json
import os
class TestGraphSelection(DBTIntegrationTest):
@property
def schema(self):
return "graph_selection_tests_007"
@property
def models(self):
return "models"
@property
def selectors_config(self):
return yaml.safe_load('''
selectors:
- name: bi_selector
description: This is a BI selector
definition:
method: tag
value: bi
''')
def assert_correct_schemas(self):
with self.get_connection():
exists = self.adapter.check_schema_exists(
self.default_database,
self.unique_schema()
)
self.assertTrue(exists)
schema = self.unique_schema()+'_and_then'
exists = self.adapter.check_schema_exists(
self.default_database,
schema
)
self.assertFalse(exists)
@use_profile('postgres')
def test__postgres__specific_model(self):
self.run_sql_file("seed.sql")
results = self.run_dbt(['run', '--models', 'users'])
self.assertEqual(len(results), 1)
self.assertTablesEqual("seed", "users")
created_models = self.get_models_in_schema()
self.assertFalse('users_rollup' in created_models)
self.assertFalse('base_users' in created_models)
self.assertFalse('emails' in created_models)
self.assert_correct_schemas()
@use_profile('postgres')
def test__postgres__tags(self):
self.run_sql_file("seed.sql")
results = self.run_dbt(['run', '--selector', 'bi_selector'])
self.assertEqual(len(results), 2)
created_models = self.get_models_in_schema()
self.assertFalse('base_users' in created_models)
self.assertFalse('emails' in created_models)
self.assertTrue('users' in created_models)
self.assertTrue('users_rollup' in created_models)
self.assert_correct_schemas()
self.assertTrue(os.path.exists('./target/manifest.json'))
with open('./target/manifest.json') as fp:
manifest = json.load(fp)
self.assertTrue('selectors' in manifest)
@use_profile('postgres')
def test__postgres__tags_and_children(self):
self.run_sql_file("seed.sql")
results = self.run_dbt(['run', '--models', 'tag:base+'])
self.assertEqual(len(results), 4)
created_models = self.get_models_in_schema()
self.assertFalse('base_users' in created_models)
self.assertFalse('emails' in created_models)
self.assertIn('emails_alt', created_models)
self.assertTrue('users_rollup' in created_models)
self.assertTrue('users' in created_models)
self.assert_correct_schemas()
@use_profile('postgres')
def test__postgres__tags_and_children_limited(self):
self.run_sql_file("seed.sql")
results = self.run_dbt(['run', '--models', 'tag:base+2'])
self.assertEqual(len(results), 3)
created_models = self.get_models_in_schema()
self.assertFalse('base_users' in created_models)
self.assertFalse('emails' in created_models)
self.assertIn('emails_alt', created_models)
self.assertIn('users_rollup', created_models)
self.assertIn('users', created_models)
self.assertNotIn('users_rollup_dependency', created_models)
self.assert_correct_schemas()
@use_profile('snowflake')
def test__snowflake__specific_model(self):
self.run_sql_file("seed.sql")
results = self.run_dbt(['run', '--models', 'users'])
self.assertEqual(len(results), 1)
self.assertTablesEqual("SEED", "USERS")
created_models = self.get_models_in_schema()
self.assertFalse('USERS_ROLLUP' in created_models)
self.assertFalse('BASE_USERS' in created_models)
self.assertFalse('EMAILS' in created_models)
self.assert_correct_schemas()
@use_profile('postgres')
def test__postgres__specific_model_and_children(self):
self.run_sql_file("seed.sql")
results = self.run_dbt(['run', '--models', 'users+'])
self.assertEqual(len(results), 4)
self.assertTablesEqual("seed", "users")
self.assertTablesEqual("summary_expected", "users_rollup")
created_models = self.get_models_in_schema()
self.assertIn('emails_alt', created_models)
self.assertNotIn('base_users', created_models)
self.assertNotIn('emails', created_models)
self.assert_correct_schemas()
@use_profile('snowflake')
def test__snowflake__specific_model_and_children(self):
self.run_sql_file("seed.sql")
results = self.run_dbt(['run', '--models', 'users+'])
self.assertEqual(len(results), 4)
self.assertManyTablesEqual(
["SEED", "USERS"],
["SUMMARY_EXPECTED", "USERS_ROLLUP"]
)
created_models = self.get_models_in_schema()
self.assertFalse('BASE_USERS' in created_models)
self.assertFalse('EMAILS' in created_models)
@use_profile('postgres')
def test__postgres__specific_model_and_children_limited(self):
self.run_sql_file("seed.sql")
results = self.run_dbt(['run', '--models', 'users+1'])
self.assertEqual(len(results), 3)
self.assertTablesEqual("seed", "users")
self.assertTablesEqual("summary_expected", "users_rollup")
created_models = self.get_models_in_schema()
self.assertIn('emails_alt', created_models)
self.assertNotIn('base_users', created_models)
self.assertNotIn('emails', created_models)
self.assertNotIn('users_rollup_dependency', created_models)
self.assert_correct_schemas()
@use_profile('postgres')
def test__postgres__specific_model_and_parents(self):
self.run_sql_file("seed.sql")
results = self.run_dbt(['run', '--models', '+users_rollup'])
self.assertEqual(len(results), 2)
self.assertTablesEqual("seed", "users")
self.assertTablesEqual("summary_expected", "users_rollup")
created_models = self.get_models_in_schema()
self.assertFalse('base_users' in created_models)
self.assertFalse('emails' in created_models)
self.assert_correct_schemas()
@use_profile('snowflake')
def test__snowflake__specific_model_and_parents(self):
self.run_sql_file("seed.sql")
results = self.run_dbt(['run', '--models', '+users_rollup'])
self.assertEqual(len(results), 2)
self.assertManyTablesEqual(
["SEED", "USERS"],
["SUMMARY_EXPECTED", "USERS_ROLLUP"]
)
created_models = self.get_models_in_schema()
self.assertFalse('BASE_USERS' in created_models)
self.assertFalse('EMAILS' in created_models)
@use_profile('postgres')
def test__postgres__specific_model_and_parents_limited(self):
self.run_sql_file("seed.sql")
results = self.run_dbt(['run', '--models', '1+users_rollup'])
self.assertEqual(len(results), 2)
self.assertTablesEqual("seed", "users")
self.assertTablesEqual("summary_expected", "users_rollup")
created_models = self.get_models_in_schema()
self.assertFalse('base_users' in created_models)
self.assertFalse('emails' in created_models)
self.assert_correct_schemas()
@use_profile('postgres')
def test__postgres__specific_model_with_exclusion(self):
self.run_sql_file("seed.sql")
results = self.run_dbt(
['run', '--models', '+users_rollup', '--exclude', 'models/users_rollup.sql']
)
self.assertEqual(len(results), 1)
self.assertTablesEqual("seed", "users")
created_models = self.get_models_in_schema()
self.assertFalse('base_users' in created_models)
self.assertFalse('users_rollup' in created_models)
self.assertFalse('emails' in created_models)
self.assert_correct_schemas()
@use_profile('snowflake')
def test__snowflake__specific_model_with_exclusion(self):
self.run_sql_file("seed.sql")
results = self.run_dbt(
['run', '--models', '+users_rollup', '--exclude', 'users_rollup']
)
self.assertEqual(len(results), 1)
self.assertManyTablesEqual(["SEED", "USERS"])
created_models = self.get_models_in_schema()
self.assertFalse('BASE_USERS' in created_models)
self.assertFalse('USERS_ROLLUP' in created_models)
self.assertFalse('EMAILS' in created_models)
@use_profile('postgres')
def test__postgres__locally_qualified_name(self):
results = self.run_dbt(['run', '--models', 'test.subdir'])
self.assertEqual(len(results), 2)
created_models = self.get_models_in_schema()
self.assertNotIn('users_rollup', created_models)
self.assertNotIn('base_users', created_models)
self.assertNotIn('emails', created_models)
self.assertIn('subdir', created_models)
self.assertIn('nested_users', created_models)
self.assert_correct_schemas()
results = self.run_dbt(['run', '--models', 'models/test/subdir*'])
self.assertEqual(len(results), 2)
created_models = self.get_models_in_schema()
self.assertNotIn('users_rollup', created_models)
self.assertNotIn('base_users', created_models)
self.assertNotIn('emails', created_models)
self.assertIn('subdir', created_models)
self.assertIn('nested_users', created_models)
self.assert_correct_schemas()
@use_profile('postgres')
def test__postgres__childrens_parents(self):
self.run_sql_file("seed.sql")
results = self.run_dbt(['run', '--models', '@base_users'])
self.assertEqual(len(results), 4)
created_models = self.get_models_in_schema()
self.assertIn('users_rollup', created_models)
self.assertIn('users', created_models)
self.assertIn('emails_alt', created_models)
self.assertNotIn('subdir', created_models)
self.assertNotIn('nested_users', created_models)
results = self.run_dbt(
['test', '--models', 'test_name:not_null'],
)
self.assertEqual(len(results), 1)
assert results[0].node.name == 'not_null_emails_email'
@use_profile('postgres')
def test__postgres__more_childrens_parents(self):
self.run_sql_file("seed.sql")
results = self.run_dbt(['run', '--models', '@users'])
# users, emails_alt, users_rollup, users_rollup_dependency, but not base_users (ephemeral)
self.assertEqual(len(results), 4)
created_models = self.get_models_in_schema()
self.assertIn('users_rollup', created_models)
self.assertIn('users', created_models)
self.assertIn('emails_alt', created_models)
self.assertNotIn('subdir', created_models)
self.assertNotIn('nested_users', created_models)
results = self.run_dbt(
['test', '--models', 'test_name:unique'],
)
self.assertEqual(len(results), 2)
assert sorted([r.node.name for r in results]) == ['unique_users_id', 'unique_users_rollup_gender']
@use_profile('snowflake')
def test__snowflake__skip_intermediate(self):
self.run_sql_file("seed.sql")
results = self.run_dbt(['run', '--models', '@models/users.sql'])
# base_users, emails, users_rollup, users_rollup_dependency
self.assertEqual(len(results), 4)
# now re-run, skipping users_rollup
results = self.run_dbt(['run', '--models', '@users', '--exclude', 'users_rollup'])
self.assertEqual(len(results), 3)
# make sure that users_rollup_dependency and users don't interleave
users = [r for r in results if r.node.name == 'users'][0]
dep = [r for r in results if r.node.name == 'users_rollup_dependency'][0]
user_last_end = users.timing[1].completed_at
dep_first_start = dep.timing[0].started_at
self.assertTrue(
user_last_end <= dep_first_start,
'dependency started before its transitive parent ({} > {})'.format(user_last_end, dep_first_start)
)
@use_profile('postgres')
def test__postgres__concat(self):
self.run_sql_file("seed.sql")
results = self.run_dbt(['run', '--models', '@emails_alt', 'users_rollup'])
# users, emails_alt, users_rollup
self.assertEqual(len(results), 3)
created_models = self.get_models_in_schema()
self.assertIn('users_rollup', created_models)
self.assertIn('users', created_models)
self.assertIn('emails_alt', created_models)
self.assertNotIn('subdir', created_models)
self.assertNotIn('nested_users', created_models)
@use_profile('postgres')
def test__postgres__concat_exclude(self):
self.run_sql_file("seed.sql")
results = self.run_dbt(['run', '--models', '@emails_alt', 'users_rollup', '--exclude', 'emails_alt'])
# users, users_rollup
self.assertEqual(len(results), 2)
created_models = self.get_models_in_schema()
self.assertIn('users', created_models)
self.assertIn('users_rollup', created_models)
self.assertNotIn('emails_alt', created_models)
self.assertNotIn('subdir', created_models)
self.assertNotIn('nested_users', created_models)
@use_profile('postgres')
def test__postgres__concat_exclude_concat(self):
self.run_sql_file("seed.sql")
results = self.run_dbt(
['run', '--models', '@emails_alt', 'users_rollup', '--exclude', 'emails_alt', 'users_rollup']
)
# users
self.assertEqual(len(results), 1)
created_models = self.get_models_in_schema()
self.assertIn('users', created_models)
self.assertNotIn('emails_alt', created_models)
self.assertNotIn('users_rollup', created_models)
self.assertNotIn('subdir', created_models)
self.assertNotIn('nested_users', created_models)
results = self.run_dbt(
['test', '--models', '@emails_alt', 'users_rollup', '--exclude', 'emails_alt', 'users_rollup']
)
self.assertEqual(len(results), 1)
assert results[0].node.name == 'unique_users_id'
@use_profile('postgres')
def test__postgres__exposure_parents(self):
self.run_sql_file("seed.sql")
results = self.run_dbt(['ls', '--select', '+exposure:seed_ml_exposure'])
assert len(results) == 2
assert sorted(results) == ['exposure:test.seed_ml_exposure', 'source:test.raw.seed']
results = self.run_dbt(['ls', '--select', '1+exposure:user_exposure'])
assert len(results) == 3
assert sorted(results) == ['exposure:test.user_exposure', 'test.users', 'test.users_rollup']
self.run_dbt(['run', '-m', '+exposure:user_exposure'])
# users, users_rollup
assert len(results) == 3
created_models = self.get_models_in_schema()
self.assertIn('users_rollup', created_models)
self.assertIn('users', created_models)
self.assertNotIn('emails_alt', created_models)
self.assertNotIn('subdir', created_models)
self.assertNotIn('nested_users', created_models)
| apache-2.0 | 5,779,511,921,140,154,000 | 37.73183 | 110 | 0.616992 | false |
tareqalayan/ansible | lib/ansible/modules/storage/purestorage/purefb_fs.py | 38 | 9813 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Simon Dodsley (simon@purestorage.com)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: purefb_fs
version_added: "2.6"
short_description: Manage filesystem on Pure Storage FlashBlade
description:
- This module manages filesystems on Pure Storage FlashBlade.
author: Simon Dodsley (@sdodsley)
options:
name:
description:
- Filesystem Name.
required: true
state:
description:
- Create, delete or modify a filesystem.
required: false
default: present
choices: [ "present", "absent" ]
eradicate:
description:
- Define whether to eradicate the filesystem on delete or leave in trash.
required: false
type: bool
default: false
size:
description:
- Volume size in M, G, T or P units. See examples.
required: false
default: 32G
nfs:
description:
- Define whether the NFS protocol is enabled for the filesystem.
required: false
type: bool
default: true
nfs_rules:
description:
- Define the NFS rules in operation.
required: false
default: '*(rw,no_root_squash)'
smb:
description:
- Define whether the SMB protocol is enabled for the filesystem.
required: false
type: bool
default: false
http:
description:
- Define whether the HTTP/HTTPS protocol is enabled for the filesystem.
required: false
type: bool
default: false
snapshot:
description:
- Define whether a snapshot directory is enabled for the filesystem.
required: false
type: bool
default: false
fastremove:
description:
- Define whether the fast remove directory is enabled for the filesystem.
required: false
type: bool
default: false
extends_documentation_fragment:
- purestorage.fb
'''
EXAMPLES = '''
- name: Create new filesystem named foo
purefb_fs:
name: foo
size: 1T
state: present
fb_url: 10.10.10.2
api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
- name: Delete filesystem named foo
purefb_fs:
name: foo
state: absent
fb_url: 10.10.10.2
api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
- name: Recover filesystem named foo
purefb_fs:
name: foo
state: present
fb_url: 10.10.10.2
api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
- name: Eradicate filesystem named foo
purefb_fs:
name: foo
state: absent
eradicate: true
fb_url: 10.10.10.2
api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
- name: Modify attributes of an existing filesystem named foo
purefb_fs:
name: foo
size: 2T
nfs : true
nfs_rules: '*(ro)'
snapshot: true
fastremove: true
smb: true
state: present
fb_url: 10.10.10.2
api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641'''
RETURN = '''
'''
HAS_PURITY_FB = True
try:
from purity_fb import FileSystem, ProtocolRule, NfsRule
except ImportError:
HAS_PURITY_FB = False
from ansible.module_utils.basic import AnsibleModule, human_to_bytes
from ansible.module_utils.pure import get_blade, purefb_argument_spec
def get_fs(module, blade):
"""Return Filesystem or None"""
fs = []
fs.append(module.params['name'])
try:
res = blade.file_systems.list_file_systems(names=fs)
return res.items[0]
except:
return None
def create_fs(module, blade):
"""Create Filesystem"""
if not module.params['size']:
module.params['size'] = '32G'
size = human_to_bytes(module.params['size'])
if not module.check_mode:
try:
fs_obj = FileSystem(name=module.params['name'],
provisioned=size,
fast_remove_directory_enabled=module.params['fastremove'],
snapshot_directory_enabled=module.params['snapshot'],
nfs=NfsRule(enabled=module.params['nfs'], rules=module.params['nfs_rules']),
smb=ProtocolRule(enabled=module.params['smb']),
http=ProtocolRule(enabled=module.params['http'])
)
blade.file_systems.create_file_systems(fs_obj)
changed = True
except:
changed = False
module.exit_json(changed=changed)
def modify_fs(module, blade):
"""Modify Filesystem"""
changed = False
attr = {}
if not module.check_mode:
fs = get_fs(module, blade)
if fs.destroyed:
attr['destroyed'] = False
changed = True
if module.params['size']:
if human_to_bytes(module.params['size']) > fs.provisioned:
attr['provisioned'] = human_to_bytes(module.params['size'])
changed = True
if module.params['nfs'] and not fs.nfs.enabled:
attr['nfs'] = NfsRule(enabled=module.params['nfs'])
changed = True
if not module.params['nfs'] and fs.nfs.enabled:
attr['nfs'] = NfsRule(enabled=module.params['nfs'])
changed = True
if module.params['nfs'] and fs.nfs.enabled:
if fs.nfs.rules != module.params['nfs_rules']:
attr['nfs'] = NfsRule(rules=module.params['nfs_rules'])
changed = True
if module.params['smb'] and not fs.smb.enabled:
attr['smb'] = ProtocolRule(enabled=module.params['smb'])
changed = True
if not module.params['smb'] and fs.smb.enabled:
attr['smb'] = ProtocolRule(enabled=module.params['smb'])
changed = True
if module.params['http'] and not fs.http.enabled:
attr['http'] = ProtocolRule(enabled=module.params['http'])
changed = True
if not module.params['http'] and fs.http.enabled:
attr['http'] = ProtocolRule(enabled=module.params['http'])
changed = True
if module.params['snapshot'] and not fs.snapshot_directory_enabled:
attr['snapshot_directory_enabled'] = module.params['snapshot']
changed = True
if not module.params['snapshot'] and fs.snapshot_directory_enabled:
attr['snapshot_directory_enabled'] = module.params['snapshot']
changed = True
if module.params['fastremove'] and not fs.fast_remove_directory_enabled:
attr['fast_remove_directory_enabled'] = module.params['fastremove']
changed = True
if not module.params['fastremove'] and fs.fast_remove_directory_enabled:
attr['fast_remove_directory_enabled'] = module.params['fastremove']
changed = True
if changed:
n_attr = FileSystem(**attr)
try:
blade.file_systems.update_file_systems(name=module.params['name'], attributes=n_attr)
except:
changed = False
module.exit_json(changed=changed)
def delete_fs(module, blade):
""" Delete Filesystem"""
if not module.check_mode:
try:
blade.file_systems.update_file_systems(name=module.params['name'],
attributes=FileSystem(nfs=NfsRule(enabled=False),
smb=ProtocolRule(enabled=False),
http=ProtocolRule(enabled=False),
destroyed=True)
)
changed = True
if module.params['eradicate']:
try:
blade.file_systems.delete_file_systems(module.params['name'])
changed = True
except:
changed = False
except:
changed = False
module.exit_json(changed=changed)
def eradicate_fs(module, blade):
""" Eradicate Filesystem"""
if not module.check_mode:
try:
blade.file_systems.delete_file_systems(module.params['name'])
changed = True
except:
changed = False
module.exit_json(changed=changed)
def main():
argument_spec = purefb_argument_spec()
argument_spec.update(
dict(
name=dict(required=True),
eradicate=dict(default='false', type='bool'),
nfs=dict(default='true', type='bool'),
nfs_rules=dict(default='*(rw,no_root_squash)'),
smb=dict(default='false', type='bool'),
http=dict(default='false', type='bool'),
snapshot=dict(default='false', type='bool'),
fastremove=dict(default='false', type='bool'),
state=dict(default='present', choices=['present', 'absent']),
size=dict()
)
)
module = AnsibleModule(argument_spec,
supports_check_mode=True)
if not HAS_PURITY_FB:
module.fail_json(msg='purity_fb sdk is required for this module')
state = module.params['state']
blade = get_blade(module)
fs = get_fs(module, blade)
if state == 'present' and not fs:
create_fs(module, blade)
elif state == 'present' and fs:
modify_fs(module, blade)
elif state == 'absent' and fs and not fs.destroyed:
delete_fs(module, blade)
elif state == 'absent' and fs and fs.destroyed:
eradicate_fs(module, blade)
elif state == 'absent' and not fs:
module.exit_json(changed=False)
if __name__ == '__main__':
main()
| gpl-3.0 | 1,289,447,438,950,408,400 | 30.86039 | 108 | 0.586671 | false |
thesgc/chembiohub_ws | chembl_business_model/models/experimentalData.py | 1 | 3525 |
__author__ = 'mnowotka'
import chembl_core_model.models as core
#-----------------------------------------------------------------------------------------------------------------------
class RelationshipType(core.RelationshipType):
#haystack_index = ['relationship_type', 'relationship_desc']
api_exclude = []
class Meta:
proxy = True
app_label = 'chembl_business_model'
#-----------------------------------------------------------------------------------------------------------------------
class ConfidenceScoreLookup(core.ConfidenceScoreLookup):
#haystack_index = ['confidence_score', 'description', 'target_mapping']
api_exclude = []
class Meta:
proxy = True
app_label = 'chembl_business_model'
#-----------------------------------------------------------------------------------------------------------------------
class CurationLookup(core.CurationLookup):
api_exclude = []
class Meta:
proxy = True
app_label = 'chembl_business_model'
#-----------------------------------------------------------------------------------------------------------------------
class AssayType(core.AssayType):
api_exclude = []
class Meta:
proxy = True
app_label = 'chembl_business_model'
#-----------------------------------------------------------------------------------------------------------------------
class Assays(core.Assays):
haystack_index = ['description']
api_exclude = []
class Meta:
proxy = True
app_label = 'chembl_business_model'
#-----------------------------------------------------------------------------------------------------------------------
class DataValidityLookup(core.DataValidityLookup):
#haystack_index = ['activity_type', 'relation']
api_exclude = []
class Meta:
proxy = True
app_label = 'chembl_business_model'
#-----------------------------------------------------------------------------------------------------------------------
class ParameterType(core.ParameterType):
api_exclude = []
class Meta:
proxy = True
app_label = 'chembl_business_model'
#-----------------------------------------------------------------------------------------------------------------------
class AssayParameters(core.AssayParameters):
api_exclude = []
class Meta:
proxy = True
app_label = 'chembl_business_model'
#-----------------------------------------------------------------------------------------------------------------------
class Activities(core.Activities):
# haystack_index = ['activity_type', 'relation', 'published_value', 'published_units', 'standard_value',
# 'standard_units', 'standard_flag', 'standard_type', 'activity_comment',
# 'published_activity_type', 'manual_curation_flag', 'data_validity_comment', 'potential_duplicate']
api_exclude = []
class Meta:
proxy = True
app_label = 'chembl_business_model'
#-----------------------------------------------------------------------------------------------------------------------
class ActivityStdsLookup(core.ActivityStdsLookup):
#haystack_index = ['standard_type', 'definition', 'standard_units', 'normal_range_min', 'normal_range_max']
api_exclude = []
class Meta:
proxy = True
app_label = 'chembl_business_model'
#-----------------------------------------------------------------------------------------------------------------------
| gpl-3.0 | 3,141,269,649,631,003,000 | 30.20354 | 120 | 0.396879 | false |
agostodev/substrate | app/lib/substrate/agar/config.py | 5 | 4572 |
"""
The ``agar.config`` module contains a class to help work with the `google.appengine.api.lib_config`_ configuration library.
"""
import threading
from google.appengine.api import lib_config
class Config(object):
"""
Configurable constants base class for use with the excellent `google.appengine.api.lib_config`_
configuration library.
To use this class, create a subclass that redefines :py:attr:`~agar.config.Config._prefix` to the appengine_config prefix you'd like the
configs to appear under. Then, simply create class-level properties/functions/default values for each constant.
When instantiating an instance of this class, you can override the default values for that instance by passing
in new defaults via the constructor. Of course, if there is an entry in ``appengine_config.py`` for your constant, that
value will supersede any defined in the class or passed in via the constructor.
Example subclass::
class SampleConfig(Config):
_prefix = 'test'
STRING_CONFIG = 'defaultstring'
Example usage::
>>> config = SampleConfig.get_config()
>>> custom_config = SampleConfig.get_config(STRING_CONFIG='customstring')
Assuming there is no override for ``test_STRING_CONFIG`` in ``appengine_config.py``::
>>> config.STRING_CONFIG == 'defaultstring'
True
>>> custom_config.STRING_CONFIG == 'customstring'
True
Assuming ``appengine_config.py`` contains the following line::
test_STRING_CONFIG = 'settingstring'
Then::
>>> config.STRING_CONFIG == custom_config.STRING_CONFIG == 'settingstring'
True
"""
_config = None
_config_lock = threading.RLock()
#: The appengine_config prefix that the configs should appear under. Override in subclasses. The default is ``agar``.
_prefix = 'agar'
def __init__(self, **kwargs):
self.defaults = {}
for setting in self.__class__.__dict__.keys():
if not setting.startswith('_'):
self.defaults[setting] = self.__class__.__dict__[setting]
for key in kwargs.keys():
if key in self.defaults.keys():
self.defaults[key] = kwargs[key]
else:
raise AttributeError('Invalid config key: %s' % key)
def __iter__(self):
c = {}
config = self.get_config()
for key in config._defaults:
c[key] = config.__getattr__(key)
return c
@classmethod
def get_config(cls, _cache=False, **kwargs):
"""
Registers and returns the `google.appengine.api.lib_config`_ ``ConfigHandle`` for the class. Keyword arguments
will override default values defined in the :py:class:`~agar.config.Config` subclass (but, of course,
will still defer to values in the ``appengine_config.py`` file).
The ``ConfigHandle`` is cached on the class level after the first call to this method.
:param _cache: If ``True``, get from and (if necessary) set the class-level cached config. Note that if you are
passing in ``kwargs`` and the config comes out of the cache, your override values may not be applied
(Default: ``False``).
:param kwargs: Defaults to use for the config instance. Values in ``appengine_config.py`` will still override
any values you specify.
:return: The `google.appengine.api.lib_config`_ ``ConfigHandle`` for the class.
"""
if _cache:
with cls._config_lock:
if not cls._config:
cls._config = lib_config.register(cls._prefix, cls(**kwargs).defaults)
else:
return lib_config.register(cls._prefix, cls(**kwargs).defaults)
return cls._config
@classmethod
def get_config_as_dict(cls, **kwargs):
"""
Registers the `google.appengine.api.lib_config`_ ``ConfigHandle`` and returns its settings as a ``dict``.
Keyword arguments will override default values defined in the :py:class:`~agar.config.Config` subclass
(but, of course, will still defer to values in the ``appengine_config.py`` file).
:param kwargs: Defaults to use for the config instance. Values in ``appengine_config.py`` will still override
any values you specify.
:return: A ``dict`` of the configurations.
"""
c = {}
config = cls.get_config(**kwargs)
for key in config._defaults:
c[key] = config.__getattr__(key)
return c
| mit | -8,496,958,380,895,089,000 | 39.821429 | 140 | 0.632108 | false |
DanielSlater/PyGamePlayer | tests/test_pygame_player.py | 2 | 3064 |
import time
import pygame
from unittest import TestCase
from pygame_player import PyGamePlayer
class DummyPyGamePlayer(PyGamePlayer):
def __init__(self, force_game_fps=10, run_real_time=False):
super(DummyPyGamePlayer, self).__init__(force_game_fps=force_game_fps, run_real_time=run_real_time)
def get_keys_pressed(self, screen_array, feedback, terminal):
pass
def get_feedback(self):
return 0.0, False
class TestPyGamePlayer(TestCase):
DISPLAY_X = 1
DISPLAY_Y = 1
def setUp(self):
pygame.init()
pygame.display.set_mode((self.DISPLAY_X, self.DISPLAY_Y), 0, 32)
def tearDown(self):
pygame.quit()
def test_restores_pygame_methods_after_exit(self):
pygame_flip, pygame_update, pygame_event = pygame.display.flip, pygame.display.update, pygame.event.get
with PyGamePlayer():
# methods should be replaced
self.assertNotEqual(pygame_flip, pygame.display.flip)
self.assertNotEqual(pygame_update, pygame.display.update)
self.assertNotEqual(pygame_event, pygame.event.get)
# original methods should be restored
self.assertEqual(pygame_flip, pygame.display.flip)
self.assertEqual(pygame_update, pygame.display.update)
self.assertEqual(pygame_event, pygame.event.get)
def test_fixing_frames_per_second(self):
fix_fps_to = 3
with DummyPyGamePlayer(force_game_fps=fix_fps_to):
clock = pygame.time.Clock()
start_time_ms = clock.get_time()
for _ in range(fix_fps_to):
pygame.display.update()
end_time_ms = clock.get_time()
self.assertAlmostEqual(end_time_ms - start_time_ms, 1000.0,
msg='Expected only 1000 milliseconds to have passed on the clock after screen updates')
def test_get_keys_pressed_method_sets_event_get(self):
fixed_key_pressed = 24
class FixedKeysReturned(DummyPyGamePlayer):
def get_keys_pressed(self, screen_array, feedback, terminal):
return [fixed_key_pressed]
with FixedKeysReturned():
pygame.display.update()
key_pressed = pygame.event.get()
self.assertEqual(key_pressed[0].key, fixed_key_pressed)
def test_get_screen_buffer(self):
class TestScreenArray(DummyPyGamePlayer):
def get_keys_pressed(inner_self, screen_array, feedback, terminal):
self.assertEqual(screen_array.shape[0], self.DISPLAY_X)
self.assertEqual(screen_array.shape[1], self.DISPLAY_Y)
with TestScreenArray():
pygame.display.update()
def test_run_real_time(self):
fix_fps_to = 3
with PyGamePlayer(force_game_fps=fix_fps_to, run_real_time=True):
start = time.time()
clock = pygame.time.Clock()
for _ in range(fix_fps_to):
clock.tick(42343)
end = time.time()
self.assertAlmostEqual(end-start, 1.0, delta=0.1)
| mit | -3,096,659,898,672,910,000 | 32.67033 | 118 | 0.629896 | false |
huggingface/pytorch-transformers | tests/test_pipelines_table_question_answering.py | 2 | 11693 |
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers.pipelines import Pipeline, pipeline
from transformers.testing_utils import require_pandas, require_torch, require_torch_scatter, slow
from .test_pipelines_common import CustomInputPipelineCommonMixin
@require_torch_scatter
@require_torch
@require_pandas
class TQAPipelineTests(CustomInputPipelineCommonMixin, unittest.TestCase):
pipeline_task = "table-question-answering"
pipeline_running_kwargs = {
"padding": "max_length",
}
small_models = [
"lysandre/tiny-tapas-random-wtq",
"lysandre/tiny-tapas-random-sqa",
]
large_models = ["google/tapas-base-finetuned-wtq"] # Models tested with the @slow decorator
valid_inputs = [
{
"table": {
"actors": ["brad pitt", "leonardo di caprio", "george clooney"],
"age": ["56", "45", "59"],
"number of movies": ["87", "53", "69"],
"date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
},
"query": "how many movies has george clooney played in?",
},
{
"table": {
"actors": ["brad pitt", "leonardo di caprio", "george clooney"],
"age": ["56", "45", "59"],
"number of movies": ["87", "53", "69"],
"date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
},
"query": ["how many movies has george clooney played in?", "how old is he?", "what's his date of birth?"],
},
{
"table": {
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
},
"query": [
"What repository has the largest number of stars?",
"Given that the numbers of stars defines if a repository is active, what repository is the most active?",
"What is the number of repositories?",
"What is the average number of stars?",
"What is the total amount of stars?",
],
},
]
def _test_pipeline(self, table_querier: Pipeline):
output_keys = {"answer", "coordinates", "cells"}
valid_inputs = self.valid_inputs
invalid_inputs = [
{"query": "What does it do with empty context ?", "table": ""},
{"query": "What does it do with empty context ?", "table": None},
]
self.assertIsNotNone(table_querier)
mono_result = table_querier(valid_inputs[0])
self.assertIsInstance(mono_result, dict)
for key in output_keys:
self.assertIn(key, mono_result)
multi_result = table_querier(valid_inputs)
self.assertIsInstance(multi_result, list)
for result in multi_result:
self.assertIsInstance(result, (list, dict))
for result in multi_result:
if isinstance(result, list):
for _result in result:
for key in output_keys:
self.assertIn(key, _result)
else:
for key in output_keys:
self.assertIn(key, result)
for bad_input in invalid_inputs:
self.assertRaises(ValueError, table_querier, bad_input)
self.assertRaises(ValueError, table_querier, invalid_inputs)
def test_aggregation(self):
table_querier = pipeline(
"table-question-answering",
model="lysandre/tiny-tapas-random-wtq",
tokenizer="lysandre/tiny-tapas-random-wtq",
)
self.assertIsInstance(table_querier.model.config.aggregation_labels, dict)
self.assertIsInstance(table_querier.model.config.no_aggregation_label_index, int)
mono_result = table_querier(self.valid_inputs[0])
multi_result = table_querier(self.valid_inputs)
self.assertIn("aggregator", mono_result)
for result in multi_result:
if isinstance(result, list):
for _result in result:
self.assertIn("aggregator", _result)
else:
self.assertIn("aggregator", result)
def test_aggregation_with_sequential(self):
table_querier = pipeline(
"table-question-answering",
model="lysandre/tiny-tapas-random-wtq",
tokenizer="lysandre/tiny-tapas-random-wtq",
)
self.assertIsInstance(table_querier.model.config.aggregation_labels, dict)
self.assertIsInstance(table_querier.model.config.no_aggregation_label_index, int)
with self.assertRaises(ValueError):
table_querier(
{
"table": {},
"query": "how many movies has george clooney played in?",
}
)
with self.assertRaises(ValueError):
table_querier(
{
"query": "how many movies has george clooney played in?",
}
)
with self.assertRaises(ValueError):
table_querier(
{
"table": {
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
},
"query": "",
}
)
with self.assertRaises(ValueError):
table_querier(
{
"table": {
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
},
}
)
def test_empty_errors(self):
table_querier = pipeline(
"table-question-answering",
model="lysandre/tiny-tapas-random-wtq",
tokenizer="lysandre/tiny-tapas-random-wtq",
)
mono_result = table_querier(self.valid_inputs[0], sequential=True)
multi_result = table_querier(self.valid_inputs, sequential=True)
self.assertIn("aggregator", mono_result)
for result in multi_result:
if isinstance(result, list):
for _result in result:
self.assertIn("aggregator", _result)
else:
self.assertIn("aggregator", result)
def test_sequential(self):
table_querier = pipeline(
"table-question-answering",
model="lysandre/tiny-tapas-random-sqa",
tokenizer="lysandre/tiny-tapas-random-sqa",
)
sequential_mono_result_0 = table_querier(self.valid_inputs[0], sequential=True)
sequential_mono_result_1 = table_querier(self.valid_inputs[1], sequential=True)
sequential_multi_result = table_querier(self.valid_inputs, sequential=True)
mono_result_0 = table_querier(self.valid_inputs[0])
mono_result_1 = table_querier(self.valid_inputs[1])
multi_result = table_querier(self.valid_inputs)
# First valid input has a single question, the dict should be equal
self.assertDictEqual(sequential_mono_result_0, mono_result_0)
# Second valid input has several questions, the questions following the first one should not be equal
self.assertNotEqual(sequential_mono_result_1, mono_result_1)
# Assert that we get the same results when passing in several sequences.
for index, (sequential_multi, multi) in enumerate(zip(sequential_multi_result, multi_result)):
if index == 0:
self.assertDictEqual(sequential_multi, multi)
else:
self.assertNotEqual(sequential_multi, multi)
@slow
def test_integration_wtq(self):
table_querier = pipeline("table-question-answering")
data = {
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
}
queries = [
"What repository has the largest number of stars?",
"Given that the numbers of stars defines if a repository is active, what repository is the most active?",
"What is the number of repositories?",
"What is the average number of stars?",
"What is the total amount of stars?",
]
results = table_querier(data, queries)
expected_results = [
{"answer": "Transformers", "coordinates": [(0, 0)], "cells": ["Transformers"], "aggregator": "NONE"},
{"answer": "Transformers", "coordinates": [(0, 0)], "cells": ["Transformers"], "aggregator": "NONE"},
{
"answer": "COUNT > Transformers, Datasets, Tokenizers",
"coordinates": [(0, 0), (1, 0), (2, 0)],
"cells": ["Transformers", "Datasets", "Tokenizers"],
"aggregator": "COUNT",
},
{
"answer": "AVERAGE > 36542, 4512, 3934",
"coordinates": [(0, 1), (1, 1), (2, 1)],
"cells": ["36542", "4512", "3934"],
"aggregator": "AVERAGE",
},
{
"answer": "SUM > 36542, 4512, 3934",
"coordinates": [(0, 1), (1, 1), (2, 1)],
"cells": ["36542", "4512", "3934"],
"aggregator": "SUM",
},
]
self.assertListEqual(results, expected_results)
@slow
def test_integration_sqa(self):
table_querier = pipeline(
"table-question-answering",
model="google/tapas-base-finetuned-sqa",
tokenizer="google/tapas-base-finetuned-sqa",
)
data = {
"Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
"Age": ["56", "45", "59"],
"Number of movies": ["87", "53", "69"],
"Date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
}
queries = ["How many movies has George Clooney played in?", "How old is he?", "What's his date of birth?"]
results = table_querier(data, queries, sequential=True)
expected_results = [
{"answer": "69", "coordinates": [(2, 2)], "cells": ["69"]},
{"answer": "59", "coordinates": [(2, 1)], "cells": ["59"]},
{"answer": "28 november 1967", "coordinates": [(2, 3)], "cells": ["28 november 1967"]},
]
self.assertListEqual(results, expected_results)
| apache-2.0 | 322,898,953,918,020,540 | 40.760714 | 121 | 0.549303 | false |
opencog/opencog | opencog/nlp/anaphora/agents/hobbs.py | 2 | 17858 |
from __future__ import print_function
from pprint import pprint
from opencog.cogserver import MindAgent
from opencog.atomspace import AtomSpace
from opencog.scheme_wrapper import load_scm, scheme_eval_h, scheme_eval, __init__
from opencog.type_constructors import TruthValue
from opencog.cogserver_type_constructors import *
from opencog import logger
import queue
import time
__author__ = 'Hujie Wang'
LOG_LEVEL="fine"
log = logger.create_logger("/tmp/hobbs.log")
log.set_level(LOG_LEVEL)
'''
========================================
Configurations
'''
'''
Number of sentences to search (including the one that contains the pronoun)
'''
NUMBER_OF_SEARCHING_SENTENCES = 3
'''
Suppose the decreasing rate is x; then the i-th accepted candidate
will have a confidence value of (x^(i-1))(1-x), where i starts at 1.
'''
CONFIDENCE_DECREASING_RATE=0.7
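# A quick worked illustration of the formula above (not part of the original
# source): with the default rate x = 0.7, the first three accepted candidates
# would get confidence values of (0.7^0)*0.3 = 0.3, (0.7^1)*0.3 = 0.21 and
# (0.7^2)*0.3 = 0.147.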
'''
Strength for accepted antecedents
'''
STRENGTH_FOR_ACCEPTED_ANTECEDENTS=0.98
'''
Truth value for antecedents which have been filtered out by filters.
'''
TV_FOR_FILTERED_OUT_ANTECEDENTS=TruthValue(0.02, 0.9)
'''
========================================
'''
class BindLinkExecution():
'''
Executes a (cog-execute! xxx) command and returns its results
'''
def __init__(self,atomspace,anchorNode, target, command):
'''
Stores necessary information
'''
self.atomspace=atomspace
self.anchorNode=anchorNode
self.target=target
self.command=command
self.response=None
scheme_eval(self.atomspace, "(use-modules (opencog) (opencog exec))")
scheme_eval(self.atomspace, "(use-modules (opencog nlp))")
scheme_eval(self.atomspace, "(use-modules (opencog nlp oc))")
def execution(self):
'''
First binds the "anchorNode" with the "target" if "anchorNode" exists, then executes scheme command "command"
'''
if self.anchorNode != None and self.target != None:
self.tmpLink=self.atomspace.add_link(types.ListLink, [self.anchorNode, self.target], TruthValue(1.0, 100))
else:
self.tmpLink=None
self.response = scheme_eval_h(self.atomspace, self.command)
d=3;
def returnResponse(self):
'''
Returns the list of atoms resulting from the previous execution of a scheme command
'''
if self.response==None:
return
rv=[]
listOfLinks=self.response.out
for link in listOfLinks:
atom=(link.out)[1]
rv.append(atom)
for link in listOfLinks:
self.atomspace.remove(link)
self.atomspace.remove(self.response)
self.response=None
return rv
def clear(self):
'''
Cleans up the Link between the "anchorNode" and the "target".
'''
if self.tmpLink!=None:
self.atomspace.remove(self.tmpLink)
class HobbsAgent(MindAgent):
'''
    Performs anaphora resolution by doing a breadth-first search on the parse tree, rejecting any antecedent that is matched by a filter
'''
def __init__(self):
self.checked=dict()
self.wordNumber=dict()
self.atomspace = None
self.currentPronoun = None
self.currentPronounNode = None
self.currentTarget = None
self.currentProposal = None
self.pronounNumber = None
self.pronouns = None
self.roots = None
self.confidence = 1.0
self.numOfFilters=7
self.number_of_searching_sentences=3
self.DEBUG = True
log.fine("\n===========================================================\n Starting hobbs agent.....\n=========================================================== ")
def bindLinkExe(self,anchorNode, target, command):
'''
Just combines all the steps of executing a scheme command into a single function.
'''
exe=BindLinkExecution(self.atomspace,anchorNode, target, command)
exe.execution()
rv=exe.returnResponse()
exe.clear()
return rv
def StringToNumber(self,str):
'''
Converts a string to an integer.
'''
# Add 0.1 to avoid float-point rounding error.
return int(float(str) + 0.1)
def getWordNumber(self,node):
'''
Returns the WordSequence number associated with the 'node'
'''
return self.wordNumber[node.name]
def getSentenceNumber(self,node):
'''
Given a ParseNode, returns a SentenceNumber of a SentenceNode associated with it.
'''
rv=self.bindLinkExe(self.currentTarget,node,'(cog-execute! getNumberNode_ParseNode)')
return int(rv[0].name)
def sortNodes(self,list,keyFunc):
'''
Sorts nodes according to their word sequence number and returns the sorted list.
'''
return sorted(list,key=keyFunc)
def getChildren(self,node):
'''
Returns a sorted list of children nodes of current node.
'''
rv=self.bindLinkExe(self.currentTarget,node,'(cog-execute! getChildren)')
return self.sortNodes(rv,self.getWordNumber)
def generateReferenceLink(self,anaphora,antecedent,tv):
'''
        Generates a reference Link for a pair of anaphora and antecedent with truth value "tv".
'''
link = self.atomspace.add_link(types.ReferenceLink, [anaphora, antecedent], tv)
log.fine("Generated a Reference :\n")
log.fine("{0}\n".format(link))
log.fine("===========================================================")
def getConjunction(self,node):
'''
        Returns the other part of a conjunction if a conjunction exists and the anaphor is "Plural"
'''
return self.bindLinkExe(self.currentProposal,node,'(cog-execute! getConjunction)')
def checkConjunctions(self,node):
'''
        Checks whether conjunction resolution applies to "node"; returns True if it applies, False otherwise.
'''
conjunction=self.getConjunction(node);
if len(conjunction)>0:
conjunction_list=[]
conjunction_list.append(node)
conjunction_list.extend(conjunction)
# We don't want to output this to unit tests
            if self.DEBUG:
print("accepted \n"+str(conjunction_list))
log.fine("accepted \n"+str(conjunction_list))
self.generateReferenceLink(self.currentPronoun,self.atomspace.add_link(types.AndLink, conjunction_list, TruthValue(1.0, 1.0)),TruthValue(STRENGTH_FOR_ACCEPTED_ANTECEDENTS, self.confidence))
self.confidence=self.confidence*CONFIDENCE_DECREASING_RATE
return True
return False
def propose(self,node,filter=-1):
'''
        Iterates over all filters and rejects the antecedent ("node") if it is matched by any filter.
'''
self.currentResolutionLink_pronoun=self.atomspace.add_link(types.ListLink, [self.currentResolutionNode, self.currentPronoun, node], TruthValue(1.0, 100))
rejected = False
filterNumber=-1
self.checkConjunctions(node)
start=1
end=self.numOfFilters+1
'''
For debugging purposes.
'''
if filter!=-1:
start=filter
end=filter+1
for index in range(start,end):
command='(cog-execute! filter-#'+str(index)+')'
rv=self.bindLinkExe(self.currentProposal,node,command)
if len(rv)>0:
'''
Reject it
'''
rejected = True
filterNumber=index
break
if not rejected:
# We don't want to output this to unit tests
if self.DEBUG:
print("accepted "+node.name)
log.fine("accepted "+node.name)
self.generateReferenceLink(self.currentPronoun,node,TruthValue(STRENGTH_FOR_ACCEPTED_ANTECEDENTS, self.confidence))
self.confidence=self.confidence*CONFIDENCE_DECREASING_RATE
else:
self.generateReferenceLink(self.currentPronoun,node,TV_FOR_FILTERED_OUT_ANTECEDENTS)
#if self.DEBUG:
# print("rejected "+node.name+" by filter-#"+str(index))
self.atomspace.remove(self.currentResolutionLink_pronoun)
return not rejected
def Checked(self,node):
'''
        Since the graph is not necessarily a forest, this agent does a breadth-first search on a general graph for
        each pronoun; to avoid cycling around the graph, each node is marked as checked once it has been visited.
'''
if node.name in self.checked:
return True
self.checked[node.name]=True
return False
def bfs(self,node):
'''
Does a Breadth-First search, starts with "node"
'''
'''
rv is used for unit tests
'''
rv=[]
if node==None:
#print("found you bfs")
return
q=queue.Queue()
q.put(node)
while not q.empty():
front=q.get()
rv.append(front)
self.propose(front)
children=self.getChildren(front)
if len(children)>0:
for node in children:
if not self.Checked(node):
q.put(node)
return rv
def getWords(self):
'''
Returns a list of words in the atomspace
'''
rv=self.bindLinkExe(None,None,'(cog-execute! getWords)')
return self.sortNodes(rv,self.getWordNumber)
def getTargets(self,words):
'''
        Returns a list of references that need to be resolved.
'''
targets=[]
for word in words:
matched=False
for index in range(1,self.numOfPrePatterns+1):
command='(cog-execute! pre-process-#'+str(index)+')'
rv=self.bindLinkExe(self.currentTarget,word,command)
if len(rv)>0:
matched=True
break
if matched:
targets.append(word)
return targets
def getPronouns(self):
rv=self.bindLinkExe(None,None,'(cog-execute! getPronouns)')
return self.sortNodes(rv,self.getWordNumber)
def getRoots(self):
'''
        Returns a list of roots (incoming degree of 0).
'''
self.bindLinkExe(None,None,'(cog-execute! connectRootsToParseNodes)')
rv= self.bindLinkExe(None,None,'(cog-execute! getAllParseNodes)')
return self.sortNodes(rv,self.getSentenceNumber)
def getRootOfNode(self,target):
'''
Returns a ParseNode associated with the "target"
'''
rv=self.bindLinkExe(self.currentTarget,target,'(cog-execute! getParseNode)')
return rv[0]
def previousRootExist(self,root):
'''
"previous" means that a root with smaller word sequence number than the word sequence number of current "roots".
'''
return not self.roots[0].name==root.name
def getPrevious(self,root):
'''
Return a previous root.
'''
rootNumber=self.getSentenceNumber(root)
for root in reversed(self.roots):
number=self.getSentenceNumber(root)
if number<rootNumber:
return root
def getAllNumberNodes(self):
'''
Finds word sequence number for each word
'''
rv= self.bindLinkExe(None, None, '(cog-execute! getAllNumberNodes)')
for link in rv:
out=link.out
if out[0].type==types.WordInstanceNode:
self.wordNumber[out[0].name]=self.StringToNumber(out[1].name)
def initilization(self,atomspace):
'''
Initializes necessary variables. Loads rules.
'''
self.atomspace = atomspace
self.PleonasticItNode=atomspace.add_node(types.AnchorNode, 'Pleonastic-it', TruthValue(1.0, 100))
self.currentPronounNode = atomspace.add_node(types.AnchorNode, 'CurrentPronoun', TruthValue(1.0, 100))
self.currentTarget = atomspace.add_node(types.AnchorNode, 'CurrentTarget', TruthValue(1.0, 100))
self.currentProposal = atomspace.add_node(types.AnchorNode, 'CurrentProposal', TruthValue(1.0, 100))
self.resolvedReferences=atomspace.add_node(types.AnchorNode, 'Resolved references', TruthValue(1.0, 100))
self.currentResolutionNode=atomspace.add_node(types.AnchorNode, 'CurrentResolution', TruthValue(1.0, 100))
self.pronounNumber = -1
data=["opencog/nlp/anaphora/rules/getChildren.scm",
"opencog/nlp/anaphora/rules/getNumberNode_WordInstanceNode.scm",
"opencog/nlp/anaphora/rules/getNumberNode_ParseNode.scm",
"opencog/nlp/anaphora/rules/connectRootsToParseNodes.scm",
"opencog/nlp/anaphora/rules/getAllNumberNodes.scm",
"opencog/nlp/anaphora/rules/getAllParseNodes.scm",
"opencog/nlp/anaphora/rules/getConjunction.scm",
"opencog/nlp/anaphora/rules/getParseNode.scm",
"opencog/nlp/anaphora/rules/getWords.scm",
"opencog/nlp/anaphora/rules/isIt.scm",
"opencog/nlp/anaphora/rules/filters/filter-#1.scm",
"opencog/nlp/anaphora/rules/filters/filter-#2.scm",
"opencog/nlp/anaphora/rules/filters/filter-#3.scm",
"opencog/nlp/anaphora/rules/filters/filter-#4.scm",
"opencog/nlp/anaphora/rules/filters/filter-#5.scm",
"opencog/nlp/anaphora/rules/filters/filter-#6.scm",
"opencog/nlp/anaphora/rules/filters/filter-#7.scm",
"opencog/nlp/anaphora/rules/filters/filter-#8.scm",
"opencog/nlp/anaphora/rules/filters/filter-#9.scm",
"opencog/nlp/anaphora/rules/filters/filter-#10.scm",
"opencog/nlp/anaphora/rules/filters/filter-#11.scm",
"opencog/nlp/anaphora/rules/filters/filter-#12.scm",
"opencog/nlp/anaphora/rules/filters/filter-#13.scm",
"opencog/nlp/anaphora/rules/filters/filter-#14.scm",
"opencog/nlp/anaphora/rules/filters/filter-#15.scm",
"opencog/nlp/anaphora/rules/filters/filter-#16.scm",
"opencog/nlp/anaphora/rules/filters/filter-#17.scm",
"opencog/nlp/anaphora/rules/filters/filter-#18.scm",
"opencog/nlp/anaphora/rules/pre-process/pre-process-#1.scm",
"opencog/nlp/anaphora/rules/pre-process/pre-process-#2.scm",
"opencog/nlp/anaphora/rules/pre-process/pre-process-#3.scm",
"opencog/nlp/anaphora/rules/pleonastic-it/pleonastic-it-#1.scm",
"opencog/nlp/anaphora/rules/pleonastic-it/pleonastic-it-#2.scm",
"opencog/nlp/anaphora/rules/pleonastic-it/pleonastic-it-#3.scm",
]
self.numOfFilters=18
self.numOfPrePatterns=3
self.numOfPleonasticItPatterns=3
for item in data:
load_scm(atomspace, item)
self.getAllNumberNodes()
self.pronouns=self.getTargets(self.getWords())
self.roots = self.getRoots()
def addPronounToResolvedList(self,node):
'''
Mark current pronoun as resolved.
'''
self.atomspace.add_link(types.ListLink,[self.resolvedReferences,node],TruthValue(1.0, 100))
def pleonastic_it(self,node):
'''
        Checks whether the node is the word "it" and, if so, whether it matches a pleonastic-it pattern.
'''
matched=False
rv=self.bindLinkExe(self.currentTarget,node,'(cog-execute! isIt)')
if len(rv)>0:
for index in range(1,self.numOfPleonasticItPatterns+1):
command='(cog-execute! pleonastic-it-#'+str(index)+')'
rv=self.bindLinkExe(self.currentTarget,node,command)
if len(rv)>0:
matched=True
break
#print("rejected "+node.name+" by filter-#"+str(index))
return matched
def run(self, atomspace):
self.initilization(atomspace)
for pronoun in self.pronouns:
self.checked.clear()
self.pronounNumber=self.getWordNumber(pronoun)
self.confidence=1-CONFIDENCE_DECREASING_RATE
'''
Binds current "pronoun" with "currentPronounNode".
This part is used by pattern matcher.
'''
tmpLink=self.atomspace.add_link(types.ListLink, [self.currentPronounNode, pronoun], TruthValue(1.0, 100))
self.currentPronoun=pronoun
root=self.getRootOfNode(pronoun)
if self.DEBUG:
print("Resolving....")
print(pronoun)
log.fine("Resolving \n{0}".format(pronoun))
'''
Check if it's a pleonastic it.
'''
if self.pleonastic_it(pronoun):
self.generateReferenceLink(pronoun,self.PleonasticItNode,TruthValue(STRENGTH_FOR_ACCEPTED_ANTECEDENTS, self.confidence))
self.confidence=self.confidence*CONFIDENCE_DECREASING_RATE
if self.DEBUG:
print("accepted "+self.PleonasticItNode.name)
log.fine("accepted "+self.PleonasticItNode.name)
sent_counter=1;
while True:
if root==None:
break
self.bfs(root)
if self.previousRootExist(root) and sent_counter<=NUMBER_OF_SEARCHING_SENTENCES:
root=self.getPrevious(root)
sent_counter=sent_counter+1
else:
break
self.atomspace.remove(tmpLink)
self.addPronounToResolvedList(pronoun)
| agpl-3.0 | 2,594,558,292,092,428,000 | 31.827206 | 201 | 0.595643 | false |
Lucas-Armand/genetic-algorithm | dev/4ºSemana/genetic teste.py | 1 | 16555 | # -*- coding: utf-8 -*-
import os
import csv
import random
import numpy as np
import time as Time
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from itertools import product, combinations
#m = np.array([[0,1,1,1,1,0,0,0,0,0,0],
# [0,0,0,0,0,.5,.5,0,0,0,0],
# [0,0,0,0,0,.5,.5,0,0,0,0],
# [0,0,0,0,0,0,0,.5,.5,0,0],
# [0,0,0,0,0,0,0,.5,.5,0,0],
# [0,0,0,0,0,0,0,0,0,.5,0],
# [0,0,0,0,0,0,0,0,0,.5,0],
# [0,0,0,0,0,0,0,0,0,0,.5],
# [0,0,0,0,0,0,0,0,0,0,.5],
# [0,0,0,0,0,0,0,0,0,0,0],
# [0,0,0,0,0,0,0,0,0,0,0]])
def csv_read(name): # Reading method: transforms a CSV file into a vector (list of rows)
CSV=open(name,'r')
dados=CSV.read()
dados=dados.replace(',','.')
dados=dados.replace(';',',')
CSV.close()
CSV=open("temp.csv",'w')
CSV.write(dados)
CSV.close()
CSV=open("temp.csv",'r')
dados=csv.reader(CSV)
v=[]
for i in dados:
I=[]
for j in i:
try:
j = float(j)
except:
pass
I.append(j)
v.append(I)
CSV.close()
os.remove("temp.csv")
return (v)
def printlock(ax,block,clr):
point = block.p
a = block.a
b = block.b
c = block.c
for s, e in combinations(np.array(list(product([0,a],[0,b],[0,c]))), 2):
s=s+point
e=e+point
alfa = round(a, 5)
beta = round(b, 5)
gama = round(c, 5)
delt = round(np.linalg.norm(np.abs(s-e)),5)
if delt in [alfa,beta,gama]:
ax.plot3D(*zip(s,e), color=clr)
def MatrixOfPrecedence(name):
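    # Builds an n x n precedence matrix M from the CSV list: M[i][j] > 0 means
    # block i must be assembled before block j; every non-zero column is then
    # normalised so that its entries sum to 1.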
ListOfPrecedence = csv_read(name)
n= len(ListOfPrecedence)
M = np.zeros((n,n))
M.tolist()
for j in ListOfPrecedence:
if ListOfPrecedence.index(j)!=0: #jump the first (title)
try:
elements = j[1].split(' - ')
except:
elements = [j[1]]
elements = map(float,elements)
for i in elements:
M[int(i)][int(j[0])] = 1
for j in range(n):
s = sum(M[:,j])
if s!=0:
M[:,j] = M[:,j]*1./s
return M
def defineGeometry(name):
vect = csv_read(name)
blockNumber ={}
for i in vect:
a = i[1]
b = i[2]
c = i[3]
point = [i[4],i[5],i[6]]
weight = i[7]
btype = i[-1]
block = Block(point,a,b,c,weight,btype)
blockNumber[i[0]] = block
return blockNumber
def Ship3Dplot(fig,blocksList,color):
ax = fig.gca(projection='3d')
ax.set_aspect("equal")
for block in blocksList:
printlock(ax,block,color)
# Create cubic bounding box to simulate equal aspect ratio #
Xb = 0.5*250*np.mgrid[-1:2:2,-1:2:2,-1:2:2][0].flatten() + 125
Yb = 0.5*200*np.mgrid[-1:2:2,-1:2:2,-1:2:2][1].flatten() + 0
Zb = 0.5*200*np.mgrid[-1:2:2,-1:2:2,-1:2:2][2].flatten() + 20
for xb, yb, zb in zip(Xb, Yb, Zb):
ax.plot([xb], [yb], [zb], 'w')
fig.tight_layout(pad=0.5)
class Genetic:
def __init__(self,teta,Teta,n,N,bNumb,vicinity,m):
self.teta = teta
self.Teta = Teta
self.n = n
self.N = N
self.bNumb = bNumb
self.vicinity = vicinity
self.m = m
def run(self,population = None):
t_run = Time.time()
if population == None:
first = [self.gen_chromosome(self.m) for i in range(self.N)]
time = [self.time(bNumb,vicinity,chromo) for chromo in first]
self.population = [ [t,c] for t,c in zip(time,first)]
else:
self.population = population
best = min(self.population, key = lambda x:x[0])
cont = 0
old_best = 0
while round(best[0],8)> 4202.5:
cont+=1
self.population = self.new_population(self.n,self.teta,self.Teta,self.population,self.m)
best = min(self.population, key = lambda x:x[0])
print cont,best[0]
if cont%20==0:
if cont==40 or best[0] == old_best:
print cont,best[0]
print 'break'
break
else:
old_best = best[0]
if round(best[0],8)< 1000:
print '!!!!!!!!!!!'
global text
text = text+ '\n time(run) = '+str( - t_run + Time.time())
return best
def gen_chromosome(self,m):
t_gen = Time.time()
n = len(m)
itens = [0]
chromo = np.array([])
while itens!=[]:
gene = random.choice(itens)
if gene!=0 :chromo = np.append(chromo,gene)
itens.remove(gene)
for j in range(n):
if m[gene,j]>0:
s = 0
for i in set(np.append(chromo,gene)):
s += m[i][j]
if s==1:
itens.append(j)
global text
text+= '\ntime(gen_chromosome) = '+str( - t_gen + Time.time())
return chromo
def mutation(self,chromosome,m):
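        # Picks a random gene x and swaps it with another position chosen so that
        # no precedence constraint encoded in matrix m is violated (candidates are
        # searched between x's nearest predecessor and nearest successor).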
t_mut = Time.time()
x = random.choice(chromosome)
n = len(m)
pre = [a for a in range(n) if m[a,x]>0]
pos = [a for a in range(n) if m[x,a]>0]
ind = chromosome.tolist().index(x)
ant = chromosome[:ind]
suc = chromosome[ind+1:]
ant = ant[::-1]
Ant = []
for g in ant:
if g in pre:
break
else:
Ant.append(g)
Suc = []
for g in suc:
if g in pos:
break
else:
Suc.append(g)
possibles_mutations = []
l = len(Ant)
for i in range(l):
y = Ant[i]
pos_y = [a for a in range(n) if m[y,a]>0]
alfa = 'ok'
for j in range(i+1):
z = Ant[j]
if z in pos_y:
alfa = 'stop'
if alfa == 'ok':
possibles_mutations.append(y)
l = len(Suc)
for i in range(l):
y = Suc[i]
pre_y = [a for a in range(n) if m[a,y]>0]
alfa = 'ok'
for j in range(i+1):
z = Suc[j]
if z in pre_y:
alfa = 'stop'
if alfa == 'ok':
possibles_mutations.append(y)
if possibles_mutations!=[]:
w = random.choice(possibles_mutations)
ind_w = chromosome.tolist().index(w)
chromosome[ind] = w
chromosome[ind_w] = x
else:
            print 'No possible mutation found!'
global text
text+= '\ntime(mutation) = '+str( - t_mut + Time.time())
return chromosome
def crossover(self,teta,Teta,chromo1,chromo2,m):
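        # One-point crossover applied with probability Teta, followed by a repair
        # step that removes duplicated genes so each child remains a permutation;
        # each child is then mutated with probability teta.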
t_cro = Time.time()
if random.random()<Teta:
n = len(chromo1)
i = random.choice(range(n))
p1_a = chromo1[:i]
p2_a = chromo2[:i]
p1_b = chromo1[i:]
p2_b = chromo2[i:]
            diff_1 = np.extract(~np.in1d(p2_a,p1_a),p2_a)
            diff_2 = np.extract(~np.in1d(p1_a,p2_a),p1_a)
p1_a = np.append(p1_a,diff_1)
p2_a = np.append(p2_a,diff_2)
for element in diff_2:
ind = np.where(p2_b == element)
p2_b = np.delete(p2_b,ind)
for element in diff_1 :
ind = np.where(p1_b == element)
p1_b = np.delete(p1_b,ind)
c1 = np.append(p1_a,p2_b)
c2 = np.append(p2_a,p1_b)
else:
c1 = chromo1
c2 = chromo2
if random.random()<teta:
c1 = self.mutation(c1,m)
if random.random()<teta:
c2 = self.mutation(c2,m)
global text
text+= '\ntime(crossover) = '+str( - t_cro + Time.time())
return c1,c2
def time(self,bNumb,vicinity,chromo):
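        # Fitness: total assembly time. Each block contributes a welding time
        # (beta per unit of seam length, depending on block type and on which
        # neighbours are already built) plus a positioning time of alfa when at
        # least one neighbour is already erected, or 10*alfa otherwise.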
global bTime
bTime = {}
t_time = Time.time()
alfa = 10
beta = 1
built = []
time = 0
for g in chromo:
#welding's time
check = None
block = bNumb[g]
btype = block.t
time_weld = 0
#Oritation of axes:
# block.a = axe "x"
# block.b = axe "y"
# block.c = axe "z"
if btype == 'Trunk deck':
#fixation welding between the deck and the shell (on both sides)
time_weld = beta*2*block.a
for borderer in vicinity[g]:
if borderer in built:
check =True
#welding between two blocks of deck
time_weld += beta*block.b
elif btype == 'Side Shell':
#fixation welding between the side and the bottom (on both sides)
time_weld = beta*block.a
for borderer in vicinity[g]:
if borderer in built and abs(borderer - g)>1: # we don't make weld in
check =True #side blocks in front each
#other
time_weld += beta*block.c
elif btype == 'Bottom':
#don't need fixation welding
for borderer in vicinity[g]:
if borderer in built:
check =True
#welding between two bottom's blocks
if abs(borderer - g) == 1:
#weldin in the axe 'x'
time_weld += beta*block.a
else:
#weldin in the axe 'y'
time_weld += beta*block.b
else:#Cofferdam
#fixation welding between the side and the bottom (on both sides)
time_weld = beta*block.b
for borderer in vicinity[g]:
if borderer in built:
check =True
if check:
time_posi = alfa
time += time_posi+time_weld
else:
time_posi = 10*alfa
time += time_posi+time_weld
built.append(g)
global text
text+= '\ntime(time_fitt) = '+str( - t_time + Time.time())
return time
def tournament(self,population,n):
t_tou = Time.time()
specimens = [random.choice(population) for i in range(n)]
specimens.sort(key = lambda x:x[0])
parent1,parent2 = specimens[0][1],specimens[1][1]
t1,t2 = specimens[0][0],specimens[1][0]
global text
text+= '\ntime(tournament) = '+str( - t_tou + Time.time())
return parent1,parent2,t1,t2
def new_population(self,n,teta,Teta,population,m):
t_new = Time.time()
size = len(population)
new_population= []
while len(new_population) != size:
p1,p2,t_p1,t_p2 = self.tournament(population,n)
ch1,ch2 = self.crossover(teta,Teta,p1,p2,m)
if (ch1 == p1).all():
t1 = t_p1
elif (ch1 == p2).all():
t1 = t_p2
else:
t1 = self.time(bNumb,vicinity,ch1)
if (ch2 == p1).all():
t2 = t_p1
elif (ch2 == p2).all():
t2 = t_p2
else:
t2 = self.time(bNumb,vicinity,ch2)
new_population+=[[t1,ch1],[t2,ch2]]
        global text
        text+= '\ntime(new_population) = '+ str(- t_new + Time.time())
        return new_population
# def check(self,chromosome,m):
#
# n = len(m)
# for x in chromosome:
#
# pre = [a for a in range(n) if m[a,x]>0]
# pos = [a for a in range(n) if m[x,a]>0]
#
# ind = chromosome.tolist().index(x)
#
# ant = chromosome[:ind]
# suc = chromosome[ind+1:]
#
# set_pre = set(pre) - set([0])
# set_pos = set(pos)
#
# set_ant = set(ant)
# set_suc = set(suc)
#
#
#
#
# if set_pre != set_pre&set_ant or set_pos != set_pos&set_suc:
# print
# print
# print chromosome
# print
# print x
# print
# print ant
# print suc
# print
# print pre
# print pos
# print
# print
# return False
# return True
def anim(i,fig,bNumb,order,color):
try:
Ship3Dplot(fig, [bNumb[order[i]]],color)
except:
pass
class Block:
def __init__(self,point,a,b,c,weight,btype):
self.p=point
self.a=a
self.b=b
self.c=c
self.w=weight
self.t=btype
if __name__ == "__main__":
t_exec = Time.time()
global text
text = ''
fig = plt.figure()
bNumb=defineGeometry('GeometriaNavio.csv')
# Define vicinity
#deck
vicinity={1:[2]}
for i in range(2,16):
vicinity[i] = [i-1,i+1]
vicinity[16] = [15]
#side
vicinity[17] = [18,19]
vicinity[18] = [17,20]
for i in range(19,31):
v = i-1 if i%2==0 else i+1
vicinity[i] = [v,i-2,i+2]
vicinity[31] = [29,32]
vicinity[32] = [30,31]
#bott
vicinity[33] = [34,35]
vicinity[34] = [33,36]
for i in range(35,63):
v = i-1 if i%2==0 else i+1
vicinity[i] = [v,i-2,i+2]
vicinity[63] = [61,64]
vicinity[64] = [63,62]
#coff
vicinity[65] = [66]
for i in range(66,70):
vicinity[i] = [i-1,i+1]
vicinity[70] = [69]
#execute
MoP = MatrixOfPrecedence('EstructuralLoP.csv')
G1 = Genetic(0.4,.5,4,200,bNumb,vicinity,MoP)
best_1 = G1.run()
chromosome = best_1[1]
chromosome = [int(i) for i in chromosome.tolist()]
# order = csv_read('Grande Bloco_Meio Navio-PopaProa.csv')
# order = csv_read('Camada_Meio Navio-PopaProa.csv')
# order = csv_read('Piramide_Meio Navio-PopaProa.csv')
## order = csv_read('Grande bloco_Popa-Proa.csv')
# order = csv_read('Camada_Popa-Proa.csv')
# order = csv_read('Piramide_Popa-Proa.csv')
# print order
#
# chromosome = [int(i[0]) for i in order]
# print G1.time(bNumb,vicinity,chromosome)
#
ani = animation.FuncAnimation(fig, anim,fargs=(fig,bNumb,chromosome,'b'), interval=200)
plt.show()
#print 'time(execution) = ', - t_exec + Time.time()
file = open("newfile.txt", "w")
file.write(text)
file.close()
# Best result so far (0.3, 4, 600):
#[53.0, 51.0, 35.0, 55.0, 36.0, 49.0, 47.0, 34.0, 57.0, 58.0, 60.0, 59.0, 54.0, 61.0, 52.0, 50.0, 45.0, 46.0, 63.0, 44.0, 37.0, 62.0, 39.0, 33.0, 64.0, 38.0, 40.0, 43.0, 56.0, 42.0, 41.0, 48.0, 21.0, 22.0, 67.0, 23.0, 24.0, 66.0, 65.0, 68.0, 26.0, 20.0, 28.0, 8.0, 25.0, 19.0, 9.0, 7.0, 10.0, 17.0, 27.0, 69.0, 6.0, 30.0, 29.0, 5.0, 70.0, 11.0, 12.0, 4.0, 18.0, 13.0, 14.0, 3.0, 32.0, 31.0, 2.0, 15.0, 16.0, 1.0] | gpl-3.0 | 1,172,673,510,563,227,100 | 27.300855 | 412 | 0.428028 | false |
queria/my-tempest | tempest/api/identity/admin/v3/test_credentials.py | 2 | 4531 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.identity import base
from tempest.common.utils import data_utils
from tempest import test
class CredentialsTestJSON(base.BaseIdentityV3AdminTest):
_interface = 'json'
@classmethod
def resource_setup(cls):
super(CredentialsTestJSON, cls).resource_setup()
cls.projects = list()
cls.creds_list = [['project_id', 'user_id', 'id'],
['access', 'secret']]
u_name = data_utils.rand_name('user-')
u_desc = '%s description' % u_name
u_email = '%s@testmail.tm' % u_name
u_password = data_utils.rand_name('pass-')
for i in range(2):
_, cls.project = cls.client.create_project(
data_utils.rand_name('project-'),
description=data_utils.rand_name('project-desc-'))
cls.projects.append(cls.project['id'])
_, cls.user_body = cls.client.create_user(
u_name, description=u_desc, password=u_password,
email=u_email, project_id=cls.projects[0])
@classmethod
def resource_cleanup(cls):
cls.client.delete_user(cls.user_body['id'])
for p in cls.projects:
cls.client.delete_project(p)
super(CredentialsTestJSON, cls).resource_cleanup()
def _delete_credential(self, cred_id):
self.creds_client.delete_credential(cred_id)
@test.attr(type='smoke')
def test_credentials_create_get_update_delete(self):
keys = [data_utils.rand_name('Access-'),
data_utils.rand_name('Secret-')]
_, cred = self.creds_client.create_credential(
keys[0], keys[1], self.user_body['id'],
self.projects[0])
self.addCleanup(self._delete_credential, cred['id'])
for value1 in self.creds_list[0]:
self.assertIn(value1, cred)
for value2 in self.creds_list[1]:
self.assertIn(value2, cred['blob'])
new_keys = [data_utils.rand_name('NewAccess-'),
data_utils.rand_name('NewSecret-')]
_, update_body = self.creds_client.update_credential(
cred['id'], access_key=new_keys[0], secret_key=new_keys[1],
project_id=self.projects[1])
self.assertEqual(cred['id'], update_body['id'])
self.assertEqual(self.projects[1], update_body['project_id'])
self.assertEqual(self.user_body['id'], update_body['user_id'])
self.assertEqual(update_body['blob']['access'], new_keys[0])
self.assertEqual(update_body['blob']['secret'], new_keys[1])
_, get_body = self.creds_client.get_credential(cred['id'])
for value1 in self.creds_list[0]:
self.assertEqual(update_body[value1],
get_body[value1])
for value2 in self.creds_list[1]:
self.assertEqual(update_body['blob'][value2],
get_body['blob'][value2])
@test.attr(type='smoke')
def test_credentials_list_delete(self):
created_cred_ids = list()
fetched_cred_ids = list()
for i in range(2):
_, cred = self.creds_client.create_credential(
data_utils.rand_name('Access-'),
data_utils.rand_name('Secret-'),
self.user_body['id'], self.projects[0])
created_cred_ids.append(cred['id'])
self.addCleanup(self._delete_credential, cred['id'])
_, creds = self.creds_client.list_credentials()
for i in creds:
fetched_cred_ids.append(i['id'])
missing_creds = [c for c in created_cred_ids
if c not in fetched_cred_ids]
self.assertEqual(0, len(missing_creds),
"Failed to find cred %s in fetched list" %
', '.join(m_cred for m_cred in missing_creds))
class CredentialsTestXML(CredentialsTestJSON):
_interface = 'xml'
| apache-2.0 | -5,099,943,118,611,193,000 | 39.81982 | 78 | 0.598985 | false |
mantlepro/radcast | radcast/radcast.py | 1 | 8585 | #/usr/bin/env python2.7
# -*- coding: utf-8 -*-
"""radcast.py: main functionality of radcast"""
import os
import subprocess
import jinja2
import logging
from . import settings
from .utilities import slugify
__copyright__ = "Copyright 2017, Josh Wheeler"
__license__ = "GPL-3.0"
__status__ = "Development"
# radcast: radical podcast automation
# Copyright (C) 2017 Josh Wheeler
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# load values from config file into cfg
cfg = settings.cfg
preset = settings.presets[cfg["preset"]]
def deserialize_clip(clip_dict):
"""Deserialize clip from python dictionary to clip object"""
c = clip_dict
logging.debug("Deserialize %s" % c)
clip = Clip(c["filename"])
clip.in_frame = c.get("in_frame", 0)
clip.out_frame = c.get("out_frame", None)
clip.fade = c.get("fade", None)
clip.start_frame = c.get("start_frame", None)
return clip.get_clip()
class Clip(object):
"""Clip object and attributes
fades can either be a value or True. If True, default transition
length from user's settings file will be used.
"""
def __init__(self, filename=None, color=None):
self.filename = filename
self.in_frame = 0
self.out_frame = None
self.current_frame = None
self.fade = None
self.mos = False
self.still = 5
def get_clip(self):
"""Return clip object"""
transition_length = cfg.get("transition_length", 12)
# fades can either be True or a value. If true, they get default
# transition length set in config file. If no default transition length
# is set, they will get 24 frames for default
if self.fade is True:
self.fade = transition_length
# if clip is a still frame, apply mos and length
fname, ext = os.path.splitext(self.filename)
formats = [".jpg", ".jpeg", ".jp2", ".png", ".tif", ".tiff", ".gif", "bmp"]
if any(ext.lower() in s for s in formats):
self.mos = True
self.out_frame = self.still * cfg.get("fps", 24)
# hacky solution to mos the ending logo for a melt audio fade
if os.path.basename(fname).lower() == "logo":
self.mos = True
return self
class Podcast(object):
def __init__(self):
self.title = "Untitled"
self.description = ""
self.sequence = []
self.program = None
self.preset = settings.presets[cfg["preset"]]
self.path = None
self.padding = cfg.get("padding", True)
self.pre_roll = []
self.post_roll = []
self.output_dir = "/media/output"
self.output_file = None
# pre-roll and post-roll from cfg
cfg_pre_roll = cfg.get("pre_roll", None)
cfg_post_roll = cfg.get("post_roll", None)
# if output directory is missing, set to current directory
if not os.path.isdir(self.output_dir):
self.output_dir = os.getcwd()
if cfg_pre_roll:
for c in cfg_pre_roll:
self.pre_roll.append(deserialize_clip(c))
logging.info("Adding pre-roll from config: %s" % cfg_pre_roll)
if cfg_post_roll:
for c in cfg_post_roll:
self.post_roll.append(deserialize_clip(c))
logging.info("Adding post-roll from config: %s" % cfg_post_roll)
# if padding isn't a numeric value, set a default length
if self.padding is True:
self.padding = cfg.get("fps", 24)
def generate_melt(self, template="simple.melt"):
"""generate a melt file based on template given.
If no template is given, default.melt is used."""
self.melt_file = "%s/%s.melt" % (self.output_dir, slugify(self.title))
loader = jinja2.PackageLoader("radcast", "templates")
env = jinja2.Environment(loader=loader)
env.trim_blocks = True
env.lstrip_blocks = True
f = open(self.melt_file, "w+")
template_xml = env.get_template(template)
# set up template variables
render = template_xml.render(
program=self.program,
pre_roll=self.pre_roll,
post_roll=self.post_roll,
padding=self.padding,
)
f.write(render)
f.close()
return self.melt_file
def get_melt_command(self):
# set producer to filename of generated melt file
self.generate_melt()
if not self.output_file:
self.output_file = slugify(self.title) + ".m4v"
self.output_file = ("%s/%s") % (self.output_dir, self.output_file)
command = [
'melt', '-profile', cfg.get("mlt_profile", "atsc_720p_24"),
'-producer', self.melt_file,
'-consumer', 'avformat:' + self.output_file,
'vcodec=' + preset.get("vcodec", "libx264"),
'color_primaries=' + preset.get("color_primaries", "bt709"),
'preset=' + preset.get("preset", "slower"),
'b=' + preset.get("b:v", "7500k"),
'acodec=' + preset.get("acodec", "aac"),
'ab=' + preset.get("b:a", "320k"),
'ac=' + preset.get("ac", "2"),
]
        for f in preset.get("ffmpeg_params", []):
command.append(f)
logging.debug("Encode command: %s" % command)
return command
def encode(self):
"""Encode video and audio"""
# render video
subprocess.call(self.get_melt_command())
# convert audio to flac and save file handle for trim operation
flac = self.encode_flac(self.output_file)
# remove silence and save file handle for mp3 encoding
trimmed = self.remove_silence(flac)
# encode mp3
self.encode_mp3(trimmed)
if os.path.isfile(trimmed):
os.remove(trimmed)
def remove_silence(self, infile):
"""Remove silent periods from audio at beginning and end"""
input_file = infile
output_file = "%s/%s" % (self.output_dir, slugify(self.title) + ".wav")
logging.info("Trimming silence from %s\nSaving to %s" % (
input_file,
output_file
)
)
command = [
'sox',
input_file,
output_file,
'silence',
'1', '0.1', '0.1%',
'reverse',
'silence',
'1', '0.1', '0.1%',
'reverse'
]
logging.debug("sox command: %s" % command)
subprocess.call(command)
return output_file
def encode_flac(self, infile):
"""Encode flac using ffmpeg"""
input_file = infile
output_file = "%s/%s" % (self.output_dir, slugify(self.title) + ".flac")
logging.info("Encoding flac from %s\nSaving to %s" % (
input_file,
output_file
)
)
command = [
'ffmpeg',
'-loglevel', 'panic',
'-hide_banner',
'-y',
'-i', input_file,
'-ac', '1',
output_file,
]
logging.debug("ffmpeg command: %s" % command)
subprocess.call(command)
return output_file
def encode_mp3(self, infile):
"""Encode audio to mp3 using ffmpeg"""
input_file = infile
output_file = "%s/%s" % (self.output_dir, slugify(self.title) + ".mp3")
logging.info("Encoding podcast mp3 from %s\nSaving to %s" % (
input_file,
output_file
)
)
command = [
'ffmpeg',
'-loglevel', 'panic',
'-hide_banner',
'-y',
'-i', input_file,
'-acodec', 'mp3',
'-ab', '64k',
'-ac', '1',
output_file,
]
logging.debug("ffmpeg command: %s" % command)
subprocess.call(command)
return output_file
| gpl-3.0 | -8,764,997,944,920,481,000 | 29.335689 | 83 | 0.558066 | false |
terrycojones/dark-matter | bin/fasta-base-indices.py | 3 | 2968 | #!/usr/bin/env python
from __future__ import print_function
import sys
import os
import argparse
from dark.reads import (
addFASTACommandLineOptions, parseFASTACommandLineOptions, unambiguousBases)
parser = argparse.ArgumentParser(
description=(
'Given FASTA on standard input, and bases to look for, write the '
'1-based indices of where the bases occur in all sequences to '
'standard output. If standard output is a terminal, these will be '
'space separated, else newline separated. Use --any to print '
'indices that match any sequence.'),
epilog=(
'This can be used to find all columns of a FASTA multiple '
'sequence alignment that contain gaps, ambiguous nucleotides or '
'AAs, or to find all columns that do not contain such things. '
'Note that if sequences are of uneven lengths and --any is not '
'specified, only indices up to the length of the shortest input '
'sequence can be printed (i.e., there is a strict interpretation '
'of all sequences needing to have a matching base at an index: '
'if the index does not exist in even one sequence, then no base '
'can occur in all sequences at that index).'))
parser.add_argument(
'--bases',
help=('The sequence bases whose indices should be printed. If not '
'specified, this will be the defined set of bases for the input '
'sequence type (i.e., "ACGT" for DNA). This will have the effect of '
'printing the indices for which any sequence has an ambiguous or '
'missing base.'))
parser.add_argument(
'--matchCase', default=False, action='store_true',
help='If specified, sequence case will be considered in matching.')
parser.add_argument(
'--any', default=False, action='store_true',
help=('If specified, print indices of bases that match any sequence, '
'otherwise (the default) indices are only printed if they match '
'all sequences.'))
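# Example invocations (a sketch; the FASTA input options themselves come from
# addFASTACommandLineOptions):
#   fasta-base-indices.py < aln.fasta                    # columns where all sequences have an unambiguous base
#   fasta-base-indices.py --bases=- --any < aln.fasta    # columns where any sequence has a gap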
addFASTACommandLineOptions(parser)
args = parser.parse_args()
reads = parseFASTACommandLineOptions(args)
if args.bases is None:
# No target bases were given. Use the set of unambiguous bases for
# the read type. Unless --any has been given, this will print
# indices in which no ambiguous bases or gaps appear in any
# sequence.
targets = unambiguousBases[args.readClass]
else:
targets = set(args.bases)
indices = reads.indicesMatching(targets, args.matchCase, args.any)
nIndices = len(indices)
if nIndices:
separator = ' ' if os.isatty(1) else '\n'
# Add one to indices to maximize happy humans.
print(separator.join(map(lambda x: str(x + 1), sorted(indices))))
print('Found %d %s where %s a base from the set {%s}.' %
(nIndices,
'index' if nIndices == 1 else 'indices',
'any sequence has' if args.any else 'all sequences have',
', '.join(sorted(targets))),
file=sys.stderr)
| mit | -3,157,371,626,060,144,600 | 39.108108 | 79 | 0.677898 | false |
MrSenko/Kiwi | tcms/kiwi_auth/tests/test_profiles.py | 2 | 1829 | # pylint: disable=invalid-name
# -*- coding: utf-8 -*-
from http import HTTPStatus
from django.http import HttpResponseForbidden
from django.urls import reverse
from tcms.tests import LoggedInTestCase, create_request_user, user_should_have_perm
class TestProfilesView(LoggedInTestCase):
"""Test the profiles view functionality"""
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.somebody_else = create_request_user("somebody-else", "password")
def test_user_can_view_their_own_profile(self):
url = reverse("tcms-profile", args=[self.tester.username])
response = self.client.get(url, follow=True)
self.assertEqual(HTTPStatus.OK, response.status_code)
self.assertContains(response, self.tester.username)
self.assertContains(response, self.tester.email)
self.assertContains(response, 'name="_save"')
def test_user_cant_view_profile_of_another_user_without_permission(self):
url = reverse("tcms-profile", args=[self.somebody_else.username])
response = self.client.get(url, follow=True)
self.assertIsInstance(response, HttpResponseForbidden)
def test_user_can_view_profile_of_another_user_with_permission(self):
user_should_have_perm(self.tester, "auth.view_user")
url = reverse("tcms-profile", args=[self.somebody_else.username])
response = self.client.get(url, follow=True)
self.assertContains(response, self.somebody_else.username)
self.assertContains(response, self.somebody_else.email)
self.assertNotContains(response, 'name="_save"')
def test_view_if_user_is_invalid(self):
response = self.client.get(
reverse("tcms-profile", args=["non-existing-username"])
)
self.assertEqual(response.status_code, 404)
| gpl-2.0 | 4,518,893,693,998,045,000 | 36.326531 | 83 | 0.694369 | false |
gaocegege/treadmill | treadmill/zknamespace.py | 3 | 4317 | """Treadmill constants."""
import collections
import functools
ALLOCATIONS = '/allocations'
APPGROUPS = '/app-groups'
APPMONITORS = '/app-monitors'
ARCHIVE_CONFIG = '/archive/config'
BLACKEDOUT_APPS = '/blackedout.apps'
BLACKEDOUT_SERVERS = '/blackedout.servers'
BUCKETS = '/buckets'
CELL = '/cell'
ENDPOINTS = '/endpoints'
EVENTS = '/events'
IDENTITY_GROUPS = '/identity-groups'
PLACEMENT = '/placement'
RUNNING = '/running'
SCHEDULED = '/scheduled'
SCHEDULER = '/scheduler'
SERVERS = '/servers'
PARTITIONS = '/partitions'
REBOOTS = '/reboots'
SERVER_PRESENCE = '/server.presence'
STRATEGIES = '/strategies'
FINISHED = '/finished'
FINISHED_HISTORY = '/finished.history'
TRACE = '/trace'
TRACE_HISTORY = '/trace.history'
TICKET_LOCKER = '/ticket-locker'
TREADMILL = '/treadmill'
VERSION = '/version'
VERSION_ID = '/version-id'
ZOOKEEPER = '/zookeeper'
ELECTION = '/election'
CRON_JOBS = '/cron-jobs'
TRACE_SHARDS_COUNT = 256
def join_zookeeper_path(root, *child):
""""Returns zookeeper path joined by slash."""
return '/'.join((root,) + child)
def _make_path_f(zkpath):
""""Return closure that will construct node path."""
return staticmethod(functools.partial(join_zookeeper_path, zkpath))
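# Example: path.server('myhost') evaluates to '/servers/myhost' (see the path.*
# assignments at the bottom of this module).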
@staticmethod
def _path_trace_shard(shard_id):
"""Returns path of a trace shard."""
shard = '{:04X}'.format(int(shard_id) % TRACE_SHARDS_COUNT)
return '/'.join([TRACE, shard])
def trace_shards():
"""Return list of trace shards."""
return ['/'.join([TRACE, '{:04X}'.format(idx)])
for idx in range(0, TRACE_SHARDS_COUNT)]
@staticmethod
def _path_trace(instancename, event=None):
"""Returns path of a trace object for given app instance."""
instance_id = instancename[instancename.find('#') + 1:]
shard = '{:04X}'.format(int(instance_id) % TRACE_SHARDS_COUNT)
if event:
nodename = '%s,%s' % (instancename, event)
return '/'.join([TRACE, shard, nodename])
else:
return '/'.join([TRACE, shard])
@staticmethod
def _path_endpoint(name, proto, endpoint):
"""Returns path to Zk app endpoint node by name.
The name is assumed to be <proid>.<xxx> which will result in the path:
/endpoints/<proid>/<xxx>:<proto>:<endpoint>
"""
prefix, _sep, rest = name.partition('.')
return '/'.join(
[ENDPOINTS, prefix, ':'.join([rest, proto, str(endpoint)])]
)
@staticmethod
def _path_endpoint_proid(name):
"""Returns path to Zk app endpoint proid node path by name.
The name is assumed to be <proid>.<xxx> which will result in the path:
/endpoints/<proid>
"""
proid, _sep, _rest = name.partition('.')
return '/'.join([ENDPOINTS, proid])
# pylint: disable=C0103
path = collections.namedtuple('path', """
allocation
blackedout_server
bucket
cell
chroot
event
placement
running
scheduled
scheduler
server_presence
server
strategy
ticket_locker
version
version_id
zookeeper
endpoint
task
""")
path.allocation = _make_path_f(ALLOCATIONS)
path.appgroup = _make_path_f(APPGROUPS)
path.appmonitor = _make_path_f(APPMONITORS)
path.blackedout_app = _make_path_f(BLACKEDOUT_APPS)
path.blackedout_server = _make_path_f(BLACKEDOUT_SERVERS)
path.bucket = _make_path_f(BUCKETS)
path.cell = _make_path_f(CELL)
path.chroot = _make_path_f(TREADMILL)
path.event = _make_path_f(EVENTS)
path.identity_group = _make_path_f(IDENTITY_GROUPS)
path.partition = _make_path_f(PARTITIONS)
path.placement = _make_path_f(PLACEMENT)
path.reboot = _make_path_f(REBOOTS)
path.running = _make_path_f(RUNNING)
path.scheduled = _make_path_f(SCHEDULED)
path.scheduler = _make_path_f(SCHEDULER)
path.server_presence = _make_path_f(SERVER_PRESENCE)
path.server = _make_path_f(SERVERS)
path.strategy = _make_path_f(STRATEGIES)
path.ticket_locker = _make_path_f(TICKET_LOCKER)
path.version = _make_path_f(VERSION)
path.version_id = _make_path_f(VERSION_ID)
path.zookeeper = _make_path_f(ZOOKEEPER)
path.election = _make_path_f(ELECTION)
path.finished = _make_path_f(FINISHED)
path.finished_history = _make_path_f(FINISHED_HISTORY)
path.trace_history = _make_path_f(TRACE_HISTORY)
path.trace_shard = _make_path_f(TRACE)
# Special methods
path.endpoint = _path_endpoint
path.endpoint_proid = _path_endpoint_proid
path.trace = _path_trace
| apache-2.0 | 7,355,630,578,549,305,000 | 26.496815 | 74 | 0.683577 | false |
gkc1000/pyscf | pyscf/cc/test/test_uccsd.py | 1 | 31315 | #!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import copy
import numpy
from functools import reduce
from pyscf import gto, lib
from pyscf import scf, dft
from pyscf import mp
from pyscf import cc
from pyscf import ao2mo
from pyscf.cc import uccsd
from pyscf.cc import gccsd
from pyscf.cc import addons
from pyscf.cc import uccsd_rdm
from pyscf.fci import direct_uhf
mol = gto.Mole()
mol.verbose = 7
mol.output = '/dev/null'
mol.atom = [
[8 , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]]
mol.basis = '631g'
mol.build()
rhf = scf.RHF(mol)
rhf.conv_tol_grad = 1e-8
rhf.kernel()
mf = scf.addons.convert_to_uhf(rhf)
myucc = cc.UCCSD(mf).run(conv_tol=1e-10)
mol_s2 = gto.Mole()
mol_s2.atom = [
[8 , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]]
mol_s2.basis = '631g'
mol_s2.spin = 2
mol_s2.verbose = 5
mol_s2.output = '/dev/null'
mol_s2.build()
mf_s2 = scf.UHF(mol_s2).run()
eris = uccsd.UCCSD(mf_s2).ao2mo()
def tearDownModule():
global mol, rhf, mf, myucc, mol_s2, mf_s2, eris
mol.stdout.close()
mol_s2.stdout.close()
del mol, rhf, mf, myucc, mol_s2, mf_s2, eris
class KnownValues(unittest.TestCase):
# def test_with_df(self):
# mf = scf.UHF(mol).density_fit(auxbasis='weigend').run()
# mycc = cc.UCCSD(mf).run()
# self.assertAlmostEqual(mycc.e_tot, -76.118403942938741, 7)
def test_ERIS(self):
ucc1 = cc.UCCSD(mf)
nao,nmo = mf.mo_coeff[0].shape
numpy.random.seed(1)
mo_coeff = numpy.random.random((2,nao,nmo))
eris = cc.uccsd._make_eris_incore(ucc1, mo_coeff)
self.assertAlmostEqual(lib.finger(eris.oooo), 4.9638849382825754, 11)
self.assertAlmostEqual(lib.finger(eris.ovoo),-1.3623681896983584, 11)
self.assertAlmostEqual(lib.finger(eris.ovov), 125.81550684442163, 11)
self.assertAlmostEqual(lib.finger(eris.oovv), 55.123681017639598, 11)
self.assertAlmostEqual(lib.finger(eris.ovvo), 133.48083527898248, 11)
self.assertAlmostEqual(lib.finger(eris.ovvv), 59.421927525288183, 11)
self.assertAlmostEqual(lib.finger(eris.vvvv), 43.556602622204778, 11)
self.assertAlmostEqual(lib.finger(eris.OOOO),-407.05319440524585, 11)
self.assertAlmostEqual(lib.finger(eris.OVOO), 56.284299937160796, 11)
self.assertAlmostEqual(lib.finger(eris.OVOV),-287.72899895597448, 11)
self.assertAlmostEqual(lib.finger(eris.OOVV),-85.484299959144522, 11)
self.assertAlmostEqual(lib.finger(eris.OVVO),-228.18996145476956, 11)
self.assertAlmostEqual(lib.finger(eris.OVVV),-10.715902258877399, 11)
self.assertAlmostEqual(lib.finger(eris.VVVV),-89.908425473958303, 11)
self.assertAlmostEqual(lib.finger(eris.ooOO),-336.65979260175226, 11)
self.assertAlmostEqual(lib.finger(eris.ovOO),-16.405125847288176, 11)
self.assertAlmostEqual(lib.finger(eris.ovOV), 231.59042209500075, 11)
self.assertAlmostEqual(lib.finger(eris.ooVV), 20.338077193028354, 11)
self.assertAlmostEqual(lib.finger(eris.ovVO), 206.48662856981386, 11)
self.assertAlmostEqual(lib.finger(eris.ovVV),-71.273249852220516, 11)
self.assertAlmostEqual(lib.finger(eris.vvVV), 172.47130671068496, 11)
self.assertAlmostEqual(lib.finger(eris.OVoo),-19.927660309103977, 11)
self.assertAlmostEqual(lib.finger(eris.OOvv),-27.761433381797019, 11)
self.assertAlmostEqual(lib.finger(eris.OVvo),-140.09648311337384, 11)
self.assertAlmostEqual(lib.finger(eris.OVvv), 40.700983950220547, 11)
uccsd.MEMORYMIN, bak = 0, uccsd.MEMORYMIN
ucc1.max_memory = 0
eris1 = ucc1.ao2mo(mo_coeff)
uccsd.MEMORYMIN = bak
self.assertAlmostEqual(abs(numpy.array(eris1.oooo)-eris.oooo).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.ovoo)-eris.ovoo).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.ovov)-eris.ovov).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.oovv)-eris.oovv).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.ovvo)-eris.ovvo).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.ovvv)-eris.ovvv).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.vvvv)-eris.vvvv).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.OOOO)-eris.OOOO).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.OVOO)-eris.OVOO).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.OVOV)-eris.OVOV).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.OOVV)-eris.OOVV).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.OVVO)-eris.OVVO).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.OVVV)-eris.OVVV).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.VVVV)-eris.VVVV).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.ooOO)-eris.ooOO).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.ovOO)-eris.ovOO).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.ovOV)-eris.ovOV).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.ooVV)-eris.ooVV).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.ovVO)-eris.ovVO).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.ovVV)-eris.ovVV).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.vvVV)-eris.vvVV).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.OVoo)-eris.OVoo).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.OOvv)-eris.OOvv).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.OVvo)-eris.OVvo).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.OVvv)-eris.OVvv).max(), 0, 11)
# Testing the complex MO integrals
def ao2mofn(mos):
if isinstance(mos, numpy.ndarray) and mos.ndim == 2:
mos = [mos]*4
nmos = [mo.shape[1] for mo in mos]
eri_mo = ao2mo.kernel(mf._eri, mos, compact=False).reshape(nmos)
return eri_mo * 1j
eris1 = cc.uccsd._make_eris_incore(ucc1, mo_coeff, ao2mofn=ao2mofn)
self.assertAlmostEqual(abs(eris1.oooo.imag-eris.oooo).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.ovoo.imag-eris.ovoo).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.ovov.imag-eris.ovov).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.oovv.imag-eris.oovv).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.ovvo.imag-eris.ovvo).max(), 0, 11)
#self.assertAlmostEqual(abs(eris1.ovvv.imag-eris.ovvv).max(), 0, 11)
#self.assertAlmostEqual(abs(eris1.vvvv.imag-eris.vvvv).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.OOOO.imag-eris.OOOO).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.OVOO.imag-eris.OVOO).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.OVOV.imag-eris.OVOV).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.OOVV.imag-eris.OOVV).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.OVVO.imag-eris.OVVO).max(), 0, 11)
#self.assertAlmostEqual(abs(eris1.OVVV.imag-eris.OVVV).max(), 0, 11)
#self.assertAlmostEqual(abs(eris1.VVVV.imag-eris.VVVV).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.ooOO.imag-eris.ooOO).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.ovOO.imag-eris.ovOO).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.ovOV.imag-eris.ovOV).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.ooVV.imag-eris.ooVV).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.ovVO.imag-eris.ovVO).max(), 0, 11)
#self.assertAlmostEqual(abs(eris1.ovVV.imag-eris.ovVV).max(), 0, 11)
#self.assertAlmostEqual(abs(eris1.vvVV.imag-eris.vvVV).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.OVoo.imag-eris.OVoo).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.OOvv.imag-eris.OOvv).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.OVvo.imag-eris.OVvo).max(), 0, 11)
#self.assertAlmostEqual(abs(eris1.OVvv.imag-eris.OVvv).max(), 0, 11)
def test_amplitudes_from_rccsd(self):
e, t1, t2 = cc.RCCSD(rhf).set(conv_tol=1e-10).kernel()
t1, t2 = myucc.amplitudes_from_rccsd(t1, t2)
self.assertAlmostEqual(abs(t1[0]-myucc.t1[0]).max(), 0, 6)
self.assertAlmostEqual(abs(t1[1]-myucc.t1[1]).max(), 0, 6)
self.assertAlmostEqual(abs(t2[0]-myucc.t2[0]).max(), 0, 6)
self.assertAlmostEqual(abs(t2[1]-myucc.t2[1]).max(), 0, 6)
self.assertAlmostEqual(abs(t2[2]-myucc.t2[2]).max(), 0, 6)
def test_uccsd_frozen(self):
ucc1 = copy.copy(myucc)
ucc1.frozen = 1
self.assertEqual(ucc1.nmo, (12,12))
self.assertEqual(ucc1.nocc, (4,4))
ucc1.frozen = [0,1]
self.assertEqual(ucc1.nmo, (11,11))
self.assertEqual(ucc1.nocc, (3,3))
ucc1.frozen = [[0,1], [0,1]]
self.assertEqual(ucc1.nmo, (11,11))
self.assertEqual(ucc1.nocc, (3,3))
ucc1.frozen = [1,9]
self.assertEqual(ucc1.nmo, (11,11))
self.assertEqual(ucc1.nocc, (4,4))
ucc1.frozen = [[1,9], [1,9]]
self.assertEqual(ucc1.nmo, (11,11))
self.assertEqual(ucc1.nocc, (4,4))
ucc1.frozen = [9,10,12]
self.assertEqual(ucc1.nmo, (10,10))
self.assertEqual(ucc1.nocc, (5,5))
ucc1.nmo = (13,12)
ucc1.nocc = (5,4)
self.assertEqual(ucc1.nmo, (13,12))
self.assertEqual(ucc1.nocc, (5,4))
    def test_uccsd_frozen_1s(self):
# Freeze 1s electrons
frozen = [[0,1], [0,1]]
ucc = cc.UCCSD(mf_s2, frozen=frozen)
ucc.diis_start_cycle = 1
ecc, t1, t2 = ucc.kernel()
self.assertAlmostEqual(ecc, -0.07414978284611283, 8)
def test_rdm(self):
nocc = 5
nvir = 7
mol = gto.M()
mf = scf.UHF(mol)
mf.mo_occ = numpy.zeros((2,nocc+nvir))
mf.mo_occ[:,:nocc] = 1
mycc = uccsd.UCCSD(mf)
def antisym(t2):
t2 = t2 - t2.transpose(0,1,3,2)
t2 = t2 - t2.transpose(1,0,2,3)
return t2
orbspin = numpy.zeros((nocc+nvir)*2, dtype=int)
orbspin[1::2] = 1
numpy.random.seed(1)
t1 = numpy.random.random((2,nocc,nvir))*.1 - .1
t2ab = numpy.random.random((nocc,nocc,nvir,nvir))*.1 - .1
t2aa = antisym(numpy.random.random((nocc,nocc,nvir,nvir))*.1 - .1)
t2bb = antisym(numpy.random.random((nocc,nocc,nvir,nvir))*.1 - .1)
t2 = (t2aa,t2ab,t2bb)
l1 = numpy.random.random((2,nocc,nvir))*.1 - .1
l2ab = numpy.random.random((nocc,nocc,nvir,nvir))*.1 - .1
l2aa = antisym(numpy.random.random((nocc,nocc,nvir,nvir))*.1 - .1)
l2bb = antisym(numpy.random.random((nocc,nocc,nvir,nvir))*.1 - .1)
l2 = (l2aa,l2ab,l2bb)
dm1a, dm1b = mycc.make_rdm1(t1, t2, l1, l2)
dm2aa, dm2ab, dm2bb = mycc.make_rdm2(t1, t2, l1, l2)
ia = orbspin == 0
ib = orbspin == 1
oa = orbspin[:nocc*2] == 0
ob = orbspin[:nocc*2] == 1
va = orbspin[nocc*2:] == 0
vb = orbspin[nocc*2:] == 1
t1 = addons.spatial2spin(t1, orbspin)
t2 = addons.spatial2spin(t2, orbspin)
l1 = addons.spatial2spin(l1, orbspin)
l2 = addons.spatial2spin(l2, orbspin)
mf1 = scf.GHF(mol)
mf1.mo_occ = numpy.zeros((nocc+nvir)*2)
mf.mo_occ[:,:nocc*2] = 1
mycc1 = gccsd.GCCSD(mf1)
dm1 = mycc1.make_rdm1(t1, t2, l1, l2)
dm2 = mycc1.make_rdm2(t1, t2, l1, l2)
self.assertAlmostEqual(abs(dm1[ia][:,ia]-dm1a).max(), 0, 9)
self.assertAlmostEqual(abs(dm1[ib][:,ib]-dm1b).max(), 0, 9)
self.assertAlmostEqual(abs(dm2[ia][:,ia][:,:,ia][:,:,:,ia]-dm2aa).max(), 0, 9)
self.assertAlmostEqual(abs(dm2[ia][:,ia][:,:,ib][:,:,:,ib]-dm2ab).max(), 0, 9)
self.assertAlmostEqual(abs(dm2[ib][:,ib][:,:,ib][:,:,:,ib]-dm2bb).max(), 0, 9)
def test_h2o_rdm(self):
mol = mol_s2
mf = mf_s2
mycc = uccsd.UCCSD(mf)
mycc.frozen = 2
ecc, t1, t2 = mycc.kernel()
l1, l2 = mycc.solve_lambda()
dm1a,dm1b = mycc.make_rdm1(t1, t2, l1, l2)
dm2aa,dm2ab,dm2bb = mycc.make_rdm2(t1, t2, l1, l2)
mo_a = mf.mo_coeff[0]
mo_b = mf.mo_coeff[1]
nmoa = mo_a.shape[1]
nmob = mo_b.shape[1]
eriaa = ao2mo.kernel(mf._eri, mo_a, compact=False).reshape([nmoa]*4)
eribb = ao2mo.kernel(mf._eri, mo_b, compact=False).reshape([nmob]*4)
eriab = ao2mo.kernel(mf._eri, (mo_a,mo_a,mo_b,mo_b), compact=False)
eriab = eriab.reshape([nmoa,nmoa,nmob,nmob])
hcore = mf.get_hcore()
h1a = reduce(numpy.dot, (mo_a.T.conj(), hcore, mo_a))
h1b = reduce(numpy.dot, (mo_b.T.conj(), hcore, mo_b))
e1 = numpy.einsum('ij,ji', h1a, dm1a)
e1+= numpy.einsum('ij,ji', h1b, dm1b)
e1+= numpy.einsum('ijkl,ijkl', eriaa, dm2aa) * .5
e1+= numpy.einsum('ijkl,ijkl', eriab, dm2ab)
e1+= numpy.einsum('ijkl,ijkl', eribb, dm2bb) * .5
e1+= mol.energy_nuc()
self.assertAlmostEqual(e1, mycc.e_tot, 7)
d1 = uccsd_rdm._gamma1_intermediates(mycc, mycc.t1, mycc.t2, mycc.l1, mycc.l2)
mycc.max_memory = 0
d2 = uccsd_rdm._gamma2_intermediates(mycc, mycc.t1, mycc.t2, mycc.l1, mycc.l2, True)
dm2 = uccsd_rdm._make_rdm2(mycc, d1, d2, with_dm1=True, with_frozen=True)
e1 = numpy.einsum('ij,ji', h1a, dm1a)
e1+= numpy.einsum('ij,ji', h1b, dm1b)
e1+= numpy.einsum('ijkl,ijkl', eriaa, dm2[0]) * .5
e1+= numpy.einsum('ijkl,ijkl', eriab, dm2[1])
e1+= numpy.einsum('ijkl,ijkl', eribb, dm2[2]) * .5
e1+= mol.energy_nuc()
self.assertAlmostEqual(e1, mycc.e_tot, 7)
def test_h4_rdm(self):
mol = gto.Mole()
mol.verbose = 0
mol.atom = [
['H', ( 1.,-1. , 0. )],
['H', ( 0.,-1. ,-1. )],
['H', ( 1.,-0.5 , 0. )],
['H', ( 0.,-1. , 1. )],
]
mol.charge = 2
mol.spin = 2
mol.basis = '6-31g'
mol.build()
mf = scf.UHF(mol).set(init_guess='1e').run(conv_tol=1e-14)
ehf0 = mf.e_tot - mol.energy_nuc()
mycc = uccsd.UCCSD(mf).run()
mycc.solve_lambda()
eri_aa = ao2mo.kernel(mf._eri, mf.mo_coeff[0])
eri_bb = ao2mo.kernel(mf._eri, mf.mo_coeff[1])
eri_ab = ao2mo.kernel(mf._eri, [mf.mo_coeff[0], mf.mo_coeff[0],
mf.mo_coeff[1], mf.mo_coeff[1]])
h1a = reduce(numpy.dot, (mf.mo_coeff[0].T, mf.get_hcore(), mf.mo_coeff[0]))
h1b = reduce(numpy.dot, (mf.mo_coeff[1].T, mf.get_hcore(), mf.mo_coeff[1]))
efci, fcivec = direct_uhf.kernel((h1a,h1b), (eri_aa,eri_ab,eri_bb),
h1a.shape[0], mol.nelec)
dm1ref, dm2ref = direct_uhf.make_rdm12s(fcivec, h1a.shape[0], mol.nelec)
t1, t2 = mycc.t1, mycc.t2
l1, l2 = mycc.l1, mycc.l2
rdm1 = mycc.make_rdm1(t1, t2, l1, l2)
rdm2 = mycc.make_rdm2(t1, t2, l1, l2)
self.assertAlmostEqual(abs(dm1ref[0] - rdm1[0]).max(), 0, 6)
self.assertAlmostEqual(abs(dm1ref[1] - rdm1[1]).max(), 0, 6)
self.assertAlmostEqual(abs(dm2ref[0] - rdm2[0]).max(), 0, 6)
self.assertAlmostEqual(abs(dm2ref[1] - rdm2[1]).max(), 0, 6)
self.assertAlmostEqual(abs(dm2ref[2] - rdm2[2]).max(), 0, 6)
def test_eris_contract_vvvv_t2(self):
mol = gto.Mole()
nocca, noccb, nvira, nvirb = 5, 4, 12, 13
nvira_pair = nvira*(nvira+1)//2
nvirb_pair = nvirb*(nvirb+1)//2
numpy.random.seed(9)
t2 = numpy.random.random((nocca,noccb,nvira,nvirb))
eris = uccsd._ChemistsERIs()
eris.vvVV = numpy.random.random((nvira_pair,nvirb_pair))
eris.mol = mol
myucc.max_memory, bak = 0, myucc.max_memory
vt2 = eris._contract_vvVV_t2(myucc, t2, eris.vvVV)
myucc.max_memory = bak
self.assertAlmostEqual(lib.finger(vt2), 12.00904827896089, 11)
idxa = lib.square_mat_in_trilu_indices(nvira)
idxb = lib.square_mat_in_trilu_indices(nvirb)
vvVV = eris.vvVV[:,idxb][idxa]
ref = lib.einsum('acbd,ijcd->ijab', vvVV, t2)
self.assertAlmostEqual(abs(vt2 - ref).max(), 0, 11)
# _contract_VVVV_t2, testing complex and real mixed contraction
VVVV =(numpy.random.random((nvirb,nvirb,nvirb,nvirb)) +
numpy.random.random((nvirb,nvirb,nvirb,nvirb))*1j - (.5+.5j))
VVVV = VVVV + VVVV.transpose(1,0,3,2).conj()
VVVV = VVVV + VVVV.transpose(2,3,0,1)
eris.VVVV = VVVV
t2 = numpy.random.random((noccb,noccb,nvirb,nvirb))
t2 = t2 - t2.transpose(0,1,3,2)
t2 = t2 - t2.transpose(1,0,3,2)
myucc.max_memory, bak = 0, myucc.max_memory
vt2 = eris._contract_VVVV_t2(myucc, t2, eris.VVVV)
myucc.max_memory = bak
self.assertAlmostEqual(lib.finger(vt2), 47.903883794299404-50.501573400833429j, 11)
ref = lib.einsum('acbd,ijcd->ijab', eris.VVVV, t2)
self.assertAlmostEqual(abs(vt2 - ref).max(), 0, 11)
def test_update_amps1(self):
mf = scf.UHF(mol_s2)
numpy.random.seed(9)
nmo = mf_s2.mo_occ[0].size
mf.mo_coeff = numpy.random.random((2,nmo,nmo)) - 0.5
mf.mo_occ = numpy.zeros((2,nmo))
mf.mo_occ[0,:6] = 1
mf.mo_occ[1,:5] = 1
mycc = uccsd.UCCSD(mf)
nocca, noccb = 6, 5
nvira, nvirb = nmo-nocca, nmo-noccb
nvira_pair = nvira*(nvira+1)//2
nvirb_pair = nvirb*(nvirb+1)//2
eris = mycc.ao2mo()
fakeris = uccsd._ChemistsERIs()
fakeris.mo_coeff = eris.mo_coeff
fakeris.vvVV = eris.vvVV
fakeris.mol = mol_s2
t2ab = numpy.random.random((nocca,noccb,nvira,nvirb))
t1a = numpy.zeros((nocca,nvira))
t1b = numpy.zeros((noccb,nvirb))
self.assertAlmostEqual(lib.finger(mycc._add_vvVV(None, t2ab, fakeris)), 21.652482203108928, 9)
fakeris.vvVV = None
mycc.direct = True
mycc.max_memory = 0
self.assertAlmostEqual(lib.finger(mycc._add_vvVV(None, t2ab, fakeris)), 21.652482203108928, 9)
t1 = (numpy.random.random((nocca,nvira)), numpy.random.random((noccb,nvirb)))
t2 = (numpy.random.random((nocca,nocca,nvira,nvira)),
numpy.random.random((nocca,noccb,nvira,nvirb)),
numpy.random.random((noccb,noccb,nvirb,nvirb)))
t1, t2 = mycc.vector_to_amplitudes(mycc.amplitudes_to_vector(t1, t2))
t1, t2 = mycc.update_amps(t1, t2, eris)
self.assertAlmostEqual(lib.finger(t1[0]), 49.912690337392938, 10)
self.assertAlmostEqual(lib.finger(t1[1]), 74.596097348134776, 10)
self.assertAlmostEqual(lib.finger(t2[0]), -41.784696524955393, 10)
self.assertAlmostEqual(lib.finger(t2[1]), -9675.7677695314342, 7)
self.assertAlmostEqual(lib.finger(t2[2]), 270.75447826471577, 8)
self.assertAlmostEqual(lib.finger(mycc.amplitudes_to_vector(t1, t2)), 4341.9623137256776, 6)
def test_vector_to_amplitudes(self):
t1, t2 = myucc.vector_to_amplitudes(myucc.amplitudes_to_vector(myucc.t1, myucc.t2))
self.assertAlmostEqual(abs(t1[0]-myucc.t1[0]).max(), 0, 12)
self.assertAlmostEqual(abs(t1[1]-myucc.t1[1]).max(), 0, 12)
self.assertAlmostEqual(abs(t2[0]-myucc.t2[0]).max(), 0, 12)
self.assertAlmostEqual(abs(t2[1]-myucc.t2[1]).max(), 0, 12)
self.assertAlmostEqual(abs(t2[2]-myucc.t2[2]).max(), 0, 12)
def test_vector_size(self):
self.assertEqual(myucc.vector_size(), 2240)
def test_update_amps2(self): # compare to gccsd.update_amps
mol = mol_s2
mf = mf_s2
myucc = uccsd.UCCSD(mf)
nocca, noccb = 6,4
nmo = mol.nao_nr()
nvira,nvirb = nmo-nocca, nmo-noccb
numpy.random.seed(9)
t1 = [numpy.random.random((nocca,nvira))-.9,
numpy.random.random((noccb,nvirb))-.9]
t2 = [numpy.random.random((nocca,nocca,nvira,nvira))-.9,
numpy.random.random((nocca,noccb,nvira,nvirb))-.9,
numpy.random.random((noccb,noccb,nvirb,nvirb))-.9]
t2[0] = t2[0] - t2[0].transpose(1,0,2,3)
t2[0] = t2[0] - t2[0].transpose(0,1,3,2)
t2[2] = t2[2] - t2[2].transpose(1,0,2,3)
t2[2] = t2[2] - t2[2].transpose(0,1,3,2)
mo_a = mf.mo_coeff[0] + numpy.sin(mf.mo_coeff[0]) * .01j
mo_b = mf.mo_coeff[1] + numpy.sin(mf.mo_coeff[1]) * .01j
nao = mo_a.shape[0]
eri = ao2mo.restore(1, mf._eri, nao)
eri0aa = lib.einsum('pqrs,pi,qj,rk,sl->ijkl', eri, mo_a.conj(), mo_a, mo_a.conj(), mo_a)
eri0ab = lib.einsum('pqrs,pi,qj,rk,sl->ijkl', eri, mo_a.conj(), mo_a, mo_b.conj(), mo_b)
eri0bb = lib.einsum('pqrs,pi,qj,rk,sl->ijkl', eri, mo_b.conj(), mo_b, mo_b.conj(), mo_b)
eri0ba = eri0ab.transpose(2,3,0,1)
nvira = nao - nocca
nvirb = nao - noccb
eris = uccsd._ChemistsERIs(mol)
eris.oooo = eri0aa[:nocca,:nocca,:nocca,:nocca].copy()
eris.ovoo = eri0aa[:nocca,nocca:,:nocca,:nocca].copy()
eris.oovv = eri0aa[:nocca,:nocca,nocca:,nocca:].copy()
eris.ovvo = eri0aa[:nocca,nocca:,nocca:,:nocca].copy()
eris.ovov = eri0aa[:nocca,nocca:,:nocca,nocca:].copy()
eris.ovvv = eri0aa[:nocca,nocca:,nocca:,nocca:].copy()
eris.vvvv = eri0aa[nocca:,nocca:,nocca:,nocca:].copy()
eris.OOOO = eri0bb[:noccb,:noccb,:noccb,:noccb].copy()
eris.OVOO = eri0bb[:noccb,noccb:,:noccb,:noccb].copy()
eris.OOVV = eri0bb[:noccb,:noccb,noccb:,noccb:].copy()
eris.OVVO = eri0bb[:noccb,noccb:,noccb:,:noccb].copy()
eris.OVOV = eri0bb[:noccb,noccb:,:noccb,noccb:].copy()
eris.OVVV = eri0bb[:noccb,noccb:,noccb:,noccb:].copy()
eris.VVVV = eri0bb[noccb:,noccb:,noccb:,noccb:].copy()
eris.ooOO = eri0ab[:nocca,:nocca,:noccb,:noccb].copy()
eris.ovOO = eri0ab[:nocca,nocca:,:noccb,:noccb].copy()
eris.ooVV = eri0ab[:nocca,:nocca,noccb:,noccb:].copy()
eris.ovVO = eri0ab[:nocca,nocca:,noccb:,:noccb].copy()
eris.ovOV = eri0ab[:nocca,nocca:,:noccb,noccb:].copy()
eris.ovVV = eri0ab[:nocca,nocca:,noccb:,noccb:].copy()
eris.vvVV = eri0ab[nocca:,nocca:,noccb:,noccb:].copy()
eris.OOoo = eri0ba[:noccb,:noccb,:nocca,:nocca].copy()
eris.OVoo = eri0ba[:noccb,noccb:,:nocca,:nocca].copy()
eris.OOvv = eri0ba[:noccb,:noccb,nocca:,nocca:].copy()
eris.OVvo = eri0ba[:noccb,noccb:,nocca:,:nocca].copy()
eris.OVov = eri0ba[:noccb,noccb:,:nocca,nocca:].copy()
eris.OVvv = eri0ba[:noccb,noccb:,nocca:,nocca:].copy()
eris.VVvv = eri0ba[noccb:,noccb:,nocca:,nocca:].copy()
eris.focka = numpy.diag(mf.mo_energy[0])
eris.fockb = numpy.diag(mf.mo_energy[1])
eris.mo_energy = mf.mo_energy
t1[0] = t1[0] + numpy.sin(t1[0]) * .05j
t1[1] = t1[1] + numpy.sin(t1[1]) * .05j
t2[0] = t2[0] + numpy.sin(t2[0]) * .05j
t2[1] = t2[1] + numpy.sin(t2[1]) * .05j
t2[2] = t2[2] + numpy.sin(t2[2]) * .05j
t1new_ref, t2new_ref = uccsd.update_amps(myucc, t1, t2, eris)
nocc = nocca + noccb
orbspin = numpy.zeros(nao*2, dtype=int)
orbspin[1::2] = 1
orbspin[nocc-1] = 0
orbspin[nocc ] = 1
eri1 = numpy.zeros([nao*2]*4, dtype=numpy.complex)
idxa = numpy.where(orbspin == 0)[0]
idxb = numpy.where(orbspin == 1)[0]
eri1[idxa[:,None,None,None],idxa[:,None,None],idxa[:,None],idxa] = eri0aa
eri1[idxa[:,None,None,None],idxa[:,None,None],idxb[:,None],idxb] = eri0ab
eri1[idxb[:,None,None,None],idxb[:,None,None],idxa[:,None],idxa] = eri0ba
eri1[idxb[:,None,None,None],idxb[:,None,None],idxb[:,None],idxb] = eri0bb
eri1 = eri1.transpose(0,2,1,3) - eri1.transpose(0,2,3,1)
erig = gccsd._PhysicistsERIs()
erig.oooo = eri1[:nocc,:nocc,:nocc,:nocc].copy()
erig.ooov = eri1[:nocc,:nocc,:nocc,nocc:].copy()
erig.ovov = eri1[:nocc,nocc:,:nocc,nocc:].copy()
erig.ovvo = eri1[:nocc,nocc:,nocc:,:nocc].copy()
erig.oovv = eri1[:nocc,:nocc,nocc:,nocc:].copy()
erig.ovvv = eri1[:nocc,nocc:,nocc:,nocc:].copy()
erig.vvvv = eri1[nocc:,nocc:,nocc:,nocc:].copy()
mo_e = numpy.empty(nao*2)
mo_e[orbspin==0] = mf.mo_energy[0]
mo_e[orbspin==1] = mf.mo_energy[1]
erig.fock = numpy.diag(mo_e)
erig.mo_energy = mo_e.real
myccg = gccsd.GCCSD(scf.addons.convert_to_ghf(mf))
t1 = myccg.spatial2spin(t1, orbspin)
t2 = myccg.spatial2spin(t2, orbspin)
t1new, t2new = gccsd.update_amps(myccg, t1, t2, erig)
t1new = myccg.spin2spatial(t1new, orbspin)
t2new = myccg.spin2spatial(t2new, orbspin)
self.assertAlmostEqual(abs(t1new[0] - t1new_ref[0]).max(), 0, 12)
self.assertAlmostEqual(abs(t1new[1] - t1new_ref[1]).max(), 0, 12)
self.assertAlmostEqual(abs(t2new[0] - t2new_ref[0]).max(), 0, 12)
self.assertAlmostEqual(abs(t2new[1] - t2new_ref[1]).max(), 0, 12)
self.assertAlmostEqual(abs(t2new[2] - t2new_ref[2]).max(), 0, 12)
def test_mbpt2(self):
myucc = uccsd.UCCSD(mf)
e = myucc.kernel(mbpt2=True)[0]
self.assertAlmostEqual(e, -0.12886859466216125, 10)
emp2 = mp.MP2(mf).kernel()[0]
self.assertAlmostEqual(e, emp2, 10)
myucc = uccsd.UCCSD(mf_s2)
e = myucc.kernel(mbpt2=True)[0]
self.assertAlmostEqual(e, -0.096257842171487293, 10)
emp2 = mp.MP2(mf_s2).kernel()[0]
self.assertAlmostEqual(e, emp2, 10)
def test_uintermediats(self):
from pyscf.cc import uintermediates
self.assertTrue(eris.get_ovvv().ndim == 4)
self.assertTrue(eris.get_ovVV().ndim == 4)
self.assertTrue(eris.get_OVvv().ndim == 4)
self.assertTrue(eris.get_OVVV().ndim == 4)
self.assertTrue(eris.get_ovvv(slice(None), slice(2,4)).ndim == 4)
self.assertTrue(eris.get_ovVV(slice(None), slice(2,4)).ndim == 4)
self.assertTrue(eris.get_OVvv(slice(None), slice(2,4)).ndim == 4)
self.assertTrue(eris.get_OVVV(slice(None), slice(2,4)).ndim == 4)
self.assertTrue(uintermediates._get_vvvv(eris).ndim == 4)
self.assertTrue(uintermediates._get_vvVV(eris).ndim == 4)
self.assertTrue(uintermediates._get_VVVV(eris).ndim == 4)
def test_add_vvvv(self):
myucc = uccsd.UCCSD(mf_s2)
nocca, noccb = 6,4
nmo = mf_s2.mo_occ[0].size
nvira, nvirb = nmo-nocca, nmo-noccb
numpy.random.seed(9)
t1 = [numpy.zeros((nocca,nvira)),
numpy.zeros((noccb,nvirb))]
t2 = [numpy.random.random((nocca,nocca,nvira,nvira))-.9,
numpy.random.random((nocca,noccb,nvira,nvirb))-.9,
numpy.random.random((noccb,noccb,nvirb,nvirb))-.9]
t2[0] = t2[0] - t2[0].transpose(1,0,2,3)
t2[0] = t2[0] - t2[0].transpose(0,1,3,2)
t2[2] = t2[2] - t2[2].transpose(1,0,2,3)
t2[2] = t2[2] - t2[2].transpose(0,1,3,2)
eris1 = copy.copy(eris)
idxa = lib.square_mat_in_trilu_indices(nvira)
idxb = lib.square_mat_in_trilu_indices(nvirb)
ref =(lib.einsum('acbd,ijcd->ijab', eris1.vvvv[:,idxa][idxa], t2[0]),
lib.einsum('acbd,ijcd->ijab', eris1.vvVV[:,idxb][idxa], t2[1]),
lib.einsum('acbd,ijcd->ijab', eris1.VVVV[:,idxb][idxb], t2[2]))
t2a = myucc._add_vvvv((t1[0]*0,t1[1]*0), t2, eris, t2sym=False)
self.assertAlmostEqual(abs(ref[0]-t2a[0]).max(), 0, 12)
self.assertAlmostEqual(abs(ref[1]-t2a[1]).max(), 0, 12)
self.assertAlmostEqual(abs(ref[2]-t2a[2]).max(), 0, 12)
myucc.direct = True
eris1.vvvv = None # == with_ovvv=True in the call below
eris1.VVVV = None
eris1.vvVV = None
t1 = None
myucc.mo_coeff, eris1.mo_coeff = eris1.mo_coeff, None
t2b = myucc._add_vvvv(t1, t2, eris1)
self.assertAlmostEqual(abs(ref[0]-t2b[0]).max(), 0, 12)
self.assertAlmostEqual(abs(ref[1]-t2b[1]).max(), 0, 12)
self.assertAlmostEqual(abs(ref[2]-t2b[2]).max(), 0, 12)
def test_add_vvVV(self):
myucc = uccsd.UCCSD(mf_s2)
nocca, noccb = 6,4
nmo = mf_s2.mo_occ[0].size
nvira, nvirb = nmo-nocca, nmo-noccb
numpy.random.seed(9)
t1 = [numpy.zeros((nocca,nvira)),
numpy.zeros((noccb,nvirb))]
t2 = [numpy.random.random((nocca,nocca,nvira,nvira))-.9,
numpy.random.random((nocca,noccb,nvira,nvirb))-.9,
numpy.random.random((noccb,noccb,nvirb,nvirb))-.9]
t2[0] = t2[0] - t2[0].transpose(1,0,2,3)
t2[0] = t2[0] - t2[0].transpose(0,1,3,2)
t2[2] = t2[2] - t2[2].transpose(1,0,2,3)
t2[2] = t2[2] - t2[2].transpose(0,1,3,2)
eris1 = copy.copy(eris)
idxa = lib.square_mat_in_trilu_indices(nvira)
idxb = lib.square_mat_in_trilu_indices(nvirb)
vvVV = eris1.vvVV[:,idxb][idxa]
ref = lib.einsum('acbd,ijcd->ijab', vvVV, t2[1])
t2a = myucc._add_vvVV((t1[0]*0,t1[1]*0), t2[1], eris)
self.assertAlmostEqual(abs(ref-t2a).max(), 0, 12)
myucc.direct = True
eris1.vvvv = None # == with_ovvv=True in the call below
eris1.VVVV = None
eris1.vvVV = None
t1 = None
myucc.mo_coeff, eris1.mo_coeff = eris1.mo_coeff, None
t2b = myucc._add_vvVV(t1, t2[1], eris1)
self.assertAlmostEqual(abs(ref-t2b).max(), 0, 12)
def test_zero_beta_electrons(self):
mol = gto.M(atom='H', basis=('631g', [[0, (.2, 1)], [0, (.5, 1)]]),
spin=1, verbose=0)
mf = scf.UHF(mol).run()
mycc = uccsd.UCCSD(mf).run()
self.assertAlmostEqual(mycc.e_corr, 0, 9)
mol = gto.M(atom='He', basis=('631g', [[0, (.2, 1)], [0, (.5, 1)]]),
spin=2, verbose=0)
mf = scf.UHF(mol).run()
mycc = uccsd.UCCSD(mf).run()
self.assertAlmostEqual(mycc.e_corr, -2.6906675843462455e-05, 9)
self.assertEqual(mycc.t1[1].size, 0)
self.assertEqual(mycc.t2[1].size, 0)
self.assertEqual(mycc.t2[2].size, 0)
def test_reset(self):
mycc = cc.CCSD(scf.UHF(mol).newton())
mycc.reset(mol_s2)
self.assertTrue(mycc.mol is mol_s2)
self.assertTrue(mycc._scf.mol is mol_s2)
if __name__ == "__main__":
print("Full Tests for UCCSD")
unittest.main()
| apache-2.0 | 1,183,927,104,912,183,600 | 45.949025 | 102 | 0.594539 | false |
ujjwal96/mitmproxy | mitmproxy/utils/arg_check.py | 3 | 3972 | import sys
DEPRECATED = """
--confdir
-Z
--body-size-limit
--stream
--palette
--palette-transparent
--follow
--order
--no-mouse
--reverse
--http2-priority
--no-http2-priority
--no-websocket
--websocket
--spoof-source-address
--upstream-bind-address
--ciphers-client
--ciphers-server
--client-certs
--no-upstream-cert
--add-upstream-certs-to-client-chain
--upstream-trusted-confdir
--upstream-trusted-ca
--ssl-version-client
--ssl-version-server
--no-onboarding
--onboarding-host
--onboarding-port
--server-replay-use-header
--no-pop
--replay-ignore-content
--replay-ignore-payload-param
--replay-ignore-param
--replay-ignore-host
--replace-from-file
"""
REPLACED = """
-t
-u
--wfile
-a
--afile
-z
-b
--bind-address
--port
-I
--ignore
--tcp
--cert
--insecure
-c
--replace
-i
-f
--filter
--socks
"""
REPLACEMENTS = {
"--stream": "stream_large_bodies",
"--palette": "console_palette",
"--palette-transparent": "console_palette_transparent:",
"--follow": "console_focus_follow",
"--order": "view_order",
"--no-mouse": "console_mouse",
"--reverse": "view_order_reversed",
"--no-http2-priority": "http2_priority",
"--no-websocket": "websocket",
"--no-upstream-cert": "upstream_cert",
"--upstream-trusted-confdir": "ssl_verify_upstream_trusted_confdir",
"--upstream-trusted-ca": "ssl_verify_upstream_trusted_ca",
"--no-onboarding": "onboarding",
"--no-pop": "server_replay_nopop",
"--replay-ignore-content": "server_replay_ignore_content",
"--replay-ignore-payload-param": "server_replay_ignore_payload_params",
"--replay-ignore-param": "server_replay_ignore_params",
"--replay-ignore-host": "server_replay_ignore_host",
"--replace-from-file": "replacements (use @ to specify path)",
"-t": "--stickycookie",
"-u": "--stickyauth",
"--wfile": "--save-stream-file",
"-a": "-w Prefix path with + to append.",
"--afile": "-w Prefix path with + to append.",
"-z": "--anticomp",
"-b": "--listen-host",
"--bind-address": "--listen-host",
"--port": "--listen-port",
"-I": "--ignore-hosts",
"--ignore": "--ignore-hosts",
"--tcp": "--tcp-hosts",
"--cert": "--certs",
"--insecure": "--ssl-insecure",
"-c": "-C",
"--replace": "--replacements",
"-i": "--intercept",
"-f": "--view-filter",
"--filter": "--view-filter",
"--socks": "--mode socks5"
}
def check():
args = sys.argv[1:]
print()
if "-U" in args:
print("-U is deprecated, please use --mode upstream:SPEC instead")
if "-T" in args:
print("-T is deprecated, please use --mode transparent instead")
for option in ("-e", "--eventlog", "--norefresh"):
if option in args:
print("{} has been removed.".format(option))
for option in ("--nonanonymous", "--singleuser", "--htpasswd"):
if option in args:
print(
'{} is deprecated.\n'
'Please use `--proxyauth SPEC` instead.\n'
'SPEC Format: "username:pass", "any" to accept any user/pass combination,\n'
'"@path" to use an Apache htpasswd file, or\n'
'"ldap[s]:url_server_ldap:dn_auth:password:dn_subtree" '
'for LDAP authentication.'.format(option))
for option in REPLACED.splitlines():
if option in args:
print(
"{} is deprecated.\n"
"Please use `{}` instead.".format(
option,
REPLACEMENTS.get(option)
)
)
for option in DEPRECATED.splitlines():
if option in args:
print(
"{} is deprecated.\n"
"Please use `--set {}=value` instead.\n"
"To show all options and their default values use --options".format(
option,
REPLACEMENTS.get(option, None) or option.lstrip("-").replace("-", "_")
)
)
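# Illustrative usage sketch (not part of the original module): check() inspects
# sys.argv and prints migration hints for deprecated/replaced flags; it is meant
# to run before regular option parsing. The command line below is hypothetical.
#
#   import sys
#   sys.argv = ['mitmdump', '--palette', 'dark']
#   check()   # prints that --palette is deprecated and suggests
#             # `--set console_palette=value`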
| mit | 4,166,546,711,444,165,600 | 25.657718 | 92 | 0.568983 | false |
tomokinakamaru/perth | perth/perth.py | 1 | 1057 | # coding:utf-8
import copy
import threading
class Perth(object):
def __init__(self, seeds={}):
self.__dict__['_threadlocal'] = threading.local()
self.__dict__['_seeds'] = {}
self._seeds.update(seeds)
def get_seed(self, name):
return self._seeds[name]
def set_seed(self, name, v):
self._seeds[name] = v
def set_seed_f(self, f):
self._seeds[f.__name__] = f()
def remove_seed(self, name):
del self._seeds[name]
def __getattr__(self, name):
if hasattr(self._threadlocal, name):
return getattr(self._threadlocal, name)
else:
if name in self._seeds:
obj = copy.deepcopy(self._seeds[name])
setattr(self._threadlocal, name, obj)
return obj
else:
return super(Perth, self).__getattribute__(name)
def __setattr__(self, name, value):
setattr(self._threadlocal, name, value)
def __delattr__(self, name):
delattr(self._threadlocal, name)
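# Illustrative usage sketch (not part of the original module): attribute access on
# a Perth instance is thread-local, and the first access in a thread deep-copies
# the registered seed value for that thread.
#
#   shared = Perth(seeds={'counter': 0})
#   shared.counter += 1          # mutates only the current thread's copy
#   shared.set_seed('cache', {})
#   shared.cache['key'] = 'value'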
| mit | -2,699,648,459,719,007,000 | 24.166667 | 64 | 0.543046 | false |
dbarenas/Katja-hop | web/rest-test6/tutorial/candidates/api/resources.py | 1 | 6098 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf.urls import patterns, url
from restless.dj import DjangoResource
from candidates.models import Candidate
from core.api.dj import DjangoAngularResource
from core.api.helpers import from_data_to_instance
from core.restless.preparers import FieldsNullPreparer
from jobs.api.resources import AcademicBackgroundResource, LanguageBackgroundResource, ExperienceBackgroundResource, \
DesiredProfessionResource, SkillBackgroundResource, VolunteeringBackgroundResource, HobbyBackgroundResource, \
PortfolioBackgroundResource, ExperienceJobBackgroundResource, DrivingLicenseResource, \
DrivingLicenseBackgroundResource
class CandidateResource(DjangoAngularResource):
preparer = FieldsNullPreparer(fields={
"id": "id",
"freelance": "freelance",
"user_id": "user.id",
"nationality": "nationality",
"country": "country",
"state": "state",
"city": "city",
"address": "address",
"postcode": "postcode",
"gender": "gender",
"extra_phone": "extra_phone",
"disability_situation": "disability_situation.pk",
"disability_grade": "disability_grade.pk",
"disability_certification": "disability_certification",
"objective": "objective",
"own_vehicle": "own_vehicle",
"minimum_salary_range": "minimum_salary_range.pk",
"internship": "internship",
"mobility": "mobility",
"employment_status": "employment_status.pk",
"availability": "availability.pk",
"unemployment_card": "unemployment_card",
"academic_backgrounds": "academic_backgrounds",
"language_backgrounds": "language_backgrounds",
"experience_backgrounds": "experience_backgrounds",
"desired_professions": "desired_professions",
"skill_backgrounds": "skill_backgrounds",
"portfolio_backgrounds": "portfolio_backgrounds",
"volunteering_backgrounds": "volunteering_backgrounds",
"hobby_backgrounds": "hobby_backgrounds",
"driving_licenses": "driving_licenses",
"cover_letter": "cover_letter",
"picture": "picture",
"hidden": "hidden",
"birth_date": "birth_date",
})
def is_authenticated(self):
return self.request.user.is_authenticated()
@classmethod
def urls(cls, name_prefix=None):
return patterns(
'',
url(r'^$', cls.as_detail(), name=cls.build_url_name('detail', name_prefix)),
)
def detail(self):
user = self.request.user
return Candidate.objects.get(user=user)
def update(self):
"""Create works as an update of the current candidate.
:return:
"""
user = self.request.user
candidate = Candidate.objects.get(user=user)
candidate = from_data_to_instance(Candidate, self.data, candidate)
if self.data.get("hidden") is not None:
candidate.hidden = bool(int(self.data.get("hidden")))
candidate.save()
return candidate
@staticmethod
def _prepare_m2m(field, resource_class):
resource = resource_class()
serialized_fields = list()
for item in field.all():
serialized_fields.append(resource.preparer.prepare(item))
return serialized_fields
@staticmethod
def _prepare_experience_backgrounds(field):
resource = ExperienceBackgroundResource()
jobs_resource = ExperienceJobBackgroundResource()
serialized_fields = list()
for item in field.all():
serialized_item = resource.preparer.prepare(item)
serialized_item["jobs"] = list()
for job in item.jobs.all():
serialized_item["jobs"].append(jobs_resource.preparer.prepare(job))
serialized_fields.append(serialized_item)
return serialized_fields
def prepare(self, data):
prepped = super(CandidateResource, self).prepare(data)
prepped['academic_backgrounds'] = self._prepare_m2m(prepped['academic_backgrounds'], AcademicBackgroundResource)
prepped['language_backgrounds'] = self._prepare_m2m(prepped['language_backgrounds'], LanguageBackgroundResource)
prepped['experience_backgrounds'] = self._prepare_experience_backgrounds(prepped['experience_backgrounds'])
prepped['desired_professions'] = self._prepare_m2m(prepped['desired_professions'], DesiredProfessionResource)
prepped['skill_backgrounds'] = self._prepare_m2m(prepped['skill_backgrounds'], SkillBackgroundResource)
prepped['portfolio_backgrounds'] = self._prepare_m2m(prepped['portfolio_backgrounds'], PortfolioBackgroundResource)
prepped['volunteering_backgrounds'] = self._prepare_m2m(prepped['volunteering_backgrounds'], VolunteeringBackgroundResource)
prepped['hobby_backgrounds'] = self._prepare_m2m(prepped['hobby_backgrounds'], HobbyBackgroundResource)
prepped['driving_licenses'] = self._prepare_m2m(prepped['driving_licenses'], DrivingLicenseBackgroundResource)
prepped['cover_letter'] = {
"url": prepped['cover_letter'].url if prepped['cover_letter'] else None
}
prepped['picture'] = {
"type": "image",
"urls": {
"original": prepped['picture'].url if prepped['picture'] else None,
"preview": prepped['picture'].get_thumbnail(
{'size': (109, 109), 'crop': 'smart', 'upscale': True, 'quality': 90}
).url if prepped['picture'] else None
}
}
prepped['hidden'] = int(prepped['hidden'])
prepped['internship'] = int(prepped['internship'])
prepped['unemployment_card'] = int(prepped['unemployment_card'])
prepped['disability_certification'] = int(prepped['disability_certification'])
prepped['mobility'] = int(prepped['mobility'])
prepped['own_vehicle'] = int(prepped['own_vehicle'])
prepped['freelance'] = int(prepped['freelance'])
return prepped | gpl-2.0 | 7,765,768,127,108,530,000 | 44.514925 | 132 | 0.652181 | false |
bxshi/gem5 | src/mem/slicc/symbols/Action.py | 92 | 1911 | # Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from slicc.symbols.Symbol import Symbol
class Action(Symbol):
def __init__(self, table, ident, resources, location, pairs):
super(Action, self).__init__(table, ident, location, pairs)
self.resources = resources
def __repr__(self):
return "[Action: %s]" % self.ident
__all__ = [ "Action" ]
| bsd-3-clause | -6,480,456,919,841,365,000 | 49.289474 | 72 | 0.770801 | false |
NarlikarLab/DIVERSITY | weblogoMod/corebio/db/astral.py | 2 | 12519 |
# Copyright 2000 by Jeffrey Chang. All rights reserved.
# Copyright 2001 by Gavin E. Crooks. All rights reserved.
# Modifications Copyright 2004/2005 James Casbon.
# Copyright 2005 by Regents of the University of California. All rights reserved
# (Major rewrite for conformance to corebio. Gavin Crooks)
#
# This code is derived from the Biopython distribution and is governed by it's
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""ASTRAL: Compendium for Sequence and Structure Analysis.
The ASTRAL compendium provides databases and tools useful for analyzing protein structures and their sequences. It is partially derived from, and augments the SCOP: Structural Classification of Proteins database. Most of the resources depend upon the coordinate files maintained and distributed by the Protein Data Bank.
Ref:
http://astral.berkeley.edu/
* Classes :
- Raf -- A file of ASTRAL RAF (Rapid Access Format) Sequence Maps.
- RafSeqMap -- A sequence map, a RAF record.
- Res -- A single residue mapping from a RAF record.
* Functions :
- parse_domain -- Convert an ASTRAL fasta header string into a Scop domain.
- normalize_letters -- Normalize RAF amino acid codes.
"""
import re
from copy import copy
from corebio.db.scop import Domain, Residues
from corebio.data import extended_three_to_one as to_one_letter_code
from corebio.utils import FileIndex
__all__ = ('astral_evalues', 'astral_percent_identities',
'astral_evalues_filenames', 'normalize_letters', 'parse_domain',
'Raf', 'RafSeqMap', 'Res')
# Percentage identity filtered ASTRAL SCOP genetic domain sequence subset
astral_percent_identities = [10,20,25,30,35,40,50,70,90,95,100]
# E-value filtered ASTRAL SCOP genetic domain sequence subsets, based on PDB SEQRES records.
astral_evalues = [10, 5, 1, 0.5, 0.1, 0.05, 0.01, 0.005, 0.001, 1e-4, 1e-5, 1e-10, 1e-15,1e-20, 1e-25, 1e-50]
# A map between evalues and astral filename suffixes.
astral_evalues_filenames = {
10: 'e+1', 5: 'e+0,7', 1: 'e+0', 0.5: 'e-0,3', 0.1: 'e-1',
0.05: 'e-1,3', 0.01: 'e-2', 0.005: 'e-2,3', 0.001: 'e-3',
1e-4: 'e-4', 1e-5: 'e-5', 1e-10: 'e-10', 1e-15: 'e-15',
1e-20: 'e-20', 1e-25: 'e-25', 1e-50: 'e-50' }
def normalize_letters(one_letter_code) :
"""Convert RAF one-letter amino acid codes into IUPAC standard codes.
Letters are uppercased, and "." ("Unknown") is converted to "X".
"""
if one_letter_code == '.' :
return 'X'
else :
return one_letter_code.upper()
_domain_re = re.compile(r">?([\w_\.]*)\s+([\w\.]*)\s+\(([^)]*)\) (.*)")
def parse_domain(str) :
"""Convert an ASTRAL fasta header string into a SCOP domain.
An ASTRAL (http://astral.stanford.edu/) header contains a concise
description of a SCOP domain. A very similar format is used when a
Domain object is converted into a string. The Domain returned by this
method contains most of the SCOP information, but it will not be located
within the SCOP hierarchy (i.e. the parent node will be None). The
description is composed of the SCOP protein and species descriptions.
A typical ASTRAL header looks like --
>d1tpt_1 a.46.2.1 (1-70) Thymidine phosphorylase {Escherichia coli}
"""
m = _domain_re.match(str)
if (not m) : raise ValueError("Domain: "+ str)
dom = Domain()
dom.sid = m.group(1)
dom.sccs = m.group(2)
dom.residues = Residues(m.group(3))
if not dom.residues.pdbid :
dom.residues.pdbid= dom.sid[1:5]
dom.description = m.group(4).strip()
return dom
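# Illustrative usage sketch (not part of the original module), reusing the header
# quoted in the docstring above; the attribute values shown are what the regex
# groups yield for that input.
#
#   dom = parse_domain(">d1tpt_1 a.46.2.1 (1-70) Thymidine phosphorylase {Escherichia coli}")
#   dom.sid          # 'd1tpt_1'
#   dom.sccs         # 'a.46.2.1'
#   dom.residues     # Residues('1-70') with pdbid filled in from sid ('1tpt')
#   dom.description  # 'Thymidine phosphorylase {Escherichia coli}'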
class Raf(FileIndex) :
"""ASTRAL RAF (Rapid Access Format) Sequence Maps.
The ASTRAL RAF Sequence Maps record the relationship between the PDB SEQRES
records (representing the sequence of the molecule used in an experiment)
and the ATOM records (representing the atoms experimentally observed).
This data is derived from the Protein Data Bank CIF files. Known errors in
the CIF files are corrected manually, with the original PDB file serving as
the final arbiter in case of discrepancies.
Residues are referenced by residue ID. This consists of the PDB residue
sequence number (up to 4 digits) and an optional PDB insertion code (an
ascii alphabetic character, a-z, A-Z). e.g. "1", "10A", "1010b", "-1"
See "ASTRAL RAF Sequence Maps":http://astral.stanford.edu/raf.html
The RAF file itself is about 50 MB. Each line consists of a sequence map of
a different protein chain. This index provides rapid, random
access of RAF records without having to load the entire file into memory.
This class does not load the entire RAF file into memory. Instead, it
reads the file once, noting the location and content of each RafSeqMap.
The index key is a concatenation of the PDB ID and chain ID. e.g
"2drcA", "155c_". RAF uses an underscore to indicate blank
chain IDs. Custom maps of subsequences or spanning multiple chains can
be constructed with the get_seqmap method.
"""
def __init__(self, raf_file) :
def linekey(line) :
if not line or len(line)<5 or line.isspace() or line[0]=='#':
return None
return line[0:5]
def parser( f) : return RafSeqMap(f.readline())
FileIndex.__init__(self, raf_file, linekey, parser)
def get_seqmap(self, residues) :
"""Get the sequence map for a collection of residues.
residues -- A SCOP style description of a collection of residues from a
PDB strucure, (e.g. '(1bba A:10-20,B:)'), as a string or a
scop.Residues instance.
"""
if type(residues)== str :
residues = Residues(residues)
pdbid = residues.pdbid
frags = residues.fragments
if not frags: frags =(('_','',''),) # All residues of unnamed chain
seqMap = None
for frag in frags :
chainid = frag[0]
if chainid=='' or chainid=='-' or chainid==' ' or chainid=='_':
chainid = '_'
sid = pdbid + chainid
sm = self[sid]
# Cut out fragment of interest
start = 0
end = len(sm.res)
if frag[1] : start = int(sm.index(frag[1], chainid))
if frag[2] : end = int(sm.index(frag[2], chainid)+1)
sm = sm[start:end]
if seqMap is None :
seqMap = sm
else :
seqMap += sm
return seqMap
# End Raf
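# Illustrative usage sketch (not part of the original module). Records are keyed
# by the 4-character PDB ID plus chain ID, and get_seqmap() accepts a SCOP-style
# residue description. The file name is hypothetical, and passing an open handle
# here is an assumption based on the FileIndex wrapper above.
#
#   raf = Raf(open('astral.raf'))
#   chain = raf['101m_']                          # whole chain, '_' = blank chain ID
#   frag = raf.get_seqmap('(1bba A:10-20,B:)')    # custom map spanning two chains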
class RafSeqMap(object) :
"""ASTRAL RAF (Rapid Access Format) Sequence Maps.
RafSeqMap is a list like object; you can find the location of particular
residues with index(), slice this RafSeqMap into fragments, and glue
fragments back together with extend().
- pdbid -- The PDB 4 character ID
- pdb_datestamp -- From the PDB file
- version -- The RAF format version. e.g. 0.01
- flags -- RAF flags. (See release notes for more information.)
- res -- A list of Res objects, one for each residue in this sequence map
"""
def __init__(self, raf_record=None) :
"""Parses a RAF record into a RafSeqMap object."""
self.pdbid = ''
self.pdb_datestamp = ''
self.version = ''
self.flags = ''
self.res = []
if not raf_record : return
header_len = 38
line = raf_record.rstrip() # no trailing whitespace
if len(line)<header_len:
raise ValueError("Incomplete header: "+line)
self.pdbid = line[0:4]
chainid = line[4:5]
self.version = line[6:10]
# Raf format versions 0.01 and 0.02 are identical for practical purposes
if(self.version != "0.01" and self.version !="0.02") :
raise ValueError("Incompatible RAF version: "+self.version)
self.pdb_datestamp = line[14:20]
self.flags = line[21:27]
for i in range(header_len, len(line), 7) :
f = line[i : i+7]
if len(f)!=7:
raise ValueError("Corrupt Field: ("+f+")" )
r = Res()
r.chainid = chainid
r.resid = f[0:5].strip()
r.atom = normalize_letters(f[5:6])
r.seqres = normalize_letters(f[6:7])
self.res.append(r)
# end __init__
#@staticmethod
def records(raf_file) :
"""Iterates over a Raf file, generating RafSeqMaps """
for line in raf_file:
if line[0] =='#': continue # A comment
if line.isspace() : continue
yield RafSeqMap(line)
records = staticmethod(records)
def index(self, resid, chainid="_") :
for i in range(0, len(self.res)) :
if self.res[i].resid == resid and self.res[i].chainid == chainid :
return i
raise KeyError("No such residue "+chainid+resid)
def __getslice__(self, i, j) :
s = copy(self)
s.res = s.res[i:j]
return s
def append(self, res) :
"""Append another Res object onto the list of residue mappings."""
self.res.append(res)
def extend(self, other) :
"""Append another RafSeqMap onto the end of self.
Both RafSeqMaps must have the same PDB ID, PDB datestamp and
RAF version. The RAF flags are erased if they are inconsistent. This
may happen when fragments are taken from different chains.
"""
if not isinstance(other, RafSeqMap):
raise TypeError("Can only extend a RafSeqMap with a RafSeqMap.")
if self.pdbid != other.pdbid :
raise TypeError("Cannot add fragments from different proteins.")
if self.version != other.version :
raise TypeError("Incompatible rafs.")
if self.pdb_datestamp != other.pdb_datestamp :
raise TypeError("Different pdb dates!")
if self.flags != other.flags :
self.flags = ''
self.res += other.res
def __iadd__(self, other) :
self.extend(other)
return self
def __add__(self, other) :
s = copy(self)
s.extend(other)
return s
def extract_atoms(self, pdb_handle, out_handle) :
"""Extract all relevant ATOM and HETATOM records from a PDB file.
The PDB file is scanned for ATOM and HETATOM records. If the
chain ID, residue ID (seqNum and iCode), and residue type match
a residue in this sequence map, then the record is echoed to the
output handle.
This is typically used to find the coordinates of a domain, or other
residue subset.
pdb_file -- A handle to the relevant PDB file.
out_file -- All output is written to this stream.
"""
resSet = {}
for r in self.res :
if r.atom=='X' : # Unknown residue type
continue
chainid = r.chainid
if chainid == '_':
chainid = ' '
resid = r.resid
resSet[(chainid,resid)] = r
resFound = {}
for line in pdb_handle :
if line.startswith("ATOM ") or line.startswith("HETATM") :
chainid = line[21:22]
resid = line[22:27].strip()
key = (chainid, resid)
if key in resSet:
res = resSet[key]
atom_aa = res.atom
                    resName = line[17:20].capitalize()
if resName in to_one_letter_code :
if to_one_letter_code[resName] == atom_aa :
out_handle.write(line)
resFound[key] = res
if len(resSet) != len(resFound) :
raise RuntimeError('I could not find at least one ATOM or '
'HETATM record for each and every residue in this sequence map.')
class Res(object) :
""" A single residue mapping from a RAF record.
- chainid -- A single character chain ID.
- resid -- The residue ID.
- atom -- amino acid one-letter code from ATOM records.
- seqres -- amino acid one-letter code from SEQRES records.
"""
def __init__(self) :
self.chainid = ''
self.resid = ''
self.atom = ''
self.seqres = ''
| gpl-3.0 | 8,535,867,999,074,070,000 | 36.038462 | 321 | 0.596134 | false |
ppanczyk/ansible | lib/ansible/module_utils/basic.py | 7 | 112564 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
# Copyright (c), Toshio Kuratomi <tkuratomi@ansible.com> 2016
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
SIZE_RANGES = {
'Y': 1 << 80,
'Z': 1 << 70,
'E': 1 << 60,
'P': 1 << 50,
'T': 1 << 40,
'G': 1 << 30,
'M': 1 << 20,
'K': 1 << 10,
'B': 1,
}
FILE_ATTRIBUTES = {
'A': 'noatime',
'a': 'append',
'c': 'compressed',
'C': 'nocow',
'd': 'nodump',
'D': 'dirsync',
'e': 'extents',
'E': 'encrypted',
'h': 'blocksize',
'i': 'immutable',
'I': 'indexed',
'j': 'journalled',
'N': 'inline',
's': 'zero',
'S': 'synchronous',
't': 'notail',
'T': 'blockroot',
'u': 'undelete',
'X': 'compressedraw',
'Z': 'compresseddirty',
}
# ansible modules can be written in any language. To simplify
# development of Python modules, the functions available here can
# be used to do many common tasks
import locale
import os
import re
import shlex
import subprocess
import sys
import types
import time
import select
import shutil
import stat
import tempfile
import traceback
import grp
import pwd
import platform
import errno
import datetime
from collections import deque
from collections import Mapping, MutableMapping, Sequence, MutableSequence, Set, MutableSet
from itertools import repeat, chain
try:
import syslog
HAS_SYSLOG = True
except ImportError:
HAS_SYSLOG = False
try:
from systemd import journal
has_journal = True
except ImportError:
has_journal = False
HAVE_SELINUX = False
try:
import selinux
HAVE_SELINUX = True
except ImportError:
pass
# Python2 & 3 way to get NoneType
NoneType = type(None)
# Note: When getting Sequence from collections, it matches with strings. If
# this matters, make sure to check for strings before checking for sequencetype
try:
from collections.abc import KeysView
SEQUENCETYPE = (Sequence, frozenset, KeysView)
except:
SEQUENCETYPE = (Sequence, frozenset)
try:
import json
# Detect the python-json library which is incompatible
# Look for simplejson if that's the case
try:
if not isinstance(json.loads, types.FunctionType) or not isinstance(json.dumps, types.FunctionType):
raise ImportError
except AttributeError:
raise ImportError
except ImportError:
try:
import simplejson as json
except ImportError:
print('\n{"msg": "Error: ansible requires the stdlib json or simplejson module, neither was found!", "failed": true}')
sys.exit(1)
except SyntaxError:
print('\n{"msg": "SyntaxError: probably due to installed simplejson being for a different python version", "failed": true}')
sys.exit(1)
AVAILABLE_HASH_ALGORITHMS = dict()
try:
import hashlib
# python 2.7.9+ and 2.7.0+
for attribute in ('available_algorithms', 'algorithms'):
algorithms = getattr(hashlib, attribute, None)
if algorithms:
break
if algorithms is None:
# python 2.5+
algorithms = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')
for algorithm in algorithms:
AVAILABLE_HASH_ALGORITHMS[algorithm] = getattr(hashlib, algorithm)
except ImportError:
import sha
AVAILABLE_HASH_ALGORITHMS = {'sha1': sha.sha}
try:
import md5
AVAILABLE_HASH_ALGORITHMS['md5'] = md5.md5
except ImportError:
pass
from ansible.module_utils.pycompat24 import get_exception, literal_eval
from ansible.module_utils.six import (
PY2,
PY3,
b,
binary_type,
integer_types,
iteritems,
string_types,
text_type,
)
from ansible.module_utils.six.moves import map, reduce, shlex_quote
from ansible.module_utils._text import to_native, to_bytes, to_text
from ansible.module_utils.parsing.convert_bool import BOOLEANS, BOOLEANS_FALSE, BOOLEANS_TRUE, boolean
PASSWORD_MATCH = re.compile(r'^(?:.+[-_\s])?pass(?:[-_\s]?(?:word|phrase|wrd|wd)?)(?:[-_\s].+)?$', re.I)
_NUMBERTYPES = tuple(list(integer_types) + [float])
# Deprecated compat. Only kept in case another module used these names Using
# ansible.module_utils.six is preferred
NUMBERTYPES = _NUMBERTYPES
imap = map
try:
# Python 2
unicode
except NameError:
# Python 3
unicode = text_type
try:
# Python 2.6+
bytes
except NameError:
# Python 2.4
bytes = binary_type
try:
# Python 2
basestring
except NameError:
# Python 3
basestring = string_types
_literal_eval = literal_eval
# End of deprecated names
# Internal global holding passed in params. This is consulted in case
# multiple AnsibleModules are created. Otherwise each AnsibleModule would
# attempt to read from stdin. Other code should not use this directly as it
# is an internal implementation detail
_ANSIBLE_ARGS = None
FILE_COMMON_ARGUMENTS = dict(
src=dict(),
mode=dict(type='raw'),
owner=dict(),
group=dict(),
seuser=dict(),
serole=dict(),
selevel=dict(),
setype=dict(),
follow=dict(type='bool', default=False),
# not taken by the file module, but other modules call file so it must ignore them.
content=dict(no_log=True),
backup=dict(),
force=dict(),
remote_src=dict(), # used by assemble
regexp=dict(), # used by assemble
delimiter=dict(), # used by assemble
directory_mode=dict(), # used by copy
unsafe_writes=dict(type='bool'), # should be available to any module using atomic_move
attributes=dict(aliases=['attr']),
)
PASSWD_ARG_RE = re.compile(r'^[-]{0,2}pass[-]?(word|wd)?')
# Used for parsing symbolic file perms
MODE_OPERATOR_RE = re.compile(r'[+=-]')
USERS_RE = re.compile(r'[^ugo]')
PERMS_RE = re.compile(r'[^rwxXstugo]')
PERM_BITS = 0o7777 # file mode permission bits
EXEC_PERM_BITS = 0o0111 # execute permission bits
DEFAULT_PERM = 0o0666 # default file permission bits
def get_platform():
''' what's the platform? example: Linux is a platform. '''
return platform.system()
def get_distribution():
''' return the distribution name '''
if platform.system() == 'Linux':
try:
supported_dists = platform._supported_dists + ('arch', 'alpine', 'devuan')
distribution = platform.linux_distribution(supported_dists=supported_dists)[0].capitalize()
if not distribution and os.path.isfile('/etc/system-release'):
distribution = platform.linux_distribution(supported_dists=['system'])[0].capitalize()
if 'Amazon' in distribution:
distribution = 'Amazon'
else:
distribution = 'OtherLinux'
except:
# FIXME: MethodMissing, I assume?
distribution = platform.dist()[0].capitalize()
else:
distribution = None
return distribution
def get_distribution_version():
''' return the distribution version '''
if platform.system() == 'Linux':
try:
distribution_version = platform.linux_distribution()[1]
if not distribution_version and os.path.isfile('/etc/system-release'):
distribution_version = platform.linux_distribution(supported_dists=['system'])[1]
except:
# FIXME: MethodMissing, I assume?
distribution_version = platform.dist()[1]
else:
distribution_version = None
return distribution_version
def get_all_subclasses(cls):
'''
used by modules like Hardware or Network fact classes to retrieve all subclasses of a given class.
__subclasses__ return only direct sub classes. This one go down into the class tree.
'''
# Retrieve direct subclasses
subclasses = cls.__subclasses__()
to_visit = list(subclasses)
# Then visit all subclasses
while to_visit:
for sc in to_visit:
# The current class is now visited, so remove it from list
to_visit.remove(sc)
# Appending all subclasses to visit and keep a reference of available class
for ssc in sc.__subclasses__():
subclasses.append(ssc)
to_visit.append(ssc)
return subclasses
def load_platform_subclass(cls, *args, **kwargs):
'''
used by modules like User to have different implementations based on detected platform. See User
module for an example.
'''
this_platform = get_platform()
distribution = get_distribution()
subclass = None
# get the most specific superclass for this platform
if distribution is not None:
for sc in get_all_subclasses(cls):
if sc.distribution is not None and sc.distribution == distribution and sc.platform == this_platform:
subclass = sc
if subclass is None:
for sc in get_all_subclasses(cls):
if sc.platform == this_platform and sc.distribution is None:
subclass = sc
if subclass is None:
subclass = cls
return super(cls, subclass).__new__(subclass)
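# Illustrative sketch (not part of the original code) of the pattern this helper
# supports: a base class overrides __new__ so that instantiation returns the most
# specific platform/distribution subclass. Class names here are hypothetical.
#
#   class User(object):
#       platform = 'Generic'
#       distribution = None
#       def __new__(cls, *args, **kwargs):
#           return load_platform_subclass(User, args, kwargs)
#
#   class FreeBsdUser(User):
#       platform = 'FreeBSD'
#       distribution = None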
def json_dict_unicode_to_bytes(d, encoding='utf-8', errors='surrogate_or_strict'):
''' Recursively convert dict keys and values to byte str
Specialized for json return because this only handles, lists, tuples,
and dict container types (the containers that the json module returns)
'''
if isinstance(d, text_type):
return to_bytes(d, encoding=encoding, errors=errors)
elif isinstance(d, dict):
return dict(map(json_dict_unicode_to_bytes, iteritems(d), repeat(encoding), repeat(errors)))
elif isinstance(d, list):
return list(map(json_dict_unicode_to_bytes, d, repeat(encoding), repeat(errors)))
elif isinstance(d, tuple):
return tuple(map(json_dict_unicode_to_bytes, d, repeat(encoding), repeat(errors)))
else:
return d
def json_dict_bytes_to_unicode(d, encoding='utf-8', errors='surrogate_or_strict'):
''' Recursively convert dict keys and values to byte str
Specialized for json return because this only handles, lists, tuples,
and dict container types (the containers that the json module returns)
'''
if isinstance(d, binary_type):
# Warning, can traceback
return to_text(d, encoding=encoding, errors=errors)
elif isinstance(d, dict):
return dict(map(json_dict_bytes_to_unicode, iteritems(d), repeat(encoding), repeat(errors)))
elif isinstance(d, list):
return list(map(json_dict_bytes_to_unicode, d, repeat(encoding), repeat(errors)))
elif isinstance(d, tuple):
return tuple(map(json_dict_bytes_to_unicode, d, repeat(encoding), repeat(errors)))
else:
return d
def return_values(obj):
""" Return native stringified values from datastructures.
For use with removing sensitive values pre-jsonification."""
if isinstance(obj, (text_type, binary_type)):
if obj:
yield to_native(obj, errors='surrogate_or_strict')
return
elif isinstance(obj, SEQUENCETYPE):
for element in obj:
for subelement in return_values(element):
yield subelement
elif isinstance(obj, Mapping):
for element in obj.items():
for subelement in return_values(element[1]):
yield subelement
elif isinstance(obj, (bool, NoneType)):
# This must come before int because bools are also ints
return
elif isinstance(obj, NUMBERTYPES):
yield to_native(obj, nonstring='simplerepr')
else:
raise TypeError('Unknown parameter type: %s, %s' % (type(obj), obj))
def _remove_values_conditions(value, no_log_strings, deferred_removals):
"""
Helper function for :meth:`remove_values`.
:arg value: The value to check for strings that need to be stripped
:arg no_log_strings: set of strings which must be stripped out of any values
:arg deferred_removals: List which holds information about nested
containers that have to be iterated for removals. It is passed into
this function so that more entries can be added to it if value is
a container type. The format of each entry is a 2-tuple where the first
element is the ``value`` parameter and the second value is a new
container to copy the elements of ``value`` into once iterated.
:returns: if ``value`` is a scalar, returns ``value`` with two exceptions:
1. :class:`~datetime.datetime` objects which are changed into a string representation.
2. objects which are in no_log_strings are replaced with a placeholder
so that no sensitive data is leaked.
If ``value`` is a container type, returns a new empty container.
``deferred_removals`` is added to as a side-effect of this function.
.. warning:: It is up to the caller to make sure the order in which value
is passed in is correct. For instance, higher level containers need
to be passed in before lower level containers. For example, given
        ``{'level1': {'level2': {'level3': [True]}}}`` first pass in the
dictionary for ``level1``, then the dict for ``level2``, and finally
the list for ``level3``.
"""
if isinstance(value, (text_type, binary_type)):
# Need native str type
native_str_value = value
if isinstance(value, text_type):
value_is_text = True
if PY2:
native_str_value = to_bytes(value, errors='surrogate_or_strict')
elif isinstance(value, binary_type):
value_is_text = False
if PY3:
native_str_value = to_text(value, errors='surrogate_or_strict')
if native_str_value in no_log_strings:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
for omit_me in no_log_strings:
native_str_value = native_str_value.replace(omit_me, '*' * 8)
if value_is_text and isinstance(native_str_value, binary_type):
value = to_text(native_str_value, encoding='utf-8', errors='surrogate_then_replace')
elif not value_is_text and isinstance(native_str_value, text_type):
value = to_bytes(native_str_value, encoding='utf-8', errors='surrogate_then_replace')
else:
value = native_str_value
elif isinstance(value, Sequence):
if isinstance(value, MutableSequence):
new_value = type(value)()
else:
new_value = [] # Need a mutable value
deferred_removals.append((value, new_value))
value = new_value
elif isinstance(value, Set):
if isinstance(value, MutableSet):
new_value = type(value)()
else:
new_value = set() # Need a mutable value
deferred_removals.append((value, new_value))
value = new_value
elif isinstance(value, Mapping):
if isinstance(value, MutableMapping):
new_value = type(value)()
else:
new_value = {} # Need a mutable value
deferred_removals.append((value, new_value))
value = new_value
elif isinstance(value, tuple(chain(NUMBERTYPES, (bool, NoneType)))):
stringy_value = to_native(value, encoding='utf-8', errors='surrogate_or_strict')
if stringy_value in no_log_strings:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
for omit_me in no_log_strings:
if omit_me in stringy_value:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
elif isinstance(value, datetime.datetime):
value = value.isoformat()
else:
raise TypeError('Value of unknown type: %s, %s' % (type(value), value))
return value
def remove_values(value, no_log_strings):
""" Remove strings in no_log_strings from value. If value is a container
type, then remove a lot more"""
deferred_removals = deque()
no_log_strings = [to_native(s, errors='surrogate_or_strict') for s in no_log_strings]
new_value = _remove_values_conditions(value, no_log_strings, deferred_removals)
while deferred_removals:
old_data, new_data = deferred_removals.popleft()
if isinstance(new_data, Mapping):
for old_key, old_elem in old_data.items():
new_elem = _remove_values_conditions(old_elem, no_log_strings, deferred_removals)
new_data[old_key] = new_elem
else:
for elem in old_data:
new_elem = _remove_values_conditions(elem, no_log_strings, deferred_removals)
if isinstance(new_data, MutableSequence):
new_data.append(new_elem)
elif isinstance(new_data, MutableSet):
new_data.add(new_elem)
else:
raise TypeError('Unknown container type encountered when removing private values from output')
return new_value
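# Illustrative usage sketch (not part of the original code): nested containers are
# walked iteratively via the deferred_removals queue and any matching value is
# replaced with a fixed placeholder.
#
#   sanitized = remove_values({'user': 'admin', 'auth': {'password': 's3cret'}},
#                             ['s3cret'])
#   # sanitized['auth']['password'] == 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'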
def heuristic_log_sanitize(data, no_log_values=None):
''' Remove strings that look like passwords from log messages '''
# Currently filters:
# user:pass@foo/whatever and http://username:pass@wherever/foo
# This code has false positives and consumes parts of logs that are
# not passwds
# begin: start of a passwd containing string
# end: end of a passwd containing string
# sep: char between user and passwd
# prev_begin: where in the overall string to start a search for
# a passwd
# sep_search_end: where in the string to end a search for the sep
data = to_native(data)
output = []
begin = len(data)
prev_begin = begin
sep = 1
while sep:
# Find the potential end of a passwd
try:
end = data.rindex('@', 0, begin)
except ValueError:
# No passwd in the rest of the data
output.insert(0, data[0:begin])
break
# Search for the beginning of a passwd
sep = None
sep_search_end = end
while not sep:
# URL-style username+password
try:
begin = data.rindex('://', 0, sep_search_end)
except ValueError:
# No url style in the data, check for ssh style in the
# rest of the string
begin = 0
# Search for separator
try:
sep = data.index(':', begin + 3, end)
except ValueError:
# No separator; choices:
if begin == 0:
# Searched the whole string so there's no password
# here. Return the remaining data
output.insert(0, data[0:begin])
break
# Search for a different beginning of the password field.
sep_search_end = begin
continue
if sep:
# Password was found; remove it.
output.insert(0, data[end:prev_begin])
output.insert(0, '********')
output.insert(0, data[begin:sep + 1])
prev_begin = begin
output = ''.join(output)
if no_log_values:
output = remove_values(output, no_log_values)
return output
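# Illustrative usage sketch (not part of the original code): URL-style credentials
# are masked even when they were never registered as no_log values.
#
#   heuristic_log_sanitize('fetching http://bob:hunter2@example.com/repo')
#   # -> 'fetching http://bob:********@example.com/repo'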
def bytes_to_human(size, isbits=False, unit=None):
base = 'Bytes'
if isbits:
base = 'bits'
suffix = ''
for suffix, limit in sorted(iteritems(SIZE_RANGES), key=lambda item: -item[1]):
if (unit is None and size >= limit) or unit is not None and unit.upper() == suffix[0]:
break
if limit != 1:
suffix += base[0]
else:
suffix = base
return '%.2f %s' % (float(size) / limit, suffix)
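# Illustrative usage sketch (not part of the original code):
#
#   bytes_to_human(1048576)             # -> '1.00 MB'
#   bytes_to_human(1048576, unit='K')   # -> '1024.00 KB'
#   bytes_to_human(1024, isbits=True)   # -> '1.00 Kb'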
def human_to_bytes(number, default_unit=None, isbits=False):
    '''
    Convert a human-readable size string into bytes (ex: '2K' => 2048). When the
    number carries no unit of its own, default_unit is used instead.
    ex:
    human_to_bytes('10M') <=> human_to_bytes(10, 'M')
    '''
    m = re.search(r'^\s*(\d*\.?\d*)\s*([A-Za-z]+)?', str(number), flags=re.IGNORECASE)
if m is None:
raise ValueError("human_to_bytes() can't interpret following string: %s" % str(number))
try:
num = float(m.group(1))
except:
raise ValueError("human_to_bytes() can't interpret following number: %s (original input string: %s)" % (m.group(1), number))
unit = m.group(2)
if unit is None:
unit = default_unit
if unit is None:
        # No unit given, returning raw number
return int(round(num))
range_key = unit[0].upper()
try:
limit = SIZE_RANGES[range_key]
except:
raise ValueError("human_to_bytes() failed to convert %s (unit = %s). The suffix must be one of %s" % (number, unit, ", ".join(SIZE_RANGES.keys())))
# default value
unit_class = 'B'
unit_class_name = 'byte'
# handling bits case
if isbits:
unit_class = 'b'
unit_class_name = 'bit'
# check unit value if more than one character (KB, MB)
if len(unit) > 1:
expect_message = 'expect %s%s or %s' % (range_key, unit_class, range_key)
if range_key == 'B':
expect_message = 'expect %s or %s' % (unit_class, unit_class_name)
if unit_class_name in unit.lower():
pass
elif unit[1] != unit_class:
raise ValueError("human_to_bytes() failed to convert %s. Value is not a valid string (%s)" % (number, expect_message))
return int(round(num * limit))
def is_executable(path):
'''is the given path executable?
Limitations:
* Does not account for FSACLs.
    * Most times we really want to know "Can the current user execute this
    file?" This function does not tell us that, only whether an execute bit is set.
'''
# These are all bitfields so first bitwise-or all the permissions we're
# looking for, then bitwise-and with the file's mode to determine if any
# execute bits are set.
return ((stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) & os.stat(path)[stat.ST_MODE])
def _load_params():
    ''' read the module's parameters and store them globally.
    This function may be needed for certain very dynamic custom modules which
    want to process the parameters that are being handed to the module. Since
this is so closely tied to the implementation of modules we cannot
guarantee API stability for it (it may change between versions) however we
will try not to break it gratuitously. It is certainly more future-proof
to call this function and consume its outputs than to implement the logic
inside it as a copy in your own code.
'''
global _ANSIBLE_ARGS
if _ANSIBLE_ARGS is not None:
buffer = _ANSIBLE_ARGS
else:
# debug overrides to read args from file or cmdline
# Avoid tracebacks when locale is non-utf8
# We control the args and we pass them as utf8
if len(sys.argv) > 1:
if os.path.isfile(sys.argv[1]):
fd = open(sys.argv[1], 'rb')
buffer = fd.read()
fd.close()
else:
buffer = sys.argv[1]
if PY3:
buffer = buffer.encode('utf-8', errors='surrogateescape')
# default case, read from stdin
else:
if PY2:
buffer = sys.stdin.read()
else:
buffer = sys.stdin.buffer.read()
_ANSIBLE_ARGS = buffer
try:
params = json.loads(buffer.decode('utf-8'))
except ValueError:
# This helper used too early for fail_json to work.
print('\n{"msg": "Error: Module unable to decode valid JSON on stdin. Unable to figure out what parameters were passed", "failed": true}')
sys.exit(1)
if PY2:
params = json_dict_unicode_to_bytes(params)
try:
return params['ANSIBLE_MODULE_ARGS']
except KeyError:
# This helper does not have access to fail_json so we have to print
# json output on our own.
print('\n{"msg": "Error: Module unable to locate ANSIBLE_MODULE_ARGS in json data from stdin. Unable to figure out what parameters were passed", '
'"failed": true}')
sys.exit(1)
def env_fallback(*args, **kwargs):
''' Load value from environment '''
for arg in args:
if arg in os.environ:
return os.environ[arg]
raise AnsibleFallbackNotFound
def _lenient_lowercase(lst):
"""Lowercase elements of a list.
If an element is not a string, pass it through untouched.
"""
lowered = []
for value in lst:
try:
lowered.append(value.lower())
except AttributeError:
lowered.append(value)
return lowered
def format_attributes(attributes):
attribute_list = []
for attr in attributes:
if attr in FILE_ATTRIBUTES:
attribute_list.append(FILE_ATTRIBUTES[attr])
return attribute_list
def get_flags_from_attributes(attributes):
flags = []
for key, attr in FILE_ATTRIBUTES.items():
if attr in attributes:
flags.append(key)
return ''.join(flags)
class AnsibleFallbackNotFound(Exception):
pass
class _SetEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, Set):
return list(obj)
return super(_SetEncoder, self).default(obj)
class AnsibleModule(object):
def __init__(self, argument_spec, bypass_checks=False, no_log=False,
check_invalid_arguments=True, mutually_exclusive=None, required_together=None,
required_one_of=None, add_file_common_args=False, supports_check_mode=False,
required_if=None):
'''
common code for quickly building an ansible module in Python
(although you can write modules in anything that can return JSON)
see library/* for examples
'''
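        # Typical construction from a module's main() (illustrative sketch; the
        # option names are hypothetical):
        #
        #     module = AnsibleModule(
        #         argument_spec=dict(
        #             name=dict(type='str', required=True),
        #             state=dict(type='str', choices=['present', 'absent'], default='present'),
        #         ),
        #         supports_check_mode=True,
        #     )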
self._name = os.path.basename(__file__) # initialize name until we can parse from options
self.argument_spec = argument_spec
self.supports_check_mode = supports_check_mode
self.check_mode = False
self.bypass_checks = bypass_checks
self.no_log = no_log
self.check_invalid_arguments = check_invalid_arguments
self.mutually_exclusive = mutually_exclusive
self.required_together = required_together
self.required_one_of = required_one_of
self.required_if = required_if
self.cleanup_files = []
self._debug = False
self._diff = False
self._socket_path = None
self._shell = None
self._verbosity = 0
# May be used to set modifications to the environment for any
# run_command invocation
self.run_command_environ_update = {}
self._warnings = []
self._deprecations = []
self.aliases = {}
self._legal_inputs = ['_ansible_check_mode', '_ansible_no_log', '_ansible_debug', '_ansible_diff', '_ansible_verbosity',
'_ansible_selinux_special_fs', '_ansible_module_name', '_ansible_version', '_ansible_syslog_facility',
'_ansible_socket', '_ansible_shell_executable']
self._options_context = list()
if add_file_common_args:
for k, v in FILE_COMMON_ARGUMENTS.items():
if k not in self.argument_spec:
self.argument_spec[k] = v
self._load_params()
self._set_fallbacks()
# append to legal_inputs and then possibly check against them
try:
self.aliases = self._handle_aliases()
except Exception as e:
# Use exceptions here because it isn't safe to call fail_json until no_log is processed
print('\n{"failed": true, "msg": "Module alias error: %s"}' % to_native(e))
sys.exit(1)
# Save parameter values that should never be logged
self.no_log_values = set()
self._handle_no_log_values()
# check the locale as set by the current environment, and reset to
# a known valid (LANG=C) if it's an invalid/unavailable locale
self._check_locale()
self._check_arguments(check_invalid_arguments)
# check exclusive early
if not bypass_checks:
self._check_mutually_exclusive(mutually_exclusive)
self._set_defaults(pre=True)
self._CHECK_ARGUMENT_TYPES_DISPATCHER = {
'str': self._check_type_str,
'list': self._check_type_list,
'dict': self._check_type_dict,
'bool': self._check_type_bool,
'int': self._check_type_int,
'float': self._check_type_float,
'path': self._check_type_path,
'raw': self._check_type_raw,
'jsonarg': self._check_type_jsonarg,
'json': self._check_type_jsonarg,
'bytes': self._check_type_bytes,
'bits': self._check_type_bits,
}
if not bypass_checks:
self._check_required_arguments()
self._check_argument_types()
self._check_argument_values()
self._check_required_together(required_together)
self._check_required_one_of(required_one_of)
self._check_required_if(required_if)
self._set_defaults(pre=False)
# deal with options sub-spec
self._handle_options()
if not self.no_log:
self._log_invocation()
# finally, make sure we're in a sane working dir
self._set_cwd()
def warn(self, warning):
if isinstance(warning, string_types):
self._warnings.append(warning)
self.log('[WARNING] %s' % warning)
else:
raise TypeError("warn requires a string not a %s" % type(warning))
def deprecate(self, msg, version=None):
if isinstance(msg, string_types):
self._deprecations.append({
'msg': msg,
'version': version
})
self.log('[DEPRECATION WARNING] %s %s' % (msg, version))
else:
raise TypeError("deprecate requires a string not a %s" % type(msg))
def load_file_common_arguments(self, params):
'''
many modules deal with files, this encapsulates common
options that the file module accepts such that it is directly
available to all modules and they can share code.
'''
path = params.get('path', params.get('dest', None))
if path is None:
return {}
else:
path = os.path.expanduser(os.path.expandvars(path))
b_path = to_bytes(path, errors='surrogate_or_strict')
# if the path is a symlink, and we're following links, get
# the target of the link instead for testing
if params.get('follow', False) and os.path.islink(b_path):
b_path = os.path.realpath(b_path)
path = to_native(b_path)
mode = params.get('mode', None)
owner = params.get('owner', None)
group = params.get('group', None)
# selinux related options
seuser = params.get('seuser', None)
serole = params.get('serole', None)
setype = params.get('setype', None)
selevel = params.get('selevel', None)
secontext = [seuser, serole, setype]
if self.selinux_mls_enabled():
secontext.append(selevel)
default_secontext = self.selinux_default_context(path)
for i in range(len(default_secontext)):
if i is not None and secontext[i] == '_default':
secontext[i] = default_secontext[i]
attributes = params.get('attributes', None)
return dict(
path=path, mode=mode, owner=owner, group=group,
seuser=seuser, serole=serole, setype=setype,
selevel=selevel, secontext=secontext, attributes=attributes,
)
# Detect whether using selinux that is MLS-aware.
# While this means you can set the level/range with
# selinux.lsetfilecon(), it may or may not mean that you
# will get the selevel as part of the context returned
# by selinux.lgetfilecon().
def selinux_mls_enabled(self):
if not HAVE_SELINUX:
return False
if selinux.is_selinux_mls_enabled() == 1:
return True
else:
return False
def selinux_enabled(self):
if not HAVE_SELINUX:
seenabled = self.get_bin_path('selinuxenabled')
if seenabled is not None:
(rc, out, err) = self.run_command(seenabled)
if rc == 0:
self.fail_json(msg="Aborting, target uses selinux but python bindings (libselinux-python) aren't installed!")
return False
if selinux.is_selinux_enabled() == 1:
return True
else:
return False
# Determine whether we need a placeholder for selevel/mls
def selinux_initial_context(self):
context = [None, None, None]
if self.selinux_mls_enabled():
context.append(None)
return context
# If selinux fails to find a default, return an array of None
def selinux_default_context(self, path, mode=0):
context = self.selinux_initial_context()
if not HAVE_SELINUX or not self.selinux_enabled():
return context
try:
ret = selinux.matchpathcon(to_native(path, errors='surrogate_or_strict'), mode)
except OSError:
return context
if ret[0] == -1:
return context
# Limit split to 4 because the selevel, the last in the list,
# may contain ':' characters
context = ret[1].split(':', 3)
return context
def selinux_context(self, path):
context = self.selinux_initial_context()
if not HAVE_SELINUX or not self.selinux_enabled():
return context
try:
ret = selinux.lgetfilecon_raw(to_native(path, errors='surrogate_or_strict'))
except OSError as e:
if e.errno == errno.ENOENT:
self.fail_json(path=path, msg='path %s does not exist' % path)
else:
self.fail_json(path=path, msg='failed to retrieve selinux context')
if ret[0] == -1:
return context
# Limit split to 4 because the selevel, the last in the list,
# may contain ':' characters
context = ret[1].split(':', 3)
return context
def user_and_group(self, path, expand=True):
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
st = os.lstat(b_path)
uid = st.st_uid
gid = st.st_gid
return (uid, gid)
def find_mount_point(self, path):
path_is_bytes = False
if isinstance(path, binary_type):
path_is_bytes = True
b_path = os.path.realpath(to_bytes(os.path.expanduser(os.path.expandvars(path)), errors='surrogate_or_strict'))
while not os.path.ismount(b_path):
b_path = os.path.dirname(b_path)
if path_is_bytes:
return b_path
return to_text(b_path, errors='surrogate_or_strict')
def is_special_selinux_path(self, path):
"""
        Returns a tuple containing (True, selinux_context) if the given path is on an
NFS or other 'special' fs mount point, otherwise the return will be (False, None).
"""
try:
f = open('/proc/mounts', 'r')
mount_data = f.readlines()
f.close()
except:
return (False, None)
path_mount_point = self.find_mount_point(path)
for line in mount_data:
(device, mount_point, fstype, options, rest) = line.split(' ', 4)
if path_mount_point == mount_point:
for fs in self._selinux_special_fs:
if fs in fstype:
special_context = self.selinux_context(path_mount_point)
return (True, special_context)
return (False, None)
def set_default_selinux_context(self, path, changed):
if not HAVE_SELINUX or not self.selinux_enabled():
return changed
context = self.selinux_default_context(path)
return self.set_context_if_different(path, context, False)
def set_context_if_different(self, path, context, changed, diff=None):
if not HAVE_SELINUX or not self.selinux_enabled():
return changed
cur_context = self.selinux_context(path)
new_context = list(cur_context)
# Iterate over the current context instead of the
# argument context, which may have selevel.
(is_special_se, sp_context) = self.is_special_selinux_path(path)
if is_special_se:
new_context = sp_context
else:
for i in range(len(cur_context)):
if len(context) > i:
if context[i] is not None and context[i] != cur_context[i]:
new_context[i] = context[i]
elif context[i] is None:
new_context[i] = cur_context[i]
if cur_context != new_context:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['secontext'] = cur_context
if 'after' not in diff:
diff['after'] = {}
diff['after']['secontext'] = new_context
try:
if self.check_mode:
return True
rc = selinux.lsetfilecon(to_native(path), ':'.join(new_context))
except OSError as e:
self.fail_json(path=path, msg='invalid selinux context: %s' % to_native(e),
new_context=new_context, cur_context=cur_context, input_was=context)
if rc != 0:
self.fail_json(path=path, msg='set selinux context failed')
changed = True
return changed
def set_owner_if_different(self, path, owner, changed, diff=None, expand=True):
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
if owner is None:
return changed
orig_uid, orig_gid = self.user_and_group(b_path, expand)
try:
uid = int(owner)
except ValueError:
try:
uid = pwd.getpwnam(owner).pw_uid
except KeyError:
path = to_text(b_path)
self.fail_json(path=path, msg='chown failed: failed to look up user %s' % owner)
if orig_uid != uid:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['owner'] = orig_uid
if 'after' not in diff:
diff['after'] = {}
diff['after']['owner'] = uid
if self.check_mode:
return True
try:
os.lchown(b_path, uid, -1)
except (IOError, OSError) as e:
path = to_text(b_path)
self.fail_json(path=path, msg='chown failed: %s' % (to_text(e)))
changed = True
return changed
def set_group_if_different(self, path, group, changed, diff=None, expand=True):
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
if group is None:
return changed
orig_uid, orig_gid = self.user_and_group(b_path, expand)
try:
gid = int(group)
except ValueError:
try:
gid = grp.getgrnam(group).gr_gid
except KeyError:
path = to_text(b_path)
self.fail_json(path=path, msg='chgrp failed: failed to look up group %s' % group)
if orig_gid != gid:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['group'] = orig_gid
if 'after' not in diff:
diff['after'] = {}
diff['after']['group'] = gid
if self.check_mode:
return True
try:
os.lchown(b_path, -1, gid)
except OSError:
path = to_text(b_path)
self.fail_json(path=path, msg='chgrp failed')
changed = True
return changed
def set_mode_if_different(self, path, mode, changed, diff=None, expand=True):
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
path_stat = os.lstat(b_path)
if mode is None:
return changed
if not isinstance(mode, int):
try:
mode = int(mode, 8)
except Exception:
try:
mode = self._symbolic_mode_to_octal(path_stat, mode)
except Exception as e:
path = to_text(b_path)
self.fail_json(path=path,
msg="mode must be in octal or symbolic form",
details=to_native(e))
if mode != stat.S_IMODE(mode):
                # prevent mode from having extra info or being an invalid long number
path = to_text(b_path)
self.fail_json(path=path, msg="Invalid mode supplied, only permission info is allowed", details=mode)
prev_mode = stat.S_IMODE(path_stat.st_mode)
if prev_mode != mode:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['mode'] = '0%03o' % prev_mode
if 'after' not in diff:
diff['after'] = {}
diff['after']['mode'] = '0%03o' % mode
if self.check_mode:
return True
# FIXME: comparison against string above will cause this to be executed
# every time
try:
if hasattr(os, 'lchmod'):
os.lchmod(b_path, mode)
else:
if not os.path.islink(b_path):
os.chmod(b_path, mode)
else:
# Attempt to set the perms of the symlink but be
# careful not to change the perms of the underlying
# file while trying
underlying_stat = os.stat(b_path)
os.chmod(b_path, mode)
new_underlying_stat = os.stat(b_path)
if underlying_stat.st_mode != new_underlying_stat.st_mode:
os.chmod(b_path, stat.S_IMODE(underlying_stat.st_mode))
except OSError as e:
if os.path.islink(b_path) and e.errno == errno.EPERM: # Can't set mode on symbolic links
pass
elif e.errno in (errno.ENOENT, errno.ELOOP): # Can't set mode on broken symbolic links
pass
else:
raise
except Exception as e:
path = to_text(b_path)
self.fail_json(path=path, msg='chmod failed', details=to_native(e),
exception=traceback.format_exc())
path_stat = os.lstat(b_path)
new_mode = stat.S_IMODE(path_stat.st_mode)
if new_mode != prev_mode:
changed = True
return changed
def set_attributes_if_different(self, path, attributes, changed, diff=None, expand=True):
if attributes is None:
return changed
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
existing = self.get_file_attributes(b_path)
if existing.get('attr_flags', '') != attributes:
attrcmd = self.get_bin_path('chattr')
if attrcmd:
attrcmd = [attrcmd, '=%s' % attributes, b_path]
changed = True
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['attributes'] = existing.get('attr_flags')
if 'after' not in diff:
diff['after'] = {}
diff['after']['attributes'] = attributes
if not self.check_mode:
try:
rc, out, err = self.run_command(attrcmd)
if rc != 0 or err:
raise Exception("Error while setting attributes: %s" % (out + err))
except Exception as e:
path = to_text(b_path)
self.fail_json(path=path, msg='chattr failed', details=to_native(e),
exception=traceback.format_exc())
return changed
def get_file_attributes(self, path):
output = {}
attrcmd = self.get_bin_path('lsattr', False)
if attrcmd:
attrcmd = [attrcmd, '-vd', path]
try:
rc, out, err = self.run_command(attrcmd)
if rc == 0:
res = out.split(' ')[0:2]
output['attr_flags'] = res[1].replace('-', '').strip()
output['version'] = res[0].strip()
output['attributes'] = format_attributes(output['attr_flags'])
except:
pass
return output
@classmethod
def _symbolic_mode_to_octal(cls, path_stat, symbolic_mode):
"""
        This enables symbolic chmod string parsing as stated in the chmod man-page.
This includes things like: "u=rw-x+X,g=r-x+X,o=r-x+X"
"""
new_mode = stat.S_IMODE(path_stat.st_mode)
# Now parse all symbolic modes
for mode in symbolic_mode.split(','):
# Per single mode. This always contains a '+', '-' or '='
# Split it on that
permlist = MODE_OPERATOR_RE.split(mode)
# And find all the operators
opers = MODE_OPERATOR_RE.findall(mode)
            # The user(s) the mode applies to is the first element in the
            # 'permlist' list. Take that and remove it from the list.
# An empty user or 'a' means 'all'.
users = permlist.pop(0)
use_umask = (users == '')
if users == 'a' or users == '':
users = 'ugo'
# Check if there are illegal characters in the user list
# They can end up in 'users' because they are not split
if USERS_RE.match(users):
raise ValueError("bad symbolic permission for mode: %s" % mode)
            # Now we have two lists of equal length, one containing the requested
            # permissions and one the corresponding operators.
for idx, perms in enumerate(permlist):
# Check if there are illegal characters in the permissions
if PERMS_RE.match(perms):
raise ValueError("bad symbolic permission for mode: %s" % mode)
for user in users:
mode_to_apply = cls._get_octal_mode_from_symbolic_perms(path_stat, user, perms, use_umask)
new_mode = cls._apply_operation_to_mode(user, opers[idx], mode_to_apply, new_mode)
return new_mode
@staticmethod
def _apply_operation_to_mode(user, operator, mode_to_apply, current_mode):
if operator == '=':
if user == 'u':
mask = stat.S_IRWXU | stat.S_ISUID
elif user == 'g':
mask = stat.S_IRWXG | stat.S_ISGID
elif user == 'o':
mask = stat.S_IRWXO | stat.S_ISVTX
# mask out u, g, or o permissions from current_mode and apply new permissions
inverse_mask = mask ^ PERM_BITS
new_mode = (current_mode & inverse_mask) | mode_to_apply
elif operator == '+':
new_mode = current_mode | mode_to_apply
elif operator == '-':
new_mode = current_mode - (current_mode & mode_to_apply)
return new_mode
@staticmethod
def _get_octal_mode_from_symbolic_perms(path_stat, user, perms, use_umask):
prev_mode = stat.S_IMODE(path_stat.st_mode)
is_directory = stat.S_ISDIR(path_stat.st_mode)
has_x_permissions = (prev_mode & EXEC_PERM_BITS) > 0
apply_X_permission = is_directory or has_x_permissions
# Get the umask, if the 'user' part is empty, the effect is as if (a) were
# given, but bits that are set in the umask are not affected.
# We also need the "reversed umask" for masking
umask = os.umask(0)
os.umask(umask)
rev_umask = umask ^ PERM_BITS
# Permission bits constants documented at:
# http://docs.python.org/2/library/stat.html#stat.S_ISUID
if apply_X_permission:
X_perms = {
'u': {'X': stat.S_IXUSR},
'g': {'X': stat.S_IXGRP},
'o': {'X': stat.S_IXOTH},
}
else:
X_perms = {
'u': {'X': 0},
'g': {'X': 0},
'o': {'X': 0},
}
user_perms_to_modes = {
'u': {
'r': rev_umask & stat.S_IRUSR if use_umask else stat.S_IRUSR,
'w': rev_umask & stat.S_IWUSR if use_umask else stat.S_IWUSR,
'x': rev_umask & stat.S_IXUSR if use_umask else stat.S_IXUSR,
's': stat.S_ISUID,
't': 0,
'u': prev_mode & stat.S_IRWXU,
'g': (prev_mode & stat.S_IRWXG) << 3,
'o': (prev_mode & stat.S_IRWXO) << 6},
'g': {
'r': rev_umask & stat.S_IRGRP if use_umask else stat.S_IRGRP,
'w': rev_umask & stat.S_IWGRP if use_umask else stat.S_IWGRP,
'x': rev_umask & stat.S_IXGRP if use_umask else stat.S_IXGRP,
's': stat.S_ISGID,
't': 0,
'u': (prev_mode & stat.S_IRWXU) >> 3,
'g': prev_mode & stat.S_IRWXG,
'o': (prev_mode & stat.S_IRWXO) << 3},
'o': {
'r': rev_umask & stat.S_IROTH if use_umask else stat.S_IROTH,
'w': rev_umask & stat.S_IWOTH if use_umask else stat.S_IWOTH,
'x': rev_umask & stat.S_IXOTH if use_umask else stat.S_IXOTH,
's': 0,
't': stat.S_ISVTX,
'u': (prev_mode & stat.S_IRWXU) >> 6,
'g': (prev_mode & stat.S_IRWXG) >> 3,
'o': prev_mode & stat.S_IRWXO},
}
# Insert X_perms into user_perms_to_modes
for key, value in X_perms.items():
user_perms_to_modes[key].update(value)
def or_reduce(mode, perm):
return mode | user_perms_to_modes[user][perm]
return reduce(or_reduce, perms, 0)
def set_fs_attributes_if_different(self, file_args, changed, diff=None, expand=True):
# set modes owners and context as needed
changed = self.set_context_if_different(
file_args['path'], file_args['secontext'], changed, diff
)
changed = self.set_owner_if_different(
file_args['path'], file_args['owner'], changed, diff, expand
)
changed = self.set_group_if_different(
file_args['path'], file_args['group'], changed, diff, expand
)
changed = self.set_mode_if_different(
file_args['path'], file_args['mode'], changed, diff, expand
)
changed = self.set_attributes_if_different(
file_args['path'], file_args['attributes'], changed, diff, expand
)
return changed
def set_directory_attributes_if_different(self, file_args, changed, diff=None, expand=True):
return self.set_fs_attributes_if_different(file_args, changed, diff, expand)
def set_file_attributes_if_different(self, file_args, changed, diff=None, expand=True):
return self.set_fs_attributes_if_different(file_args, changed, diff, expand)
def add_path_info(self, kwargs):
'''
for results that are files, supplement the info about the file
in the return path with stats about the file path.
'''
path = kwargs.get('path', kwargs.get('dest', None))
if path is None:
return kwargs
b_path = to_bytes(path, errors='surrogate_or_strict')
if os.path.exists(b_path):
(uid, gid) = self.user_and_group(path)
kwargs['uid'] = uid
kwargs['gid'] = gid
try:
user = pwd.getpwuid(uid)[0]
except KeyError:
user = str(uid)
try:
group = grp.getgrgid(gid)[0]
except KeyError:
group = str(gid)
kwargs['owner'] = user
kwargs['group'] = group
st = os.lstat(b_path)
kwargs['mode'] = '0%03o' % stat.S_IMODE(st[stat.ST_MODE])
# secontext not yet supported
if os.path.islink(b_path):
kwargs['state'] = 'link'
elif os.path.isdir(b_path):
kwargs['state'] = 'directory'
elif os.stat(b_path).st_nlink > 1:
kwargs['state'] = 'hard'
else:
kwargs['state'] = 'file'
if HAVE_SELINUX and self.selinux_enabled():
kwargs['secontext'] = ':'.join(self.selinux_context(path))
kwargs['size'] = st[stat.ST_SIZE]
else:
kwargs['state'] = 'absent'
return kwargs
def _check_locale(self):
'''
Uses the locale module to test the currently set locale
(per the LANG and LC_CTYPE environment settings)
'''
try:
# setting the locale to '' uses the default locale
# as it would be returned by locale.getdefaultlocale()
locale.setlocale(locale.LC_ALL, '')
except locale.Error:
# fallback to the 'C' locale, which may cause unicode
# issues but is preferable to simply failing because
# of an unknown locale
locale.setlocale(locale.LC_ALL, 'C')
os.environ['LANG'] = 'C'
os.environ['LC_ALL'] = 'C'
os.environ['LC_MESSAGES'] = 'C'
except Exception as e:
self.fail_json(msg="An unknown error was encountered while attempting to validate the locale: %s" %
to_native(e), exception=traceback.format_exc())
def _handle_aliases(self, spec=None, param=None):
# this uses exceptions as it happens before we can safely call fail_json
aliases_results = {} # alias:canon
if param is None:
param = self.params
if spec is None:
spec = self.argument_spec
for (k, v) in spec.items():
self._legal_inputs.append(k)
aliases = v.get('aliases', None)
default = v.get('default', None)
required = v.get('required', False)
if default is not None and required:
# not alias specific but this is a good place to check this
raise Exception("internal error: required and default are mutually exclusive for %s" % k)
if aliases is None:
continue
if not isinstance(aliases, SEQUENCETYPE) or isinstance(aliases, (binary_type, text_type)):
raise Exception('internal error: aliases must be a list or tuple')
for alias in aliases:
self._legal_inputs.append(alias)
aliases_results[alias] = k
if alias in param:
param[k] = param[alias]
return aliases_results
def _handle_no_log_values(self, spec=None, param=None):
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
# Use the argspec to determine which args are no_log
for arg_name, arg_opts in spec.items():
if arg_opts.get('no_log', False):
# Find the value for the no_log'd param
no_log_object = param.get(arg_name, None)
if no_log_object:
self.no_log_values.update(return_values(no_log_object))
if arg_opts.get('removed_in_version') is not None and arg_name in param:
self._deprecations.append({
'msg': "Param '%s' is deprecated. See the module docs for more information" % arg_name,
'version': arg_opts.get('removed_in_version')
})
def _check_arguments(self, check_invalid_arguments, spec=None, param=None, legal_inputs=None):
self._syslog_facility = 'LOG_USER'
unsupported_parameters = set()
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
if legal_inputs is None:
legal_inputs = self._legal_inputs
for (k, v) in list(param.items()):
if k == '_ansible_check_mode' and v:
self.check_mode = True
elif k == '_ansible_no_log':
self.no_log = self.boolean(v)
elif k == '_ansible_debug':
self._debug = self.boolean(v)
elif k == '_ansible_diff':
self._diff = self.boolean(v)
elif k == '_ansible_verbosity':
self._verbosity = v
elif k == '_ansible_selinux_special_fs':
self._selinux_special_fs = v
elif k == '_ansible_syslog_facility':
self._syslog_facility = v
elif k == '_ansible_version':
self.ansible_version = v
elif k == '_ansible_module_name':
self._name = v
elif k == '_ansible_socket':
self._socket_path = v
elif k == '_ansible_shell_executable' and v:
self._shell = v
elif check_invalid_arguments and k not in legal_inputs:
unsupported_parameters.add(k)
# clean up internal params:
if k.startswith('_ansible_'):
del self.params[k]
if unsupported_parameters:
msg = "Unsupported parameters for (%s) module: %s" % (self._name, ','.join(sorted(list(unsupported_parameters))))
if self._options_context:
msg += " found in %s." % " -> ".join(self._options_context)
msg += " Supported parameters include: %s" % (','.join(sorted(spec.keys())))
self.fail_json(msg=msg)
if self.check_mode and not self.supports_check_mode:
self.exit_json(skipped=True, msg="remote module (%s) does not support check mode" % self._name)
def _count_terms(self, check, param=None):
count = 0
if param is None:
param = self.params
for term in check:
if term in param:
count += 1
return count
def _check_mutually_exclusive(self, spec, param=None):
if spec is None:
return
for check in spec:
count = self._count_terms(check, param)
if count > 1:
msg = "parameters are mutually exclusive: %s" % (check,)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_required_one_of(self, spec, param=None):
if spec is None:
return
for check in spec:
count = self._count_terms(check, param)
if count == 0:
msg = "one of the following is required: %s" % ','.join(check)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_required_together(self, spec, param=None):
if spec is None:
return
for check in spec:
counts = [self._count_terms([field], param) for field in check]
non_zero = [c for c in counts if c > 0]
if len(non_zero) > 0:
if 0 in counts:
msg = "parameters are required together: %s" % (check,)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_required_arguments(self, spec=None, param=None):
''' ensure all required arguments are present '''
missing = []
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
required = v.get('required', False)
if required and k not in param:
missing.append(k)
if len(missing) > 0:
msg = "missing required arguments: %s" % ",".join(missing)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_required_if(self, spec, param=None):
        ''' ensure that parameters which are conditionally required are present '''
if spec is None:
return
if param is None:
param = self.params
for sp in spec:
missing = []
max_missing_count = 0
is_one_of = False
if len(sp) == 4:
key, val, requirements, is_one_of = sp
else:
key, val, requirements = sp
            # If is_one_of is True, at least one requirement should be
            # present; otherwise all requirements should be present.
if is_one_of:
max_missing_count = len(requirements)
if key in param and param[key] == val:
for check in requirements:
count = self._count_terms((check,), param)
if count == 0:
missing.append(check)
if len(missing) and len(missing) >= max_missing_count:
msg = "%s is %s but the following are missing: %s" % (key, val, ','.join(missing))
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_argument_values(self, spec=None, param=None):
''' ensure all arguments have the requested values, and there are no stray arguments '''
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
choices = v.get('choices', None)
if choices is None:
continue
if isinstance(choices, SEQUENCETYPE) and not isinstance(choices, (binary_type, text_type)):
if k in param:
if param[k] not in choices:
# PyYaml converts certain strings to bools. If we can unambiguously convert back, do so before checking
# the value. If we can't figure this out, module author is responsible.
lowered_choices = None
if param[k] == 'False':
lowered_choices = _lenient_lowercase(choices)
overlap = BOOLEANS_FALSE.intersection(choices)
if len(overlap) == 1:
# Extract from a set
(param[k],) = overlap
if param[k] == 'True':
if lowered_choices is None:
lowered_choices = _lenient_lowercase(choices)
overlap = BOOLEANS_TRUE.intersection(choices)
if len(overlap) == 1:
(param[k],) = overlap
if param[k] not in choices:
choices_str = ",".join([to_native(c) for c in choices])
msg = "value of %s must be one of: %s, got: %s" % (k, choices_str, param[k])
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
else:
msg = "internal error: choices for argument %s are not iterable: %s" % (k, choices)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def safe_eval(self, value, locals=None, include_exceptions=False):
# do not allow method calls to modules
if not isinstance(value, string_types):
            # already templated to a data structure, perhaps?
if include_exceptions:
return (value, None)
return value
if re.search(r'\w\.\w+\(', value):
if include_exceptions:
return (value, None)
return value
# do not allow imports
if re.search(r'import \w+', value):
if include_exceptions:
return (value, None)
return value
try:
result = literal_eval(value)
if include_exceptions:
return (result, None)
else:
return result
except Exception as e:
if include_exceptions:
return (value, e)
return value
def _check_type_str(self, value):
if isinstance(value, string_types):
return value
# Note: This could throw a unicode error if value's __str__() method
# returns non-ascii. Have to port utils.to_bytes() if that happens
return str(value)
def _check_type_list(self, value):
if isinstance(value, list):
return value
if isinstance(value, string_types):
return value.split(",")
elif isinstance(value, int) or isinstance(value, float):
return [str(value)]
raise TypeError('%s cannot be converted to a list' % type(value))
def _check_type_dict(self, value):
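        # Accepts an existing dict, a JSON object string such as '{"a": 1}', or
        # a key=value string such as 'a=1 b=2' (illustrative examples of the
        # formats handled below).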
if isinstance(value, dict):
return value
if isinstance(value, string_types):
if value.startswith("{"):
try:
return json.loads(value)
except:
(result, exc) = self.safe_eval(value, dict(), include_exceptions=True)
if exc is not None:
raise TypeError('unable to evaluate string as dictionary')
return result
elif '=' in value:
fields = []
field_buffer = []
in_quote = False
in_escape = False
for c in value.strip():
if in_escape:
field_buffer.append(c)
in_escape = False
elif c == '\\':
in_escape = True
elif not in_quote and c in ('\'', '"'):
in_quote = c
elif in_quote and in_quote == c:
in_quote = False
elif not in_quote and c in (',', ' '):
field = ''.join(field_buffer)
if field:
fields.append(field)
field_buffer = []
else:
field_buffer.append(c)
field = ''.join(field_buffer)
if field:
fields.append(field)
return dict(x.split("=", 1) for x in fields)
else:
raise TypeError("dictionary requested, could not parse JSON or key=value")
raise TypeError('%s cannot be converted to a dict' % type(value))
def _check_type_bool(self, value):
if isinstance(value, bool):
return value
if isinstance(value, string_types) or isinstance(value, int):
return self.boolean(value)
raise TypeError('%s cannot be converted to a bool' % type(value))
def _check_type_int(self, value):
if isinstance(value, int):
return value
if isinstance(value, string_types):
return int(value)
raise TypeError('%s cannot be converted to an int' % type(value))
def _check_type_float(self, value):
if isinstance(value, float):
return value
if isinstance(value, (binary_type, text_type, int)):
return float(value)
raise TypeError('%s cannot be converted to a float' % type(value))
def _check_type_path(self, value):
value = self._check_type_str(value)
return os.path.expanduser(os.path.expandvars(value))
def _check_type_jsonarg(self, value):
# Return a jsonified string. Sometimes the controller turns a json
# string into a dict/list so transform it back into json here
if isinstance(value, (text_type, binary_type)):
return value.strip()
else:
if isinstance(value, (list, tuple, dict)):
return self.jsonify(value)
raise TypeError('%s cannot be converted to a json string' % type(value))
def _check_type_raw(self, value):
return value
def _check_type_bytes(self, value):
try:
self.human_to_bytes(value)
except ValueError:
raise TypeError('%s cannot be converted to a Byte value' % type(value))
def _check_type_bits(self, value):
try:
self.human_to_bytes(value, isbits=True)
except ValueError:
raise TypeError('%s cannot be converted to a Bit value' % type(value))
def _handle_options(self, argument_spec=None, params=None):
''' deal with options to create sub spec '''
if argument_spec is None:
argument_spec = self.argument_spec
if params is None:
params = self.params
for (k, v) in argument_spec.items():
wanted = v.get('type', None)
if wanted == 'dict' or (wanted == 'list' and v.get('elements', '') == 'dict'):
spec = v.get('options', None)
if spec is None or not params[k]:
continue
self._options_context.append(k)
if isinstance(params[k], dict):
elements = [params[k]]
else:
elements = params[k]
for param in elements:
if not isinstance(param, dict):
self.fail_json(msg="value of %s must be of type dict or list of dict" % k)
self._set_fallbacks(spec, param)
options_aliases = self._handle_aliases(spec, param)
self._handle_no_log_values(spec, param)
options_legal_inputs = list(spec.keys()) + list(options_aliases.keys())
self._check_arguments(self.check_invalid_arguments, spec, param, options_legal_inputs)
# check exclusive early
if not self.bypass_checks:
self._check_mutually_exclusive(v.get('mutually_exclusive', None), param)
self._set_defaults(pre=True, spec=spec, param=param)
if not self.bypass_checks:
self._check_required_arguments(spec, param)
self._check_argument_types(spec, param)
self._check_argument_values(spec, param)
self._check_required_together(v.get('required_together', None), param)
self._check_required_one_of(v.get('required_one_of', None), param)
self._check_required_if(v.get('required_if', None), param)
self._set_defaults(pre=False, spec=spec, param=param)
# handle multi level options (sub argspec)
self._handle_options(spec, param)
self._options_context.pop()
def _check_argument_types(self, spec=None, param=None):
''' ensure all arguments have the requested type '''
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
wanted = v.get('type', None)
if k not in param:
continue
value = param[k]
if value is None:
continue
if not callable(wanted):
if wanted is None:
# Mostly we want to default to str.
# For values set to None explicitly, return None instead as
# that allows a user to unset a parameter
if param[k] is None:
continue
wanted = 'str'
try:
type_checker = self._CHECK_ARGUMENT_TYPES_DISPATCHER[wanted]
except KeyError:
self.fail_json(msg="implementation error: unknown type %s requested for %s" % (wanted, k))
else:
# set the type_checker to the callable, and reset wanted to the callable's name (or type if it doesn't have one, ala MagicMock)
type_checker = wanted
wanted = getattr(wanted, '__name__', to_native(type(wanted)))
try:
param[k] = type_checker(value)
except (TypeError, ValueError) as e:
self.fail_json(msg="argument %s is of type %s and we were unable to convert to %s: %s" %
(k, type(value), wanted, to_native(e)))
def _set_defaults(self, pre=True, spec=None, param=None):
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
default = v.get('default', None)
if pre is True:
# this prevents setting defaults on required items
if default is not None and k not in param:
param[k] = default
else:
# make sure things without a default still get set None
if k not in param:
param[k] = default
def _set_fallbacks(self, spec=None, param=None):
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
fallback = v.get('fallback', (None,))
fallback_strategy = fallback[0]
fallback_args = []
fallback_kwargs = {}
if k not in param and fallback_strategy is not None:
for item in fallback[1:]:
if isinstance(item, dict):
fallback_kwargs = item
else:
fallback_args = item
try:
param[k] = fallback_strategy(*fallback_args, **fallback_kwargs)
except AnsibleFallbackNotFound:
continue
def _load_params(self):
''' read the input and set the params attribute.
This method is for backwards compatibility. The guts of the function
were moved out in 2.1 so that custom modules could read the parameters.
'''
# debug overrides to read args from file or cmdline
self.params = _load_params()
def _log_to_syslog(self, msg):
if HAS_SYSLOG:
module = 'ansible-%s' % self._name
facility = getattr(syslog, self._syslog_facility, syslog.LOG_USER)
syslog.openlog(str(module), 0, facility)
syslog.syslog(syslog.LOG_INFO, msg)
def debug(self, msg):
if self._debug:
self.log('[debug] %s' % msg)
def log(self, msg, log_args=None):
if not self.no_log:
if log_args is None:
log_args = dict()
module = 'ansible-%s' % self._name
if isinstance(module, binary_type):
module = module.decode('utf-8', 'replace')
# 6655 - allow for accented characters
if not isinstance(msg, (binary_type, text_type)):
raise TypeError("msg should be a string (got %s)" % type(msg))
# We want journal to always take text type
# syslog takes bytes on py2, text type on py3
if isinstance(msg, binary_type):
journal_msg = remove_values(msg.decode('utf-8', 'replace'), self.no_log_values)
else:
# TODO: surrogateescape is a danger here on Py3
journal_msg = remove_values(msg, self.no_log_values)
if PY3:
syslog_msg = journal_msg
else:
syslog_msg = journal_msg.encode('utf-8', 'replace')
if has_journal:
journal_args = [("MODULE", os.path.basename(__file__))]
for arg in log_args:
journal_args.append((arg.upper(), str(log_args[arg])))
try:
journal.send(u"%s %s" % (module, journal_msg), **dict(journal_args))
except IOError:
# fall back to syslog since logging to journal failed
self._log_to_syslog(syslog_msg)
else:
self._log_to_syslog(syslog_msg)
def _log_invocation(self):
''' log that ansible ran the module '''
# TODO: generalize a separate log function and make log_invocation use it
# Sanitize possible password argument when logging.
log_args = dict()
for param in self.params:
canon = self.aliases.get(param, param)
arg_opts = self.argument_spec.get(canon, {})
no_log = arg_opts.get('no_log', False)
if self.boolean(no_log):
log_args[param] = 'NOT_LOGGING_PARAMETER'
# try to capture all passwords/passphrase named fields missed by no_log
elif PASSWORD_MATCH.search(param) and arg_opts.get('type', 'str') != 'bool' and not arg_opts.get('choices', False):
# skip boolean and enums as they are about 'password' state
log_args[param] = 'NOT_LOGGING_PASSWORD'
self.warn('Module did not set no_log for %s' % param)
else:
param_val = self.params[param]
if not isinstance(param_val, (text_type, binary_type)):
param_val = str(param_val)
elif isinstance(param_val, text_type):
param_val = param_val.encode('utf-8')
log_args[param] = heuristic_log_sanitize(param_val, self.no_log_values)
msg = ['%s=%s' % (to_native(arg), to_native(val)) for arg, val in log_args.items()]
if msg:
msg = 'Invoked with %s' % ' '.join(msg)
else:
msg = 'Invoked'
self.log(msg, log_args=log_args)
def _set_cwd(self):
try:
cwd = os.getcwd()
if not os.access(cwd, os.F_OK | os.R_OK):
raise Exception()
return cwd
except:
# we don't have access to the cwd, probably because of sudo.
# Try and move to a neutral location to prevent errors
for cwd in [os.path.expandvars('$HOME'), tempfile.gettempdir()]:
try:
if os.access(cwd, os.F_OK | os.R_OK):
os.chdir(cwd)
return cwd
except:
pass
# we won't error here, as it may *not* be a problem,
# and we don't want to break modules unnecessarily
return None
def get_bin_path(self, arg, required=False, opt_dirs=None):
'''
find system executable in PATH.
Optional arguments:
- required: if executable is not found and required is true, fail_json
- opt_dirs: optional list of directories to search in addition to PATH
if found return full path; otherwise return None
'''
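        # Illustrative call (sketch): module.get_bin_path('chattr') might return
        # '/usr/bin/chattr' on a typical Linux host, or None when the binary is
        # not found; with required=True a missing binary triggers fail_json instead.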
opt_dirs = [] if opt_dirs is None else opt_dirs
sbin_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin']
paths = []
for d in opt_dirs:
if d is not None and os.path.exists(d):
paths.append(d)
paths += os.environ.get('PATH', '').split(os.pathsep)
bin_path = None
# mangle PATH to include /sbin dirs
for p in sbin_paths:
if p not in paths and os.path.exists(p):
paths.append(p)
for d in paths:
if not d:
continue
path = os.path.join(d, arg)
if os.path.exists(path) and not os.path.isdir(path) and is_executable(path):
bin_path = path
break
if required and bin_path is None:
self.fail_json(msg='Failed to find required executable %s in paths: %s' % (arg, os.pathsep.join(paths)))
return bin_path
def boolean(self, arg):
''' return a bool for the arg '''
if arg is None:
return arg
try:
return boolean(arg)
except TypeError as e:
self.fail_json(msg=to_native(e))
def jsonify(self, data):
for encoding in ("utf-8", "latin-1"):
try:
return json.dumps(data, encoding=encoding, cls=_SetEncoder)
            # Old systems using the old simplejson module do not support the encoding keyword.
except TypeError:
try:
new_data = json_dict_bytes_to_unicode(data, encoding=encoding)
except UnicodeDecodeError:
continue
return json.dumps(new_data, cls=_SetEncoder)
except UnicodeDecodeError:
continue
self.fail_json(msg='Invalid unicode encoding encountered')
def from_json(self, data):
return json.loads(data)
def add_cleanup_file(self, path):
if path not in self.cleanup_files:
self.cleanup_files.append(path)
def do_cleanup_files(self):
for path in self.cleanup_files:
self.cleanup(path)
def _return_formatted(self, kwargs):
self.add_path_info(kwargs)
if 'invocation' not in kwargs:
kwargs['invocation'] = {'module_args': self.params}
if 'warnings' in kwargs:
if isinstance(kwargs['warnings'], list):
for w in kwargs['warnings']:
self.warn(w)
else:
self.warn(kwargs['warnings'])
if self._warnings:
kwargs['warnings'] = self._warnings
if 'deprecations' in kwargs:
if isinstance(kwargs['deprecations'], list):
for d in kwargs['deprecations']:
if isinstance(d, SEQUENCETYPE) and len(d) == 2:
self.deprecate(d[0], version=d[1])
else:
self.deprecate(d)
else:
self.deprecate(kwargs['deprecations'])
if self._deprecations:
kwargs['deprecations'] = self._deprecations
kwargs = remove_values(kwargs, self.no_log_values)
print('\n%s' % self.jsonify(kwargs))
def exit_json(self, **kwargs):
''' return from the module, without error '''
self.do_cleanup_files()
self._return_formatted(kwargs)
sys.exit(0)
def fail_json(self, **kwargs):
''' return from the module, with an error message '''
assert 'msg' in kwargs, "implementation error -- msg to explain the error is required"
kwargs['failed'] = True
# add traceback if debug or high verbosity and it is missing
        # Note: badly named as 'exception'; it has really always been a 'traceback'
if 'exception' not in kwargs and sys.exc_info()[2] and (self._debug or self._verbosity >= 3):
kwargs['exception'] = ''.join(traceback.format_tb(sys.exc_info()[2]))
self.do_cleanup_files()
self._return_formatted(kwargs)
sys.exit(1)
def fail_on_missing_params(self, required_params=None):
        ''' This is for checking for required params when we cannot check via argspec because we
need more information than is simply given in the argspec.
'''
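        # Illustrative call (sketch): module.fail_on_missing_params(['name', 'state'])
        # fails the module unless both parameters were supplied with truthy values.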
if not required_params:
return
missing_params = []
for required_param in required_params:
if not self.params.get(required_param):
missing_params.append(required_param)
if missing_params:
self.fail_json(msg="missing required arguments: %s" % ','.join(missing_params))
def digest_from_file(self, filename, algorithm):
''' Return hex digest of local file for a digest_method specified by name, or None if file is not present. '''
if not os.path.exists(filename):
return None
if os.path.isdir(filename):
self.fail_json(msg="attempted to take checksum of directory: %s" % filename)
# preserve old behaviour where the third parameter was a hash algorithm object
if hasattr(algorithm, 'hexdigest'):
digest_method = algorithm
else:
try:
digest_method = AVAILABLE_HASH_ALGORITHMS[algorithm]()
except KeyError:
self.fail_json(msg="Could not hash file '%s' with algorithm '%s'. Available algorithms: %s" %
(filename, algorithm, ', '.join(AVAILABLE_HASH_ALGORITHMS)))
blocksize = 64 * 1024
infile = open(os.path.realpath(filename), 'rb')
block = infile.read(blocksize)
while block:
digest_method.update(block)
block = infile.read(blocksize)
infile.close()
return digest_method.hexdigest()
def md5(self, filename):
''' Return MD5 hex digest of local file using digest_from_file().
Do not use this function unless you have no other choice for:
1) Optional backwards compatibility
2) Compatibility with a third party protocol
This function will not work on systems complying with FIPS-140-2.
Most uses of this function can use the module.sha1 function instead.
'''
if 'md5' not in AVAILABLE_HASH_ALGORITHMS:
raise ValueError('MD5 not available. Possibly running in FIPS mode')
return self.digest_from_file(filename, 'md5')
def sha1(self, filename):
''' Return SHA1 hex digest of local file using digest_from_file(). '''
return self.digest_from_file(filename, 'sha1')
def sha256(self, filename):
''' Return SHA-256 hex digest of local file using digest_from_file(). '''
return self.digest_from_file(filename, 'sha256')
def backup_local(self, fn):
        '''make a date-marked backup of the specified file; return the path of the backup, or an empty string if the file does not exist'''
backupdest = ''
if os.path.exists(fn):
# backups named basename.PID.YYYY-MM-DD@HH:MM:SS~
ext = time.strftime("%Y-%m-%d@%H:%M:%S~", time.localtime(time.time()))
backupdest = '%s.%s.%s' % (fn, os.getpid(), ext)
try:
self.preserved_copy(fn, backupdest)
except (shutil.Error, IOError) as e:
self.fail_json(msg='Could not make backup of %s to %s: %s' % (fn, backupdest, to_native(e)))
return backupdest
def cleanup(self, tmpfile):
if os.path.exists(tmpfile):
try:
os.unlink(tmpfile)
except OSError as e:
sys.stderr.write("could not cleanup %s: %s" % (tmpfile, to_native(e)))
def preserved_copy(self, src, dest):
"""Copy a file with preserved ownership, permissions and context"""
# shutil.copy2(src, dst)
# Similar to shutil.copy(), but metadata is copied as well - in fact,
# this is just shutil.copy() followed by copystat(). This is similar
# to the Unix command cp -p.
#
# shutil.copystat(src, dst)
# Copy the permission bits, last access time, last modification time,
# and flags from src to dst. The file contents, owner, and group are
# unaffected. src and dst are path names given as strings.
shutil.copy2(src, dest)
# Set the context
if self.selinux_enabled():
context = self.selinux_context(src)
self.set_context_if_different(dest, context, False)
# chown it
try:
dest_stat = os.stat(src)
tmp_stat = os.stat(dest)
if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid):
os.chown(dest, dest_stat.st_uid, dest_stat.st_gid)
except OSError as e:
if e.errno != errno.EPERM:
raise
# Set the attributes
current_attribs = self.get_file_attributes(src)
current_attribs = current_attribs.get('attr_flags', [])
current_attribs = ''.join(current_attribs)
self.set_attributes_if_different(dest, current_attribs, True)
def atomic_move(self, src, dest, unsafe_writes=False):
        '''atomically move src to dest, copying attributes from dest.
        It uses os.rename to ensure this, as rename is an atomic operation; the rest of the function
        works around limitations and corner cases and ensures the selinux context is saved if possible'''
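        # Rough flow (descriptive sketch of the code below): copy mode/ownership/flags
        # from an existing dest onto src, try a direct os.rename(), and on
        # EPERM/EXDEV/EACCES/ETXTBSY/EBUSY fall back to creating a temp file next
        # to dest, moving src onto it and renaming it over dest; unsafe_writes
        # allows a plain copy as a last resort.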
context = None
dest_stat = None
b_src = to_bytes(src, errors='surrogate_or_strict')
b_dest = to_bytes(dest, errors='surrogate_or_strict')
if os.path.exists(b_dest):
try:
dest_stat = os.stat(b_dest)
# copy mode and ownership
os.chmod(b_src, dest_stat.st_mode & PERM_BITS)
os.chown(b_src, dest_stat.st_uid, dest_stat.st_gid)
# try to copy flags if possible
if hasattr(os, 'chflags') and hasattr(dest_stat, 'st_flags'):
try:
os.chflags(b_src, dest_stat.st_flags)
except OSError as e:
for err in 'EOPNOTSUPP', 'ENOTSUP':
if hasattr(errno, err) and e.errno == getattr(errno, err):
break
else:
raise
except OSError as e:
if e.errno != errno.EPERM:
raise
if self.selinux_enabled():
context = self.selinux_context(dest)
else:
if self.selinux_enabled():
context = self.selinux_default_context(dest)
creating = not os.path.exists(b_dest)
try:
# Optimistically try a rename, solves some corner cases and can avoid useless work, throws exception if not atomic.
os.rename(b_src, b_dest)
except (IOError, OSError) as e:
if e.errno not in [errno.EPERM, errno.EXDEV, errno.EACCES, errno.ETXTBSY, errno.EBUSY]:
                # only try workarounds for errno 18 (cross device), 1 (not permitted), 13 (permission denied),
                # 26 (text file busy) and 16 (device or resource busy), which happen on vagrant synced folders and other 'exotic' non posix file systems
self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, to_native(e)),
exception=traceback.format_exc())
else:
b_dest_dir = os.path.dirname(b_dest)
# Use bytes here. In the shippable CI, this fails with
# a UnicodeError with surrogateescape'd strings for an unknown
# reason (doesn't happen in a local Ubuntu16.04 VM)
native_dest_dir = b_dest_dir
native_suffix = os.path.basename(b_dest)
native_prefix = b('.ansible_tmp')
error_msg = None
tmp_dest_name = None
try:
tmp_dest_fd, tmp_dest_name = tempfile.mkstemp(prefix=native_prefix, dir=native_dest_dir, suffix=native_suffix)
except (OSError, IOError) as e:
error_msg = 'The destination directory (%s) is not writable by the current user. Error was: %s' % (os.path.dirname(dest), to_native(e))
except TypeError:
# We expect that this is happening because python3.4.x and
# below can't handle byte strings in mkstemp(). Traceback
# would end in something like:
# file = _os.path.join(dir, pre + name + suf)
# TypeError: can't concat bytes to str
error_msg = ('Failed creating temp file for atomic move. This usually happens when using Python3 less than Python3.5. '
'Please use Python2.x or Python3.5 or greater.')
finally:
if error_msg:
if unsafe_writes:
self._unsafe_writes(b_src, b_dest)
else:
self.fail_json(msg=error_msg, exception=traceback.format_exc())
if tmp_dest_name:
b_tmp_dest_name = to_bytes(tmp_dest_name, errors='surrogate_or_strict')
try:
try:
# close tmp file handle before file operations to prevent text file busy errors on vboxfs synced folders (windows host)
os.close(tmp_dest_fd)
                        # this may leave a tmp file behind when running under sudo and not as root
try:
shutil.move(b_src, b_tmp_dest_name)
except OSError:
# cleanup will happen by 'rm' of tempdir
# copy2 will preserve some metadata
shutil.copy2(b_src, b_tmp_dest_name)
if self.selinux_enabled():
self.set_context_if_different(
b_tmp_dest_name, context, False)
try:
tmp_stat = os.stat(b_tmp_dest_name)
if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid):
os.chown(b_tmp_dest_name, dest_stat.st_uid, dest_stat.st_gid)
except OSError as e:
if e.errno != errno.EPERM:
raise
try:
os.rename(b_tmp_dest_name, b_dest)
except (shutil.Error, OSError, IOError) as e:
if unsafe_writes and e.errno == errno.EBUSY:
self._unsafe_writes(b_tmp_dest_name, b_dest)
else:
self.fail_json(msg='Unable to rename file: %s to %s: %s' % (src, dest, to_native(e)),
exception=traceback.format_exc())
except (shutil.Error, OSError, IOError) as e:
self.fail_json(msg='Failed to replace file: %s to %s: %s' % (src, dest, to_native(e)),
exception=traceback.format_exc())
finally:
self.cleanup(b_tmp_dest_name)
if creating:
# make sure the file has the correct permissions
# based on the current value of umask
umask = os.umask(0)
os.umask(umask)
os.chmod(b_dest, DEFAULT_PERM & ~umask)
try:
os.chown(b_dest, os.geteuid(), os.getegid())
except OSError:
# We're okay with trying our best here. If the user is not
# root (or old Unices) they won't be able to chown.
pass
if self.selinux_enabled():
# rename might not preserve context
self.set_context_if_different(dest, context, False)
def _unsafe_writes(self, src, dest):
# sadly there are some situations where we cannot ensure atomicity, but only if
# the user insists and we get the appropriate error we update the file unsafely
try:
            # initialise handles so the finally block below cannot hit an unbound name if open() fails
            out_dest = in_src = None
            try:
out_dest = open(dest, 'wb')
in_src = open(src, 'rb')
shutil.copyfileobj(in_src, out_dest)
finally: # assuring closed files in 2.4 compatible way
if out_dest:
out_dest.close()
if in_src:
in_src.close()
except (shutil.Error, OSError, IOError) as e:
self.fail_json(msg='Could not write data to file (%s) from (%s): %s' % (dest, src, to_native(e)),
exception=traceback.format_exc())
def _read_from_pipes(self, rpipes, rfds, file_descriptor):
data = b('')
if file_descriptor in rfds:
data = os.read(file_descriptor.fileno(), 9000)
if data == b(''):
rpipes.remove(file_descriptor)
return data
def run_command(self, args, check_rc=False, close_fds=True, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None,
use_unsafe_shell=False, prompt_regex=None, environ_update=None, umask=None, encoding='utf-8', errors='surrogate_or_strict'):
'''
Execute a command, returns rc, stdout, and stderr.
:arg args: is the command to run
* If args is a list, the command will be run with shell=False.
            * If args is a string and use_unsafe_shell=False it will split args into a list and run with shell=False
            * If args is a string and use_unsafe_shell=True it runs with shell=True.
        :kw check_rc: Whether to call fail_json in case of non-zero RC.
Default False
:kw close_fds: See documentation for subprocess.Popen(). Default True
:kw executable: See documentation for subprocess.Popen(). Default None
:kw data: If given, information to write to the stdin of the command
:kw binary_data: If False, append a newline to the data. Default False
:kw path_prefix: If given, additional path to find the command in.
            This adds to the PATH environment variable so helper commands in
the same directory can also be found
:kw cwd: If given, working directory to run the command inside
:kw use_unsafe_shell: See `args` parameter. Default False
:kw prompt_regex: Regex string (not a compiled regex) which can be
used to detect prompts in the stdout which would otherwise cause
the execution to hang (especially if no input data is specified)
:kw environ_update: dictionary to *update* os.environ with
:kw umask: Umask to be used when running the command. Default None
:kw encoding: Since we return native strings, on python3 we need to
know the encoding to use to transform from bytes to text. If you
want to always get bytes back, use encoding=None. The default is
"utf-8". This does not affect transformation of strings given as
args.
:kw errors: Since we return native strings, on python3 we need to
transform stdout and stderr from bytes to text. If the bytes are
undecodable in the ``encoding`` specified, then use this error
handler to deal with them. The default is ``surrogate_or_strict``
which means that the bytes will be decoded using the
surrogateescape error handler if available (available on all
python3 versions we support) otherwise a UnicodeError traceback
will be raised. This does not affect transformations of strings
given as args.
:returns: A 3-tuple of return code (integer), stdout (native string),
and stderr (native string). On python2, stdout and stderr are both
byte strings. On python3, stdout and stderr are text strings converted
according to the encoding and errors parameters. If you want byte
strings on python3, use encoding=None to turn decoding to text off.
'''
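        # Illustrative calls (sketch; commands and paths are hypothetical, and
        # 'module' stands for an AnsibleModule instance):
        #
        #     rc, out, err = module.run_command(['/usr/bin/git', 'status'], cwd='/srv/repo')
        #     rc, out, err = module.run_command('echo $HOME', use_unsafe_shell=True)
        #     module.run_command(['/usr/sbin/somecmd', '--flag'], check_rc=True)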
if not isinstance(args, (list, binary_type, text_type)):
msg = "Argument 'args' to run_command must be list or string"
self.fail_json(rc=257, cmd=args, msg=msg)
shell = False
if use_unsafe_shell:
# stringify args for unsafe/direct shell usage
if isinstance(args, list):
args = " ".join([shlex_quote(x) for x in args])
# not set explicitly, check if set by controller
if executable:
args = [executable, '-c', args]
elif self._shell not in (None, '/bin/sh'):
args = [self._shell, '-c', args]
else:
shell = True
else:
# ensure args are a list
if isinstance(args, (binary_type, text_type)):
# On python2.6 and below, shlex has problems with text type
# On python3, shlex needs a text type.
if PY2:
args = to_bytes(args, errors='surrogate_or_strict')
elif PY3:
args = to_text(args, errors='surrogateescape')
args = shlex.split(args)
# expand shellisms
args = [os.path.expanduser(os.path.expandvars(x)) for x in args if x is not None]
prompt_re = None
if prompt_regex:
if isinstance(prompt_regex, text_type):
if PY3:
prompt_regex = to_bytes(prompt_regex, errors='surrogateescape')
elif PY2:
prompt_regex = to_bytes(prompt_regex, errors='surrogate_or_strict')
try:
prompt_re = re.compile(prompt_regex, re.MULTILINE)
except re.error:
self.fail_json(msg="invalid prompt regular expression given to run_command")
rc = 0
msg = None
st_in = None
# Manipulate the environ we'll send to the new process
old_env_vals = {}
# We can set this from both an attribute and per call
for key, val in self.run_command_environ_update.items():
old_env_vals[key] = os.environ.get(key, None)
os.environ[key] = val
if environ_update:
for key, val in environ_update.items():
old_env_vals[key] = os.environ.get(key, None)
os.environ[key] = val
if path_prefix:
old_env_vals['PATH'] = os.environ['PATH']
os.environ['PATH'] = "%s:%s" % (path_prefix, os.environ['PATH'])
# If using test-module and explode, the remote lib path will resemble ...
# /tmp/test_module_scratch/debug_dir/ansible/module_utils/basic.py
# If using ansible or ansible-playbook with a remote system ...
# /tmp/ansible_vmweLQ/ansible_modlib.zip/ansible/module_utils/basic.py
# Clean out python paths set by ansiballz
if 'PYTHONPATH' in os.environ:
pypaths = os.environ['PYTHONPATH'].split(':')
pypaths = [x for x in pypaths
if not x.endswith('/ansible_modlib.zip') and
not x.endswith('/debug_dir')]
os.environ['PYTHONPATH'] = ':'.join(pypaths)
if not os.environ['PYTHONPATH']:
del os.environ['PYTHONPATH']
# create a printable version of the command for use
# in reporting later, which strips out things like
# passwords from the args list
to_clean_args = args
if PY2:
if isinstance(args, text_type):
to_clean_args = to_bytes(args)
else:
if isinstance(args, binary_type):
to_clean_args = to_text(args)
if isinstance(args, (text_type, binary_type)):
to_clean_args = shlex.split(to_clean_args)
clean_args = []
is_passwd = False
for arg in (to_native(a) for a in to_clean_args):
if is_passwd:
is_passwd = False
clean_args.append('********')
continue
if PASSWD_ARG_RE.match(arg):
sep_idx = arg.find('=')
if sep_idx > -1:
clean_args.append('%s=********' % arg[:sep_idx])
continue
else:
is_passwd = True
arg = heuristic_log_sanitize(arg, self.no_log_values)
clean_args.append(arg)
clean_args = ' '.join(shlex_quote(arg) for arg in clean_args)
if data:
st_in = subprocess.PIPE
kwargs = dict(
executable=executable,
shell=shell,
close_fds=close_fds,
stdin=st_in,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
# store the pwd
prev_dir = os.getcwd()
# make sure we're in the right working directory
if cwd and os.path.isdir(cwd):
cwd = os.path.abspath(os.path.expanduser(cwd))
kwargs['cwd'] = cwd
try:
os.chdir(cwd)
except (OSError, IOError) as e:
self.fail_json(rc=e.errno, msg="Could not open %s, %s" % (cwd, to_native(e)),
exception=traceback.format_exc())
old_umask = None
if umask:
old_umask = os.umask(umask)
try:
if self._debug:
self.log('Executing: ' + clean_args)
cmd = subprocess.Popen(args, **kwargs)
# the communication logic here is essentially taken from that
# of the _communicate() function in ssh.py
stdout = b('')
stderr = b('')
rpipes = [cmd.stdout, cmd.stderr]
if data:
if not binary_data:
data += '\n'
if isinstance(data, text_type):
data = to_bytes(data)
cmd.stdin.write(data)
cmd.stdin.close()
while True:
rfds, wfds, efds = select.select(rpipes, [], rpipes, 1)
stdout += self._read_from_pipes(rpipes, rfds, cmd.stdout)
stderr += self._read_from_pipes(rpipes, rfds, cmd.stderr)
# if we're checking for prompts, do it now
if prompt_re:
if prompt_re.search(stdout) and not data:
if encoding:
stdout = to_native(stdout, encoding=encoding, errors=errors)
else:
stdout = stdout
return (257, stdout, "A prompt was encountered while running a command, but no input data was specified")
# only break out if no pipes are left to read or
# the pipes are completely read and
# the process is terminated
if (not rpipes or not rfds) and cmd.poll() is not None:
break
# No pipes are left to read but process is not yet terminated
# Only then it is safe to wait for the process to be finished
# NOTE: Actually cmd.poll() is always None here if rpipes is empty
elif not rpipes and cmd.poll() is None:
cmd.wait()
# The process is terminated. Since no pipes to read from are
# left, there is no need to call select() again.
break
cmd.stdout.close()
cmd.stderr.close()
rc = cmd.returncode
except (OSError, IOError) as e:
self.log("Error Executing CMD:%s Exception:%s" % (clean_args, to_native(e)))
self.fail_json(rc=e.errno, msg=to_native(e), cmd=clean_args)
except Exception as e:
self.log("Error Executing CMD:%s Exception:%s" % (clean_args, to_native(traceback.format_exc())))
self.fail_json(rc=257, msg=to_native(e), exception=traceback.format_exc(), cmd=clean_args)
# Restore env settings
for key, val in old_env_vals.items():
if val is None:
del os.environ[key]
else:
os.environ[key] = val
if old_umask:
os.umask(old_umask)
if rc != 0 and check_rc:
msg = heuristic_log_sanitize(stderr.rstrip(), self.no_log_values)
self.fail_json(cmd=clean_args, rc=rc, stdout=stdout, stderr=stderr, msg=msg)
# reset the pwd
os.chdir(prev_dir)
if encoding is not None:
return (rc, to_native(stdout, encoding=encoding, errors=errors),
to_native(stderr, encoding=encoding, errors=errors))
return (rc, stdout, stderr)
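    # Illustrative usage sketch (not part of the original module); assumes
    # `module` is an already-instantiated AnsibleModule. Passing the command as
    # a list keeps shell=False, and check_rc=True makes a non-zero return code
    # call fail_json automatically:
    #
    #     rc, out, err = module.run_command(['/usr/bin/git', 'status'], check_rc=True, cwd='/tmp')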
def append_to_file(self, filename, str):
filename = os.path.expandvars(os.path.expanduser(filename))
fh = open(filename, 'a')
fh.write(str)
fh.close()
def bytes_to_human(self, size):
return bytes_to_human(size)
# for backwards compatibility
pretty_bytes = bytes_to_human
def human_to_bytes(self, number, isbits=False):
return human_to_bytes(number, isbits)
#
# Backwards compat
#
# In 2.0, moved from inside the module to the toplevel
is_executable = is_executable
def get_module_path():
return os.path.dirname(os.path.realpath(__file__))
| gpl-3.0 | -3,413,303,107,129,768,400 | 38.510004 | 155 | 0.555195 | false |
OpenQbo/qbo_webi | src/teleoperation/sip2rtmp/p2p-sip/src/app/sipapi.py | 4 | 13614 | # Copyright (c) 2007-2009, Kundan Singh. All rights reserved. See LICENSING for details.
'''
High level application programming interface (API) to program SIP devices such as user agents and servers.
The language is derived from understanding of (1) CPL, (2) LESS, (3) SER, (4) VoiceXML?
TODO: Should the script file be a top-level controller (like SER) or installable plugin (like CPL)?
See the sipd.py module as an example on how to implement a SIP proxy and registrar server.
'''
import os, sys, socket, time, traceback, multitask
from exceptions import Exception
from std.rfc2396 import Address
from std.rfc3261 import Stack, Message, Header, UserAgent, Proxy, TransportInfo
from std.rfc2617 import createAuthenticate
from std.kutil import getlocaladdr, Timer
_debug = False
class Event(object):
'''Base class for all events that are handled by Dispatcher. The type property determines the event type.'''
def __init__(self, type, **kwargs):
self.type = type
for k,w in kwargs.iteritems(): self.__dict__[k] = w
class MessageEvent(Event):
'''A MessageEvent encapsulates a SIP message and provides container and attribute access for SIP headers.'''
def __init__(self, type, msg, **kwargs):
Event.__init__(self, type, msg=msg, **kwargs)
def __str__(self): return str(self.msg)
# attribute access: use the msg
def __getattr__(self, name): return self.msg.__getattribute__(name)
def __getitem__(self, name): return self.msg[name]
def __setitem__(self, name, value): self.msg[name] = value
def __delitem__(self, name): del self.msg[name]
def __contains__(self, name): return name in self.msg
class IncomingEvent(MessageEvent):
    '''An IncomingEvent indicates an incoming message, and has an action property to support accept, reject, proxy, redirect, etc.'''
def __init__(self, type, msg, **kwargs):
MessageEvent.__init__(self, type, msg, **kwargs)
self.action = self; self.location = []
def accept(self, contacts=None):
response = self.ua.createResponse(200, 'OK');
if contacts is not None:
for h in map(lambda x: Header(str(x), 'Contact'), contacts): response.insert(h, append=True)
response.Expires = self['Expires'] if self['Expires'] else Header('3600', 'Expires')
self.ua.sendResponse(response)
def reject(self, code, reason=None):
self.ua.sendResponse(code, reason)
    def challenge(self, realm, auth=401):
        # stale is FALSE for a first challenge (auth == 401), TRUE when re-challenging with a fresh nonce
        response = self.ua.createResponse(401, 'Unauthorized')
        response.insert(Header(createAuthenticate(realm=realm, domain=str(self.uri), stale=('FALSE' if auth==401 else 'TRUE')), 'WWW-Authenticate'), append=True)
        self.ua.sendResponse(response)
def proxy(self, recordRoute=False):
location = self.location if isinstance(self.location, list) else [self.location]
for c in location:
proxied = self.ua.createRequest(self.method, c, recordRoute=recordRoute)
self.ua.sendRequest(proxied)
if not location:
self.ua.sendResponse(480, 'Temporarily Unavailable')
def redirect(self):
location = self.location if isinstance(self.location, list) else [self.location]
response = self.ua.createResponse(302, 'Moved Temporarily')
for c in location: response.insert(c, append=True)
self.ua.sendResponse(response)
def default(self): # invoked when nothing else (action) was invoked in the application
if _debug: print 'IncomingEvent default handler called'
self.ua.sendResponse(501, 'Not Implemented')
class OutgoingEvent(MessageEvent):
def __init__(self, type, msg, **kwargs):
MessageEvent.__init__(self, type, msg, **kwargs)
class Dispatcher(object): # TODO: move this to kutil.py module
    '''An event dispatcher similar to ActionScript's EventDispatcher. Should be used very very carefully, because all references are
    strong references and must be explicitly removed for cleanup.'''
def __init__(self): self._handler = {}
def __del__(self): self._handler.clear()
def attach(self, event, func):
'''Attach an event name (str) to the event handler func which takes one argument (event).'''
if event in self._handler:
if func not in self._handler[event]: self._handler[event].append(func)
else: self._handler[event] = [func]
def detach(self, event, func):
'''Detach the event name (str) from the event handler func. If no event is supplied, remove all handlers.'''
if event is not None:
if event in self._handler and func in self._handler[event]: self._handler[event].remove(func)
if len(self._handler[event]) == 0: del self._handler[event]
else: self._handler.clear()
def dispatch(self, event):
'''Dispatch a given event to all the handlers that were attached to the event type.'''
count = 0
if event.type in self._handler:
for func in self._handler[event.type]:
func(event) # TODO: should we suppress the exceptions?
count = count + 1
if not count and hasattr(event, 'action') and hasattr(event.action, 'default') and callable(event.action.default):
event.action.default() # invoke the default handler if no other handler was found.
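# Minimal usage sketch for Dispatcher (illustrative only; uses nothing beyond
# names defined in this module):
#
#   d = Dispatcher()
#   def on_incoming(event): print 'got event', event.type
#   d.attach('incoming', on_incoming)
#   d.dispatch(Event('incoming'))      # invokes on_incoming
#   d.detach('incoming', on_incoming)  # detach explicitly to release the strong reference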
class Agent(Dispatcher):
'''This represents a listening endpoint that interfaces with the SIP stack and exposes various API methods on the endpoint.'''
def __init__(self, sipaddr=('0.0.0.0', 5060), transports=('tcp','udp'), stack=Stack):
'''Construct a new Agent. The sipaddr argument indicates the listening address for incoming SIP messages or connections, and
transports tuple contains list of supported transports such as 'udp' and 'tcp'. The caller may change the SIP stack from the
default one defined in rfc3261.py module.'''
Dispatcher.__init__(self)
if _debug: print 'starting agent on', sipaddr, 'with transports', transports
self.conn, self.stack = dict(), dict() # tables: (host, port)=>TCP sock, (transport type=>stack)
for t in transports:
sock = socket.socket(type=socket.SOCK_DGRAM if t == 'udp' else socket.SOCK_STREAM)
sock.bind(sipaddr)
if t == 'tcp': sock.listen(5)
self.stack[t] = s = stack(self, TransportInfo(sock))
s.sock = sock
self._gens = []
def __del__(self):
'''Delete the object and internal member references.'''
try:
for s in self.stack.values(): s.sock.close()
del self.stack, self._gens
except: pass
Dispatcher.__del__(self)
def start(self):
'''Start the listening tasks in this agent. It returns self for cascaded method calls.'''
for s in self.stack.values(): gen = self._sipreceiver(s); self._gens.append(gen); multitask.add(gen)
return self
def stop(self):
'''Stop the listening tasks in this agent. It returns self for cascaded method calls.'''
for gen in self._gens: gen.close();
self._gens[:] = []
return self
def _sipreceiver(self, stack, maxsize=16386):
'''Handle the messages or connections on the given SIP stack's socket, and pass it to the stack so that stack can invoke
appropriate callback on this object such as receivedRequest.'''
sock = stack.sock
def tcpreceiver(sock, remote): # handle the messages on the given TCP connection.
while True:
data = yield multitask.recv(sock, maxsize)
if _debug: print '%r=>%r on type=%r\n%s'%(remote, sock.getsockname(), sock.type, data)
if data: stack.received(data, remote)
while True:
if sock.type == socket.SOCK_DGRAM:
data, remote = yield multitask.recvfrom(sock, maxsize)
if _debug: print '%r=>%r on type=%r\n%s'%(remote, sock.getsockname(), sock.type, data)
if data: stack.received(data, remote)
elif sock.type == socket.SOCK_STREAM:
conn, remote = yield multitask.accept(sock)
if conn:
self.conn[remote] = conn
multitask.add(tcpreceiver(conn, remote))
else: raise ValueError, 'invalid socket type'
# following callbacks are invoked by the SIP stack
def send(self, data, remote, stack):
'''Send a given data to remote for the SIP stack.'''
def _send(self, data, remote, stack): # a generator function that does the sending
if _debug: print '%r=>%r on type=%r\n%s'%(stack.sock.getsockname(), remote, stack.sock.type, data)
if stack.sock.type == socket.SOCK_STREAM: # for TCP send only if a connection exists to the remote.
if remote in self.conn:
yield multitask.send(self.conn[remote], data) # and send using that connected TCP socket.
else: # for UDP send using the stack's UDP socket.
yield multitask.sendto(stack.sock, data, remote)
multitask.add(_send(self, data, remote, stack))
def createServer(self, request, uri, stack):
'''Create a Proxy UAS for all requests except CANCEL.'''
return (request.method != 'CANCEL') and Proxy(stack, request) or None
def sending(self, ua, message, stack):
if message.method:
if _debug: print 'sending request on stack', message.method
self.dispatch(OutgoingEvent(type='outgoing', msg=message, ua=ua, stack=stack, agent=self))
def receivedRequest(self, ua, request, stack):
if _debug: print 'received request from stack', request.method
self.dispatch(IncomingEvent(type='incoming', msg=request, ua=ua, stack=stack, agent=self))
def receivedResponse(self, ua, response, stack): pass
def cancelled(self, ua, request, stack): pass
def dialogCreated(self, dialog, ua, stack): pass
    def authenticate(self, ua, header, stack):
        if _debug: print 'sipapi.py Agent.authenticate'
        return True
def createTimer(self, app, stack): return Timer(app)
# Methods and classes inspired by SER (SIP Express Router) to support server functions
class Subscriber(dict):
    '''A simple subscriber table using in-memory dict. The application can store subscribers in this, and use this to authenticate
    incoming SIP requests.'''
def __init__(self):
dict.__init__(self)
def store(self, uri, realm, password):
'''Store a new user and his realm and password in this table.'''
self[uri] = [realm, password]
    def authenticate(self, request, realm='localhost'):
        '''Returns 200 on success, 401 on failure, 0 if missing or invalid nonce, and 404 if no password/user information available.'''
        if _debug: print 'sipapi.py Subscriber.authenticate'
auths = filter(lambda x: x['realm']==realm, request.all('Authorization', 'Proxy-Authorization')) # search all our authenticate headers
if not auths: return 0 # missing authenticate header
# TODO: check for valid nonce. for now just assume all nonce to be valid.
uri = request.From.value.uri
if uri not in self: return 404
return 200
class Location(dict):
'''A simple location service using in-memory dict. Subclass may override this to support databases such as MySQL.'''
def __init__(self):
dict.__init__(self)
def save(self, msg, uri, defaultExpires=3600):
'''Save the contacts from REGISTER or PUBLISH msg.'''
expires = int(msg['Expires'].value if msg['Expires'] else defaultExpires)
if uri in self: existing = self[uri]
else: existing = self[uri] = [] # initialize that user's contacts list
if msg['Contact'] and msg.first('Contact').value == '*': # single contact: * header
if msg['Expires'] and msg['Expires'].value == '0': # unregistration msg
del self[uri] # unregister by removing the contacts
else: # handle individual contact headers in the msg
now = time.time()
for c in msg.all('Contact'): # for all contacts in the new msg
e = now + (expires if 'expires' not in c else int(c.expires)) # expiration for this contact.
existing[:] = filter(lambda x: x[0].value.uri!=c.value.uri, existing) # remove matching contacts
existing.insert(0, (c, e)) # insert the new contact in the beginning
existing[:] = filter(lambda x: x[1]>now, existing) # filter out expired contacts
if not existing: # no more contacts
del self[uri] # remove from the table as well
if _debug: print 'save', self
return True
def locate(self, uri):
'''Return all saved contacts for the given uri.'''
if _debug: print 'locate', uri, self
existing = self.get(str(uri), [])
now = time.time()
existing[:] = filter(lambda x: x[1]>now, existing) # remove expired headers
for c in existing: c[0]['expires'] = str(int(c[1]-now)) # update the expires header with relative seconds
return map(lambda x: x[0], existing) # return the contact headers
# Global methods available to the controller script
def run():
'''The run loop which runs the multitask's main loop. This can be terminated by KeyboardInterrupt.'''
try: multitask.run()
except KeyboardInterrupt: pass
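# Typical way to wire an application together (an illustrative sketch; see
# sipd.py for a complete proxy/registrar built on this API):
#
#   agent = Agent(sipaddr=('0.0.0.0', 5060), transports=('udp',)).start()
#   def on_incoming(event): event.action.reject(603, 'Decline')
#   agent.attach('incoming', on_incoming)
#   run()   # blocks until KeyboardInterrupt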
# Unit testing
if __name__ == '__main__':
import doctest
doctest.testmod()
| gpl-2.0 | -1,225,310,935,278,797,300 | 51.972763 | 161 | 0.641839 | false |
pektin/jam | compiler/lekvar/stats.py | 2 | 1290 |
class Stats:
__attrs__ = ['static', 'forward']
static = False
forward = False
def __init__(self, parent):
if parent is not None:
if parent.stats.static_transitive:
self.static = parent.stats.static
if parent.stats.forward_transitive:
self.forward = parent.stats.forward
def __repr__(self):
results = []
for attr in self.__attrs__:
results.append("{}: {}".format(attr, getattr(self, attr)))
return "{{}}".format(", ".join(results))
class SoftScopeStats(Stats):
__attrs__ = Stats.__attrs__ + ['definitely_returns', 'might_return']
definitely_returns = False
might_return = False
def merge(self, other):
self.definitely_returns = self.definitely_returns and other.definitely_returns
self.might_return = self.definitely_returns or other.definitely_returns
def update(self, other):
self.definitely_returns = self.definitely_returns or other.definitely_returns
self.might_return = self.definitely_returns or other.definitely_returns
class ScopeStats(SoftScopeStats):
__attrs__ = SoftScopeStats.__attrs__ + ['static_transitive', 'forward_transitive']
static_transitive = True
forward_transitive = True
| mit | -1,975,081,410,591,634,200 | 31.25 | 86 | 0.633333 | false |
matmutant/sl4a | python/src/Lib/heapq.py | 49 | 15994 | # -*- coding: Latin-1 -*-
"""Heap queue algorithm (a.k.a. priority queue).
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
Usage:
heap = [] # creates an empty heap
heappush(heap, item) # pushes a new item on the heap
item = heappop(heap) # pops the smallest item from the heap
item = heap[0] # smallest item on the heap without popping it
heapify(x) # transforms list into a heap, in-place, in linear time
item = heapreplace(heap, item) # pops and returns smallest item, and adds
# new item; the heap size is unchanged
Our API differs from textbook heap algorithms as follows:
- We use 0-based indexing. This makes the relationship between the
index for a node and the indexes for its children slightly less
obvious, but is more suitable since Python uses 0-based indexing.
- Our heappop() method returns the smallest item, not the largest.
These two make it possible to view the heap as a regular Python list
without surprises: heap[0] is the smallest item, and heap.sort()
maintains the heap invariant!
"""
# Original code by Kevin O'Connor, augmented by Tim Peters and Raymond Hettinger
__about__ = """Heap queues
[explanation by François Pinard]
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
The strange invariant above is meant to be an efficient memory
representation for a tournament. The numbers below are `k', not a[k]:
0
1 2
3 4 5 6
7 8 9 10 11 12 13 14
15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30
In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'. In
a usual binary tournament we see in sports, each cell is the winner
over the two cells it tops, and we can trace the winner down the tree
to see all opponents s/he had. However, in many computer applications
of such tournaments, we do not need to trace the history of a winner.
To be more memory efficient, when a winner is promoted, we try to
replace it by something else at a lower level, and the rule becomes
that a cell and the two cells it tops contain three different items,
but the top cell "wins" over the two topped cells.
If this heap invariant is protected at all time, index 0 is clearly
the overall winner. The simplest algorithmic way to remove it and
find the "next" winner is to move some loser (let's say cell 30 in the
diagram above) into the 0 position, and then percolate this new 0 down
the tree, exchanging values, until the invariant is re-established.
This is clearly logarithmic on the total number of items in the tree.
By iterating over all items, you get an O(n ln n) sort.
A nice feature of this sort is that you can efficiently insert new
items while the sort is going on, provided that the inserted items are
not "better" than the last 0'th element you extracted. This is
especially useful in simulation contexts, where the tree holds all
incoming events, and the "win" condition means the smallest scheduled
time.  When an event schedules other events for execution, they are
scheduled into the future, so they can easily go into the heap. So, a
heap is a good structure for implementing schedulers (this is what I
used for my MIDI sequencer :-).
Various structures for implementing schedulers have been extensively
studied, and heaps are good for this, as they are reasonably speedy,
the speed is almost constant, and the worst case is not much different
than the average case. However, there are other representations which
are more efficient overall, yet the worst cases might be terrible.
Heaps are also very useful in big disk sorts. You most probably all
know that a big sort implies producing "runs" (which are pre-sorted
sequences, which size is usually related to the amount of CPU memory),
followed by a merging passes for these runs, which merging is often
very cleverly organised[1]. It is very important that the initial
sort produces the longest runs possible. Tournaments are a good way
to achieve that.  If, using all the memory available to hold a tournament, you
replace and percolate items that happen to fit the current run, you'll
produce runs which are twice the size of the memory for random input,
and much better for input fuzzily ordered.
Moreover, if you output the 0'th item on disk and get an input which
may not fit in the current tournament (because the value "wins" over
the last output value), it cannot fit in the heap, so the size of the
heap decreases. The freed memory could be cleverly reused immediately
for progressively building a second heap, which grows at exactly the
same rate the first heap is melting. When the first heap completely
vanishes, you switch heaps and start a new run. Clever and quite
effective!
In a word, heaps are useful memory structures to know. I use them in
a few applications, and I think it is good to keep a `heap' module
around. :-)
--------------------
[1] The disk balancing algorithms which are current, nowadays, are
more annoying than clever, and this is a consequence of the seeking
capabilities of the disks. On devices which cannot seek, like big
tape drives, the story was quite different, and one had to be very
clever to ensure (far in advance) that each tape movement will be the
most effective possible (that is, will best participate at
"progressing" the merge). Some tapes were even able to read
backwards, and this was also used to avoid the rewinding time.
Believe me, real good tape sorts were quite spectacular to watch!
From all times, sorting has always been a Great Art! :-)
"""
__all__ = ['heappush', 'heappop', 'heapify', 'heapreplace', 'merge',
'nlargest', 'nsmallest', 'heappushpop']
from itertools import islice, repeat, count, imap, izip, tee
from operator import itemgetter, neg
import bisect
def heappush(heap, item):
"""Push item onto heap, maintaining the heap invariant."""
heap.append(item)
_siftdown(heap, 0, len(heap)-1)
def heappop(heap):
"""Pop the smallest item off the heap, maintaining the heap invariant."""
lastelt = heap.pop() # raises appropriate IndexError if heap is empty
if heap:
returnitem = heap[0]
heap[0] = lastelt
_siftup(heap, 0)
else:
returnitem = lastelt
return returnitem
def heapreplace(heap, item):
"""Pop and return the current smallest value, and add the new item.
This is more efficient than heappop() followed by heappush(), and can be
more appropriate when using a fixed-size heap. Note that the value
returned may be larger than item! That constrains reasonable uses of
this routine unless written as part of a conditional replacement:
if item > heap[0]:
item = heapreplace(heap, item)
"""
returnitem = heap[0] # raises appropriate IndexError if heap is empty
heap[0] = item
_siftup(heap, 0)
return returnitem
def heappushpop(heap, item):
"""Fast version of a heappush followed by a heappop."""
if heap and heap[0] < item:
item, heap[0] = heap[0], item
_siftup(heap, 0)
return item
def heapify(x):
"""Transform list into a heap, in-place, in O(len(heap)) time."""
n = len(x)
# Transform bottom-up. The largest index there's any point to looking at
# is the largest with a child index in-range, so must have 2*i + 1 < n,
# or i < (n-1)/2. If n is even = 2*j, this is (2*j-1)/2 = j-1/2 so
# j-1 is the largest, which is n//2 - 1. If n is odd = 2*j+1, this is
# (2*j+1-1)/2 = j so j-1 is the largest, and that's again n//2-1.
for i in reversed(xrange(n//2)):
_siftup(x, i)
def nlargest(n, iterable):
"""Find the n largest elements in a dataset.
Equivalent to: sorted(iterable, reverse=True)[:n]
"""
it = iter(iterable)
result = list(islice(it, n))
if not result:
return result
heapify(result)
_heappushpop = heappushpop
for elem in it:
        _heappushpop(result, elem)
result.sort(reverse=True)
return result
def nsmallest(n, iterable):
"""Find the n smallest elements in a dataset.
Equivalent to: sorted(iterable)[:n]
"""
if hasattr(iterable, '__len__') and n * 10 <= len(iterable):
# For smaller values of n, the bisect method is faster than a minheap.
# It is also memory efficient, consuming only n elements of space.
it = iter(iterable)
result = sorted(islice(it, 0, n))
if not result:
return result
insort = bisect.insort
pop = result.pop
los = result[-1] # los --> Largest of the nsmallest
for elem in it:
if los <= elem:
continue
insort(result, elem)
pop()
los = result[-1]
return result
# An alternative approach manifests the whole iterable in memory but
# saves comparisons by heapifying all at once. Also, saves time
# over bisect.insort() which has O(n) data movement time for every
# insertion. Finding the n smallest of an m length iterable requires
# O(m) + O(n log m) comparisons.
h = list(iterable)
heapify(h)
return map(heappop, repeat(h, min(n, len(h))))
# 'heap' is a heap at all indices >= startpos, except possibly for pos. pos
# is the index of a leaf with a possibly out-of-order value. Restore the
# heap invariant.
def _siftdown(heap, startpos, pos):
newitem = heap[pos]
# Follow the path to the root, moving parents down until finding a place
# newitem fits.
while pos > startpos:
parentpos = (pos - 1) >> 1
parent = heap[parentpos]
if newitem < parent:
heap[pos] = parent
pos = parentpos
continue
break
heap[pos] = newitem
# The child indices of heap index pos are already heaps, and we want to make
# a heap at index pos too. We do this by bubbling the smaller child of
# pos up (and so on with that child's children, etc) until hitting a leaf,
# then using _siftdown to move the oddball originally at index pos into place.
#
# We *could* break out of the loop as soon as we find a pos where newitem <=
# both its children, but turns out that's not a good idea, and despite that
# many books write the algorithm that way. During a heap pop, the last array
# element is sifted in, and that tends to be large, so that comparing it
# against values starting from the root usually doesn't pay (= usually doesn't
# get us out of the loop early). See Knuth, Volume 3, where this is
# explained and quantified in an exercise.
#
# Cutting the # of comparisons is important, since these routines have no
# way to extract "the priority" from an array element, so that intelligence
# is likely to be hiding in custom __cmp__ methods, or in array elements
# storing (priority, record) tuples. Comparisons are thus potentially
# expensive.
#
# On random arrays of length 1000, making this change cut the number of
# comparisons made by heapify() a little, and those made by exhaustive
# heappop() a lot, in accord with theory. Here are typical results from 3
# runs (3 just to demonstrate how small the variance is):
#
# Compares needed by heapify Compares needed by 1000 heappops
# -------------------------- --------------------------------
# 1837 cut to 1663 14996 cut to 8680
# 1855 cut to 1659 14966 cut to 8678
# 1847 cut to 1660 15024 cut to 8703
#
# Building the heap by using heappush() 1000 times instead required
# 2198, 2148, and 2219 compares: heapify() is more efficient, when
# you can use it.
#
# The total compares needed by list.sort() on the same lists were 8627,
# 8627, and 8632 (this should be compared to the sum of heapify() and
# heappop() compares): list.sort() is (unsurprisingly!) more efficient
# for sorting.
def _siftup(heap, pos):
endpos = len(heap)
startpos = pos
newitem = heap[pos]
# Bubble up the smaller child until hitting a leaf.
childpos = 2*pos + 1 # leftmost child position
while childpos < endpos:
# Set childpos to index of smaller child.
rightpos = childpos + 1
if rightpos < endpos and not heap[childpos] < heap[rightpos]:
childpos = rightpos
# Move the smaller child up.
heap[pos] = heap[childpos]
pos = childpos
childpos = 2*pos + 1
# The leaf at pos is empty now. Put newitem there, and bubble it up
# to its final resting place (by sifting its parents down).
heap[pos] = newitem
_siftdown(heap, startpos, pos)
# If available, use C implementation
try:
from _heapq import heappush, heappop, heapify, heapreplace, nlargest, nsmallest, heappushpop
except ImportError:
pass
def merge(*iterables):
'''Merge multiple sorted inputs into a single sorted output.
Similar to sorted(itertools.chain(*iterables)) but returns a generator,
does not pull the data into memory all at once, and assumes that each of
the input streams is already sorted (smallest to largest).
>>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25]))
[0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25]
'''
_heappop, _heapreplace, _StopIteration = heappop, heapreplace, StopIteration
h = []
h_append = h.append
for itnum, it in enumerate(map(iter, iterables)):
try:
next = it.next
h_append([next(), itnum, next])
except _StopIteration:
pass
heapify(h)
while 1:
try:
while 1:
v, itnum, next = s = h[0] # raises IndexError when h is empty
yield v
s[0] = next() # raises StopIteration when exhausted
_heapreplace(h, s) # restore heap condition
except _StopIteration:
_heappop(h) # remove empty iterator
except IndexError:
return
# Extend the implementations of nsmallest and nlargest to use a key= argument
_nsmallest = nsmallest
def nsmallest(n, iterable, key=None):
"""Find the n smallest elements in a dataset.
Equivalent to: sorted(iterable, key=key)[:n]
"""
if key is None:
it = izip(iterable, count()) # decorate
result = _nsmallest(n, it)
return map(itemgetter(0), result) # undecorate
in1, in2 = tee(iterable)
it = izip(imap(key, in1), count(), in2) # decorate
result = _nsmallest(n, it)
return map(itemgetter(2), result) # undecorate
_nlargest = nlargest
def nlargest(n, iterable, key=None):
"""Find the n largest elements in a dataset.
Equivalent to: sorted(iterable, key=key, reverse=True)[:n]
"""
if key is None:
it = izip(iterable, imap(neg, count())) # decorate
result = _nlargest(n, it)
return map(itemgetter(0), result) # undecorate
in1, in2 = tee(iterable)
it = izip(imap(key, in1), imap(neg, count()), in2) # decorate
result = _nlargest(n, it)
return map(itemgetter(2), result) # undecorate
if __name__ == "__main__":
# Simple sanity test
heap = []
data = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0]
for item in data:
heappush(heap, item)
sort = []
while heap:
sort.append(heappop(heap))
print sort
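    # Extra illustrative checks (added examples, not part of the original test):
    # key-aware nlargest/nsmallest and merging of pre-sorted inputs.
    print nlargest(3, data) # [9, 8, 7]
    print nsmallest(3, data, key=lambda x: -x) # [9, 8, 7] again, via the key
    print list(merge([0, 2, 4], [1, 3, 5])) # [0, 1, 2, 3, 4, 5]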
import doctest
doctest.testmod()
| apache-2.0 | -498,115,952,297,014,000 | 39.697201 | 96 | 0.662686 | false |
IsCoolEntertainment/debpkg_libcloud | libcloud/test/compute/test_joyent.py | 2 | 4037 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from libcloud.utils.py3 import httplib
from libcloud.common.types import LibcloudError
from libcloud.compute.base import Node, NodeState
from libcloud.compute.drivers.joyent import JoyentNodeDriver
from libcloud.test import MockHttp
from libcloud.test.compute import TestCaseMixin
from libcloud.test.file_fixtures import ComputeFileFixtures
from libcloud.test.secrets import JOYENT_PARAMS
class JoyentTestCase(unittest.TestCase):
def setUp(self):
JoyentNodeDriver.connectionCls.conn_classes = (None, JoyentHttp)
self.driver = JoyentNodeDriver(*JOYENT_PARAMS)
def test_instantiate_invalid_location(self):
try:
JoyentNodeDriver('user', 'key', location='invalid')
except LibcloudError:
pass
else:
self.fail('Exception was not thrown')
def test_list_sizes(self):
sizes = self.driver.list_sizes()
self.assertEqual(sizes[0].ram, 16384)
def test_list_images(self):
images = self.driver.list_images()
self.assertEqual(images[0].name, 'nodejs')
def test_list_nodes_with_and_without_credentials(self):
nodes = self.driver.list_nodes()
self.assertEqual(len(nodes), 2)
node = nodes[0]
self.assertEqual(node.public_ips[0], '165.225.129.129')
self.assertEqual(node.private_ips[0], '10.112.1.130')
self.assertEqual(node.state, NodeState.RUNNING)
node = nodes[1]
self.assertEqual(node.public_ips[0], '165.225.129.128')
self.assertEqual(node.private_ips[0], '10.112.1.131')
self.assertEqual(node.state, NodeState.RUNNING)
self.assertEqual(node.extra['password'], 'abc')
def test_create_node(self):
image = self.driver.list_images()[0]
size = self.driver.list_sizes()[0]
node = self.driver.create_node(image=image, size=size, name='testlc')
self.assertEqual(node.name, 'testlc')
def test_ex_stop_node(self):
node = self.driver.list_nodes()[0]
self.assertTrue(self.driver.ex_stop_node(node))
def test_ex_start_node(self):
node = self.driver.list_nodes()[0]
self.assertTrue(self.driver.ex_start_node(node))
class JoyentHttp(MockHttp):
fixtures = ComputeFileFixtures('joyent')
def _my_packages(self, method, url, body, headers):
body = self.fixtures.load('my_packages.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _my_datasets(self, method, url, body, headers):
body = self.fixtures.load('my_datasets.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _my_machines(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('my_machines.json')
elif method == 'POST':
body = self.fixtures.load('my_machines_create.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _my_machines_2fb67f5f_53f2_40ab_9d99_b9ff68cfb2ab(self, method, url,
body, headers):
return (httplib.ACCEPTED, '', {}, httplib.responses[httplib.ACCEPTED])
if __name__ == '__main__':
sys.exit(unittest.main())
| apache-2.0 | 2,047,282,844,915,494,700 | 36.37963 | 78 | 0.672777 | false |
soellman/copernicus | cpc/server/message/direct_message.py | 2 | 4260 | import json
import logging
from cpc.network.com.input import Input
from cpc.network.com.server_connection import ServerConnection
from cpc.network.server_request import ServerRequest
from cpc.util import json_serializer
from cpc.util.conf.server_conf import ServerConf
log=logging.getLogger(__name__)
class DirectServerMessage(ServerConnection):
"""
    Messages that should only be sent between trusted neighbouring nodes.
    These messages should not need a network topology.
"""
def networkTopology(self,topology):
"""
topology:Nodes
"""
cmdstring="network-topology"
fields = []
input = Input('cmd', cmdstring)
fields.append(input)
fields.append(Input('version', "1"))
input2 = Input('topology',json.dumps(topology
,default = json_serializer.toJson,indent=4))
fields.append(input2)
msg = ServerRequest.prepareRequest(fields,[])
response = self.putRequest(msg)
return response
def pingServer(self,serverId):
cmdstring='ping'
fields = []
input = Input('cmd', cmdstring)
fields.append(input)
fields.append(Input('version', "1"))
headers = dict()
if serverId!= None:
headers['server-id'] = serverId
msg = ServerRequest.prepareRequest(fields,[],headers)
response= self.putRequest(msg)
return response
class PersistentServerMessage(ServerConnection):
"""
The purpose of this class is to handle persistent server to server
connections
    It contains two message types: persistIncomingConnection and
    persistOutgoingConnection.
persistIncomingConnection returns the underlying socket instead of
putting it back to the connection pool.
The server should be responsible for monitoring this socket for
incoming requests
persistOutgoingConnection is simpler. it puts back the connection to
the pool and assumes that the receiving server will monitor this
connection for requests.
"""
INBOUND_CONNECTION = "IN"
OUTBOUND_CONNECTION = "OUT"
def __persistConnection(self,direction,headers = dict()):
headers['persistent-connection'] = direction
        #message body is actually irrelevant and is not read on the other
# side.
#we just need to conform to the http protocol
fields = []
fields.append(Input('cmd', "persist-connection"))
#sending along the connection parameters for this server
conf = ServerConf()
connectionParams = dict()
connectionParams['serverId'] = conf.getServerId()
connectionParams['hostname'] = conf.getHostName()
connectionParams['fqdn'] = conf.getFqdn()
connectionParams['client_secure_port'] = conf\
.getClientSecurePort()
connectionParams['server_secure_port'] = conf\
.getServerSecurePort()
input2 = Input('connectionParams',
json.dumps(connectionParams,default = json_serializer.toJson,
indent=4)) # a json structure that needs to be dumped
fields.append(input2)
response= self.putRequest(ServerRequest.prepareRequest(fields, [],
headers))
return response
    #NOTE does not return a response, it returns the socket used for the
# connection
def persistIncomingConnection(self):
self.storeInConnectionPool = False
self.createConnection = True
#OUTBOUND FOR THE RECEIVING END
headers = dict()
response = self.__persistConnection(self.OUTBOUND_CONNECTION,headers)
return self.getSocket()
def persistOutgoingConnection(self):
"""
This is just a simple ping message, the keep-alive header will
        ensure that the other end won't close the connection
"""
self.createConnection = True
fields = []
# fields.append(Input('cmd', "ping"))
headers = dict()
# response= self.putRequest(ServerRequest.prepareRequest(fields, [],
# headers))
#return response
#inbound for the receiving end
return self.__persistConnection(self.INBOUND_CONNECTION,headers)
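# Usage sketch (illustrative; the constructor arguments come from the
# ServerConnection base class and are not shown here): the side that calls
# persistIncomingConnection keeps the returned socket and monitors it for
# requests, while persistOutgoingConnection parks the connection in the pool
# so the remote end can reuse it.
#
#   msg = PersistentServerMessage(...)       # construct like any ServerConnection
#   sock = msg.persistIncomingConnection()   # we monitor this socket ourselves
#   msg.persistOutgoingConnection()          # the peer monitors the pooled connection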
| gpl-2.0 | -6,072,598,728,848,552,000 | 32.28125 | 77 | 0.655164 | false |
jeasoft/odoo | comunity_modules/tko_partner_multiple_emails/res_partner.py | 3 | 5424 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# ThinkOpen Solutions Brasil
# Copyright (C) Thinkopen Solutions <http://www.tkobr.com>.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from openerp import api
class res_partner(osv.osv):
_name = "res.partner"
_inherit = "res.partner"
_description = "inherited partner class"
def create(self, cr, uid, vals, context=False):
mail_obj = self.pool.get('res.partner.email')
res = super(res_partner, self).create(cr, uid, vals, context=context)
email_ids = mail_obj.search(cr, uid, [('res_partner_id', '=', res)])
if email_ids:
mail_obj.write(cr, uid, email_ids[0], {'is_active' : True})
vals.update({'email' : mail_obj.browse(cr, uid, email_ids[0]).email})
return res
def write(self, cr, uid, ids, vals, context=None):
res = super(res_partner, self).write(cr, uid, ids, vals, context=context)
mail_obj = self.pool.get('res.partner.email')
email_ids = mail_obj.search(cr, uid, [('res_partner_id', '=', ids[0])])
if len(email_ids) == 1:
mail_obj.write(cr, uid, email_ids[0], {'is_active' : True})
return res
def _get_email_id(self, cr, uid, ids, name, args, context=False):
res = {}
email_obj = self.pool.get('res.partner.email')
for record in self.browse(cr, uid, ids):
email_ids = email_obj.search(cr, uid, [('res_partner_id', '=', record.id), ('is_active', '=', True)])
if email_ids:
email = email_obj.browse(cr, uid, email_ids[0]).email
res[record.id] = email
else:
res[record.id] = False
return res
def _set_email_id(self, cr, uid, ids, name, value, args, context=None):
mail_obj = self.pool.get('res.partner.email')
if value and ids:
for record in self.browse(cr, uid, ids):
mail_ids = mail_obj.search(cr, uid, [('res_partner_id', '=', record.id), ('email', 'ilike', value)])
if mail_ids:
previous_mail_ids = mail_obj.search(cr, uid, [('res_partner_id', '=', record.id)])
mail_obj.write(cr, uid, previous_mail_ids, {'is_active' : False})
mail_obj.write(cr, uid, mail_ids, {'is_active' : True})
if not mail_ids:
active_mail_ids = mail_obj.search(cr, uid, [('res_partner_id', '=', record.id), ('is_active', '=', True)])
if len(active_mail_ids):
mail_obj.write(cr, uid, active_mail_ids, {'email' : value})
else:
# mail_obj.write(cr, uid, previous_mail_ids, {'is_active' : False})
mail_obj.create(cr, uid, {'res_partner_id' : record.id,
'email' : value,
'is_active' : True})
if not value:
for record in self.browse(cr, uid, ids):
mail_id = mail_obj.search(cr, uid, [('res_partner_id', '=', record.id), ('is_active', '=', True)])
if len(mail_id):
mail_obj.unlink(cr, uid, mail_id)
return True
def _get_partner(self, cr, uid, ids, context=None):
result = {}
for part in self.pool.get('res.partner.email').browse(cr, uid, ids, context=context):
result[part.res_partner_id.id] = True
return result.keys()
def _get_mail_ids(self, cr, uid, ids, fields, arg, context=None):
res = {}
for mail in ids:
res[mail] = self.pool.get('res.partner.email').search(cr , uid, [('res_partner_id', '=', mail)])
return res
_columns = {
'email' :fields.function(_get_email_id, type='char', fnct_inv=_set_email_id, string='Email',
store={
'res.partner': (lambda self, cr, uid, ids, c={}: ids, ['email_ids'], 10),
'res.partner.email': (_get_partner, ['email', 'is_active'], 10), }
),
'email_ids': fields.one2many('res.partner.email', 'res_partner_id', 'Emails'),
'email_ids_readonly': fields.function(_get_mail_ids, type='one2many', relation='res.partner.email', string='Emails')
}
| agpl-3.0 | 7,512,513,978,802,194,000 | 47.4375 | 126 | 0.520833 | false |
trabacus-softapps/openerp-8.0-cc | openerp/addons/base/res/res_config.py | 6 | 29051 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from operator import attrgetter
import re
import openerp
from openerp import SUPERUSER_ID
from openerp.osv import osv, fields
from openerp.tools import ustr
from openerp.tools.translate import _
from openerp import exceptions
_logger = logging.getLogger(__name__)
class res_config_module_installation_mixin(object):
def _install_modules(self, cr, uid, modules, context):
"""Install the requested modules.
return the next action to execute
modules is a list of tuples
(mod_name, browse_record | None)
"""
ir_module = self.pool.get('ir.module.module')
to_install_ids = []
to_install_missing_names = []
for name, module in modules:
if not module:
to_install_missing_names.append(name)
elif module.state == 'uninstalled':
to_install_ids.append(module.id)
result = None
if to_install_ids:
result = ir_module.button_immediate_install(cr, uid, to_install_ids, context=context)
#FIXME: if result is not none, the corresponding todo will be skipped because it was just marked done
if to_install_missing_names:
return {
'type': 'ir.actions.client',
'tag': 'apps',
'params': {'modules': to_install_missing_names},
}
return result
class res_config_configurable(osv.osv_memory):
''' Base classes for new-style configuration items
Configuration items should inherit from this class, implement
the execute method (and optionally the cancel one) and have
their view inherit from the related res_config_view_base view.
'''
_name = 'res.config'
def _next_action(self, cr, uid, context=None):
Todos = self.pool['ir.actions.todo']
_logger.info('getting next %s', Todos)
active_todos = Todos.browse(cr, uid,
Todos.search(cr, uid, ['&', ('type', '=', 'automatic'), ('state','=','open')]),
context=context)
user_groups = set(map(
lambda g: g.id,
self.pool['res.users'].browse(cr, uid, [uid], context=context)[0].groups_id))
valid_todos_for_user = [
todo for todo in active_todos
if not todo.groups_id or bool(user_groups.intersection((
group.id for group in todo.groups_id)))
]
if valid_todos_for_user:
return valid_todos_for_user[0]
return None
def _next(self, cr, uid, context=None):
_logger.info('getting next operation')
next = self._next_action(cr, uid, context=context)
_logger.info('next action is %s', next)
if next:
res = next.action_launch(context=context)
res['nodestroy'] = False
return res
return {
'type': 'ir.actions.client',
'tag': 'reload',
}
def start(self, cr, uid, ids, context=None):
return self.next(cr, uid, ids, context)
def next(self, cr, uid, ids, context=None):
""" Returns the next todo action to execute (using the default
sort order)
"""
return self._next(cr, uid, context=context)
def execute(self, cr, uid, ids, context=None):
""" Method called when the user clicks on the ``Next`` button.
Execute *must* be overloaded unless ``action_next`` is overloaded
(which is something you generally don't need to do).
If ``execute`` returns an action dictionary, that action is executed
rather than just going to the next configuration item.
"""
raise NotImplementedError(
'Configuration items need to implement execute')
def cancel(self, cr, uid, ids, context=None):
""" Method called when the user click on the ``Skip`` button.
``cancel`` should be overloaded instead of ``action_skip``. As with
``execute``, if it returns an action dictionary that action is
executed in stead of the default (going to the next configuration item)
The default implementation is a NOOP.
``cancel`` is also called by the default implementation of
``action_cancel``.
"""
pass
def action_next(self, cr, uid, ids, context=None):
""" Action handler for the ``next`` event.
Sets the status of the todo the event was sent from to
``done``, calls ``execute`` and -- unless ``execute`` returned
an action dictionary -- executes the action provided by calling
``next``.
"""
next = self.execute(cr, uid, ids, context=context)
if next: return next
return self.next(cr, uid, ids, context=context)
def action_skip(self, cr, uid, ids, context=None):
""" Action handler for the ``skip`` event.
Sets the status of the todo the event was sent from to
``skip``, calls ``cancel`` and -- unless ``cancel`` returned
an action dictionary -- executes the action provided by calling
``next``.
"""
next = self.cancel(cr, uid, ids, context=context)
if next: return next
return self.next(cr, uid, ids, context=context)
def action_cancel(self, cr, uid, ids, context=None):
""" Action handler for the ``cancel`` event. That event isn't
generated by the res.config.view.base inheritable view, the
inherited view has to overload one of the buttons (or add one
more).
Sets the status of the todo the event was sent from to
``cancel``, calls ``cancel`` and -- unless ``cancel`` returned
an action dictionary -- executes the action provided by calling
``next``.
"""
next = self.cancel(cr, uid, ids, context=context)
if next: return next
return self.next(cr, uid, ids, context=context)
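# A minimal, hypothetical configuration item built on res.config: inherit,
# implement execute() (and optionally cancel()), and let the base class drive
# the ir.actions.todo sequence.
#
#   class my_config_step(osv.osv_memory):
#       _name = 'my.config.step'
#       _inherit = 'res.config'
#       def execute(self, cr, uid, ids, context=None):
#           # apply whatever the wizard's view collected here
#           pass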
class res_config_installer(osv.osv_memory, res_config_module_installation_mixin):
""" New-style configuration base specialized for addons selection
and installation.
Basic usage
-----------
Subclasses can simply define a number of _columns as
fields.boolean objects. The keys (column names) should be the
names of the addons to install (when selected). Upon action
execution, selected boolean fields (and those only) will be
interpreted as addons to install, and batch-installed.
Additional addons
-----------------
It is also possible to require the installation of an additional
addon set when a specific preset of addons has been marked for
installation (in the basic usage only, additionals can't depend on
one another).
These additionals are defined through the ``_install_if``
property. This property is a mapping of a collection of addons (by
name) to a collection of addons (by name) [#]_, and if all the *key*
addons are selected for installation, then the *value* ones will
be selected as well. For example::
_install_if = {
('sale','crm'): ['sale_crm'],
}
This will install the ``sale_crm`` addon if and only if both the
``sale`` and ``crm`` addons are selected for installation.
You can define as many additionals as you wish, and additionals
can overlap in key and value. For instance::
_install_if = {
('sale','crm'): ['sale_crm'],
('sale','project'): ['project_mrp'],
}
will install both ``sale_crm`` and ``project_mrp`` if all of
``sale``, ``crm`` and ``project`` are selected for installation.
Hook methods
------------
Subclasses might also need to express dependencies more complex
than that provided by additionals. In this case, it's possible to
define methods of the form ``_if_%(name)s`` where ``name`` is the
name of a boolean field. If the field is selected, then the
corresponding module will be marked for installation *and* the
hook method will be executed.
Hook methods take the usual set of parameters (cr, uid, ids,
context) and can return a collection of additional addons to
install (if they return anything, otherwise they should not return
anything, though returning any "falsy" value such as None or an
empty collection will have the same effect).
Complete control
----------------
The last hook is to simply overload the ``modules_to_install``
method, which implements all the mechanisms above. This method
takes the usual set of parameters (cr, uid, ids, context) and
returns a ``set`` of addons to install (addons selected by the
above methods minus addons from the *basic* set which are already
installed) [#]_ so an overloader can simply manipulate the ``set``
returned by ``res_config_installer.modules_to_install`` to add or
remove addons.
Skipping the installer
----------------------
Unless it is removed from the view, installers have a *skip*
button which invokes ``action_skip`` (and the ``cancel`` hook from
``res.config``). Hooks and additionals *are not run* when skipping
installation, even for already installed addons.
    Again, set up your hooks accordingly.
.. [#] note that since a mapping key needs to be hashable, it's
possible to use a tuple or a frozenset, but not a list or a
regular set
.. [#] because the already-installed modules are only pruned at
the very end of ``modules_to_install``, additionals and
           hooks depending on them *are guaranteed to execute*. Set up
your hooks accordingly.
"""
_name = 'res.config.installer'
_inherit = 'res.config'
_install_if = {}
def already_installed(self, cr, uid, context=None):
""" For each module, check if it's already installed and if it
is return its name
:returns: a list of the already installed modules in this
installer
:rtype: [str]
"""
return map(attrgetter('name'),
self._already_installed(cr, uid, context=context))
def _already_installed(self, cr, uid, context=None):
""" For each module (boolean fields in a res.config.installer),
check if it's already installed (either 'to install', 'to upgrade'
        or 'installed') and, if it is, return the module's browse_record
:returns: a list of all installed modules in this installer
:rtype: [browse_record]
"""
modules = self.pool['ir.module.module']
selectable = [field for field in self._columns
if type(self._columns[field]) is fields.boolean]
return modules.browse(
cr, uid,
modules.search(cr, uid,
[('name','in',selectable),
('state','in',['to install', 'installed', 'to upgrade'])],
context=context),
context=context)
def modules_to_install(self, cr, uid, ids, context=None):
""" selects all modules to install:
* checked boolean fields
* return values of hook methods. Hook methods are of the form
``_if_%(addon_name)s``, and are called if the corresponding
addon is marked for installation. They take the arguments
cr, uid, ids and context, and return an iterable of addon
names
* additionals, additionals are setup through the ``_install_if``
class variable. ``_install_if`` is a dict of {iterable:iterable}
where key and value are iterables of addon names.
If all the addons in the key are selected for installation
(warning: addons added through hooks don't count), then the
addons in the value are added to the set of modules to install
* not already installed
"""
base = set(module_name
for installer in self.read(cr, uid, ids, context=context)
for module_name, to_install in installer.iteritems()
if module_name != 'id'
if type(self._columns[module_name]) is fields.boolean
if to_install)
hooks_results = set()
for module in base:
hook = getattr(self, '_if_%s'% module, None)
if hook:
hooks_results.update(hook(cr, uid, ids, context=None) or set())
additionals = set(
module for requirements, consequences \
in self._install_if.iteritems()
if base.issuperset(requirements)
for module in consequences)
return (base | hooks_results | additionals).difference(
self.already_installed(cr, uid, context))
def default_get(self, cr, uid, fields_list, context=None):
''' If an addon is already installed, check it by default
'''
defaults = super(res_config_installer, self).default_get(
cr, uid, fields_list, context=context)
return dict(defaults,
**dict.fromkeys(
self.already_installed(cr, uid, context=context),
True))
def fields_get(self, cr, uid, fields=None, context=None, write_access=True):
""" If an addon is already installed, set it to readonly as
res.config.installer doesn't handle uninstallations of already
installed addons
"""
fields = super(res_config_installer, self).fields_get(
cr, uid, fields, context, write_access)
for name in self.already_installed(cr, uid, context=context):
if name not in fields:
continue
fields[name].update(
readonly=True,
help= ustr(fields[name].get('help', '')) +
_('\n\nThis addon is already installed on your system'))
return fields
def execute(self, cr, uid, ids, context=None):
to_install = list(self.modules_to_install(
cr, uid, ids, context=context))
_logger.info('Selecting addons %s to install', to_install)
ir_module = self.pool.get('ir.module.module')
modules = []
for name in to_install:
mod_ids = ir_module.search(cr, uid, [('name', '=', name)])
record = ir_module.browse(cr, uid, mod_ids[0], context) if mod_ids else None
modules.append((name, record))
return self._install_modules(cr, uid, modules, context=context)
class res_config_settings(osv.osv_memory, res_config_module_installation_mixin):
""" Base configuration wizard for application settings. It provides support for setting
default values, assigning groups to employee users, and installing modules.
To make such a 'settings' wizard, define a model like::
class my_config_wizard(osv.osv_memory):
_name = 'my.settings'
_inherit = 'res.config.settings'
_columns = {
'default_foo': fields.type(..., default_model='my.model'),
'group_bar': fields.boolean(..., group='base.group_user', implied_group='my.group'),
'module_baz': fields.boolean(...),
'other_field': fields.type(...),
}
The method ``execute`` provides some support based on a naming convention:
* For a field like 'default_XXX', ``execute`` sets the (global) default value of
the field 'XXX' in the model named by ``default_model`` to the field's value.
* For a boolean field like 'group_XXX', ``execute`` adds/removes 'implied_group'
to/from the implied groups of 'group', depending on the field's value.
By default 'group' is the group Employee. Groups are given by their xml id.
The attribute 'group' may contain several xml ids, separated by commas.
* For a boolean field like 'module_XXX', ``execute`` triggers the immediate
installation of the module named 'XXX' if the field has value ``True``.
* For the other fields, the method ``execute`` invokes all methods with a name
that starts with 'set_'; such methods can be defined to implement the effect
of those fields.
The method ``default_get`` retrieves values that reflect the current status of the
fields like 'default_XXX', 'group_XXX' and 'module_XXX'. It also invokes all methods
with a name that starts with 'get_default_'; such methods can be defined to provide
current values for other fields.
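    As a sketch only (the field and method names below are hypothetical),
    the 'other' fields can be supported with such methods::
        def set_other_field(self, cr, uid, ids, context=None):
            # called by execute(); apply the effect of 'other_field'
            config = self.browse(cr, uid, ids[0], context=context)
            # ... act on config.other_field ...
        def get_default_other_field(self, cr, uid, fields, context=None):
            # called by default_get(); report the current value
            return {'other_field': False}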
"""
_name = 'res.config.settings'
def copy(self, cr, uid, id, values, context=None):
raise osv.except_osv(_("Cannot duplicate configuration!"), "")
def _get_classified_fields(self, cr, uid, context=None):
""" return a dictionary with the fields classified by category::
{ 'default': [('default_foo', 'model', 'foo'), ...],
'group': [('group_bar', [browse_group], browse_implied_group), ...],
'module': [('module_baz', browse_module), ...],
'other': ['other_field', ...],
}
"""
ir_model_data = self.pool['ir.model.data']
ir_module = self.pool['ir.module.module']
def ref(xml_id):
mod, xml = xml_id.split('.', 1)
return ir_model_data.get_object(cr, uid, mod, xml, context=context)
defaults, groups, modules, others = [], [], [], []
for name, field in self._columns.items():
if name.startswith('default_') and hasattr(field, 'default_model'):
defaults.append((name, field.default_model, name[8:]))
elif name.startswith('group_') and isinstance(field, fields.boolean) and hasattr(field, 'implied_group'):
field_groups = getattr(field, 'group', 'base.group_user').split(',')
groups.append((name, map(ref, field_groups), ref(field.implied_group)))
elif name.startswith('module_') and isinstance(field, fields.boolean):
mod_ids = ir_module.search(cr, uid, [('name', '=', name[7:])])
record = ir_module.browse(cr, uid, mod_ids[0], context) if mod_ids else None
modules.append((name, record))
else:
others.append(name)
return {'default': defaults, 'group': groups, 'module': modules, 'other': others}
def default_get(self, cr, uid, fields, context=None):
ir_values = self.pool['ir.values']
classified = self._get_classified_fields(cr, uid, context)
res = super(res_config_settings, self).default_get(cr, uid, fields, context)
# defaults: take the corresponding default value they set
for name, model, field in classified['default']:
value = ir_values.get_default(cr, uid, model, field)
if value is not None:
res[name] = value
# groups: which groups are implied by the group Employee
for name, groups, implied_group in classified['group']:
res[name] = all(implied_group in group.implied_ids for group in groups)
# modules: which modules are installed/to install
for name, module in classified['module']:
res[name] = module and module.state in ('installed', 'to install', 'to upgrade')
# other fields: call all methods that start with 'get_default_'
for method in dir(self):
if method.startswith('get_default_'):
res.update(getattr(self, method)(cr, uid, fields, context))
return res
def execute(self, cr, uid, ids, context=None):
if uid != SUPERUSER_ID and not self.pool['res.users'].has_group(cr, uid, 'base.group_erp_manager'):
raise openerp.exceptions.AccessError(_("Only administrators can change the settings"))
ir_values = self.pool['ir.values']
ir_module = self.pool['ir.module.module']
res_groups = self.pool['res.groups']
classified = self._get_classified_fields(cr, uid, context)
config = self.browse(cr, uid, ids[0], context)
# default values fields
for name, model, field in classified['default']:
ir_values.set_default(cr, SUPERUSER_ID, model, field, config[name])
# group fields: modify group / implied groups
for name, groups, implied_group in classified['group']:
gids = map(int, groups)
if config[name]:
res_groups.write(cr, uid, gids, {'implied_ids': [(4, implied_group.id)]}, context=context)
else:
res_groups.write(cr, uid, gids, {'implied_ids': [(3, implied_group.id)]}, context=context)
uids = set()
for group in groups:
uids.update(map(int, group.users))
implied_group.write({'users': [(3, u) for u in uids]})
# other fields: execute all methods that start with 'set_'
for method in dir(self):
if method.startswith('set_'):
getattr(self, method)(cr, uid, ids, context)
# module fields: install/uninstall the selected modules
to_install = []
to_uninstall_ids = []
lm = len('module_')
for name, module in classified['module']:
if config[name]:
to_install.append((name[lm:], module))
else:
if module and module.state in ('installed', 'to upgrade'):
to_uninstall_ids.append(module.id)
if to_uninstall_ids:
ir_module.button_immediate_uninstall(cr, uid, to_uninstall_ids, context=context)
action = self._install_modules(cr, uid, to_install, context=context)
if action:
return action
# After the uninstall/install calls, the self.pool is no longer valid.
# So we reach into the RegistryManager directly.
res_config = openerp.modules.registry.RegistryManager.get(cr.dbname)['res.config']
config = res_config.next(cr, uid, [], context=context) or {}
if config.get('type') not in ('ir.actions.act_window_close',):
return config
# force client-side reload (update user menu and current view)
return {
'type': 'ir.actions.client',
'tag': 'reload',
}
def cancel(self, cr, uid, ids, context=None):
# ignore the current record, and send the action to reopen the view
act_window = self.pool['ir.actions.act_window']
action_ids = act_window.search(cr, uid, [('res_model', '=', self._name)])
if action_ids:
return act_window.read(cr, uid, action_ids[0], [], context=context)
return {}
def name_get(self, cr, uid, ids, context=None):
""" Override name_get method to return an appropriate configuration wizard
name, and not the generated name."""
if not ids:
return []
# name_get may receive int id instead of an id list
if isinstance(ids, (int, long)):
ids = [ids]
act_window = self.pool['ir.actions.act_window']
action_ids = act_window.search(cr, uid, [('res_model', '=', self._name)], context=context)
name = self._name
if action_ids:
name = act_window.read(cr, uid, action_ids[0], ['name'], context=context)['name']
return [(record.id, name) for record in self.browse(cr, uid , ids, context=context)]
def get_option_path(self, cr, uid, menu_xml_id, context=None):
"""
Fetch the path to a specified configuration view and the action id to access it.
:param string menu_xml_id: the xml id of the menuitem where the view is located,
structured as follows: module_name.menuitem_xml_id (e.g.: "base.menu_sale_config")
:return tuple:
- t[0]: string: full path to the menuitem (e.g.: "Settings/Configuration/Sales")
- t[1]: int or long: id of the menuitem's action
"""
module_name, menu_xml_id = menu_xml_id.split('.')
dummy, menu_id = self.pool['ir.model.data'].get_object_reference(cr, uid, module_name, menu_xml_id)
ir_ui_menu = self.pool['ir.ui.menu'].browse(cr, uid, menu_id, context=context)
return (ir_ui_menu.complete_name, ir_ui_menu.action.id)
def get_option_name(self, cr, uid, full_field_name, context=None):
"""
Fetch the human readable name of a specified configuration option.
:param string full_field_name: the full name of the field, structured as follows:
model_name.field_name (e.g.: "sale.config.settings.fetchmail_lead")
:return string: human readable name of the field (e.g.: "Create leads from incoming mails")
"""
model_name, field_name = full_field_name.rsplit('.', 1)
return self.pool[model_name].fields_get(cr, uid, allfields=[field_name], context=context)[field_name]['string']
def get_config_warning(self, cr, msg, context=None):
"""
Helper: return a Warning exception with the given message where the %(field:xxx)s
and/or %(menu:yyy)s are replaced by the human readable field's name and/or menuitem's
full path.
Usage:
------
Just include in your error message %(field:model_name.field_name)s to obtain the human
readable field's name, and/or %(menu:module_name.menuitem_xml_id)s to obtain the menuitem's
full path.
Example of use:
---------------
        config_settings = self.pool['res.config.settings']
        raise config_settings.get_config_warning(cr, _("Error: this action is prohibited. You should check the field %(field:sale.config.settings.fetchmail_lead)s in %(menu:base.menu_sale_config)s."), context=context)
This will return an exception containing the following message:
Error: this action is prohibited. You should check the field Create leads from incoming mails in Settings/Configuration/Sales.
What if there is another substitution in the message already?
-------------------------------------------------------------
You could have a situation where the error message you want to upgrade already contains a substitution. Example:
Cannot find any account journal of %s type for this company.\n\nYou can create one in the menu: \nConfiguration\Journals\Journals.
        What you want to do here is simply to replace the path by %(menu:account.menu_account_config)s, and leave the rest alone.
In order to do that, you can use the double percent (%%) to escape your new substitution, like so:
Cannot find any account journal of %s type for this company.\n\nYou can create one in the %%(menu:account.menu_account_config)s.
"""
res_config_obj = openerp.registry(cr.dbname)['res.config.settings']
regex_path = r'%\(((?:menu|field):[a-z_\.]*)\)s'
# Process the message
# 1/ find the menu and/or field references, put them in a list
references = re.findall(regex_path, msg, flags=re.I)
# 2/ fetch the menu and/or field replacement values (full path and
# human readable field's name) and the action_id if any
values = {}
action_id = None
for item in references:
ref_type, ref = item.split(':')
if ref_type == 'menu':
values[item], action_id = res_config_obj.get_option_path(cr, SUPERUSER_ID, ref, context=context)
elif ref_type == 'field':
values[item] = res_config_obj.get_option_name(cr, SUPERUSER_ID, ref, context=context)
# 3/ substitute and return the result
if (action_id):
return exceptions.RedirectWarning(msg % values, action_id, _('Go to the configuration panel'))
return exceptions.Warning(msg % values)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 715,615,663,369,446,500 | 42.751506 | 201 | 0.606038 | false |
littlstar/chromium.src | tools/perf/benchmarks/benchmark_unittest.py | 32 | 3513 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Run the first page of every benchmark that has a composable measurement.
Ideally this test would be comprehensive, but running only the first page
of each benchmark serves as a kind of smoke test.
"""
import os
import unittest
from telemetry import benchmark as benchmark_module
from telemetry.core import discover
from telemetry.page import page_test
from telemetry.unittest import options_for_unittests
from telemetry.unittest import progress_reporter
def SmokeTestGenerator(benchmark):
# NOTE TO SHERIFFS: DO NOT DISABLE THIS TEST.
#
# This smoke test dynamically tests all benchmarks. So disabling it for one
# failing or flaky benchmark would disable a much wider swath of coverage
  # than is usually intended. Instead, if a particular benchmark is failing,
# disable it in tools/perf/benchmarks/*.
@benchmark_module.Disabled('chromeos') # crbug.com/351114
def BenchmarkSmokeTest(self):
# Only measure a single page so that this test cycles reasonably quickly.
benchmark.options['pageset_repeat'] = 1
benchmark.options['page_repeat'] = 1
class SinglePageBenchmark(benchmark): # pylint: disable=W0232
def CreatePageSet(self, options):
# pylint: disable=E1002
ps = super(SinglePageBenchmark, self).CreatePageSet(options)
for p in ps.pages:
if not p.disabled:
p.skip_waits = True
ps.pages = [p]
break
return ps
# Set the benchmark's default arguments.
options = options_for_unittests.GetCopy()
options.output_format = 'none'
options.suppress_gtest_report = True
parser = options.CreateParser()
benchmark.AddCommandLineArgs(parser)
benchmark_module.AddCommandLineArgs(parser)
benchmark.SetArgumentDefaults(parser)
options.MergeDefaultValues(parser.get_default_values())
benchmark.ProcessCommandLineArgs(None, options)
benchmark_module.ProcessCommandLineArgs(None, options)
self.assertEqual(0, SinglePageBenchmark().Run(options),
msg='Failed: %s' % benchmark)
return BenchmarkSmokeTest
def load_tests(_, _2, _3):
suite = progress_reporter.TestSuite()
benchmarks_dir = os.path.dirname(__file__)
top_level_dir = os.path.dirname(benchmarks_dir)
measurements_dir = os.path.join(top_level_dir, 'measurements')
all_measurements = discover.DiscoverClasses(
measurements_dir, top_level_dir, page_test.PageTest,
pattern='*.py').values()
all_benchmarks = discover.DiscoverClasses(
benchmarks_dir, top_level_dir, benchmark_module.Benchmark,
pattern='*.py').values()
for benchmark in all_benchmarks:
if benchmark.PageTestClass() not in all_measurements:
# If the benchmark is not in measurements, then it is not composable.
# Ideally we'd like to test these as well, but the non-composable
# benchmarks are usually long-running benchmarks.
continue
# TODO(tonyg): Smoke doesn't work with session_restore yet.
if benchmark.Name().startswith('session_restore'):
continue
if hasattr(benchmark, 'generated_profile_archive'):
# We'd like to test these, but don't know how yet.
continue
class BenchmarkSmokeTest(unittest.TestCase):
pass
setattr(BenchmarkSmokeTest, benchmark.Name(), SmokeTestGenerator(benchmark))
suite.addTest(BenchmarkSmokeTest(benchmark.Name()))
return suite
| bsd-3-clause | 4,207,061,898,930,261,500 | 34.846939 | 80 | 0.719898 | false |
rmak/splunk-sdk-python | examples/explorer/explorer.py | 1 | 1962 | #!/usr/bin/env python
#
# Copyright 2011 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import server
import webbrowser
import sys
import os
import utils
import urllib
PORT = 8080
def main(argv):
usage = "usage: %prog [options]"
redirect_port_args = {
"redirectport": {
"flags": ["--redirectport"],
"default": PORT,
"help": "Port to use for redirect server (default: %s)" % PORT,
},
}
opts = utils.parse(argv, redirect_port_args, ".splunkrc", usage=usage)
# We have to provide a sensible value for namespace
namespace = opts.kwargs["namespace"]
namespace = namespace if namespace else "-"
# Encode these arguments
args = urllib.urlencode([
("scheme", opts.kwargs["scheme"]),
("host", opts.kwargs["host"]),
("port", opts.kwargs["port"]),
("redirecthost", "localhost"),
("redirectport", opts.kwargs["redirectport"]),
("username", opts.kwargs["username"]),
("password", opts.kwargs["password"]),
("namespace", namespace)
]),
# Launch the browser
webbrowser.open("file://%s" % os.path.join(os.getcwd(), "explorer.html?%s" % args))
# And server the files
server.serve(opts.kwargs["redirectport"])
if __name__ == "__main__":
try:
main(sys.argv[1:])
except KeyboardInterrupt:
pass
except:
raise | apache-2.0 | -533,208,896,955,905,800 | 27.867647 | 87 | 0.620795 | false |
kaedroho/django | tests/apps/tests.py | 39 | 17269 | import os
from django.apps import AppConfig, apps
from django.apps.registry import Apps
from django.contrib.admin.models import LogEntry
from django.core.exceptions import AppRegistryNotReady, ImproperlyConfigured
from django.db import models
from django.test import SimpleTestCase, override_settings
from django.test.utils import extend_sys_path, isolate_apps
from .default_config_app.apps import CustomConfig
from .models import SoAlternative, TotallyNormal, new_apps
# Small list with a variety of cases for tests that iterate on installed apps.
# Intentionally not in alphabetical order to check if the order is preserved.
SOME_INSTALLED_APPS = [
'apps.apps.MyAdmin',
'apps.apps.MyAuth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
SOME_INSTALLED_APPS_NAMES = [
'django.contrib.admin',
'django.contrib.auth',
] + SOME_INSTALLED_APPS[2:]
HERE = os.path.dirname(__file__)
class AppsTests(SimpleTestCase):
def test_singleton_master(self):
"""
Only one master registry can exist.
"""
with self.assertRaises(RuntimeError):
Apps(installed_apps=None)
def test_ready(self):
"""
Tests the ready property of the master registry.
"""
# The master app registry is always ready when the tests run.
self.assertIs(apps.ready, True)
# Non-master app registries are populated in __init__.
self.assertIs(Apps().ready, True)
# The condition is set when apps are ready
self.assertIs(apps.ready_event.is_set(), True)
self.assertIs(Apps().ready_event.is_set(), True)
def test_bad_app_config(self):
"""
Tests when INSTALLED_APPS contains an incorrect app config.
"""
msg = "'apps.apps.BadConfig' must supply a name attribute."
with self.assertRaisesMessage(ImproperlyConfigured, msg):
with self.settings(INSTALLED_APPS=['apps.apps.BadConfig']):
pass
def test_not_an_app_config(self):
"""
Tests when INSTALLED_APPS contains a class that isn't an app config.
"""
msg = "'apps.apps.NotAConfig' isn't a subclass of AppConfig."
with self.assertRaisesMessage(ImproperlyConfigured, msg):
with self.settings(INSTALLED_APPS=['apps.apps.NotAConfig']):
pass
def test_no_such_app(self):
"""
Tests when INSTALLED_APPS contains an app that doesn't exist, either
directly or via an app config.
"""
with self.assertRaises(ImportError):
with self.settings(INSTALLED_APPS=['there is no such app']):
pass
msg = "Cannot import 'there is no such app'. Check that 'apps.apps.NoSuchApp.name' is correct."
with self.assertRaisesMessage(ImproperlyConfigured, msg):
with self.settings(INSTALLED_APPS=['apps.apps.NoSuchApp']):
pass
def test_no_such_app_config(self):
msg = "No module named 'apps.NoSuchConfig'"
with self.assertRaisesMessage(ImportError, msg):
with self.settings(INSTALLED_APPS=['apps.NoSuchConfig']):
pass
def test_no_such_app_config_with_choices(self):
msg = (
"'apps.apps' does not contain a class 'NoSuchConfig'. Choices are: "
"'BadConfig', 'MyAdmin', 'MyAuth', 'NoSuchApp', 'PlainAppsConfig', "
"'RelabeledAppsConfig'."
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
with self.settings(INSTALLED_APPS=['apps.apps.NoSuchConfig']):
pass
def test_default_app_config(self):
with self.settings(INSTALLED_APPS=['apps.default_config_app']):
config = apps.get_app_config('default_config_app')
self.assertIsInstance(config, CustomConfig)
@override_settings(INSTALLED_APPS=SOME_INSTALLED_APPS)
def test_get_app_configs(self):
"""
Tests apps.get_app_configs().
"""
app_configs = apps.get_app_configs()
self.assertEqual([app_config.name for app_config in app_configs], SOME_INSTALLED_APPS_NAMES)
@override_settings(INSTALLED_APPS=SOME_INSTALLED_APPS)
def test_get_app_config(self):
"""
Tests apps.get_app_config().
"""
app_config = apps.get_app_config('admin')
self.assertEqual(app_config.name, 'django.contrib.admin')
app_config = apps.get_app_config('staticfiles')
self.assertEqual(app_config.name, 'django.contrib.staticfiles')
with self.assertRaises(LookupError):
apps.get_app_config('admindocs')
msg = "No installed app with label 'django.contrib.auth'. Did you mean 'myauth'"
with self.assertRaisesMessage(LookupError, msg):
apps.get_app_config('django.contrib.auth')
@override_settings(INSTALLED_APPS=SOME_INSTALLED_APPS)
def test_is_installed(self):
"""
Tests apps.is_installed().
"""
self.assertIs(apps.is_installed('django.contrib.admin'), True)
self.assertIs(apps.is_installed('django.contrib.auth'), True)
self.assertIs(apps.is_installed('django.contrib.staticfiles'), True)
self.assertIs(apps.is_installed('django.contrib.admindocs'), False)
@override_settings(INSTALLED_APPS=SOME_INSTALLED_APPS)
def test_get_model(self):
"""
Tests apps.get_model().
"""
self.assertEqual(apps.get_model('admin', 'LogEntry'), LogEntry)
with self.assertRaises(LookupError):
apps.get_model('admin', 'LogExit')
# App label is case-sensitive, Model name is case-insensitive.
self.assertEqual(apps.get_model('admin', 'loGentrY'), LogEntry)
with self.assertRaises(LookupError):
apps.get_model('Admin', 'LogEntry')
# A single argument is accepted.
self.assertEqual(apps.get_model('admin.LogEntry'), LogEntry)
with self.assertRaises(LookupError):
apps.get_model('admin.LogExit')
with self.assertRaises(ValueError):
apps.get_model('admin_LogEntry')
@override_settings(INSTALLED_APPS=['apps.apps.RelabeledAppsConfig'])
def test_relabeling(self):
self.assertEqual(apps.get_app_config('relabeled').name, 'apps')
def test_duplicate_labels(self):
with self.assertRaisesMessage(ImproperlyConfigured, "Application labels aren't unique"):
with self.settings(INSTALLED_APPS=['apps.apps.PlainAppsConfig', 'apps']):
pass
def test_duplicate_names(self):
with self.assertRaisesMessage(ImproperlyConfigured, "Application names aren't unique"):
with self.settings(INSTALLED_APPS=['apps.apps.RelabeledAppsConfig', 'apps']):
pass
def test_import_exception_is_not_masked(self):
"""
App discovery should preserve stack traces. Regression test for #22920.
"""
with self.assertRaisesMessage(ImportError, "Oops"):
with self.settings(INSTALLED_APPS=['import_error_package']):
pass
def test_models_py(self):
"""
The models in the models.py file were loaded correctly.
"""
self.assertEqual(apps.get_model("apps", "TotallyNormal"), TotallyNormal)
with self.assertRaises(LookupError):
apps.get_model("apps", "SoAlternative")
with self.assertRaises(LookupError):
new_apps.get_model("apps", "TotallyNormal")
self.assertEqual(new_apps.get_model("apps", "SoAlternative"), SoAlternative)
def test_models_not_loaded(self):
"""
apps.get_models() raises an exception if apps.models_ready isn't True.
"""
apps.models_ready = False
try:
# The cache must be cleared to trigger the exception.
apps.get_models.cache_clear()
with self.assertRaisesMessage(AppRegistryNotReady, "Models aren't loaded yet."):
apps.get_models()
finally:
apps.models_ready = True
def test_dynamic_load(self):
"""
Makes a new model at runtime and ensures it goes into the right place.
"""
old_models = list(apps.get_app_config("apps").get_models())
# Construct a new model in a new app registry
body = {}
new_apps = Apps(["apps"])
meta_contents = {
'app_label': "apps",
'apps': new_apps,
}
meta = type("Meta", (), meta_contents)
body['Meta'] = meta
body['__module__'] = TotallyNormal.__module__
temp_model = type("SouthPonies", (models.Model,), body)
# Make sure it appeared in the right place!
self.assertEqual(list(apps.get_app_config("apps").get_models()), old_models)
with self.assertRaises(LookupError):
apps.get_model("apps", "SouthPonies")
self.assertEqual(new_apps.get_model("apps", "SouthPonies"), temp_model)
def test_model_clash(self):
"""
Test for behavior when two models clash in the app registry.
"""
new_apps = Apps(["apps"])
meta_contents = {
'app_label': "apps",
'apps': new_apps,
}
body = {}
body['Meta'] = type("Meta", (), meta_contents)
body['__module__'] = TotallyNormal.__module__
type("SouthPonies", (models.Model,), body)
# When __name__ and __module__ match we assume the module
# was reloaded and issue a warning. This use-case is
# useful for REPL. Refs #23621.
body = {}
body['Meta'] = type("Meta", (), meta_contents)
body['__module__'] = TotallyNormal.__module__
msg = (
"Model 'apps.southponies' was already registered. "
"Reloading models is not advised as it can lead to inconsistencies, "
"most notably with related models."
)
with self.assertRaisesMessage(RuntimeWarning, msg):
type("SouthPonies", (models.Model,), body)
# If it doesn't appear to be a reloaded module then we expect
# a RuntimeError.
body = {}
body['Meta'] = type("Meta", (), meta_contents)
body['__module__'] = TotallyNormal.__module__ + '.whatever'
with self.assertRaisesMessage(RuntimeError, "Conflicting 'southponies' models in application 'apps':"):
type("SouthPonies", (models.Model,), body)
def test_get_containing_app_config_apps_not_ready(self):
"""
apps.get_containing_app_config() should raise an exception if
apps.apps_ready isn't True.
"""
apps.apps_ready = False
try:
with self.assertRaisesMessage(AppRegistryNotReady, "Apps aren't loaded yet"):
apps.get_containing_app_config('foo')
finally:
apps.apps_ready = True
@isolate_apps('apps', kwarg_name='apps')
def test_lazy_model_operation(self, apps):
"""
Tests apps.lazy_model_operation().
"""
model_classes = []
initial_pending = set(apps._pending_operations)
def test_func(*models):
model_classes[:] = models
class LazyA(models.Model):
pass
# Test models appearing twice, and models appearing consecutively
model_keys = [('apps', model_name) for model_name in ['lazya', 'lazyb', 'lazyb', 'lazyc', 'lazya']]
apps.lazy_model_operation(test_func, *model_keys)
# LazyModelA shouldn't be waited on since it's already registered,
# and LazyModelC shouldn't be waited on until LazyModelB exists.
self.assertEqual(set(apps._pending_operations) - initial_pending, {('apps', 'lazyb')})
# Multiple operations can wait on the same model
apps.lazy_model_operation(test_func, ('apps', 'lazyb'))
class LazyB(models.Model):
pass
self.assertEqual(model_classes, [LazyB])
# Now we are just waiting on LazyModelC.
self.assertEqual(set(apps._pending_operations) - initial_pending, {('apps', 'lazyc')})
class LazyC(models.Model):
pass
# Everything should be loaded - make sure the callback was executed properly.
self.assertEqual(model_classes, [LazyA, LazyB, LazyB, LazyC, LazyA])
class Stub:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
class AppConfigTests(SimpleTestCase):
"""Unit tests for AppConfig class."""
def test_path_set_explicitly(self):
"""If subclass sets path as class attr, no module attributes needed."""
class MyAppConfig(AppConfig):
path = 'foo'
ac = MyAppConfig('label', Stub())
self.assertEqual(ac.path, 'foo')
def test_explicit_path_overrides(self):
"""If path set as class attr, overrides __path__ and __file__."""
class MyAppConfig(AppConfig):
path = 'foo'
ac = MyAppConfig('label', Stub(__path__=['a'], __file__='b/__init__.py'))
self.assertEqual(ac.path, 'foo')
def test_dunder_path(self):
"""If single element in __path__, use it (in preference to __file__)."""
ac = AppConfig('label', Stub(__path__=['a'], __file__='b/__init__.py'))
self.assertEqual(ac.path, 'a')
def test_no_dunder_path_fallback_to_dunder_file(self):
"""If there is no __path__ attr, use __file__."""
ac = AppConfig('label', Stub(__file__='b/__init__.py'))
self.assertEqual(ac.path, 'b')
def test_empty_dunder_path_fallback_to_dunder_file(self):
"""If the __path__ attr is empty, use __file__ if set."""
ac = AppConfig('label', Stub(__path__=[], __file__='b/__init__.py'))
self.assertEqual(ac.path, 'b')
def test_multiple_dunder_path_fallback_to_dunder_file(self):
"""If the __path__ attr is length>1, use __file__ if set."""
ac = AppConfig('label', Stub(__path__=['a', 'b'], __file__='c/__init__.py'))
self.assertEqual(ac.path, 'c')
def test_no_dunder_path_or_dunder_file(self):
"""If there is no __path__ or __file__, raise ImproperlyConfigured."""
with self.assertRaises(ImproperlyConfigured):
AppConfig('label', Stub())
def test_empty_dunder_path_no_dunder_file(self):
"""If the __path__ attr is empty and there is no __file__, raise."""
with self.assertRaises(ImproperlyConfigured):
AppConfig('label', Stub(__path__=[]))
def test_multiple_dunder_path_no_dunder_file(self):
"""If the __path__ attr is length>1 and there is no __file__, raise."""
with self.assertRaises(ImproperlyConfigured):
AppConfig('label', Stub(__path__=['a', 'b']))
def test_duplicate_dunder_path_no_dunder_file(self):
"""
If the __path__ attr contains duplicate paths and there is no
        __file__, the duplicates should be deduplicated (#25246).
"""
ac = AppConfig('label', Stub(__path__=['a', 'a']))
self.assertEqual(ac.path, 'a')
def test_repr(self):
ac = AppConfig('label', Stub(__path__=['a']))
self.assertEqual(repr(ac), '<AppConfig: label>')
class NamespacePackageAppTests(SimpleTestCase):
# We need nsapp to be top-level so our multiple-paths tests can add another
    # location for it (if it's inside a normal package with an __init__.py that
# isn't possible). In order to avoid cluttering the already-full tests/ dir
# (which is on sys.path), we add these new entries to sys.path temporarily.
base_location = os.path.join(HERE, 'namespace_package_base')
other_location = os.path.join(HERE, 'namespace_package_other_base')
app_path = os.path.join(base_location, 'nsapp')
def test_single_path(self):
"""
A Py3.3+ namespace package can be an app if it has only one path.
"""
with extend_sys_path(self.base_location):
with self.settings(INSTALLED_APPS=['nsapp']):
app_config = apps.get_app_config('nsapp')
self.assertEqual(app_config.path, self.app_path)
def test_multiple_paths(self):
"""
A Py3.3+ namespace package with multiple locations cannot be an app.
(Because then we wouldn't know where to load its templates, static
assets, etc. from.)
"""
# Temporarily add two directories to sys.path that both contain
# components of the "nsapp" package.
with extend_sys_path(self.base_location, self.other_location):
with self.assertRaises(ImproperlyConfigured):
with self.settings(INSTALLED_APPS=['nsapp']):
pass
def test_multiple_paths_explicit_path(self):
"""
Multiple locations are ok only if app-config has explicit path.
"""
# Temporarily add two directories to sys.path that both contain
# components of the "nsapp" package.
with extend_sys_path(self.base_location, self.other_location):
with self.settings(INSTALLED_APPS=['nsapp.apps.NSAppConfig']):
app_config = apps.get_app_config('nsapp')
self.assertEqual(app_config.path, self.app_path)
| bsd-3-clause | 3,829,302,539,916,321,000 | 38.247727 | 111 | 0.614338 | false |
thinker0/aurproxy | tellapart/aurproxy/source/sources/static.py | 1 | 4061 | # Copyright 2015 TellApart, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tellapart.aurproxy.config import SourceEndpoint, ShareEndpoint
from tellapart.aurproxy.exception import AurProxyConfigException
from tellapart.aurproxy.source import ProxySource
from tellapart.aurproxy.util import get_logger
logger = get_logger(__name__)
class StaticProxySource(ProxySource):
def __init__(self,
signal_update_fn=None,
share_adjuster_factories=None,
**kwargs):
super(StaticProxySource, self).__init__(signal_update_fn,
share_adjuster_factories)
self._name = kwargs.get('name')
self._host = kwargs.get('host')
self._port = kwargs.get('port')
self._endpoint = SourceEndpoint(self._host, self._port)
err_fmt = '"{0}" required on StaticProxySource'
if not self._name:
raise AurProxyConfigException(err_fmt.format('name'))
if not self._host:
raise AurProxyConfigException(err_fmt.format('host'))
if not self._port:
raise AurProxyConfigException(err_fmt.format('port'))
@property
def blueprint(self):
return None
@property
def slug(self):
return '{0}__{1}__{2}'.format(self._name,
self._host,
self._port)
def start(self):
self.add(self._endpoint)
def stop(self):
self.remove(self._endpoint)
class StaticListProxySource(ProxySource):
"""
ServerListProxy
"""
def __init__(self,
signal_update_fn=None,
share_adjuster_factories=None,
**kwargs):
"""
:param signal_update_fn:
:param share_adjuster_factories:
:param kwargs:
"""
super(StaticListProxySource, self).__init__(signal_update_fn,
share_adjuster_factories)
self._server_set = []
server_list = kwargs.get('server_list')
logger.info('ServerList: {0}'.format(server_list))
err_fmt = '"{0}" required on StaticListProxySource'
for idx, server_info in enumerate(server_list):
_host = server_info.get('host')
_port = server_info.get('port')
_share = server_info.get('share') if server_info.get('share') else 1.0
_context = {'source': "{0}.{1}.{2}.{3}.{4}".format(kwargs.get('cluster'),
kwargs.get('role'),
kwargs.get('environment'),
kwargs.get('job'),
idx)}
if not _host:
raise AurProxyConfigException(err_fmt.format('host'))
if not _port:
raise AurProxyConfigException(err_fmt.format('port'))
self._server_set.append(ShareEndpoint(_host, _port, _share, 1.0, _context))
    if len(self._server_set) == 0:
raise AurProxyConfigException(err_fmt.format('server_list'))
@property
def blueprint(self):
return None
@property
def slug(self):
slugs = []
for server_info in self._server_set:
slugs.append('{0}_{1}'.format(server_info.host, server_info.port))
return '__'.join(slugs)
def start(self):
for server in self._server_set:
logger.debug("Add ServerList: {0}:{1}".format(server.host, server.port))
self.add(server)
def stop(self):
for server in self._server_set:
logger.debug("Remove ServerList: {0}:{1}".format(server.host, server.port))
self.remove(server)
| apache-2.0 | -551,057,974,956,958,300 | 33.709402 | 83 | 0.602807 | false |
jaidevd/scikit-learn | examples/svm/plot_rbf_parameters.py | 26 | 8016 | '''
==================
RBF SVM parameters
==================
This example illustrates the effect of the parameters ``gamma`` and ``C`` of
the Radial Basis Function (RBF) kernel SVM.
Intuitively, the ``gamma`` parameter defines how far the influence of a single
training example reaches, with low values meaning 'far' and high values meaning
'close'. The ``gamma`` parameter can be seen as the inverse of the radius of
influence of samples selected by the model as support vectors.
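For reference, the RBF kernel used here is
:math:`K(x, x') = \exp(-\gamma \|x - x'\|^2)`, so a larger ``gamma`` makes
the kernel decay faster with distance and each support vector's influence
more local.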
The ``C`` parameter trades off misclassification of training examples against
simplicity of the decision surface. A low ``C`` makes the decision surface
smooth, while a high ``C`` aims at classifying all training examples correctly
by giving the model freedom to select more samples as support vectors.
The first plot is a visualization of the decision function for a variety of
parameter values on a simplified classification problem involving only 2 input
features and 2 possible target classes (binary classification). Note that this
kind of plot is not possible to do for problems with more features or target
classes.
The second plot is a heatmap of the classifier's cross-validation accuracy as a
function of ``C`` and ``gamma``. For this example we explore a relatively large
grid for illustration purposes. In practice, a logarithmic grid from
:math:`10^{-3}` to :math:`10^3` is usually sufficient. If the best parameters
lie on the boundaries of the grid, it can be extended in that direction in a
subsequent search.
Note that the heat map plot has a special colorbar with a midpoint value close
to the score values of the best performing models so as to make it easy to tell
them apart in the blink of an eye.
The behavior of the model is very sensitive to the ``gamma`` parameter. If
``gamma`` is too large, the radius of the area of influence of the support
vectors only includes the support vector itself and no amount of
regularization with ``C`` will be able to prevent overfitting.
When ``gamma`` is very small, the model is too constrained and cannot capture
the complexity or "shape" of the data. The region of influence of any selected
support vector would include the whole training set. The resulting model will
behave similarly to a linear model with a set of hyperplanes that separate the
centers of high density of any pair of two classes.
For intermediate values, we can see on the second plot that good models can
be found on a diagonal of ``C`` and ``gamma``. Smooth models (lower ``gamma``
values) can be made more complex by selecting a larger number of support
vectors (larger ``C`` values) hence the diagonal of good performing models.
Finally one can also observe that for some intermediate values of ``gamma`` we
get equally performing models when ``C`` becomes very large: it is not
necessary to regularize by limiting the number of support vectors. The radius of
the RBF kernel alone acts as a good structural regularizer. In practice though
it might still be interesting to limit the number of support vectors with a
lower value of ``C`` so as to favor models that use less memory and that are
faster to predict.
We should also note that small differences in scores results from the random
splits of the cross-validation procedure. Those spurious variations can be
smoothed out by increasing the number of CV iterations ``n_splits`` at the
expense of compute time. Increasing the value number of ``C_range`` and
``gamma_range`` steps will increase the resolution of the hyper-parameter heat
map.
'''
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import GridSearchCV
# Utility function to move the midpoint of a colormap to be around
# the values of interest.
class MidpointNormalize(Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
##############################################################################
# Load and prepare data set
#
# dataset for grid search
iris = load_iris()
X = iris.data
y = iris.target
# Dataset for decision function visualization: we only keep the first two
# features in X and sub-sample the dataset to keep only 2 classes and
# make it a binary classification problem.
X_2d = X[:, :2]
X_2d = X_2d[y > 0]
y_2d = y[y > 0]
y_2d -= 1
# It is usually a good idea to scale the data for SVM training.
# We are cheating a bit in this example in scaling all of the data,
# instead of fitting the transformation on the training set and
# just applying it on the test set.
scaler = StandardScaler()
X = scaler.fit_transform(X)
X_2d = scaler.fit_transform(X_2d)
##############################################################################
# Train classifiers
#
# For an initial search, a logarithmic grid with basis
# 10 is often helpful. Using a basis of 2, a finer
# tuning can be achieved but at a much higher cost.
C_range = np.logspace(-2, 10, 13)
gamma_range = np.logspace(-9, 3, 13)
param_grid = dict(gamma=gamma_range, C=C_range)
cv = StratifiedShuffleSplit(n_splits=5, test_size=0.2, random_state=42)
grid = GridSearchCV(SVC(), param_grid=param_grid, cv=cv)
grid.fit(X, y)
print("The best parameters are %s with a score of %0.2f"
% (grid.best_params_, grid.best_score_))
# Now we need to fit a classifier for all parameters in the 2d version
# (we use a smaller set of parameters here because it takes a while to train)
C_2d_range = [1e-2, 1, 1e2]
gamma_2d_range = [1e-1, 1, 1e1]
classifiers = []
for C in C_2d_range:
for gamma in gamma_2d_range:
clf = SVC(C=C, gamma=gamma)
clf.fit(X_2d, y_2d)
classifiers.append((C, gamma, clf))
##############################################################################
# visualization
#
# draw visualization of parameter effects
plt.figure(figsize=(8, 6))
xx, yy = np.meshgrid(np.linspace(-3, 3, 200), np.linspace(-3, 3, 200))
for (k, (C, gamma, clf)) in enumerate(classifiers):
# evaluate decision function in a grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# visualize decision function for these parameters
plt.subplot(len(C_2d_range), len(gamma_2d_range), k + 1)
plt.title("gamma=10^%d, C=10^%d" % (np.log10(gamma), np.log10(C)),
size='medium')
# visualize parameter's effect on decision function
plt.pcolormesh(xx, yy, -Z, cmap=plt.cm.RdBu)
plt.scatter(X_2d[:, 0], X_2d[:, 1], c=y_2d, cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.axis('tight')
scores = grid.cv_results_['mean_test_score'].reshape(len(C_range),
len(gamma_range))
# Draw heatmap of the validation accuracy as a function of gamma and C
#
# The scores are encoded as colors with the hot colormap which varies from dark
# red to bright yellow. As the most interesting scores are all located in the
# 0.92 to 0.97 range we use a custom normalizer to set the mid-point to 0.92 so
# as to make it easier to visualize the small variations of score values in the
# interesting range while not brutally collapsing all the low score values to
# the same color.
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
norm=MidpointNormalize(vmin=0.2, midpoint=0.92))
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
plt.yticks(np.arange(len(C_range)), C_range)
plt.title('Validation accuracy')
plt.show()
| bsd-3-clause | -5,852,591,178,242,499,000 | 39.897959 | 80 | 0.70771 | false |
sajeeshcs/nested_quota_final | nova/spice/__init__.py | 72 | 1705 | #!/usr/bin/env python
# Copyright (c) 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for SPICE Proxying."""
from oslo_config import cfg
spice_opts = [
cfg.StrOpt('html5proxy_base_url',
default='http://127.0.0.1:6082/spice_auto.html',
help='Location of spice HTML5 console proxy, in the form '
'"http://127.0.0.1:6082/spice_auto.html"'),
cfg.StrOpt('server_listen',
default='127.0.0.1',
help='IP address on which instance spice server should listen'),
cfg.StrOpt('server_proxyclient_address',
default='127.0.0.1',
help='The address to which proxy clients '
'(like nova-spicehtml5proxy) should connect'),
cfg.BoolOpt('enabled',
default=False,
help='Enable spice related features'),
cfg.BoolOpt('agent_enabled',
default=True,
help='Enable spice guest agent support'),
cfg.StrOpt('keymap',
default='en-us',
help='Keymap for spice'),
]
CONF = cfg.CONF
CONF.register_opts(spice_opts, group='spice')
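# For illustration only: with the registrations above, the corresponding
# section of nova.conf would look like the following (values shown are the
# defaults defined above):
#
#   [spice]
#   html5proxy_base_url = http://127.0.0.1:6082/spice_auto.html
#   server_listen = 127.0.0.1
#   server_proxyclient_address = 127.0.0.1
#   enabled = False
#   agent_enabled = True
#   keymap = en-us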
| apache-2.0 | -1,871,231,745,509,334,000 | 36.888889 | 79 | 0.618182 | false |
erikdejonge/business-rules | tests/test_engine_logic.py | 4 | 7550 | from business_rules import engine
from business_rules.variables import BaseVariables
from business_rules.operators import StringType
from business_rules.actions import BaseActions
from mock import patch, MagicMock
from . import TestCase
class EngineTests(TestCase):
###
### Run
###
@patch.object(engine, 'run')
def test_run_all_some_rule_triggered(self, *args):
""" By default, does not stop on first triggered rule. Returns True if
any rule was triggered, otherwise False
"""
rule1 = {'conditions': 'condition1', 'actions': 'action name 1'}
rule2 = {'conditions': 'condition2', 'actions': 'action name 2'}
variables = BaseVariables()
actions = BaseActions()
def return_action1(rule, *args, **kwargs):
return rule['actions'] == 'action name 1'
engine.run.side_effect = return_action1
result = engine.run_all([rule1, rule2], variables, actions)
self.assertTrue(result)
self.assertEqual(engine.run.call_count, 2)
# switch order and try again
engine.run.reset_mock()
result = engine.run_all([rule2, rule1], variables, actions)
self.assertTrue(result)
self.assertEqual(engine.run.call_count, 2)
@patch.object(engine, 'run', return_value=True)
def test_run_all_stop_on_first(self, *args):
rule1 = {'conditions': 'condition1', 'actions': 'action name 1'}
rule2 = {'conditions': 'condition2', 'actions': 'action name 2'}
variables = BaseVariables()
actions = BaseActions()
result = engine.run_all([rule1, rule2], variables, actions,
stop_on_first_trigger=True)
self.assertEqual(result, True)
self.assertEqual(engine.run.call_count, 1)
engine.run.assert_called_once_with(rule1, variables, actions)
@patch.object(engine, 'check_conditions_recursively', return_value=True)
@patch.object(engine, 'do_actions')
def test_run_that_triggers_rule(self, *args):
rule = {'conditions': 'blah', 'actions': 'blah2'}
variables = BaseVariables()
actions = BaseActions()
result = engine.run(rule, variables, actions)
self.assertEqual(result, True)
engine.check_conditions_recursively.assert_called_once_with(
rule['conditions'], variables)
engine.do_actions.assert_called_once_with(rule['actions'], actions)
@patch.object(engine, 'check_conditions_recursively', return_value=False)
@patch.object(engine, 'do_actions')
def test_run_that_doesnt_trigger_rule(self, *args):
rule = {'conditions': 'blah', 'actions': 'blah2'}
variables = BaseVariables()
actions = BaseActions()
result = engine.run(rule, variables, actions)
self.assertEqual(result, False)
engine.check_conditions_recursively.assert_called_once_with(
rule['conditions'], variables)
self.assertEqual(engine.do_actions.call_count, 0)
@patch.object(engine, 'check_condition', return_value=True)
def test_check_all_conditions_with_all_true(self, *args):
conditions = {'all': [{'thing1': ''}, {'thing2': ''}]}
variables = BaseVariables()
result = engine.check_conditions_recursively(conditions, variables)
self.assertEqual(result, True)
# assert call count and most recent call are as expected
self.assertEqual(engine.check_condition.call_count, 2)
engine.check_condition.assert_called_with({'thing2': ''}, variables)
###
### Check conditions
###
@patch.object(engine, 'check_condition', return_value=False)
def test_check_all_conditions_with_all_false(self, *args):
conditions = {'all': [{'thing1': ''}, {'thing2': ''}]}
variables = BaseVariables()
result = engine.check_conditions_recursively(conditions, variables)
self.assertEqual(result, False)
engine.check_condition.assert_called_once_with({'thing1': ''}, variables)
def test_check_all_condition_with_no_items_fails(self):
with self.assertRaises(AssertionError):
engine.check_conditions_recursively({'all': []}, BaseVariables())
@patch.object(engine, 'check_condition', return_value=True)
def test_check_any_conditions_with_all_true(self, *args):
conditions = {'any': [{'thing1': ''}, {'thing2': ''}]}
variables = BaseVariables()
result = engine.check_conditions_recursively(conditions, variables)
self.assertEqual(result, True)
engine.check_condition.assert_called_once_with({'thing1': ''}, variables)
@patch.object(engine, 'check_condition', return_value=False)
def test_check_any_conditions_with_all_false(self, *args):
conditions = {'any': [{'thing1': ''}, {'thing2': ''}]}
variables = BaseVariables()
result = engine.check_conditions_recursively(conditions, variables)
self.assertEqual(result, False)
# assert call count and most recent call are as expected
self.assertEqual(engine.check_condition.call_count, 2)
engine.check_condition.assert_called_with({'thing2': ''}, variables)
def test_check_any_condition_with_no_items_fails(self):
with self.assertRaises(AssertionError):
engine.check_conditions_recursively({'any': []}, BaseVariables())
def test_check_all_and_any_together(self):
conditions = {'any': [], 'all': []}
variables = BaseVariables()
with self.assertRaises(AssertionError):
engine.check_conditions_recursively(conditions, variables)
@patch.object(engine, 'check_condition')
def test_nested_all_and_any(self, *args):
conditions = {'all': [
{'any': [{'name': 1}, {'name': 2}]},
{'name': 3}]}
bv = BaseVariables()
def side_effect(condition, _):
return condition['name'] in [2,3]
engine.check_condition.side_effect = side_effect
engine.check_conditions_recursively(conditions, bv)
self.assertEqual(engine.check_condition.call_count, 3)
engine.check_condition.assert_any_call({'name': 1}, bv)
engine.check_condition.assert_any_call({'name': 2}, bv)
engine.check_condition.assert_any_call({'name': 3}, bv)
###
### Operator comparisons
###
def test_check_operator_comparison(self):
string_type = StringType('yo yo')
with patch.object(string_type, 'contains', return_value=True):
result = engine._do_operator_comparison(
string_type, 'contains', 'its mocked')
self.assertTrue(result)
string_type.contains.assert_called_once_with('its mocked')
###
### Actions
###
def test_do_actions(self):
actions = [ {'name': 'action1'},
{'name': 'action2',
'params': {'param1': 'foo', 'param2': 10}}]
defined_actions = BaseActions()
defined_actions.action1 = MagicMock()
defined_actions.action2 = MagicMock()
engine.do_actions(actions, defined_actions)
defined_actions.action1.assert_called_once_with()
defined_actions.action2.assert_called_once_with(param1='foo', param2=10)
def test_do_with_invalid_action(self):
actions = [{'name': 'fakeone'}]
err_string = "Action fakeone is not defined in class BaseActions"
with self.assertRaisesRegexp(AssertionError, err_string):
engine.do_actions(actions, BaseActions())
| mit | -1,828,487,822,967,401,700 | 37.717949 | 81 | 0.631788 | false |
llvm-mirror/lldb | packages/Python/lldbsuite/test/functionalities/data-formatter/refpointer-recursion/TestDataFormatterRefPtrRecursion.py | 5 | 1564 | """
Test that ValueObjectPrinter does not cause an infinite loop when a reference to a struct that contains a pointer to itself is printed.
"""
from __future__ import print_function
import lldb
from lldbsuite.test.lldbtest import *
import lldbsuite.test.lldbutil as lldbutil
class DataFormatterRefPtrRecursionTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to break at.
self.line = line_number('main.cpp', '// Set break point at this line.')
def test_with_run_command(self):
"""Test that ValueObjectPrinter does not cause an infinite loop when a reference to a struct that contains a pointer to itself is printed."""
self.build()
self.runCmd("file " + self.getBuildArtifact("a.out"), CURRENT_EXECUTABLE_SET)
lldbutil.run_break_set_by_file_and_line(
self, "main.cpp", self.line, num_expected_locations=1, loc_exact=True)
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped',
'stop reason = breakpoint'])
self.expect("frame variable foo", substrs=[])
self.expect("frame variable foo --ptr-depth=1", substrs=['ID = 1'])
self.expect("frame variable foo --ptr-depth=2", substrs=['ID = 1'])
self.expect("frame variable foo --ptr-depth=3", substrs=['ID = 1'])
| apache-2.0 | -718,413,751,005,785,300 | 37.146341 | 149 | 0.648338 | false |
howie6879/novels-search | owllook/spiders/zongheng_novel_info.py | 2 | 3729 | # -*- coding:utf-8 -*-
# !/usr/bin/env python
import asyncio
import time
from pprint import pprint
from ruia import Spider, Item, TextField, AttrField
from ruia_ua import middleware as ua_middleware
from owllook.database.mongodb import MotorBase
from owllook.spiders.middlewares import owl_middleware
try:
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
except ImportError:
pass
loop = asyncio.get_event_loop()
asyncio.set_event_loop(loop)
class ZHNovelInfoItem(Item):
"""
    Item subclass defining the fields to extract from a novel's info page.
"""
novel_name = TextField(css_select='div.main div.status h1 a')
author = TextField(css_select='div.main div.status div.booksub a')
    # When the value to extract is a tag attribute, AttrField must be used
cover = AttrField(css_select='div.main div.book_cover img', attr='src')
abstract = TextField(css_select='div.main div.status div.info_con p')
status = AttrField(css_select='div.main div.status h1 em', attr='title')
novels_type = TextField(css_select='div.main div.status div.booksub a')
novel_chapter_url = AttrField(css_select='div.main div.status div.book_btn span.list a', attr='href')
async def clean_author(self, author):
if isinstance(author, list):
return author[0].text
else:
return author
async def clean_status(self, status):
"""
        When the target matches only one element, its value is extracted by
        default; otherwise a list is returned, and a function like this one
        can be defined to loop over it and extract the values.
        :param status:
:return:
"""
if isinstance(status, list):
return '#'.join([i.get('title').strip().replace('作品', '') for i in status])
else:
return status
async def clean_novels_type(self, novels_type):
if isinstance(novels_type, list):
try:
return novels_type[1].text
except:
return ''
else:
return ''
class ZHNovelInfoSpider(Spider):
start_urls = []
request_config = {
'RETRIES': 3,
'DELAY': 2,
'TIMEOUT': 10
}
motor_db = MotorBase(loop=loop).get_db()
async def parse(self, res):
item = await ZHNovelInfoItem.get_item(html=res.html)
item_data = {
'novel_name': item.novel_name,
'author': item.author,
'cover': item.cover,
'abstract': item.abstract,
'status': item.status,
'novels_type': item.novels_type,
'novel_chapter_url': item.novel_chapter_url,
'target_url': res.url,
'spider': 'zongheng',
'updated_at': time.strftime("%Y-%m-%d %X", time.localtime()),
}
        print('Fetched info for novel {} successfully'.format(item_data['novel_name']))
print(item_data)
await self.motor_db.all_novels_info.update_one(
{'novel_name': item_data['novel_name'], 'spider': 'zongheng'},
{'$set': item_data},
upsert=True)
if __name__ == '__main__':
import random
    # Other multi-item examples: https://gist.github.com/howie6879/3ef4168159e5047d42d86cb7fb706a2f
ZHNovelInfoSpider.start_urls = ['http://book.zongheng.com/book/672340.html']
ZHNovelInfoSpider.start(middleware=[ua_middleware, owl_middleware])
# def all_novels_info():
# all_urls = []
#
# for each in ZHNovelInfoSpider.all_novels_col.find({'spider': 'zongheng'}):
# if 'zongheng' in each['novel_url']:
# all_urls.append(each['novel_url'])
# random.shuffle(all_urls)
#
# ZHNovelInfoSpider.start_urls = all_urls
# ZHNovelInfoSpider.start()
#
#
# all_novels_info()
| apache-2.0 | -3,188,035,711,750,168,600 | 29.228814 | 105 | 0.599383 | false |
jelmer/samba | buildtools/wafsamba/samba_headers.py | 11 | 6588 | # specialist handling of header files for Samba
import os, re, sys, fnmatch
import Build, Logs, Utils
from samba_utils import TO_LIST, os_path_relpath
def header_install_path(header, header_path):
'''find the installation path for a header, given a header_path option'''
if not header_path:
return ''
if not isinstance(header_path, list):
return header_path
for (p1, dir) in header_path:
for p2 in TO_LIST(p1):
if fnmatch.fnmatch(header, p2):
return dir
# default to current path
return ''
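# A minimal usage sketch (editor's example; the header path and mapping below
# are hypothetical, not taken from a real build file):
#
#   header_install_path('lib/util/debug.h',
#                       [('lib/util/*.h', 'util'), ('*.h', '')])
#   # -> 'util': the first fnmatch pattern that matches wins. A plain string
#   # header_path is returned unchanged, and a falsy header_path maps the
#   # header to the top-level include directory ('').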
re_header = re.compile('^\s*#\s*include[ \t]*"([^"]+)"', re.I | re.M)
# a dictionary mapping source header paths to public header paths
header_map = {}
def find_suggested_header(hpath):
'''find a suggested header path to use'''
base = os.path.basename(hpath)
ret = []
for h in header_map:
if os.path.basename(h) == base:
ret.append('<%s>' % header_map[h])
ret.append('"%s"' % h)
return ret
def create_public_header(task):
'''create a public header from a private one, output within the build tree'''
src = task.inputs[0].abspath(task.env)
tgt = task.outputs[0].bldpath(task.env)
if os.path.exists(tgt):
os.unlink(tgt)
relsrc = os_path_relpath(src, task.env.TOPDIR)
infile = open(src, mode='r')
outfile = open(tgt, mode='w')
linenumber = 0
search_paths = [ '', task.env.RELPATH ]
for i in task.env.EXTRA_INCLUDES:
if i.startswith('#'):
search_paths.append(i[1:])
for line in infile:
linenumber += 1
# allow some straight substitutions
if task.env.public_headers_replace and line.strip() in task.env.public_headers_replace:
outfile.write(task.env.public_headers_replace[line.strip()] + '\n')
continue
# see if its an include line
m = re_header.match(line)
if m is None:
outfile.write(line)
continue
# its an include, get the header path
hpath = m.group(1)
if hpath.startswith("bin/default/"):
hpath = hpath[12:]
# some are always allowed
if task.env.public_headers_skip and hpath in task.env.public_headers_skip:
outfile.write(line)
continue
# work out the header this refers to
found = False
for s in search_paths:
p = os.path.normpath(os.path.join(s, hpath))
if p in header_map:
outfile.write("#include <%s>\n" % header_map[p])
found = True
break
if found:
continue
if task.env.public_headers_allow_broken:
Logs.warn("Broken public header include '%s' in '%s'" % (hpath, relsrc))
outfile.write(line)
continue
# try to be nice to the developer by suggesting an alternative
suggested = find_suggested_header(hpath)
outfile.close()
os.unlink(tgt)
sys.stderr.write("%s:%u:Error: unable to resolve public header %s (maybe try one of %s)\n" % (
os.path.relpath(src, os.getcwd()), linenumber, hpath, suggested))
raise Utils.WafError("Unable to resolve header path '%s' in public header '%s' in directory %s" % (
hpath, relsrc, task.env.RELPATH))
infile.close()
outfile.close()
def public_headers_simple(bld, public_headers, header_path=None, public_headers_install=True):
'''install some headers - simple version, no munging needed
'''
if not public_headers_install:
return
for h in TO_LIST(public_headers):
inst_path = header_install_path(h, header_path)
if h.find(':') != -1:
s = h.split(":")
h_name = s[0]
inst_name = s[1]
else:
h_name = h
inst_name = os.path.basename(h)
bld.INSTALL_FILES('${INCLUDEDIR}', h_name, destname=inst_name)
def PUBLIC_HEADERS(bld, public_headers, header_path=None, public_headers_install=True):
'''install some headers
header_path may either be a string that is added to the INCLUDEDIR,
or it can be a dictionary of wildcard patterns which map to destination
directories relative to INCLUDEDIR
'''
bld.SET_BUILD_GROUP('final')
if not bld.env.build_public_headers:
        # in this case no header munging needed. Used for tdb, talloc etc
public_headers_simple(bld, public_headers, header_path=header_path,
public_headers_install=public_headers_install)
return
# create the public header in the given path
# in the build tree
for h in TO_LIST(public_headers):
inst_path = header_install_path(h, header_path)
if h.find(':') != -1:
s = h.split(":")
h_name = s[0]
inst_name = s[1]
else:
h_name = h
inst_name = os.path.basename(h)
relpath1 = os_path_relpath(bld.srcnode.abspath(), bld.curdir)
relpath2 = os_path_relpath(bld.curdir, bld.srcnode.abspath())
targetdir = os.path.normpath(os.path.join(relpath1, bld.env.build_public_headers, inst_path))
if not os.path.exists(os.path.join(bld.curdir, targetdir)):
raise Utils.WafError("missing source directory %s for public header %s" % (targetdir, inst_name))
target = os.path.join(targetdir, inst_name)
# the source path of the header, relative to the top of the source tree
src_path = os.path.normpath(os.path.join(relpath2, h_name))
# the install path of the header, relative to the public include directory
target_path = os.path.normpath(os.path.join(inst_path, inst_name))
header_map[src_path] = target_path
t = bld.SAMBA_GENERATOR('HEADER_%s/%s/%s' % (relpath2, inst_path, inst_name),
group='headers',
rule=create_public_header,
source=h_name,
target=target)
t.env.RELPATH = relpath2
t.env.TOPDIR = bld.srcnode.abspath()
if not bld.env.public_headers_list:
bld.env.public_headers_list = []
bld.env.public_headers_list.append(os.path.join(inst_path, inst_name))
if public_headers_install:
bld.INSTALL_FILES('${INCLUDEDIR}',
target,
destname=os.path.join(inst_path, inst_name), flat=True)
Build.BuildContext.PUBLIC_HEADERS = PUBLIC_HEADERS
| gpl-3.0 | -7,714,147,704,137,984,000 | 35.6 | 109 | 0.589405 | false |
scitran/data | scitran/data/test/test_data.py | 2 | 4582 | # @Author: Kevin S Hahn
"""Test nimsdata package."""
import os
import glob
import numpy as np
from nose.plugins.attrib import attr
from numpy.testing.decorators import skipif
from nose.tools import ok_, eq_, raises, assert_raises
import scitran.data as scidata
import scitran.data.tempdir as tempfile
# data is stored separately in nimsdata_testdata
# located at the top level of the testing directory
DATADIR = os.path.join(os.path.dirname(__file__), 'testdata')
if not os.path.isdir(DATADIR):
DATADIR = None
# check that all types definied in MODULES.json can
# be accessed via <tier>_properties_by_type_list
# these tests serve two purposes
# 1. test the top level function to get various propreties by type list
# 2. test that each module defined in modules.json have properties define
# makes use of dict_merge and module_by_type
class test_properties_by_type_list(object):
def setUp(self):
types = scidata.data.MODULES
self.type_list = []
for t in types:
if '.' in t:
type_tuple = t.split('.')
else:
type_tuple = (t, None)
self.type_list.append(type_tuple)
def test_fail_module_by_type(self):
assert_raises(scidata.DataError, scidata.data.module_by_type, ('fake', 'type'))
def test_fail_dict_merge(self):
assert_raises(scidata.DataError, scidata.dict_merge, [], [])
def test_acquisition(self):
ok_(scidata.acquisition_properties_by_type_list(self.type_list))
def test_session(self):
ok_(scidata.session_properties_by_type_list(self.type_list))
def test_project(self):
ok_(scidata.project_properties_by_type_list(self.type_list))
# Test the parse interface, without getting into any of the parsers
# make sure errors are being raised in the right places
# makes use of _get_handler, and _parse_dataset
class test_parse(object):
@skipif(not DATADIR)
def setUp(self):
pass
def test_get_handler(self):
READERS = scidata.data.READERS
assert_raises(scidata.DataError, scidata.get_handler, 'fake.fake.fake', READERS) # doesn't exost
assert_raises(scidata.DataError, scidata.get_handler, 'fake.fake', READERS) # FIXME
assert_raises(scidata.DataError, scidata.get_handler, 'fake', READERS) # FIXME
@skipif(not DATADIR)
def test_input_not_found(self):
assert_raises(scidata.DataError, scidata.parse, './fake.tgz')
@skipif(not DATADIR)
def test_ignore_json_without_filetype(self):
assert_raises(scidata.DataError, scidata.parse, './fake.tgz', ignore_json=True)
@skipif(not DATADIR)
def test_no_json_in_tgz(self):
assert_raises(scidata.DataError, scidata.parse, './nojson.tgz')
@skipif(not DATADIR)
def test_invalid_input(self):
assert_raises(scidata.DataError, scidata.parse, './')
assert_raises(scidata.DataError, scidata.parse, __file__)
class test_all_readers(object):
@skipif(not DATADIR)
def setUp(self):
self.readers = scidata.data.READERS
@skipif(not DATADIR)
    def all_readers(self):
# good test data must follow specific naming convention
for filetype in self.readers.iterkeys():
parser = scidata.get_handler(filetype)
# test the write interface, without actually getting into any of the writers
class test_write(object):
@skipif(not DATADIR)
def setUp(self):
testdata = os.path.join(DATADIR, 'ge_dcm_mr_localizer.tgz')
self.ds = scidata.parse(testdata, load_data=True)
@skipif(not DATADIR)
def test_nifti_write(self):
with tempfile.TemporaryDirectory() as tempdir:
outbase = os.path.join(tempdir, 'trashme')
scidata.write(self.ds, self.ds.data, filetype='nifti', outbase=outbase)
print glob.glob(outbase + '*')
assert (len(glob.glob(outbase + '*')) >= 1)
@skipif(not DATADIR)
def test_no_filetype(self):
assert_raises(scidata.DataError, scidata.write, self.ds, self.ds.data, filetype=None, outbase='trashme')
@skipif(not DATADIR)
def test_empty_meta(self):
eq_(scidata.write(None, self.ds.data, filetype='nifti', outbase='trashme'), [])
@skipif(not DATADIR)
def test_empty_data(self):
eq_(scidata.write(self.ds, None, filetype='nifti', outbase='trashme'), [])
# How to write tests for the abstract classes NIMSReader and NIMSWriter?
# They are non-instantiable and have no class methods that can be tested.
# XXX: I'm not sure what the best approach is.
| mit | -5,609,635,501,689,800,000 | 33.977099 | 112 | 0.672196 | false |
davebridges/Lab-Website | communication/feeds.py | 2 | 2184 | '''This package controls syndication for the :mod:communication, only for :class:`~communication.models.Post` objects.
The available feeds are as follows:
+--------------------+---------------------------+---------------------------------------------+
| Feed | Feed Location | Feed Class |
+====================+===========================+=============================================+
| Posts              | /feeds/posts              | :class:`~communication.feeds.PostsFeed`     |
+--------------------+---------------------------+---------------------------------------------+
A main page describing all feeds is available at **/feeds**.
'''
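# A hedged wiring sketch (editor's example, not part of this module): in a
# typical Django URLconf the feed above would be exposed roughly like this,
# assuming the project's urls.py imports it:
#
#   from communication.feeds import PostsFeed
#   urlpatterns += [url(r'^feeds/posts/?$', PostsFeed(), name='post-feed')]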
from datetime import datetime, time
from django.contrib.syndication.views import Feed
from django.core.urlresolvers import reverse
from django.conf import settings
from communication.models import Post
class PostsFeed(Feed):
'''This class defines the feed for posts.'''
title = "Posts by the %s" % settings.LAB_NAME
link = "/feeds/posts"
description = "Blog posts, about papers or interesting topics to our group."
def items(self):
'''The items returned by this feed are all Post objects.'''
return Post.objects.all()
def item_title(self, item):
'''The title of each item will be the unicode representation'''
return item.__unicode__()
def item_author_name(self, item):
'''The author of the item.'''
return item.author
def item_author_link(self, item):
'''The link to the author's page.'''
return item.author.get_absolute_url()
def item_pubdate(self, item):
'''The date of publication of this commentary, not the modification date.'''
return datetime.combine(item.created, time())
def item_updateddate(self, item):
'''The date when this commentary was updated.'''
return datetime.combine(item.modified, time())
def item_copyright(self, item):
'''The copyright is always CC-BY for posts.'''
return "%s by %s is licensed under a Creative Commons Attribution 4.0 Unported License." %(item, item.author)
| mit | 1,394,260,057,340,244,700 | 37.315789 | 118 | 0.556319 | false |
znick/anytask | anytask/search/views.py | 1 | 7149 | # -*- coding: utf-8 -*-
import json
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseForbidden
from django.shortcuts import render
from haystack.query import SearchQuerySet
from courses.models import Course
from schools.models import School
from users.models import UserProfile
@login_required()
def search_page(request):
user = request.user
query = request.GET.get('q', '')
context = {
'user': request.user,
'user_is_teacher': True if Course.objects.filter(teachers=user).count() else False,
'query': query,
'user_profiles': search_users(query, user)[1],
'courses': search_courses(query, user)[1],
}
return render(request, 'search.html', context)
@login_required()
def ajax_search_users(request):
if 'q' not in request.GET:
return HttpResponseForbidden()
if 'max' in request.GET:
max_result = int(request.GET["max"])
else:
max_result = 3
result, _ = search_users(request.GET.get('q', ''), request.user, max_result + 1)
return HttpResponse(json.dumps({'result': result[:max_result],
'is_limited': True if len(result) > max_result else False}),
content_type='application/json')
@login_required()
def ajax_search_courses(request):
if 'q' not in request.GET:
return HttpResponseForbidden()
if 'max' in request.GET:
max_result = int(request.GET["max"])
else:
max_result = 3
result, _ = search_courses(request.GET.get('q', ''), request.user, max_result + 1)
return HttpResponse(json.dumps({'result': result[:max_result],
'is_limited': True if len(result) > max_result else False}),
content_type='application/json')
def search_users(query, user, max_result=None):
result = []
result_objs = []
if query:
user_is_staff = user.is_staff
user_is_teacher = None
if not user_is_staff:
user_is_teacher = True if Course.objects.filter(teachers=user).count() else False
sgs = SearchQuerySet().models(UserProfile).exclude(user_id=user.id)
sgs_fullname = sgs.autocomplete(fullname_auto=query)
sgs_login = sgs.autocomplete(login_auto=query)
if user_is_staff or user_is_teacher:
sgs_ya_contest_login = sgs.autocomplete(ya_contest_login_auto=query)
sgs_ya_passport_email = sgs.autocomplete(ya_passport_email_auto=query)
sgs_email = sgs.autocomplete(email_auto=query)
else:
sgs_ya_contest_login = sgs.none()
sgs_ya_passport_email = sgs.none()
sgs_email = sgs.none()
sgs = sgs_fullname | sgs_login | sgs_ya_contest_login | sgs_ya_passport_email | sgs_email
if not user_is_staff:
groups = user.group_set.all()
courses = Course.objects.filter(groups__in=groups)
schools = School.objects.filter(courses__in=courses)
courses_teacher = Course.objects.filter(teachers=user)
schools_teacher = School.objects.filter(courses__in=courses_teacher)
for sg in sgs:
user_to_show = sg.object.user
groups_user_to_show = user_to_show.group_set.all()
courses_user_to_show = Course.objects.filter(groups__in=groups_user_to_show)
schools_user_to_show = School.objects.filter(courses__in=courses_user_to_show)
courses_user_to_show_teacher = Course.objects.filter(teachers=user_to_show)
schools_user_to_show_teacher = School.objects.filter(courses__in=courses_user_to_show_teacher)
user_school_user_to_show = False
if (schools_user_to_show | schools_user_to_show_teacher) & (schools | schools_teacher):
user_school_user_to_show = True
if not user_school_user_to_show:
continue
user_to_show_teach_user = False
if courses_user_to_show_teacher & courses:
user_to_show_teach_user = True
user_teach_user_to_show = False
if courses_teacher & courses_user_to_show:
user_teach_user_to_show = True
show_email = \
sg.object.show_email or \
user_teach_user_to_show or \
user_to_show_teach_user
result.append({
"fullname": user_to_show.get_full_name(),
"username": user_to_show.username,
"ya_contest_login": sg.object.ya_contest_login if user_is_teacher else '',
"url": user_to_show.get_absolute_url(),
"avatar": sg.object.avatar.url if sg.object.avatar else '',
"email": user_to_show.email if show_email else '',
"ya_passport_email": sg.object.ya_passport_email if show_email else '',
"id": user_to_show.id,
"statuses": list(sg.object.user_status.values_list('name', 'color'))
})
result_objs.append(sg.object)
if len(result) == max_result:
break
else:
for sg in sgs[:max_result]:
result.append({
"fullname": sg.object.user.get_full_name(),
"username": sg.object.user.username,
"ya_contest_login": sg.object.ya_contest_login,
"url": sg.object.user.get_absolute_url(),
"avatar": sg.object.avatar.url if sg.object.avatar else '',
"email": sg.object.user.email,
"ya_passport_email": sg.object.ya_passport_email,
"id": sg.object.user.id,
"statuses": list(sg.object.user_status.values_list('name', 'color'))
})
result_objs.append(sg.object)
return result, result_objs
def search_courses(query, user, max_result=None):
result = []
result_objs = []
if query:
user_is_staff = user.is_staff
sgs_name = SearchQuerySet().models(Course).order_by('-is_active')
if not user_is_staff:
groups = user.group_set.all()
courses_ids = (Course.objects.filter(groups__in=groups) | Course.objects.filter(teachers=user)) \
.values_list('id', flat=True)
sgs_name = sgs_name.filter(course_id__in=courses_ids).autocomplete(name_auto=query)
else:
sgs_name = sgs_name.autocomplete(name_auto=query)
for sg in sgs_name[:max_result]:
result.append({
'name': unicode(sg.object.name),
'year': unicode(sg.object.year),
'url': sg.object.get_absolute_url(),
'schools': [sch.name for sch in sg.object.school_set.all()],
'is_active': sg.object.is_active
})
result_objs.append(sg.object)
return result, result_objs
| mit | -7,859,395,769,213,018,000 | 37.435484 | 110 | 0.567492 | false |
mirkobronzi/ml-code | ml_code/tensorflow/models.py | 1 | 2072 | """
script containing some of the most used models.
"""
import tensorflow as tf
class Linear:
def __init__(self, input_dim, output_dim, l2_reg=None, description=None):
self.input_dim = input_dim
self.output_dim = output_dim
self.l2_reg = l2_reg
self.description = 'Linear (l2_reg: {})'.format(l2_reg) if \
description is None else description
def build(self, x, y_):
        W = tf.Variable(tf.truncated_normal([self.input_dim, self.output_dim]),
                        name='W')
        b = tf.Variable(tf.zeros([self.output_dim]), name='b')
y = tf.matmul(x, W) + b
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y, y_))
if self.l2_reg is not None:
l2reg_loss = tf.nn.l2_loss(W)
loss += self.l2_reg * l2reg_loss
train_step = tf.train.AdamOptimizer().minimize(loss)
return y, loss, train_step
class MLP:
def __init__(self, input_dim, output_dim, hidden=512, l2_reg=None,
description=None):
self.input_dim = input_dim
self.output_dim = output_dim
self.hidden = hidden
self.l2_reg = l2_reg
self.description = 'MLP (hidden: {}, l2_reg: {})'.format(
hidden, l2_reg) if description is None else description
def build(self, x, y_):
        W1 = tf.Variable(tf.truncated_normal([self.input_dim, self.hidden]),
                         name='W1')
        b1 = tf.Variable(tf.zeros([self.hidden]), name='b1')
        y1 = tf.nn.relu(tf.matmul(x, W1) + b1)
        W2 = tf.Variable(tf.truncated_normal([self.hidden, self.output_dim]),
                         name='W2')
        b2 = tf.Variable(tf.zeros([self.output_dim]), name='b2')
y = tf.matmul(y1, W2) + b2
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y, y_))
if self.l2_reg is not None:
l2reg_loss = tf.nn.l2_loss(W1) + tf.nn.l2_loss(W2)
loss += self.l2_reg * l2reg_loss
train_step = tf.train.AdamOptimizer().minimize(loss)
return y, loss, train_step
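# A minimal usage sketch (editor's example; shapes and hyper-parameters are
# hypothetical). Both classes expect TF1-style placeholders and return the
# logits, the (optionally L2-regularised) cross-entropy loss and an Adam
# train op:
#
#   x  = tf.placeholder(tf.float32, [None, 784])
#   y_ = tf.placeholder(tf.float32, [None, 10])
#   y, loss, train_step = MLP(784, 10, hidden=256, l2_reg=1e-4).build(x, y_)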
| apache-2.0 | 5,692,412,360,373,031,000 | 33.533333 | 79 | 0.563224 | false |
7kbird/chrome | tools/telemetry/telemetry/core/platform/power_monitor/cros_power_monitor.py | 2 | 6206 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import re
from telemetry import decorators
from telemetry.core.platform import cros_sysfs_platform
from telemetry.core.platform.power_monitor import sysfs_power_monitor
class CrosPowerMonitor(sysfs_power_monitor.SysfsPowerMonitor):
"""PowerMonitor that relies on 'power_supply_info' to monitor power
consumption of a single ChromeOS application.
"""
def __init__(self, cri):
"""Constructor.
Args:
cri: Chrome interface.
Attributes:
_cri: The Chrome interface.
_initial_power: The result of 'power_supply_info' before the test.
_start_time: The epoch time at which the test starts executing.
"""
super(CrosPowerMonitor, self).__init__(
cros_sysfs_platform.CrosSysfsPlatform(cri))
self._cri = cri
self._initial_power = None
self._start_time = None
@decorators.Cache
def CanMonitorPower(self):
return super(CrosPowerMonitor, self).CanMonitorPower()
def StartMonitoringPower(self, browser):
super(CrosPowerMonitor, self).StartMonitoringPower(browser)
if self._IsOnBatteryPower():
sample = self._cri.RunCmdOnDevice(
['power_supply_info;', 'date', '+%s'])[0]
self._initial_power, self._start_time = CrosPowerMonitor.SplitSample(
sample)
def StopMonitoringPower(self):
cpu_stats = super(CrosPowerMonitor, self).StopMonitoringPower()
power_stats = {}
if self._IsOnBatteryPower():
sample = self._cri.RunCmdOnDevice(
['power_supply_info;', 'date', '+%s'])[0]
final_power, end_time = CrosPowerMonitor.SplitSample(sample)
# The length of the test is used to measure energy consumption.
length_h = (end_time - self._start_time) / 3600.0
power_stats = CrosPowerMonitor.ParsePower(self._initial_power,
final_power, length_h)
return CrosPowerMonitor.CombineResults(cpu_stats, power_stats)
@staticmethod
def SplitSample(sample):
"""Splits a power and time sample into the two separate values.
Args:
sample: The result of calling 'power_supply_info; date +%s' on the
device.
Returns:
A tuple of power sample and epoch time of the sample.
"""
sample = sample.strip()
index = sample.rfind('\n')
power = sample[:index]
time = sample[index + 1:]
return power, int(time)
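  # Example (editor's sketch, hypothetical sample): the device output ends with
  # the epoch line produced by `date +%s`, so SplitSample("Line Power...\n1421800799")
  # returns ("Line Power...", 1421800799) -- everything before the last newline
  # is the power_supply_info text.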
@staticmethod
def IsOnBatteryPower(status, board):
"""Determines if the devices is being charged.
Args:
status: The parsed result of 'power_supply_info'
board: The name of the board running the test.
Returns:
True if the device is on battery power; False otherwise.
"""
on_battery = status['Line Power']['online'] == 'no'
# Butterfly can incorrectly report AC online for some time after unplug.
# Check battery discharge state to confirm.
if board == 'butterfly':
on_battery |= status['Battery']['state'] == 'Discharging'
return on_battery
def _IsOnBatteryPower(self):
"""Determines if the device is being charged.
Returns:
True if the device is on battery power; False otherwise.
"""
status = CrosPowerMonitor.ParsePowerSupplyInfo(
self._cri.RunCmdOnDevice(['power_supply_info'])[0])
board_data = self._cri.RunCmdOnDevice(['cat', '/etc/lsb-release'])[0]
board = re.search('BOARD=(.*)', board_data).group(1)
return CrosPowerMonitor.IsOnBatteryPower(status, board)
@staticmethod
def ParsePowerSupplyInfo(sample):
"""Parses 'power_supply_info' command output.
Args:
sample: The output of 'power_supply_info'
Returns:
Dictionary containing all fields from 'power_supply_info'
"""
rv = collections.defaultdict(dict)
dev = None
for ln in sample.splitlines():
result = re.findall(r'^Device:\s+(.*)', ln)
if result:
dev = result[0]
continue
result = re.findall(r'\s+(.+):\s+(.+)', ln)
if result and dev:
kname = re.findall(r'(.*)\s+\(\w+\)', result[0][0])
if kname:
rv[dev][kname[0]] = result[0][1]
else:
rv[dev][result[0][0]] = result[0][1]
return dict(rv)
@staticmethod
def ParsePower(initial_stats, final_stats, length_h):
"""Parse output of 'power_supply_info'
Args:
initial_stats: The output of 'power_supply_info' before the test.
final_stats: The output of 'power_supply_info' after the test.
length_h: The length of the test in hours.
Returns:
Dictionary in the format returned by StopMonitoringPower().
"""
out_dict = {'identifier': 'power_supply_info'}
component_utilization = {}
initial = CrosPowerMonitor.ParsePowerSupplyInfo(initial_stats)
final = CrosPowerMonitor.ParsePowerSupplyInfo(final_stats)
# The charge value reported by 'power_supply_info' is not precise enough to
# give meaningful results across shorter tests, so average energy rate and
# the length of the test are used.
initial_power_mw = float(initial['Battery']['energy rate']) * 10 ** 3
final_power_mw = float(final['Battery']['energy rate']) * 10 ** 3
average_power_mw = (initial_power_mw + final_power_mw) / 2.0
out_dict['power_samples_mw'] = [initial_power_mw, final_power_mw]
out_dict['energy_consumption_mwh'] = average_power_mw * length_h
# Duplicating CrOS battery fields where applicable.
battery = {}
battery['charge_full'] = float(final['Battery']['full charge'])
battery['charge_full_design'] = (
float(final['Battery']['full charge design']))
battery['charge_now'] = float(final['Battery']['charge'])
battery['current_now'] = float(final['Battery']['current'])
battery['energy'] = float(final['Battery']['energy'])
battery['energy_rate'] = float(final['Battery']['energy rate'])
battery['voltage_now'] = float(final['Battery']['voltage'])
component_utilization['battery'] = battery
out_dict['component_utilization'] = component_utilization
return out_dict
| bsd-3-clause | 3,648,558,497,403,812,400 | 35.940476 | 79 | 0.657106 | false |
SpectoLabs/hoverfly | docs/conf.py | 1 | 1924 | import sys
import os
import shlex
extensions = [
'sphinxcontrib.mermaid',
'sphinx.ext.extlinks',
'sphinx.ext.todo'
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'Hoverfly'
copyright = u'2017, SpectoLabs'
author = u'SpectoLabs'
version = 'v1.3.2'
# The full version, including alpha/beta/rc tags.
release = version
zip_base_url = 'https://github.com/SpectoLabs/hoverfly/releases/download/' + version + '/'
extlinks = {'zip_bundle_os_arch': (zip_base_url + 'hoverfly_bundle_%s.zip', 'zip_bundle_os_arch')}
language = None
exclude_patterns = ['_build']
pygments_style = 'sphinx'
todo_include_todos = False
if 'READTHEDOCS' not in os.environ:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_static_path = ['_static']
html_context = {
'css_files': [
'https://media.readthedocs.org/css/sphinx_rtd_theme.css',
'https://media.readthedocs.org/css/readthedocs-doc-embed.css',
'_static/theme_overrides.css',
],
}
htmlhelp_basename = 'hoverflydoc'
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
latex_documents = [
(master_doc, 'hoverfly.tex', u'Hoverfly Documentation',
u'SpectoLabs', 'manual'),
]
man_pages = [
(master_doc, 'Hoverfly', u'Hoverfly Documentation',
[author], 1)
]
texinfo_documents = [
(master_doc, 'Hoverfly', u'Hoverfly Documentation',
author, 'Hoverfly', 'API simulations for development and testing',
'Miscellaneous'),
]
| apache-2.0 | 7,942,276,931,572,694,000 | 21.635294 | 98 | 0.617983 | false |
askdaddy/PerfKitBenchmarker | perfkitbenchmarker/benchmarks/bonnie_benchmark.py | 6 | 8167 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs plain vanilla bonnie++."""
import logging
from perfkitbenchmarker import flags
from perfkitbenchmarker import regex_util
from perfkitbenchmarker import sample
FLAGS = flags.FLAGS
BENCHMARK_INFO = {'name': 'bonnie++',
'description': 'Runs Bonnie++. Running this benchmark inside '
'a container is currently not supported, '
'since Docker tries to run it as root, which '
'is not recommended.',
'scratch_disk': True,
'num_machines': 1}
LATENCY_REGEX = r'([0-9]*\.?[0-9]+)(\w+)'
# Bonnie++ result fields mapping, see man bon_csv2txt for details.
BONNIE_RESULTS_MAPPING = {
'format_version': 0,
'bonnie_version': 1,
'name': 2,
'concurrency': 3,
'seed': 4,
'file_size': 5,
'chunk_size': 6,
'putc': 7,
'putc_cpu': 8,
'put_block': 9,
'put_block_cpu': 10,
'rewrite': 11,
'rewrite_cpu': 12,
'getc': 13,
'getc_cpu': 14,
'get_block': 15,
'get_block_cpu': 16,
'seeks': 17,
'seeks_cpu': 18,
'num_files': 19,
'max_size': 20,
'min_size': 21,
'num_dirs': 22,
'file_chunk_size': 23,
'seq_create': 24,
'seq_create_cpu': 25,
'seq_stat': 26,
'seq_stat_cpu': 27,
'seq_del': 28,
'seq_del_cpu': 29,
'ran_create': 30,
'ran_create_cpu': 31,
'ran_stat': 32,
'ran_stat_cpu': 33,
'ran_del': 34,
'ran_del_cpu': 35,
'putc_latency': 36,
'put_block_latency': 37,
'rewrite_latency': 38,
'getc_latency': 39,
'get_block_latency': 40,
'seeks_latency': 41,
'seq_create_latency': 42,
'seq_stat_latency': 43,
'seq_del_latency': 44,
'ran_create_latency': 45,
'ran_stat_latency': 46,
'ran_del_latency': 47}
def GetInfo():
return BENCHMARK_INFO
def Prepare(benchmark_spec):
"""Install Bonnie++ on the target vm.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
vms = benchmark_spec.vms
vm = vms[0]
logging.info('Bonnie++ prepare on %s', vm)
vm.Install('bonnieplusplus')
def IsValueValid(value):
"""Validate the value.
An invalid value is either an empty string or a string of multiple '+'.
Args:
value: string. The value in raw result.
Returns:
A boolean indicates if the value is valid or not.
"""
if value == '' or '+' in value:
return False
return True
def IsCpuField(field):
"""Check if the field is cpu percentage.
Args:
field: string. The name of the field.
Returns:
A boolean indicates if the field contains keyword 'cpu'.
"""
return 'cpu' in field
def IsLatencyField(field):
"""Check if the field is latency.
Args:
field: string. The name of the field.
Returns:
A boolean indicates if the field contains keyword 'latency'.
"""
return 'latency' in field
def ParseLatencyResult(result):
"""Parse latency result into value and unit.
Args:
result: string. Latency value in string format, contains value and unit.
eg. 200ms
Returns:
A tuple of value (float) and unit (string).
"""
match = regex_util.ExtractAllMatches(LATENCY_REGEX, result)[0]
return float(match[0]), match[1]
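# Example (editor's sketch): ParseLatencyResult('44660us') returns (44660.0, 'us')
# and ParseLatencyResult('512ms') returns (512.0, 'ms'); LATENCY_REGEX simply
# separates the numeric value from its unit suffix.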
def UpdateMetadata(metadata, key, value):
"""Check if the value is valid, update metadata with the key, value pair.
Args:
metadata: dict. A dictionary of sample metadata.
key: string. Key that will be added into metadata dictionary.
value: Value that of the key.
"""
if IsValueValid(value):
metadata[key] = value
def CreateSamples(results, start_index, end_index, metadata,
field_index_mapping):
"""Create samples with data in results from start_index to end_index.
Args:
results: A list of string representing bonnie++ results.
start_index: integer. The start index in results list of the samples.
end_index: integer. The end index in results list of the samples.
metadata: dict. A dictionary of metadata added into samples.
field_index_mapping: dict. A dictionary maps field index to field names.
Returns:
A list of sample.Sample instances.
"""
samples = []
for field_index in range(start_index, end_index):
field_name = field_index_mapping[field_index]
value = results[field_index]
if not IsValueValid(value):
continue
if IsCpuField(field_name):
unit = '%s'
elif IsLatencyField(field_name):
value, unit = ParseLatencyResult(value)
else:
unit = 'K/sec'
samples.append(sample.Sample(field_name, float(value), unit, metadata))
return samples
def ParseCSVResults(results):
"""Parse csv format bonnie++ results.
Sample Results:
1.96,1.96,perfkit-7b22f510-0,1,1421800799,7423M,,,,72853,15,47358,5,,,
156821,7,537.7,10,100,,,,,49223,58,+++++,+++,54405,53,2898,97,+++++,+++,
59089,60,,512ms,670ms,,44660us,200ms,3747us,1759us,1643us,33518us,192us,
839us
Args:
results: string. Bonnie++ results.
Returns:
A list of samples in the form of 3 or 4 tuples. The tuples contain
the sample metric (string), value (float), and unit (string).
If a 4th element is included, it is a dictionary of sample
metadata.
"""
field_index_mapping = {}
for field, value in BONNIE_RESULTS_MAPPING.iteritems():
field_index_mapping[value] = field
results = results.split(',')
assert len(results) == len(BONNIE_RESULTS_MAPPING)
samples = []
metadata = {}
for field_index in range(BONNIE_RESULTS_MAPPING['format_version'],
BONNIE_RESULTS_MAPPING['chunk_size'] + 1):
UpdateMetadata(metadata, field_index_mapping[field_index],
results[field_index])
for field_index in range(BONNIE_RESULTS_MAPPING['num_files'],
BONNIE_RESULTS_MAPPING['file_chunk_size'] + 1):
UpdateMetadata(metadata, field_index_mapping[field_index],
results[field_index])
samples.extend(CreateSamples(results,
BONNIE_RESULTS_MAPPING['putc'],
BONNIE_RESULTS_MAPPING['num_files'],
metadata, field_index_mapping))
samples.extend(CreateSamples(results,
BONNIE_RESULTS_MAPPING['seq_create'],
BONNIE_RESULTS_MAPPING['ran_del_latency'] + 1,
metadata, field_index_mapping))
return samples
def Run(benchmark_spec):
"""Run Bonnie++ on the target vm.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of samples in the form of 3 or 4 tuples. The tuples contain
the sample metric (string), value (float), and unit (string).
If a 4th element is included, it is a dictionary of sample
metadata.
"""
vms = benchmark_spec.vms
vm = vms[0]
logging.info('Bonnie++ running on %s', vm)
bonnie_command = ('/usr/sbin/bonnie++ -q -d %s -s %d -n 100 -f' %
(vm.GetScratchDir(),
2 * vm.total_memory_kb / 1024))
logging.info('Bonnie++ Results:')
out, _ = vm.RemoteCommand(bonnie_command, should_log=True)
return ParseCSVResults(out.strip())
def Cleanup(benchmark_spec):
"""Cleanup Bonnie++ on the target vm (by uninstalling).
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
pass
| apache-2.0 | 708,635,107,849,070,700 | 28.59058 | 80 | 0.629362 | false |
luckielordie/conan | conans/test/util/xz_test.py | 2 | 3796 | import os
from unittest import TestCase
import six
import unittest
import tarfile
from conans.test.utils.test_files import temp_folder
from conans.tools import unzip, save
from conans.util.files import load, save_files
from conans.errors import ConanException
from conans.test.utils.tools import TestClient, TestServer
from conans.model.ref import ConanFileReference, PackageReference
class XZTest(TestCase):
def test_error_xz(self):
server = TestServer()
ref = ConanFileReference.loads("Pkg/0.1@user/channel")
export = server.paths.export(ref)
save_files(export, {"conanfile.py": "#",
"conanmanifest.txt": "#",
"conan_export.txz": "#"})
client = TestClient(servers={"default": server},
users={"default": [("lasote", "mypass")]})
error = client.run("install Pkg/0.1@user/channel", ignore_error=True)
self.assertTrue(error)
self.assertIn("ERROR: This Conan version is not prepared to handle "
"'conan_export.txz' file format", client.out)
def test_error_sources_xz(self):
server = TestServer()
ref = ConanFileReference.loads("Pkg/0.1@user/channel")
client = TestClient(servers={"default": server},
users={"default": [("lasote", "mypass")]})
export = server.paths.export(ref)
conanfile = """from conans import ConanFile
class Pkg(ConanFile):
exports_sources = "*"
"""
save_files(export, {"conanfile.py": conanfile,
"conanmanifest.txt": "1",
"conan_sources.txz": "#"})
error = client.run("install Pkg/0.1@user/channel --build", ignore_error=True)
self.assertTrue(error)
self.assertIn("ERROR: This Conan version is not prepared to handle "
"'conan_sources.txz' file format", client.out)
def test_error_package_xz(self):
server = TestServer()
ref = ConanFileReference.loads("Pkg/0.1@user/channel")
client = TestClient(servers={"default": server},
users={"default": [("lasote", "mypass")]})
export = server.paths.export(ref)
conanfile = """from conans import ConanFile
class Pkg(ConanFile):
exports_sources = "*"
"""
save_files(export, {"conanfile.py": conanfile,
"conanmanifest.txt": "1"})
pkg_ref = PackageReference(ref, "5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9")
package = server.paths.package(pkg_ref)
save_files(package, {"conaninfo.txt": "#",
"conanmanifest.txt": "1",
"conan_package.txz": "#"})
error = client.run("install Pkg/0.1@user/channel", ignore_error=True)
self.assertTrue(error)
self.assertIn("ERROR: This Conan version is not prepared to handle "
"'conan_package.txz' file format", client.out)
@unittest.skipUnless(six.PY3, "only Py3")
def test(self):
tmp_dir = temp_folder()
file_path = os.path.join(tmp_dir, "a_file.txt")
save(file_path, "my content!")
txz = os.path.join(tmp_dir, "sample.tar.xz")
with tarfile.open(txz, "w:xz") as tar:
tar.add(file_path, "a_file.txt")
dest_folder = temp_folder()
unzip(txz, dest_folder)
content = load(os.path.join(dest_folder, "a_file.txt"))
self.assertEqual(content, "my content!")
@unittest.skipUnless(six.PY2, "only Py2")
def test_error_python2(self):
with self.assertRaisesRegexp(ConanException, "XZ format not supported in Python 2"):
dest_folder = temp_folder()
unzip("somefile.tar.xz", dest_folder)
| mit | -1,320,579,582,135,440,600 | 42.136364 | 92 | 0.594046 | false |
tuomas777/linkedevents | events/management/commands/event_export.py | 2 | 2390 | import os
from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.utils.translation import activate, get_language
from events.exporter.base import get_exporters
class Command(BaseCommand):
help = "Export event data"
exporter_types = ['events']
def __init__(self):
super().__init__()
self.exporters = get_exporters()
self.exp_list = ', '.join(sorted(self.exporters.keys()))
self.missing_args_message = "Enter the name of the event exporter module. Valid exporters: %s" % self.exp_list
def add_arguments(self, parser):
parser.add_argument('module')
parser.add_argument('--new', action='store_true', dest='new',
help='Export entities added after last export date')
parser.add_argument('--delete', action='store_true', dest='delete',
help='Delete exported items from target system')
for exp in self.exporter_types:
parser.add_argument('--%s' % exp, dest=exp, action='store_true', help='export %s' % exp)
def handle(self, *args, **options):
module = options['module']
        if module not in self.exporters:
raise CommandError("Exporter %s not found. Valid exporters: %s" % (module, self.exp_list))
exp_class = self.exporters[module]
if hasattr(settings, 'PROJECT_ROOT'):
root_dir = settings.PROJECT_ROOT
else:
root_dir = settings.BASE_DIR
exporter = exp_class()
# Activate the default language for the duration of the export
# to make sure translated fields are populated correctly.
old_lang = get_language()
activate(settings.LANGUAGES[0][0])
for exp_type in self.exporter_types:
name = "export_%s" % exp_type
method = getattr(exporter, name, None)
if options[exp_type]:
if not method:
raise CommandError(
"Exporter %s does not support exporter %s"
% (name, exp_type))
else:
if not options['new'] and not options['delete']:
continue
if method:
method(is_delete=(True if options['delete'] else False))
activate(old_lang)
| bsd-3-clause | -4,868,746,838,007,228,000 | 37.548387 | 118 | 0.592469 | false |
ternaris/marv-robotics | code/marv/marv/app/__init__.py | 1 | 4497 | # Copyright 2016 - 2021 Ternaris.
# SPDX-License-Identifier: AGPL-3.0-only
import asyncio
import base64
import os
from collections import namedtuple
from logging import getLogger
from pathlib import Path
from aiohttp import web
from pkg_resources import resource_filename
from marv.collection import cached_property
from marv_api.utils import find_obj
from marv_webapi.api import api
from marv_webapi.tooling import Webapi, auth_middleware, safejoin
DOCS = Path(resource_filename('marv.app', 'docs'))
log = getLogger(__name__)
async def site_load_for_web(aioapp):
aioapp['site'].load_for_web()
async def destroy_site(aioapp):
await aioapp['site'].destroy()
class App():
STARTUP_FNS = (
site_load_for_web,
)
SHUTDOWN_FNS = (
destroy_site,
)
CACHE = {'Cache-Control': 'max-age=604800'}
NOCACHE = {'Cache-Control': 'no-cache'}
def __init__(self, site, app_root='', middlewares=None):
self.aioapp = web.Application(middlewares=[*(middlewares or []), auth_middleware])
self.aioapp['app_root'] = app_root.rstrip('/')
self.aioapp['config'] = {
'SECRET_KEY': site.config.marv.sessionkey_file.read_text(),
}
self.aioapp['debug'] = False
self.aioapp['site'] = site
for func in self.STARTUP_FNS:
self.aioapp.on_startup.append(func)
for func in self.SHUTDOWN_FNS:
self.aioapp.on_shutdown.append(func)
self.api = Webapi()
self.api.endpoints.extend(api.endpoints)
self.initialize_routes()
@cached_property
def index_html(self):
path = self.aioapp['site'].config.marv.staticdir / 'index.html'
index_html = path.read_text().replace('MARV_APP_ROOT', self.aioapp['app_root'] or '')
frontenddir = self.aioapp['site'].config.marv.frontenddir
for ext in ('css', 'js'):
try:
data = base64.b64encode((frontenddir / f'custom.{ext}').read_bytes()).decode()
except IOError:
pass
else:
placeholder = f'<!--custom.{ext}-->'
assert placeholder in index_html
script = f'<script src="data:text/javascript;base64,{data}"></script>' \
if ext == 'js' else \
f'<link rel="stylesheet" href="data:text/css;base64,{data}" />'
index_html = index_html.replace(placeholder, script, 1)
return index_html
def initialize_routes(self):
aggressive_caching = bool(os.environ.get('MARV_EXPERIMENTAL_AGGRESSIVE_CACHING'))
customdir = self.aioapp['site'].config.marv.frontenddir / 'custom'
staticdir = self.aioapp['site'].config.marv.staticdir
# decorator for non api endpoint routes
@self.api.endpoint('/custom/{path:.*}', methods=['GET'], allow_anon=True)
async def custom(request): # pylint: disable=unused-variable
path = request.match_info['path']
fullpath = safejoin(customdir, path)
if not fullpath.is_file():
raise web.HTTPNotFound
return web.FileResponse(fullpath, headers=self.NOCACHE)
@self.api.endpoint(r'/docs{_:/?}{path:((?<=/).*)?}', methods=['GET'], allow_anon=True)
async def docs(request): # pylint: disable=unused-variable
path = request.match_info['path'] or 'index.html'
return web.FileResponse(safejoin(DOCS, path), headers={'Cache-Control': 'no-cache'})
@self.api.endpoint('/{path:.*}', methods=['GET'], allow_anon=True)
async def assets(request): # pylint: disable=unused-variable
path = request.match_info['path'] or 'index.html'
if path == 'index.html':
return web.Response(text=self.index_html, headers={
'Content-Type': 'text/html',
**self.NOCACHE,
})
fullpath = safejoin(staticdir, path)
if not fullpath.is_file():
raise web.HTTPNotFound
headers = (self.CACHE if aggressive_caching and fullpath.suffix == '.svg' else
self.NOCACHE)
return web.FileResponse(fullpath, headers=headers)
for ep in self.api.endpoints:
name = ep.name
path = f'{self.aioapp["app_root"]}{ep.url_rule}'
for method in ep.methods:
self.aioapp.add_routes([web.route(method, path, ep, name=name)])
| agpl-3.0 | 2,227,634,308,673,547,800 | 35.560976 | 96 | 0.594619 | false |
rishirajsurti/BuildingMachineLearningSystemsWithPython | ch01/analyze_webstats.py | 23 | 5113 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import os
from utils import DATA_DIR, CHART_DIR
import scipy as sp
import matplotlib.pyplot as plt
sp.random.seed(3) # to reproduce the data later on
data = sp.genfromtxt(os.path.join(DATA_DIR, "web_traffic.tsv"), delimiter="\t")
print(data[:10])
print(data.shape)
# all examples will have three classes in this file
colors = ['g', 'k', 'b', 'm', 'r']
linestyles = ['-', '-.', '--', ':', '-']
x = data[:, 0]
y = data[:, 1]
print("Number of invalid entries:", sp.sum(sp.isnan(y)))
x = x[~sp.isnan(y)]
y = y[~sp.isnan(y)]
# plot input data
def plot_models(x, y, models, fname, mx=None, ymax=None, xmin=None):
plt.figure(num=None, figsize=(8, 6))
plt.clf()
plt.scatter(x, y, s=10)
plt.title("Web traffic over the last month")
plt.xlabel("Time")
plt.ylabel("Hits/hour")
plt.xticks(
[w * 7 * 24 for w in range(10)], ['week %i' % w for w in range(10)])
if models:
if mx is None:
mx = sp.linspace(0, x[-1], 1000)
for model, style, color in zip(models, linestyles, colors):
# print "Model:",model
# print "Coeffs:",model.coeffs
plt.plot(mx, model(mx), linestyle=style, linewidth=2, c=color)
plt.legend(["d=%i" % m.order for m in models], loc="upper left")
plt.autoscale(tight=True)
plt.ylim(ymin=0)
if ymax:
plt.ylim(ymax=ymax)
if xmin:
plt.xlim(xmin=xmin)
plt.grid(True, linestyle='-', color='0.75')
plt.savefig(fname)
# first look at the data
plot_models(x, y, None, os.path.join(CHART_DIR, "1400_01_01.png"))
# create and plot models
fp1, res1, rank1, sv1, rcond1 = sp.polyfit(x, y, 1, full=True)
print("Model parameters of fp1: %s" % fp1)
print("Error of the model of fp1:", res1)
f1 = sp.poly1d(fp1)
fp2, res2, rank2, sv2, rcond2 = sp.polyfit(x, y, 2, full=True)
print("Model parameters of fp2: %s" % fp2)
print("Error of the model of fp2:", res2)
f2 = sp.poly1d(fp2)
f3 = sp.poly1d(sp.polyfit(x, y, 3))
f10 = sp.poly1d(sp.polyfit(x, y, 10))
f100 = sp.poly1d(sp.polyfit(x, y, 100))
plot_models(x, y, [f1], os.path.join(CHART_DIR, "1400_01_02.png"))
plot_models(x, y, [f1, f2], os.path.join(CHART_DIR, "1400_01_03.png"))
plot_models(
x, y, [f1, f2, f3, f10, f100], os.path.join(CHART_DIR, "1400_01_04.png"))
# fit and plot a model using the knowledge about inflection point
inflection = 3.5 * 7 * 24
xa = x[:inflection]
ya = y[:inflection]
xb = x[inflection:]
yb = y[inflection:]
fa = sp.poly1d(sp.polyfit(xa, ya, 1))
fb = sp.poly1d(sp.polyfit(xb, yb, 1))
plot_models(x, y, [fa, fb], os.path.join(CHART_DIR, "1400_01_05.png"))
def error(f, x, y):
return sp.sum((f(x) - y) ** 2)
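# Worked example (editor's note, made-up points): for f(x) = 2x and the points
# (1, 2), (2, 5) the error is (2-2)**2 + (4-5)**2 = 1, i.e. the sum of squared
# residuals used below to compare the polynomial fits.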
print("Errors for the complete data set:")
for f in [f1, f2, f3, f10, f100]:
print("Error d=%i: %f" % (f.order, error(f, x, y)))
print("Errors for only the time after inflection point")
for f in [f1, f2, f3, f10, f100]:
print("Error d=%i: %f" % (f.order, error(f, xb, yb)))
print("Error inflection=%f" % (error(fa, xa, ya) + error(fb, xb, yb)))
# extrapolating into the future
plot_models(
x, y, [f1, f2, f3, f10, f100],
os.path.join(CHART_DIR, "1400_01_06.png"),
mx=sp.linspace(0 * 7 * 24, 6 * 7 * 24, 100),
ymax=10000, xmin=0 * 7 * 24)
print("Trained only on data after inflection point")
fb1 = fb
fb2 = sp.poly1d(sp.polyfit(xb, yb, 2))
fb3 = sp.poly1d(sp.polyfit(xb, yb, 3))
fb10 = sp.poly1d(sp.polyfit(xb, yb, 10))
fb100 = sp.poly1d(sp.polyfit(xb, yb, 100))
print("Errors for only the time after inflection point")
for f in [fb1, fb2, fb3, fb10, fb100]:
print("Error d=%i: %f" % (f.order, error(f, xb, yb)))
plot_models(
x, y, [fb1, fb2, fb3, fb10, fb100],
os.path.join(CHART_DIR, "1400_01_07.png"),
mx=sp.linspace(0 * 7 * 24, 6 * 7 * 24, 100),
ymax=10000, xmin=0 * 7 * 24)
# separating training from testing data
frac = 0.3
split_idx = int(frac * len(xb))
shuffled = sp.random.permutation(list(range(len(xb))))
test = sorted(shuffled[:split_idx])
train = sorted(shuffled[split_idx:])
fbt1 = sp.poly1d(sp.polyfit(xb[train], yb[train], 1))
fbt2 = sp.poly1d(sp.polyfit(xb[train], yb[train], 2))
print("fbt2(x)= \n%s"%fbt2)
print("fbt2(x)-100,000= \n%s"%(fbt2-100000))
fbt3 = sp.poly1d(sp.polyfit(xb[train], yb[train], 3))
fbt10 = sp.poly1d(sp.polyfit(xb[train], yb[train], 10))
fbt100 = sp.poly1d(sp.polyfit(xb[train], yb[train], 100))
print("Test errors for only the time after inflection point")
for f in [fbt1, fbt2, fbt3, fbt10, fbt100]:
print("Error d=%i: %f" % (f.order, error(f, xb[test], yb[test])))
plot_models(
x, y, [fbt1, fbt2, fbt3, fbt10, fbt100],
os.path.join(CHART_DIR, "1400_01_08.png"),
mx=sp.linspace(0 * 7 * 24, 6 * 7 * 24, 100),
ymax=10000, xmin=0 * 7 * 24)
from scipy.optimize import fsolve
print(fbt2)
print(fbt2 - 100000)
reached_max = fsolve(fbt2 - 100000, x0=800) / (7 * 24)
print("100,000 hits/hour expected at week %f" % reached_max[0])
| mit | 2,744,561,174,527,055,000 | 30.757764 | 79 | 0.629963 | false |
sklearn-theano/sklearn-theano | sklearn_theano/externals/google/protobuf/pyext/cpp_message.py | 39 | 2828 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Protocol message implementation hooks for C++ implementation.
Contains helper functions used to create protocol message classes from
Descriptor objects at runtime backed by the protocol buffer C++ API.
"""
__author__ = 'tibell@google.com (Johan Tibell)'
from google.protobuf.pyext import _message
class GeneratedProtocolMessageType(_message.MessageMeta):
"""Metaclass for protocol message classes created at runtime from Descriptors.
The protocol compiler currently uses this metaclass to create protocol
message classes at runtime. Clients can also manually create their own
classes at runtime, as in this example:
mydescriptor = Descriptor(.....)
class MyProtoClass(Message):
__metaclass__ = GeneratedProtocolMessageType
DESCRIPTOR = mydescriptor
myproto_instance = MyProtoClass()
myproto.foo_field = 23
...
The above example will not work for nested types. If you wish to include them,
use reflection.MakeClass() instead of manually instantiating the class in
order to create the appropriate class structure.
"""
# Must be consistent with the protocol-compiler code in
# proto2/compiler/internal/generator.*.
_DESCRIPTOR_KEY = 'DESCRIPTOR'
| bsd-3-clause | 1,104,740,423,133,969,700 | 42.507692 | 80 | 0.775813 | false |
Tiggels/opencog | tests/python/blending_test/new_atom_maker_test.py | 26 | 4699 | __author__ = 'DongMin Kim'
from opencog.atomspace import *
from test_conceptual_blending_base import TestConceptualBlendingBase
# Only run the unit tests if the required dependencies have been installed
# (see: https://github.com/opencog/opencog/issues/337)
try:
__import__("nose.tools")
except ImportError:
import unittest
raise unittest.SkipTest(
"ImportError exception: " +
"Can't find Nose. " +
"make sure the required dependencies are installed."
)
else:
# noinspection PyPackageRequirements
from nose.tools import *
try:
__import__("opencog.scheme_wrapper")
except ImportError:
import unittest
raise unittest.SkipTest(
"ImportError exception: " +
"Can't find Scheme wrapper for Python. " +
"make sure the required dependencies are installed."
)
else:
from opencog.scheme_wrapper import *
try:
__import__("blending.blend")
except ImportError:
import unittest
raise unittest.SkipTest(
"ImportError exception: " +
"Can't find Python Conceptual Blender. " +
"make sure the required dependencies are installed."
)
else:
from blending.blend import ConceptualBlending
try:
from blending.util.py_cog_execute import PyCogExecute
PyCogExecute().load_scheme()
except (ImportError, RuntimeError):
import unittest
raise unittest.SkipTest(
"Can't load Scheme." +
"make sure the you installed atomspace to /usr/local/share/opencog."
)
# noinspection PyArgumentList, PyTypeChecker
class TestNewBlendAtomMaker(TestConceptualBlendingBase):
"""
2.3 New Blend Atom Maker tests.
"""
"""
2.3.1. MakeSimple tests.
"""
__test__ = True
def __default_make_simple(self):
self.a.add_link(
types.InheritanceLink,
[
self.a.add_node(types.ConceptNode, "my-config"),
self.a.add_node(types.ConceptNode, "default-config")
]
)
self.a.add_link(
types.ExecutionLink,
[
self.a.add_node(types.SchemaNode, "BLEND:atoms-chooser"),
self.a.add_node(types.ConceptNode, "my-config"),
self.a.add_node(types.ConceptNode, "ChooseAll")
]
)
self.a.add_link(
types.ExecutionLink,
[
self.a.add_node(types.SchemaNode, "BLEND:blending-decider"),
self.a.add_node(types.ConceptNode, "my-config"),
self.a.add_node(types.ConceptNode, "DecideBestSTI")
]
)
self.a.add_link(
types.ExecutionLink,
[
self.a.add_node(types.SchemaNode, "BLEND:new-blend-atom-maker"),
self.a.add_node(types.ConceptNode, "my-config"),
self.a.add_node(types.ConceptNode, "MakeSimple")
]
)
def test_make_simple(self):
self.__default_make_simple()
make_atom_prefix = "["
make_atom_separator = "*"
make_atom_postfix = "]"
# Test blender makes new node with custom name.
make_atom_prefix_link = self.a.add_link(
types.ExecutionLink,
[
self.a.add_node(types.SchemaNode, "BLEND:make-atom-prefix"),
self.a.add_node(types.ConceptNode, "my-config"),
self.a.add_node(types.ConceptNode, make_atom_prefix)
]
)
make_atom_separator_link = self.a.add_link(
types.ExecutionLink,
[
self.a.add_node(types.SchemaNode, "BLEND:make-atom-separator"),
self.a.add_node(types.ConceptNode, "my-config"),
self.a.add_node(types.ConceptNode, make_atom_separator)
]
)
make_atom_postfix_link = self.a.add_link(
types.ExecutionLink,
[
self.a.add_node(types.SchemaNode, "BLEND:make-atom-postfix"),
self.a.add_node(types.ConceptNode, "my-config"),
self.a.add_node(types.ConceptNode, make_atom_postfix)
]
)
result = self.blender.run(
self.a.get_atoms_by_type(types.Node),
self.a.add_node(types.ConceptNode, "my-config")
)
self.a.remove(make_atom_prefix_link)
self.a.remove(make_atom_separator_link)
self.a.remove(make_atom_postfix_link)
# Test blender makes new blend node correctly.
blended_node = result[0]
assert_in(
make_atom_prefix +
"car" + make_atom_separator + "man" +
make_atom_postfix,
str(blended_node.name)
)
| agpl-3.0 | 7,538,982,936,700,073,000 | 30.326667 | 80 | 0.578634 | false |
alexander-rakhlin/CNN-for-Sentence-Classification-in-Keras | data_helpers.py | 1 | 4232 | import numpy as np
import re
import itertools
from collections import Counter
"""
Original taken from https://github.com/dennybritz/cnn-text-classification-tf
"""
def clean_str(string):
"""
Tokenization/string cleaning for all datasets except for SST.
Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
"""
string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
string = re.sub(r"\'s", " \'s", string)
string = re.sub(r"\'ve", " \'ve", string)
string = re.sub(r"n\'t", " n\'t", string)
string = re.sub(r"\'re", " \'re", string)
string = re.sub(r"\'d", " \'d", string)
string = re.sub(r"\'ll", " \'ll", string)
string = re.sub(r",", " , ", string)
string = re.sub(r"!", " ! ", string)
string = re.sub(r"\(", " \( ", string)
string = re.sub(r"\)", " \) ", string)
string = re.sub(r"\?", " \? ", string)
string = re.sub(r"\s{2,}", " ", string)
return string.strip().lower()
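# Example (editor's sketch): clean_str("Isn't it great?") -> "is n't it great ?";
# contractions are split into separate tokens and punctuation is padded with
# spaces before lowercasing.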
def load_data_and_labels():
"""
Loads MR polarity data from files, splits the data into words and generates labels.
Returns split sentences and labels.
"""
# Load data from files
positive_examples = list(open("./data/rt-polarity.pos").readlines())
positive_examples = [s.strip() for s in positive_examples]
negative_examples = list(open("./data/rt-polarity.neg").readlines())
negative_examples = [s.strip() for s in negative_examples]
# Split by words
x_text = positive_examples + negative_examples
x_text = [clean_str(sent) for sent in x_text]
x_text = [s.split(" ") for s in x_text]
# Generate labels
positive_labels = [[0, 1] for _ in positive_examples]
negative_labels = [[1, 0] for _ in negative_examples]
y = np.concatenate([positive_labels, negative_labels], 0)
return [x_text, y]
def pad_sentences(sentences, padding_word="<PAD/>"):
"""
Pads all sentences to the same length. The length is defined by the longest sentence.
Returns padded sentences.
"""
sequence_length = max(len(x) for x in sentences)
padded_sentences = []
for i in range(len(sentences)):
sentence = sentences[i]
num_padding = sequence_length - len(sentence)
new_sentence = sentence + [padding_word] * num_padding
padded_sentences.append(new_sentence)
return padded_sentences
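# Example (editor's sketch): pad_sentences([['a', 'b'], ['c']]) returns
# [['a', 'b'], ['c', '<PAD/>']] -- every sentence is right-padded to the
# length of the longest one.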
def build_vocab(sentences):
"""
Builds a vocabulary mapping from word to index based on the sentences.
Returns vocabulary mapping and inverse vocabulary mapping.
"""
# Build vocabulary
word_counts = Counter(itertools.chain(*sentences))
# Mapping from index to word
vocabulary_inv = [x[0] for x in word_counts.most_common()]
# Mapping from word to index
vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}
return [vocabulary, vocabulary_inv]
def build_input_data(sentences, labels, vocabulary):
"""
    Maps sentences and labels to vectors based on a vocabulary.
"""
x = np.array([[vocabulary[word] for word in sentence] for sentence in sentences])
y = np.array(labels)
return [x, y]
def load_data():
"""
    Loads and preprocesses data for the MR dataset.
Returns input vectors, labels, vocabulary, and inverse vocabulary.
"""
# Load and preprocess data
sentences, labels = load_data_and_labels()
sentences_padded = pad_sentences(sentences)
vocabulary, vocabulary_inv = build_vocab(sentences_padded)
x, y = build_input_data(sentences_padded, labels, vocabulary)
return [x, y, vocabulary, vocabulary_inv]
def batch_iter(data, batch_size, num_epochs):
"""
Generates a batch iterator for a dataset.
"""
data = np.array(data)
data_size = len(data)
    # Ceiling division so that an exact multiple of batch_size does not yield
    # a trailing empty batch.
    num_batches_per_epoch = int((data_size - 1) / batch_size) + 1
for epoch in range(num_epochs):
# Shuffle the data at each epoch
shuffle_indices = np.random.permutation(np.arange(data_size))
shuffled_data = data[shuffle_indices]
for batch_num in range(num_batches_per_epoch):
start_index = batch_num * batch_size
end_index = min((batch_num + 1) * batch_size, data_size)
yield shuffled_data[start_index:end_index]
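# A minimal usage sketch of this module (illustrative only; it assumes the
# ./data/rt-polarity.pos and ./data/rt-polarity.neg files expected by
# load_data_and_labels() are present):
#
#   x, y, vocabulary, vocabulary_inv = load_data()
#   for batch in batch_iter(list(zip(x, y)), batch_size=64, num_epochs=1):
#       x_batch, y_batch = zip(*batch)
#       # hand x_batch / y_batch to the training step here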
| mit | -5,858,940,968,319,273,000 | 34.864407 | 91 | 0.639178 | false |
twatteyne/dustlink_academy | SmartMeshSDK/HartMoteConnector/HartMoteConnectorInternal.py | 4 | 3250 |
from SmartMeshSDK.ApiDefinition import HartMoteDefinition
from SmartMeshSDK.SerialConnector import SerialConnector
class HartMoteConnectorInternal(SerialConnector.SerialConnector):
'''
\ingroup ApiConnector
\brief Internal class for HART mote connector, over Serial.
'''
def __init__(self, maxQSize=100):
api_def = HartMoteDefinition.HartMoteDefinition()
SerialConnector.SerialConnector.__init__(self,api_def, maxQSize)
#======================== TX ==============================================
def _buildTxHeader(self,cmdId,isResponse,serializedFields):
txHeader = []
if len(serializedFields)>0 and isinstance(serializedFields[0],list):
flagList = serializedFields.pop(0)
else:
flagList = []
txHeader.append(cmdId) # command ID
txHeader.append(len(serializedFields)) # length
txHeader.append(self._setFlags(isResponse,flagList))# flags
if isResponse:
txHeader.append(0) # RC (always 0)
return txHeader
def _setFlags(self,isResponse,flagList):
self.paramLock.acquire()
flags = 0
if isResponse:
flags |= 0x01
flags |= self.RxPacketId<<1
else:
self._incrementTxPacketId()
flags |= self.TxPacketId<<1
self.paramLock.release()
for flagPosition in flagList:
flags |= 1<<flagPosition
return flags
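    # Flag byte layout used by _setFlags() above and _parseRxHeader() below
    # (derived from the bit operations, for reference):
    #   bit 0      : 1 = response, 0 = request
    #   bit 1      : packet ID (RxPacketId echoed for responses, TxPacketId
    #                for requests)
    #   bit 2      : "ignore packet ID" flag, checked on receive
    #   other bits : raised individually via the optional flagList argument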
def _ackIfNeeded(self,cmdId,isResponse):
if isResponse:
# I received a response
return False
else:
# I received a request
self._sendInternal(cmdId,True,[])
return True
#======================== RX ==============================================
def _parseRxHeader(self,frameRx):
cmdId = frameRx[0]
length = frameRx[1]
flags = frameRx[2]
payload = frameRx[3:]
# parse flag byte
isResponse = ((flags&0x01)>>0==1)
packetId = (flags&0x02)>>1
self.ignorePktId = ((flags&0x04)>>2==1)
return cmdId,length,isResponse,packetId,payload
def isValidPacketId(self,cmdId,isResponse,packetId):
result = True
isRepeatId = False
updateRxPacketId = True
if self.RxPacketId==None:
result = True
else:
result = not (
isResponse==False and
self.ignorePktId==False and
packetId==self.RxPacketId
)
return (result, isRepeatId, updateRxPacketId)
#======================== packetId ========================================
def _incrementTxPacketId(self):
if self.TxPacketId==0:
self.TxPacketId=1
else:
self.TxPacketId=0 | bsd-3-clause | 8,547,815,820,284,076,000 | 31.183673 | 79 | 0.475385 | false |
RapidApplicationDevelopment/tensorflow | tensorflow/python/framework/tensor_util_test.py | 6 | 24676 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for tensor_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_state_ops
class TensorUtilTest(tf.test.TestCase):
def testFloat(self):
value = 10.0
t = tensor_util.make_tensor_proto(value)
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape {}
float_val: %.1f
""" % value, t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float32, a.dtype)
self.assertAllClose(np.array(value, dtype=np.float32), a)
def testFloatN(self):
t = tensor_util.make_tensor_proto([10.0, 20.0, 30.0])
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float32, a.dtype)
self.assertAllClose(np.array([10.0, 20.0, 30.0], dtype=np.float32), a)
def testFloatTyped(self):
t = tensor_util.make_tensor_proto([10.0, 20.0, 30.0], dtype=tf.float32)
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float32, a.dtype)
self.assertAllClose(np.array([10.0, 20.0, 30.0], dtype=np.float32), a)
def testFloatTypeCoerce(self):
t = tensor_util.make_tensor_proto([10, 20, 30], dtype=tf.float32)
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float32, a.dtype)
self.assertAllClose(np.array([10.0, 20.0, 30.0], dtype=np.float32), a)
def testFloatTypeCoerceNdarray(self):
arr = np.asarray([10, 20, 30], dtype="int")
t = tensor_util.make_tensor_proto(arr, dtype=tf.float32)
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float32, a.dtype)
self.assertAllClose(np.array([10.0, 20.0, 30.0], dtype=np.float32), a)
def testFloatSizes(self):
t = tensor_util.make_tensor_proto([10.0, 20.0, 30.0], shape=[1, 3])
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 1 } dim { size: 3 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float32, a.dtype)
self.assertAllClose(np.array([[10.0, 20.0, 30.0]], dtype=np.float32), a)
def testFloatSizes2(self):
t = tensor_util.make_tensor_proto([10.0, 20.0, 30.0], shape=[3, 1])
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } dim { size: 1 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float32, a.dtype)
self.assertAllClose(np.array([[10.0], [20.0], [30.0]], dtype=np.float32),
a)
def testFloatSizesLessValues(self):
t = tensor_util.make_tensor_proto(10.0, shape=[1, 3])
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 1 } dim { size: 3 } }
float_val: 10.0
""", t)
# No conversion to Ndarray for this one: not enough values.
def testFloatNpArrayFloat64(self):
t = tensor_util.make_tensor_proto(
np.array([[10.0, 20.0, 30.0]], dtype=np.float64))
self.assertProtoEquals("""
dtype: DT_DOUBLE
tensor_shape { dim { size: 1 } dim { size: 3 } }
tensor_content: "\000\000\000\000\000\000$@\000\000\000\000\000\0004@\000\000\000\000\000\000>@"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float64, a.dtype)
self.assertAllClose(np.array([[10.0, 20.0, 30.0]], dtype=np.float64),
tensor_util.MakeNdarray(t))
def testFloatTypesWithImplicitRepeat(self):
for dtype, nptype in [
(tf.float32, np.float32), (tf.float64, np.float64)]:
t = tensor_util.make_tensor_proto([10.0], shape=[3, 4], dtype=dtype)
a = tensor_util.MakeNdarray(t)
self.assertAllClose(np.array([[10.0, 10.0, 10.0, 10.0],
[10.0, 10.0, 10.0, 10.0],
[10.0, 10.0, 10.0, 10.0]], dtype=nptype), a)
def testHalf(self):
t = tensor_util.make_tensor_proto(np.array([10.0, 20.0], dtype=np.float16))
self.assertProtoEquals("""
dtype: DT_HALF
tensor_shape {
dim {
size: 2
}
}
half_val: 18688
half_val: 19712
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.float16, a.dtype)
self.assertAllClose(np.array([10.0, 20.0], dtype=np.float16), a)
def testInt(self):
t = tensor_util.make_tensor_proto(10)
self.assertProtoEquals("""
dtype: DT_INT32
tensor_shape {}
int_val: 10
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.int32, a.dtype)
self.assertAllClose(np.array(10, dtype=np.int32), a)
def testLargeInt(self):
value = np.iinfo(np.int64).max
t = tensor_util.make_tensor_proto(value)
self.assertProtoEquals("""
dtype: DT_INT64
tensor_shape {}
int64_val: %d
""" % value, t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.int64, a.dtype)
self.assertAllClose(np.array(value, dtype=np.int64), a)
def testLargeNegativeInt(self):
# We don't use the min np.int64 value here
# because it breaks np.abs().
#
# np.iinfo(np.int64).min = -9223372036854775808
# np.iinfo(np.int64).max = 9223372036854775807
# np.abs(-9223372036854775808) = -9223372036854775808
value = np.iinfo(np.int64).min + 1
t = tensor_util.make_tensor_proto(value)
self.assertProtoEquals("""
dtype: DT_INT64
tensor_shape {}
int64_val: %d
""" % value, t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.int64, a.dtype)
self.assertAllClose(np.array(value, dtype=np.int64), a)
def testIntNDefaultType(self):
t = tensor_util.make_tensor_proto([10, 20, 30, 40], shape=[2, 2])
self.assertProtoEquals("""
dtype: DT_INT32
tensor_shape { dim { size: 2 } dim { size: 2 } }
tensor_content: "\\n\000\000\000\024\000\000\000\036\000\000\000(\000\000\000"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.int32, a.dtype)
self.assertAllClose(np.array([[10, 20], [30, 40]], dtype=np.int32), a)
def testIntTypes(self):
for dtype, nptype in [
(tf.int32, np.int32),
(tf.uint8, np.uint8),
(tf.uint16, np.uint16),
(tf.int16, np.int16),
(tf.int8, np.int8)]:
# Test with array.
t = tensor_util.make_tensor_proto([10, 20, 30], dtype=dtype)
self.assertEquals(dtype, t.dtype)
self.assertProtoEquals("dim { size: 3 }", t.tensor_shape)
a = tensor_util.MakeNdarray(t)
self.assertEquals(nptype, a.dtype)
self.assertAllClose(np.array([10, 20, 30], dtype=nptype), a)
# Test with ndarray.
t = tensor_util.make_tensor_proto(np.array([10, 20, 30], dtype=nptype))
self.assertEquals(dtype, t.dtype)
self.assertProtoEquals("dim { size: 3 }", t.tensor_shape)
a = tensor_util.MakeNdarray(t)
self.assertEquals(nptype, a.dtype)
self.assertAllClose(np.array([10, 20, 30], dtype=nptype), a)
def testIntTypesWithImplicitRepeat(self):
for dtype, nptype in [
(tf.int64, np.int64),
(tf.int32, np.int32),
(tf.uint8, np.uint8),
(tf.uint16, np.uint16),
(tf.int16, np.int16),
(tf.int8, np.int8)]:
t = tensor_util.make_tensor_proto([10], shape=[3, 4], dtype=dtype)
a = tensor_util.MakeNdarray(t)
self.assertAllEqual(np.array([[10, 10, 10, 10],
[10, 10, 10, 10],
[10, 10, 10, 10]], dtype=nptype), a)
def testLong(self):
t = tensor_util.make_tensor_proto(10, dtype=tf.int64)
self.assertProtoEquals("""
dtype: DT_INT64
tensor_shape {}
int64_val: 10
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.int64, a.dtype)
self.assertAllClose(np.array(10, dtype=np.int64), a)
def testLongN(self):
t = tensor_util.make_tensor_proto([10, 20, 30], shape=[1, 3],
dtype=tf.int64)
self.assertProtoEquals("""
dtype: DT_INT64
tensor_shape { dim { size: 1 } dim { size: 3 } }
tensor_content: "\\n\000\000\000\000\000\000\000\024\000\000\000\000\000\000\000\036\000\000\000\000\000\000\000"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.int64, a.dtype)
self.assertAllClose(np.array([[10, 20, 30]], dtype=np.int64), a)
def testLongNpArray(self):
t = tensor_util.make_tensor_proto(np.array([10, 20, 30]))
self.assertProtoEquals("""
dtype: DT_INT64
tensor_shape { dim { size: 3 } }
tensor_content: "\\n\000\000\000\000\000\000\000\024\000\000\000\000\000\000\000\036\000\000\000\000\000\000\000"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.int64, a.dtype)
self.assertAllClose(np.array([10, 20, 30], dtype=np.int64), a)
def testQuantizedTypes(self):
# Test with array.
data = [(21,), (22,), (23,)]
t = tensor_util.make_tensor_proto(data, dtype=tf.qint32)
self.assertProtoEquals("""
dtype: DT_QINT32
tensor_shape { dim { size: 3 } }
tensor_content: "\025\000\000\000\026\000\000\000\027\000\000\000"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(tf.qint32.as_numpy_dtype, a.dtype)
self.assertAllEqual(np.array(data, dtype=a.dtype), a)
t = tensor_util.make_tensor_proto(data, dtype=tf.quint8)
self.assertProtoEquals("""
dtype: DT_QUINT8
tensor_shape { dim { size: 3 } }
tensor_content: "\025\026\027"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(tf.quint8.as_numpy_dtype, a.dtype)
self.assertAllEqual(np.array(data, dtype=a.dtype), a)
t = tensor_util.make_tensor_proto(data, dtype=tf.qint8)
self.assertProtoEquals("""
dtype: DT_QINT8
tensor_shape { dim { size: 3 } }
tensor_content: "\025\026\027"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(tf.qint8.as_numpy_dtype, a.dtype)
self.assertAllEqual(np.array(data, dtype=a.dtype), a)
t = tensor_util.make_tensor_proto(data, dtype=tf.quint16)
self.assertProtoEquals("""
dtype: DT_QUINT16
tensor_shape { dim { size: 3 } }
tensor_content: "\025\000\026\000\027\000"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(tf.quint16.as_numpy_dtype, a.dtype)
self.assertAllEqual(np.array(data, dtype=a.dtype), a)
t = tensor_util.make_tensor_proto(data, dtype=tf.qint16)
self.assertProtoEquals("""
dtype: DT_QINT16
tensor_shape { dim { size: 3 } }
tensor_content: "\025\000\026\000\027\000"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(tf.qint16.as_numpy_dtype, a.dtype)
self.assertAllEqual(np.array(data, dtype=a.dtype), a)
def testString(self):
t = tensor_util.make_tensor_proto("foo")
self.assertProtoEquals("""
dtype: DT_STRING
tensor_shape {}
string_val: "foo"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.object, a.dtype)
self.assertEquals([b"foo"], a)
def testStringWithImplicitRepeat(self):
t = tensor_util.make_tensor_proto("f", shape=[3, 4])
a = tensor_util.MakeNdarray(t)
self.assertAllEqual(np.array([[b"f"] * 4] * 3, dtype=np.object), a)
def testStringN(self):
t = tensor_util.make_tensor_proto([b"foo", b"bar", b"baz"], shape=[1, 3])
self.assertProtoEquals("""
dtype: DT_STRING
tensor_shape { dim { size: 1 } dim { size: 3 } }
string_val: "foo"
string_val: "bar"
string_val: "baz"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.object, a.dtype)
self.assertAllEqual(np.array([[b"foo", b"bar", b"baz"]]), a)
def testStringNpArray(self):
t = tensor_util.make_tensor_proto(np.array([[b"a", b"ab"],
[b"abc", b"abcd"]]))
self.assertProtoEquals("""
dtype: DT_STRING
tensor_shape { dim { size: 2 } dim { size: 2 } }
string_val: "a"
string_val: "ab"
string_val: "abc"
string_val: "abcd"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.object, a.dtype)
self.assertAllEqual(np.array([[b"a", b"ab"], [b"abc", b"abcd"]]), a)
def testStringTuple(self):
t = tensor_util.make_tensor_proto((b"a", b"ab", b"abc", b"abcd"))
self.assertProtoEquals("""
dtype: DT_STRING
tensor_shape { dim { size: 4 } }
string_val: "a"
string_val: "ab"
string_val: "abc"
string_val: "abcd"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.object, a.dtype)
self.assertAllEqual(np.array((b"a", b"ab", b"abc", b"abcd")), a)
def testStringNestedTuple(self):
t = tensor_util.make_tensor_proto(((b"a", b"ab"), (b"abc", b"abcd")))
self.assertProtoEquals("""
dtype: DT_STRING
tensor_shape { dim { size: 2 } dim { size: 2 } }
string_val: "a"
string_val: "ab"
string_val: "abc"
string_val: "abcd"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.object, a.dtype)
self.assertAllEqual(np.array(((b"a", b"ab"), (b"abc", b"abcd"))), a)
def testComplex64(self):
t = tensor_util.make_tensor_proto((1+2j), dtype=tf.complex64)
self.assertProtoEquals("""
dtype: DT_COMPLEX64
tensor_shape {}
scomplex_val: 1
scomplex_val: 2
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.complex64, a.dtype)
self.assertAllEqual(np.array(1 + 2j), a)
def testComplex128(self):
t = tensor_util.make_tensor_proto((1+2j), dtype=tf.complex128)
self.assertProtoEquals("""
dtype: DT_COMPLEX128
tensor_shape {}
dcomplex_val: 1
dcomplex_val: 2
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.complex128, a.dtype)
self.assertAllEqual(np.array(1 + 2j), a)
def testComplexWithImplicitRepeat(self):
for dtype, np_dtype in [(tf.complex64, np.complex64),
(tf.complex128, np.complex128)]:
t = tensor_util.make_tensor_proto((1+1j), shape=[3, 4],
dtype=dtype)
a = tensor_util.MakeNdarray(t)
self.assertAllClose(np.array([[(1+1j), (1+1j), (1+1j), (1+1j)],
[(1+1j), (1+1j), (1+1j), (1+1j)],
[(1+1j), (1+1j), (1+1j), (1+1j)]],
dtype=np_dtype), a)
def testComplex64N(self):
t = tensor_util.make_tensor_proto([(1+2j), (3+4j), (5+6j)], shape=[1, 3],
dtype=tf.complex64)
self.assertProtoEquals("""
dtype: DT_COMPLEX64
tensor_shape { dim { size: 1 } dim { size: 3 } }
scomplex_val: 1
scomplex_val: 2
scomplex_val: 3
scomplex_val: 4
scomplex_val: 5
scomplex_val: 6
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.complex64, a.dtype)
self.assertAllEqual(np.array([[(1+2j), (3+4j), (5+6j)]]), a)
def testComplex128N(self):
t = tensor_util.make_tensor_proto([(1+2j), (3+4j), (5+6j)], shape=[1, 3],
dtype=tf.complex128)
self.assertProtoEquals("""
dtype: DT_COMPLEX128
tensor_shape { dim { size: 1 } dim { size: 3 } }
dcomplex_val: 1
dcomplex_val: 2
dcomplex_val: 3
dcomplex_val: 4
dcomplex_val: 5
dcomplex_val: 6
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.complex128, a.dtype)
self.assertAllEqual(np.array([[(1+2j), (3+4j), (5+6j)]]), a)
def testComplex64NpArray(self):
t = tensor_util.make_tensor_proto(
np.array([[(1+2j), (3+4j)], [(5+6j), (7+8j)]]), dtype=tf.complex64)
# scomplex_val are real_0, imag_0, real_1, imag_1, ...
self.assertProtoEquals("""
dtype: DT_COMPLEX64
tensor_shape { dim { size: 2 } dim { size: 2 } }
scomplex_val: 1
scomplex_val: 2
scomplex_val: 3
scomplex_val: 4
scomplex_val: 5
scomplex_val: 6
scomplex_val: 7
scomplex_val: 8
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.complex64, a.dtype)
self.assertAllEqual(np.array([[(1+2j), (3+4j)], [(5+6j), (7+8j)]]), a)
def testComplex128NpArray(self):
t = tensor_util.make_tensor_proto(
np.array([[(1+2j), (3+4j)], [(5+6j), (7+8j)]]), dtype=tf.complex128)
    # dcomplex_val are real_0, imag_0, real_1, imag_1, ...
self.assertProtoEquals("""
dtype: DT_COMPLEX128
tensor_shape { dim { size: 2 } dim { size: 2 } }
dcomplex_val: 1
dcomplex_val: 2
dcomplex_val: 3
dcomplex_val: 4
dcomplex_val: 5
dcomplex_val: 6
dcomplex_val: 7
dcomplex_val: 8
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEquals(np.complex128, a.dtype)
self.assertAllEqual(np.array([[(1+2j), (3+4j)], [(5+6j), (7+8j)]]), a)
def testUnsupportedDTypes(self):
with self.assertRaises(TypeError):
tensor_util.make_tensor_proto(np.array([1]), 0)
with self.assertRaises(TypeError):
tensor_util.make_tensor_proto(3, dtype=tf.qint8)
with self.assertRaises(TypeError):
tensor_util.make_tensor_proto([3], dtype=tf.qint8)
def testTensorShapeVerification(self):
array = np.array([[1], [2]])
correct_shape = (2, 1)
incorrect_shape = (1, 2)
tensor_util.make_tensor_proto(array, shape=correct_shape,
verify_shape=True)
with self.assertRaises(TypeError):
tensor_util.make_tensor_proto(array, shape=incorrect_shape,
verify_shape=True)
def testShapeTooLarge(self):
with self.assertRaises(ValueError):
tensor_util.make_tensor_proto(np.array([1, 2]), shape=[1])
def testLowRankSupported(self):
t = tensor_util.make_tensor_proto(np.array(7))
self.assertProtoEquals("""
dtype: DT_INT64
tensor_shape {}
int64_val: 7
""", t)
def testShapeEquals(self):
t = tensor_util.make_tensor_proto([10, 20, 30, 40], shape=[2, 2])
self.assertTrue(tensor_util.ShapeEquals(t, [2, 2]))
self.assertTrue(tensor_util.ShapeEquals(t, (2, 2)))
self.assertTrue(tensor_util.ShapeEquals(
t, tensor_shape.as_shape([2, 2]).as_proto()))
self.assertFalse(tensor_util.ShapeEquals(t, [5, 3]))
self.assertFalse(tensor_util.ShapeEquals(t, [1, 4]))
self.assertFalse(tensor_util.ShapeEquals(t, [4]))
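# For reference, the round trip exercised throughout TensorUtilTest is, in
# outline (the values here are illustrative):
#
#   t = tensor_util.make_tensor_proto([10.0, 20.0], dtype=tf.float32)  # list/ndarray -> TensorProto
#   a = tensor_util.MakeNdarray(t)  # TensorProto -> np.array([10., 20.], dtype=np.float32)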
class ConstantValueTest(tf.test.TestCase):
def testConstant(self):
np_val = np.random.rand(3, 4, 7).astype(np.float32)
tf_val = tf.constant(np_val)
self.assertAllClose(np_val, tf.contrib.util.constant_value(tf_val))
np_val = np.random.rand(3, 0, 7).astype(np.float32)
tf_val = tf.constant(np_val)
self.assertAllClose(np_val, tf.contrib.util.constant_value(tf_val))
def testUnknown(self):
tf_val = gen_state_ops._variable(shape=[3, 4, 7], dtype=tf.float32,
name="tf_val", container="", shared_name="")
self.assertIs(None, tf.contrib.util.constant_value(tf_val))
def testShape(self):
np_val = np.array([1, 2, 3], dtype=np.int32)
tf_val = tf.shape(tf.constant(0.0, shape=[1, 2, 3]))
c_val = tf.contrib.util.constant_value(tf_val)
self.assertAllEqual(np_val, c_val)
self.assertEqual(np.int32, c_val.dtype)
def testSize(self):
tf_val = tf.size(tf.constant(0.0, shape=[1, 2, 3]))
c_val = tf.contrib.util.constant_value(tf_val)
self.assertEqual(6, c_val)
def testSizeOfScalar(self):
tf_val = tf.size(tf.constant(0.0))
c_val = tf.contrib.util.constant_value(tf_val)
self.assertEqual(1, c_val)
self.assertEqual(np.ndarray, type(c_val))
def testRank(self):
tf_val = tf.rank(tf.constant(0.0, shape=[1, 2, 3]))
c_val = tf.contrib.util.constant_value(tf_val)
self.assertEqual(np.ndarray, type(c_val))
self.assertEqual((), c_val.shape)
self.assertEqual(3, c_val)
# Repeat test using array_ops.rank_internal to avoid the optimization that
# happens in the rank function.
tf_val = array_ops.rank_internal(tf.constant(0.0, shape=[1, 2, 3]),
optimize=False)
c_val = tf.contrib.util.constant_value(tf_val)
self.assertEqual(np.ndarray, type(c_val))
self.assertEqual((), c_val.shape)
self.assertEqual(3, c_val)
self.assertEqual([3], c_val)
def testCast(self):
np_val = np.random.rand(3, 4, 7).astype(np.float32)
tf_val = tf.cast(tf.constant(np_val), tf.float64)
c_val = tf.contrib.util.constant_value(tf_val)
self.assertAllClose(np_val.astype(np.float64), c_val)
np_val = np.random.rand(3, 0, 7).astype(np.float32)
tf_val = tf.cast(tf.constant(np_val), tf.float64)
c_val = tf.contrib.util.constant_value(tf_val)
self.assertAllClose(np_val.astype(np.float64), c_val)
def testConcat(self):
np_val = np.random.rand(3, 4, 7).astype(np.float32)
tf_val = tf.concat(
0, [np_val[0:1, :, :], np_val[1:2, :, :], np_val[2:3, :, :]])
c_val = tf.contrib.util.constant_value(tf_val)
self.assertAllClose(np_val, c_val)
tf_val = tf.concat(
tf.placeholder(tf.int32),
[np_val[0, :, :], np_val[1, :, :], np_val[2, :, :]])
c_val = tf.contrib.util.constant_value(tf_val)
self.assertIs(None, c_val)
tf_val = tf.concat(
1,
[np_val[0, :, :], tf.placeholder(tf.float32),
np_val[2, :, :]])
c_val = tf.contrib.util.constant_value(tf_val)
self.assertIs(None, c_val)
def testPack(self):
inputs = [np.random.rand(4, 7) for _ in range(3)]
np_val = np.array(inputs)
tf_val = tf.stack(inputs)
c_val = tf.contrib.util.constant_value(tf_val)
self.assertAllClose(np_val, c_val)
tf_val = tf.stack([inputs[0], tf.placeholder(tf.float32), inputs[2]])
c_val = tf.contrib.util.constant_value(tf_val)
self.assertIs(None, c_val)
class ConstantValueAsShapeTest(tf.test.TestCase):
def testConstant(self):
np_val = np.random.rand(3).astype(np.int32)
tf_val = tf.constant(np_val)
self.assertEqual(tf.TensorShape(np_val),
tensor_util.constant_value_as_shape(tf_val))
tf_val = tf.constant([], dtype=tf.int32)
self.assertEqual(tf.TensorShape([]),
tensor_util.constant_value_as_shape(tf_val))
def testShape(self):
tf_val = tf.shape(tf.constant(0.0, shape=[1, 2, 3]))
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual(tf.TensorShape([1, 2, 3]), c_val)
def testPack(self):
tf_val = tf.stack([tf.constant(16), 37, tf.placeholder(tf.int32)])
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([16, 37, None], c_val.as_list())
def testConcat(self):
tf_val = tf.concat(0, [[16, 37], tf.placeholder(tf.int32, shape=(2,))])
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([16, 37, None, None], c_val.as_list())
tf_val = tf.concat(0,
[[16, 37], tf.placeholder(tf.int32, shape=(1,)), [48]])
c_val = tensor_util.constant_value_as_shape(tf_val)
self.assertEqual([16, 37, None, 48], c_val.as_list())
if __name__ == "__main__":
tf.test.main()
| apache-2.0 | 1,541,813,174,414,003,000 | 34.866279 | 119 | 0.605892 | false |
MiNGRotten/Trivium | cli.py | 1 | 1923 | # Console interface
from trivium import Trivium
from encryptor import Encryptor
import argparse
class CLI:
def __init__(self):
self._trivium = None
self._input_string = None
self._output_string = None
self._key = None
self._iv = None
self._argument_parser = argparse.ArgumentParser()
self._allbytes = dict([("%02X" % i, i) for i in range(256)])
# Init CLI arguments parser
def init_argparser(self):
self._argument_parser.add_argument('input', type=str, help='input file')
self._argument_parser.add_argument('output', type=str, help='output file')
self._argument_parser.add_argument('key', type=str, help='key')
self._argument_parser.add_argument('iv', type=str, help='iv')
# Parse CLI arguments
def parse_agrs(self):
try:
args = self._argument_parser.parse_args()
self._input_string = args.input
self._output_string = args.output
self._key = str(args.key)
self._iv = str(args.iv)
except:
self._argument_parser.print_help()
exit(1)
    def convert_args(self):
        # Convert the hex key/iv strings parsed from the command line into
        # the bit-list form used by the Trivium cipher.
        self._key = self.hex_to_bits(self._key)[::-1]
        self._iv = self.hex_to_bits(self._iv)[::-1]
def init_cipher(self):
self._trivium = Trivium(self._key, self._iv)
def run_cipher(self):
encryptor = Encryptor(self._trivium, self._input_string, self._output_string)
encryptor.encrypt()
def hex_to_bytes(self, s):
return [self._allbytes[s[i:i + 2].upper()] for i in range(0, len(s), 2)]
def hex_to_bits(self, s):
return [(b >> i) & 1 for b in self.hex_to_bytes(s)
for i in range(8)]
def bits_to_hex(self, b):
return "".join(["%02X" % sum([b[i + j] << j for j in range(8)])
for i in range(0, len(b), 8)])
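# A minimal, illustrative entry point (an assumption: the project's real
# entry point may live in another module). It simply drives the CLI steps
# defined above in order:
if __name__ == "__main__":
    cli = CLI()
    cli.init_argparser()
    cli.parse_agrs()
    cli.convert_args()
    cli.init_cipher()
    cli.run_cipher()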
| mit | 8,659,041,294,777,556,000 | 33.339286 | 85 | 0.575143 | false |
savoirfairelinux/django | tests/aggregation_regress/tests.py | 10 | 59133 | import datetime
import pickle
from decimal import Decimal
from operator import attrgetter
from unittest import mock
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import FieldError
from django.db import connection
from django.db.models import (
Avg, Case, Count, DecimalField, F, IntegerField, Max, Q, StdDev, Sum,
Value, Variance, When,
)
from django.test import TestCase, skipUnlessAnyDBFeature, skipUnlessDBFeature
from django.test.utils import Approximate
from .models import (
Alfa, Author, Book, Bravo, Charlie, Clues, Entries, HardbackBook, ItemTag,
Publisher, SelfRefFK, Store, WithManualPK,
)
class AggregationTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.a1 = Author.objects.create(name='Adrian Holovaty', age=34)
cls.a2 = Author.objects.create(name='Jacob Kaplan-Moss', age=35)
cls.a3 = Author.objects.create(name='Brad Dayley', age=45)
cls.a4 = Author.objects.create(name='James Bennett', age=29)
cls.a5 = Author.objects.create(name='Jeffrey Forcier', age=37)
cls.a6 = Author.objects.create(name='Paul Bissex', age=29)
cls.a7 = Author.objects.create(name='Wesley J. Chun', age=25)
cls.a8 = Author.objects.create(name='Peter Norvig', age=57)
cls.a9 = Author.objects.create(name='Stuart Russell', age=46)
cls.a1.friends.add(cls.a2, cls.a4)
cls.a2.friends.add(cls.a1, cls.a7)
cls.a4.friends.add(cls.a1)
cls.a5.friends.add(cls.a6, cls.a7)
cls.a6.friends.add(cls.a5, cls.a7)
cls.a7.friends.add(cls.a2, cls.a5, cls.a6)
cls.a8.friends.add(cls.a9)
cls.a9.friends.add(cls.a8)
cls.p1 = Publisher.objects.create(name='Apress', num_awards=3)
cls.p2 = Publisher.objects.create(name='Sams', num_awards=1)
cls.p3 = Publisher.objects.create(name='Prentice Hall', num_awards=7)
cls.p4 = Publisher.objects.create(name='Morgan Kaufmann', num_awards=9)
cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0)
cls.b1 = Book.objects.create(
isbn='159059725', name='The Definitive Guide to Django: Web Development Done Right',
pages=447, rating=4.5, price=Decimal('30.00'), contact=cls.a1, publisher=cls.p1,
pubdate=datetime.date(2007, 12, 6)
)
cls.b2 = Book.objects.create(
isbn='067232959', name='Sams Teach Yourself Django in 24 Hours',
pages=528, rating=3.0, price=Decimal('23.09'), contact=cls.a3, publisher=cls.p2,
pubdate=datetime.date(2008, 3, 3)
)
cls.b3 = Book.objects.create(
isbn='159059996', name='Practical Django Projects',
pages=300, rating=4.0, price=Decimal('29.69'), contact=cls.a4, publisher=cls.p1,
pubdate=datetime.date(2008, 6, 23)
)
cls.b4 = Book.objects.create(
isbn='013235613', name='Python Web Development with Django',
pages=350, rating=4.0, price=Decimal('29.69'), contact=cls.a5, publisher=cls.p3,
pubdate=datetime.date(2008, 11, 3)
)
cls.b5 = HardbackBook.objects.create(
isbn='013790395', name='Artificial Intelligence: A Modern Approach',
pages=1132, rating=4.0, price=Decimal('82.80'), contact=cls.a8, publisher=cls.p3,
pubdate=datetime.date(1995, 1, 15), weight=4.5)
cls.b6 = HardbackBook.objects.create(
isbn='155860191', name='Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp',
pages=946, rating=5.0, price=Decimal('75.00'), contact=cls.a8, publisher=cls.p4,
pubdate=datetime.date(1991, 10, 15), weight=3.7)
cls.b1.authors.add(cls.a1, cls.a2)
cls.b2.authors.add(cls.a3)
cls.b3.authors.add(cls.a4)
cls.b4.authors.add(cls.a5, cls.a6, cls.a7)
cls.b5.authors.add(cls.a8, cls.a9)
cls.b6.authors.add(cls.a8)
s1 = Store.objects.create(
name='Amazon.com',
original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42),
friday_night_closing=datetime.time(23, 59, 59)
)
s2 = Store.objects.create(
name='Books.com',
original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37),
friday_night_closing=datetime.time(23, 59, 59)
)
s3 = Store.objects.create(
name="Mamma and Pappa's Books",
original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14),
friday_night_closing=datetime.time(21, 30)
)
s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6)
s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6)
s3.books.add(cls.b3, cls.b4, cls.b6)
def assertObjectAttrs(self, obj, **kwargs):
for attr, value in kwargs.items():
self.assertEqual(getattr(obj, attr), value)
def test_annotation_with_value(self):
values = Book.objects.filter(
name='Practical Django Projects',
).annotate(
discount_price=F('price') * 2,
).values(
'discount_price',
).annotate(sum_discount=Sum('discount_price'))
self.assertSequenceEqual(
values,
[{'discount_price': Decimal('59.38'), 'sum_discount': Decimal('59.38')}]
)
def test_aggregates_in_where_clause(self):
"""
Regression test for #12822: DatabaseError: aggregates not allowed in
WHERE clause
The subselect works and returns results equivalent to a
query with the IDs listed.
Before the corresponding fix for this bug, this test passed in 1.1 and
failed in 1.2-beta (trunk).
"""
qs = Book.objects.values('contact').annotate(Max('id'))
qs = qs.order_by('contact').values_list('id__max', flat=True)
# don't do anything with the queryset (qs) before including it as a
# subquery
books = Book.objects.order_by('id')
qs1 = books.filter(id__in=qs)
qs2 = books.filter(id__in=list(qs))
self.assertEqual(list(qs1), list(qs2))
def test_aggregates_in_where_clause_pre_eval(self):
"""
Regression test for #12822: DatabaseError: aggregates not allowed in
WHERE clause
Same as the above test, but evaluates the queryset for the subquery
before it's used as a subquery.
Before the corresponding fix for this bug, this test failed in both
1.1 and 1.2-beta (trunk).
"""
qs = Book.objects.values('contact').annotate(Max('id'))
qs = qs.order_by('contact').values_list('id__max', flat=True)
# force the queryset (qs) for the subquery to be evaluated in its
# current state
list(qs)
books = Book.objects.order_by('id')
qs1 = books.filter(id__in=qs)
qs2 = books.filter(id__in=list(qs))
self.assertEqual(list(qs1), list(qs2))
@skipUnlessDBFeature('supports_subqueries_in_group_by')
def test_annotate_with_extra(self):
"""
Regression test for #11916: Extra params + aggregation creates
incorrect SQL.
"""
# Oracle doesn't support subqueries in group by clause
shortest_book_sql = """
SELECT name
FROM aggregation_regress_book b
WHERE b.publisher_id = aggregation_regress_publisher.id
ORDER BY b.pages
LIMIT 1
"""
# tests that this query does not raise a DatabaseError due to the full
# subselect being (erroneously) added to the GROUP BY parameters
qs = Publisher.objects.extra(select={
'name_of_shortest_book': shortest_book_sql,
}).annotate(total_books=Count('book'))
# force execution of the query
list(qs)
def test_aggregate(self):
# Ordering requests are ignored
self.assertEqual(
Author.objects.order_by("name").aggregate(Avg("age")),
{"age__avg": Approximate(37.444, places=1)}
)
# Implicit ordering is also ignored
self.assertEqual(
Book.objects.aggregate(Sum("pages")),
{"pages__sum": 3703},
)
# Baseline results
self.assertEqual(
Book.objects.aggregate(Sum('pages'), Avg('pages')),
{'pages__sum': 3703, 'pages__avg': Approximate(617.166, places=2)}
)
# Empty values query doesn't affect grouping or results
self.assertEqual(
Book.objects.values().aggregate(Sum('pages'), Avg('pages')),
{'pages__sum': 3703, 'pages__avg': Approximate(617.166, places=2)}
)
# Aggregate overrides extra selected column
self.assertEqual(
Book.objects.extra(select={'price_per_page': 'price / pages'}).aggregate(Sum('pages')),
{'pages__sum': 3703}
)
def test_annotation(self):
# Annotations get combined with extra select clauses
obj = Book.objects.annotate(mean_auth_age=Avg("authors__age")).extra(
select={"manufacture_cost": "price * .5"}).get(pk=self.b2.pk)
self.assertObjectAttrs(
obj,
contact_id=self.a3.id,
isbn='067232959',
mean_auth_age=45.0,
name='Sams Teach Yourself Django in 24 Hours',
pages=528,
price=Decimal("23.09"),
pubdate=datetime.date(2008, 3, 3),
publisher_id=self.p2.id,
rating=3.0
)
# Different DB backends return different types for the extra select computation
self.assertIn(obj.manufacture_cost, (11.545, Decimal('11.545')))
# Order of the annotate/extra in the query doesn't matter
obj = Book.objects.extra(select={'manufacture_cost': 'price * .5'}).annotate(
mean_auth_age=Avg('authors__age')).get(pk=self.b2.pk)
self.assertObjectAttrs(
obj,
contact_id=self.a3.id,
isbn='067232959',
mean_auth_age=45.0,
name='Sams Teach Yourself Django in 24 Hours',
pages=528,
price=Decimal("23.09"),
pubdate=datetime.date(2008, 3, 3),
publisher_id=self.p2.id,
rating=3.0
)
# Different DB backends return different types for the extra select computation
self.assertIn(obj.manufacture_cost, (11.545, Decimal('11.545')))
# Values queries can be combined with annotate and extra
obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(
select={'manufacture_cost': 'price * .5'}).values().get(pk=self.b2.pk)
manufacture_cost = obj['manufacture_cost']
self.assertIn(manufacture_cost, (11.545, Decimal('11.545')))
del obj['manufacture_cost']
self.assertEqual(obj, {
'id': self.b2.id,
'contact_id': self.a3.id,
'isbn': '067232959',
'mean_auth_age': 45.0,
'name': 'Sams Teach Yourself Django in 24 Hours',
'pages': 528,
'price': Decimal('23.09'),
'pubdate': datetime.date(2008, 3, 3),
'publisher_id': self.p2.id,
'rating': 3.0,
})
# The order of the (empty) values, annotate and extra clauses doesn't
# matter
obj = Book.objects.values().annotate(mean_auth_age=Avg('authors__age')).extra(
select={'manufacture_cost': 'price * .5'}).get(pk=self.b2.pk)
manufacture_cost = obj['manufacture_cost']
self.assertIn(manufacture_cost, (11.545, Decimal('11.545')))
del obj['manufacture_cost']
self.assertEqual(obj, {
'id': self.b2.id,
'contact_id': self.a3.id,
'isbn': '067232959',
'mean_auth_age': 45.0,
'name': 'Sams Teach Yourself Django in 24 Hours',
'pages': 528,
'price': Decimal('23.09'),
'pubdate': datetime.date(2008, 3, 3),
'publisher_id': self.p2.id,
'rating': 3.0
})
# If the annotation precedes the values clause, it won't be included
# unless it is explicitly named
obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(
select={'price_per_page': 'price / pages'}).values('name').get(pk=self.b1.pk)
self.assertEqual(obj, {
"name": 'The Definitive Guide to Django: Web Development Done Right',
})
obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(
select={'price_per_page': 'price / pages'}).values('name', 'mean_auth_age').get(pk=self.b1.pk)
self.assertEqual(obj, {
'mean_auth_age': 34.5,
'name': 'The Definitive Guide to Django: Web Development Done Right',
})
# If an annotation isn't included in the values, it can still be used
# in a filter
qs = Book.objects.annotate(n_authors=Count('authors')).values('name').filter(n_authors__gt=2)
self.assertSequenceEqual(
qs, [
{"name": 'Python Web Development with Django'}
],
)
# The annotations are added to values output if values() precedes
# annotate()
obj = Book.objects.values('name').annotate(mean_auth_age=Avg('authors__age')).extra(
select={'price_per_page': 'price / pages'}).get(pk=self.b1.pk)
self.assertEqual(obj, {
'mean_auth_age': 34.5,
'name': 'The Definitive Guide to Django: Web Development Done Right',
})
        # All of the objects are counted (allow_nulls) and values() respects
        # the number of objects
self.assertEqual(
len(Author.objects.annotate(Avg('friends__age')).values()),
9
)
# Consecutive calls to annotate accumulate in the query
qs = (
Book.objects
.values('price')
.annotate(oldest=Max('authors__age'))
.order_by('oldest', 'price')
.annotate(Max('publisher__num_awards'))
)
self.assertSequenceEqual(
qs, [
{'price': Decimal("30"), 'oldest': 35, 'publisher__num_awards__max': 3},
{'price': Decimal("29.69"), 'oldest': 37, 'publisher__num_awards__max': 7},
{'price': Decimal("23.09"), 'oldest': 45, 'publisher__num_awards__max': 1},
{'price': Decimal("75"), 'oldest': 57, 'publisher__num_awards__max': 9},
{'price': Decimal("82.8"), 'oldest': 57, 'publisher__num_awards__max': 7}
],
)
def test_aggregate_annotation(self):
# Aggregates can be composed over annotations.
# The return type is derived from the composed aggregate
vals = (
Book.objects
.all()
.annotate(num_authors=Count('authors__id'))
.aggregate(Max('pages'), Max('price'), Sum('num_authors'), Avg('num_authors'))
)
self.assertEqual(vals, {
'num_authors__sum': 10,
'num_authors__avg': Approximate(1.666, places=2),
'pages__max': 1132,
'price__max': Decimal("82.80")
})
# Regression for #15624 - Missing SELECT columns when using values, annotate
# and aggregate in a single query
self.assertEqual(
Book.objects.annotate(c=Count('authors')).values('c').aggregate(Max('c')),
{'c__max': 3}
)
    def test_conditional_aggregate(self):
# Conditional aggregation of a grouped queryset.
self.assertEqual(
Book.objects.annotate(c=Count('authors')).values('pk').aggregate(test=Sum(
Case(When(c__gt=1, then=1), output_field=IntegerField())
))['test'],
3
)
def test_sliced_conditional_aggregate(self):
self.assertEqual(
Author.objects.all()[:5].aggregate(test=Sum(Case(
When(age__lte=35, then=1), output_field=IntegerField()
)))['test'],
3
)
def test_annotated_conditional_aggregate(self):
annotated_qs = Book.objects.annotate(discount_price=F('price') * 0.75)
self.assertAlmostEqual(
annotated_qs.aggregate(test=Avg(Case(
When(pages__lt=400, then='discount_price'),
output_field=DecimalField()
)))['test'],
22.27, places=2
)
def test_distinct_conditional_aggregate(self):
self.assertEqual(
Book.objects.distinct().aggregate(test=Avg(Case(
When(price=Decimal('29.69'), then='pages'),
output_field=IntegerField()
)))['test'],
325
)
def test_conditional_aggregate_on_complex_condition(self):
self.assertEqual(
Book.objects.distinct().aggregate(test=Avg(Case(
When(Q(price__gte=Decimal('29')) & Q(price__lt=Decimal('30')), then='pages'),
output_field=IntegerField()
)))['test'],
325
)
def test_decimal_aggregate_annotation_filter(self):
"""
Filtering on an aggregate annotation with Decimal values should work.
Requires special handling on SQLite (#18247).
"""
self.assertEqual(
len(Author.objects.annotate(sum=Sum('book_contact_set__price')).filter(sum__gt=Decimal(40))),
1
)
self.assertEqual(
len(Author.objects.annotate(sum=Sum('book_contact_set__price')).filter(sum__lte=Decimal(40))),
4
)
def test_field_error(self):
# Bad field requests in aggregates are caught and reported
with self.assertRaises(FieldError):
Book.objects.all().aggregate(num_authors=Count('foo'))
with self.assertRaises(FieldError):
Book.objects.all().annotate(num_authors=Count('foo'))
with self.assertRaises(FieldError):
Book.objects.all().annotate(num_authors=Count('authors__id')).aggregate(Max('foo'))
def test_more(self):
# Old-style count aggregations can be mixed with new-style
self.assertEqual(
Book.objects.annotate(num_authors=Count('authors')).count(),
6
)
# Non-ordinal, non-computed Aggregates over annotations correctly
# inherit the annotation's internal type if the annotation is ordinal
# or computed
vals = Book.objects.annotate(num_authors=Count('authors')).aggregate(Max('num_authors'))
self.assertEqual(
vals,
{'num_authors__max': 3}
)
vals = Publisher.objects.annotate(avg_price=Avg('book__price')).aggregate(Max('avg_price'))
self.assertEqual(
vals,
{'avg_price__max': 75.0}
)
        # Aliases are quoted to protect aliases that might be reserved names
vals = Book.objects.aggregate(number=Max('pages'), select=Max('pages'))
self.assertEqual(
vals,
{'number': 1132, 'select': 1132}
)
# Regression for #10064: select_related() plays nice with aggregates
obj = Book.objects.select_related('publisher').annotate(
num_authors=Count('authors')).values().get(isbn='013790395')
self.assertEqual(obj, {
'contact_id': self.a8.id,
'id': self.b5.id,
'isbn': '013790395',
'name': 'Artificial Intelligence: A Modern Approach',
'num_authors': 2,
'pages': 1132,
'price': Decimal("82.8"),
'pubdate': datetime.date(1995, 1, 15),
'publisher_id': self.p3.id,
'rating': 4.0,
})
# Regression for #10010: exclude on an aggregate field is correctly
# negated
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors'))),
6
)
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors')).filter(num_authors__gt=2)),
1
)
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors')).exclude(num_authors__gt=2)),
5
)
self.assertEqual(
len(
Book.objects
.annotate(num_authors=Count('authors'))
.filter(num_authors__lt=3)
.exclude(num_authors__lt=2)
),
2
)
self.assertEqual(
len(
Book.objects
.annotate(num_authors=Count('authors'))
.exclude(num_authors__lt=2)
.filter(num_authors__lt=3)
),
2
)
def test_aggregate_fexpr(self):
# Aggregates can be used with F() expressions
# ... where the F() is pushed into the HAVING clause
qs = (
Publisher.objects
.annotate(num_books=Count('book'))
.filter(num_books__lt=F('num_awards') / 2)
.order_by('name')
.values('name', 'num_books', 'num_awards')
)
self.assertSequenceEqual(
qs, [
{'num_books': 1, 'name': 'Morgan Kaufmann', 'num_awards': 9},
{'num_books': 2, 'name': 'Prentice Hall', 'num_awards': 7}
],
)
qs = (
Publisher.objects
.annotate(num_books=Count('book'))
.exclude(num_books__lt=F('num_awards') / 2)
.order_by('name')
.values('name', 'num_books', 'num_awards')
)
self.assertSequenceEqual(
qs, [
{'num_books': 2, 'name': 'Apress', 'num_awards': 3},
{'num_books': 0, 'name': "Jonno's House of Books", 'num_awards': 0},
{'num_books': 1, 'name': 'Sams', 'num_awards': 1}
],
)
# ... and where the F() references an aggregate
qs = (
Publisher.objects
.annotate(num_books=Count('book'))
.filter(num_awards__gt=2 * F('num_books'))
.order_by('name')
.values('name', 'num_books', 'num_awards')
)
self.assertSequenceEqual(
qs, [
{'num_books': 1, 'name': 'Morgan Kaufmann', 'num_awards': 9},
{'num_books': 2, 'name': 'Prentice Hall', 'num_awards': 7}
],
)
qs = (
Publisher.objects
.annotate(num_books=Count('book'))
.exclude(num_books__lt=F('num_awards') / 2)
.order_by('name')
.values('name', 'num_books', 'num_awards')
)
self.assertSequenceEqual(
qs, [
{'num_books': 2, 'name': 'Apress', 'num_awards': 3},
{'num_books': 0, 'name': "Jonno's House of Books", 'num_awards': 0},
{'num_books': 1, 'name': 'Sams', 'num_awards': 1}
],
)
def test_db_col_table(self):
# Tests on fields with non-default table and column names.
qs = (
Clues.objects
.values('EntryID__Entry')
.annotate(Appearances=Count('EntryID'), Distinct_Clues=Count('Clue', distinct=True))
)
self.assertQuerysetEqual(qs, [])
qs = Entries.objects.annotate(clue_count=Count('clues__ID'))
self.assertQuerysetEqual(qs, [])
def test_boolean_conversion(self):
# Aggregates mixed up ordering of columns for backend's convert_values
# method. Refs #21126.
e = Entries.objects.create(Entry='foo')
c = Clues.objects.create(EntryID=e, Clue='bar')
qs = Clues.objects.select_related('EntryID').annotate(Count('ID'))
self.assertSequenceEqual(qs, [c])
self.assertEqual(qs[0].EntryID, e)
self.assertIs(qs[0].EntryID.Exclude, False)
def test_empty(self):
# Regression for #10089: Check handling of empty result sets with
# aggregates
self.assertEqual(
Book.objects.filter(id__in=[]).count(),
0
)
vals = (
Book.objects
.filter(id__in=[])
.aggregate(
num_authors=Count('authors'),
avg_authors=Avg('authors'),
max_authors=Max('authors'),
max_price=Max('price'),
max_rating=Max('rating'),
)
)
self.assertEqual(
vals,
{'max_authors': None, 'max_rating': None, 'num_authors': 0, 'avg_authors': None, 'max_price': None}
)
qs = (
Publisher.objects
.filter(name="Jonno's House of Books")
.annotate(
num_authors=Count('book__authors'),
avg_authors=Avg('book__authors'),
max_authors=Max('book__authors'),
max_price=Max('book__price'),
max_rating=Max('book__rating'),
).values()
)
self.assertSequenceEqual(
qs,
[{
'max_authors': None,
'name': "Jonno's House of Books",
'num_awards': 0,
'max_price': None,
'num_authors': 0,
'max_rating': None,
'id': self.p5.id,
'avg_authors': None,
}],
)
def test_more_more(self):
# Regression for #10113 - Fields mentioned in order_by() must be
# included in the GROUP BY. This only becomes a problem when the
# order_by introduces a new join.
self.assertQuerysetEqual(
Book.objects.annotate(num_authors=Count('authors')).order_by('publisher__name', 'name'), [
"Practical Django Projects",
"The Definitive Guide to Django: Web Development Done Right",
"Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp",
"Artificial Intelligence: A Modern Approach",
"Python Web Development with Django",
"Sams Teach Yourself Django in 24 Hours",
],
lambda b: b.name
)
# Regression for #10127 - Empty select_related() works with annotate
qs = Book.objects.filter(rating__lt=4.5).select_related().annotate(Avg('authors__age'))
self.assertQuerysetEqual(
qs,
[
('Artificial Intelligence: A Modern Approach', 51.5, 'Prentice Hall', 'Peter Norvig'),
('Practical Django Projects', 29.0, 'Apress', 'James Bennett'),
(
'Python Web Development with Django',
Approximate(30.333, places=2),
'Prentice Hall',
'Jeffrey Forcier',
),
('Sams Teach Yourself Django in 24 Hours', 45.0, 'Sams', 'Brad Dayley')
],
lambda b: (b.name, b.authors__age__avg, b.publisher.name, b.contact.name)
)
# Regression for #10132 - If the values() clause only mentioned extra
# (select=) columns, those columns are used for grouping
qs = Book.objects.extra(select={'pub': 'publisher_id'}).values('pub').annotate(Count('id')).order_by('pub')
self.assertSequenceEqual(
qs, [
{'pub': self.b1.id, 'id__count': 2},
{'pub': self.b2.id, 'id__count': 1},
{'pub': self.b3.id, 'id__count': 2},
{'pub': self.b4.id, 'id__count': 1}
],
)
qs = (
Book.objects
.extra(select={'pub': 'publisher_id', 'foo': 'pages'})
.values('pub')
.annotate(Count('id'))
.order_by('pub')
)
self.assertSequenceEqual(
qs, [
{'pub': self.p1.id, 'id__count': 2},
{'pub': self.p2.id, 'id__count': 1},
{'pub': self.p3.id, 'id__count': 2},
{'pub': self.p4.id, 'id__count': 1}
],
)
# Regression for #10182 - Queries with aggregate calls are correctly
# realiased when used in a subquery
ids = (
Book.objects
.filter(pages__gt=100)
.annotate(n_authors=Count('authors'))
.filter(n_authors__gt=2)
.order_by('n_authors')
)
self.assertQuerysetEqual(
Book.objects.filter(id__in=ids), [
"Python Web Development with Django",
],
lambda b: b.name
)
# Regression for #15709 - Ensure each group_by field only exists once
# per query
qstr = str(Book.objects.values('publisher').annotate(max_pages=Max('pages')).order_by().query)
# There is just one GROUP BY clause (zero commas means at most one clause).
self.assertEqual(qstr[qstr.index('GROUP BY'):].count(', '), 0)
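        # i.e. the tail of the generated SQL is expected to look roughly like
        # (illustrative): GROUP BY "aggregation_regress_book"."publisher_id"
        # with the grouping column appearing only once.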
def test_duplicate_alias(self):
# Regression for #11256 - duplicating a default alias raises ValueError.
with self.assertRaises(ValueError):
Book.objects.all().annotate(Avg('authors__age'), authors__age__avg=Avg('authors__age'))
def test_field_name_conflict(self):
# Regression for #11256 - providing an aggregate name
# that conflicts with a field name on the model raises ValueError
with self.assertRaises(ValueError):
Author.objects.annotate(age=Avg('friends__age'))
def test_m2m_name_conflict(self):
# Regression for #11256 - providing an aggregate name
# that conflicts with an m2m name on the model raises ValueError
with self.assertRaises(ValueError):
Author.objects.annotate(friends=Count('friends'))
def test_values_queryset_non_conflict(self):
# Regression for #14707 -- If you're using a values query set, some potential conflicts are avoided.
# age is a field on Author, so it shouldn't be allowed as an aggregate.
# But age isn't included in values(), so it is.
results = Author.objects.values('name').annotate(age=Count('book_contact_set')).order_by('name')
self.assertEqual(len(results), 9)
self.assertEqual(results[0]['name'], 'Adrian Holovaty')
self.assertEqual(results[0]['age'], 1)
# Same problem, but aggregating over m2m fields
results = Author.objects.values('name').annotate(age=Avg('friends__age')).order_by('name')
self.assertEqual(len(results), 9)
self.assertEqual(results[0]['name'], 'Adrian Holovaty')
self.assertEqual(results[0]['age'], 32.0)
# Same problem, but colliding with an m2m field
results = Author.objects.values('name').annotate(friends=Count('friends')).order_by('name')
self.assertEqual(len(results), 9)
self.assertEqual(results[0]['name'], 'Adrian Holovaty')
self.assertEqual(results[0]['friends'], 2)
def test_reverse_relation_name_conflict(self):
# Regression for #11256 - providing an aggregate name
# that conflicts with a reverse-related name on the model raises ValueError
with self.assertRaises(ValueError):
Author.objects.annotate(book_contact_set=Avg('friends__age'))
def test_pickle(self):
# Regression for #10197 -- Queries with aggregates can be pickled.
# First check that pickling is possible at all. No crash = success
qs = Book.objects.annotate(num_authors=Count('authors'))
pickle.dumps(qs)
# Then check that the round trip works.
query = qs.query.get_compiler(qs.db).as_sql()[0]
qs2 = pickle.loads(pickle.dumps(qs))
self.assertEqual(
qs2.query.get_compiler(qs2.db).as_sql()[0],
query,
)
def test_more_more_more(self):
# Regression for #10199 - Aggregate calls clone the original query so
# the original query can still be used
books = Book.objects.all()
books.aggregate(Avg("authors__age"))
self.assertQuerysetEqual(
books.all(), [
'Artificial Intelligence: A Modern Approach',
'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp',
'Practical Django Projects',
'Python Web Development with Django',
'Sams Teach Yourself Django in 24 Hours',
'The Definitive Guide to Django: Web Development Done Right'
],
lambda b: b.name
)
# Regression for #10248 - Annotations work with dates()
qs = Book.objects.annotate(num_authors=Count('authors')).filter(num_authors=2).dates('pubdate', 'day')
self.assertSequenceEqual(
qs, [
datetime.date(1995, 1, 15),
datetime.date(2007, 12, 6),
],
)
# Regression for #10290 - extra selects with parameters can be used for
# grouping.
qs = (
Book.objects
.annotate(mean_auth_age=Avg('authors__age'))
.extra(select={'sheets': '(pages + %s) / %s'}, select_params=[1, 2])
.order_by('sheets')
.values('sheets')
)
self.assertQuerysetEqual(
qs, [
150,
175,
224,
264,
473,
566
],
lambda b: int(b["sheets"])
)
# Regression for 10425 - annotations don't get in the way of a count()
# clause
self.assertEqual(
Book.objects.values('publisher').annotate(Count('publisher')).count(),
4
)
self.assertEqual(
Book.objects.annotate(Count('publisher')).values('publisher').count(),
6
)
# Note: intentionally no order_by(), that case needs tests, too.
publishers = Publisher.objects.filter(id__in=[1, 2])
self.assertEqual(
sorted(p.name for p in publishers),
[
"Apress",
"Sams"
]
)
publishers = publishers.annotate(n_books=Count("book"))
sorted_publishers = sorted(publishers, key=lambda x: x.name)
self.assertEqual(
sorted_publishers[0].n_books,
2
)
self.assertEqual(
sorted_publishers[1].n_books,
1
)
self.assertEqual(
sorted(p.name for p in publishers),
[
"Apress",
"Sams"
]
)
books = Book.objects.filter(publisher__in=publishers)
self.assertQuerysetEqual(
books, [
"Practical Django Projects",
"Sams Teach Yourself Django in 24 Hours",
"The Definitive Guide to Django: Web Development Done Right",
],
lambda b: b.name
)
self.assertEqual(
sorted(p.name for p in publishers),
[
"Apress",
"Sams"
]
)
# Regression for 10666 - inherited fields work with annotations and
# aggregations
self.assertEqual(
HardbackBook.objects.aggregate(n_pages=Sum('book_ptr__pages')),
{'n_pages': 2078}
)
self.assertEqual(
HardbackBook.objects.aggregate(n_pages=Sum('pages')),
{'n_pages': 2078},
)
qs = HardbackBook.objects.annotate(n_authors=Count('book_ptr__authors')).values('name', 'n_authors')
self.assertSequenceEqual(
qs,
[
{'n_authors': 2, 'name': 'Artificial Intelligence: A Modern Approach'},
{
'n_authors': 1,
'name': 'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp'
}
],
)
qs = HardbackBook.objects.annotate(n_authors=Count('authors')).values('name', 'n_authors')
self.assertSequenceEqual(
qs,
[
{'n_authors': 2, 'name': 'Artificial Intelligence: A Modern Approach'},
{
'n_authors': 1,
'name': 'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp'
}
],
)
        # Regression for #10766 - Shouldn't be able to reference aggregate
        # fields in an aggregate() call.
with self.assertRaises(FieldError):
Book.objects.annotate(mean_age=Avg('authors__age')).annotate(Avg('mean_age'))
def test_empty_filter_count(self):
self.assertEqual(
Author.objects.filter(id__in=[]).annotate(Count("friends")).count(),
0
)
def test_empty_filter_aggregate(self):
self.assertEqual(
Author.objects.filter(id__in=[]).annotate(Count("friends")).aggregate(Count("pk")),
{"pk__count": None}
)
def test_none_call_before_aggregate(self):
# Regression for #11789
self.assertEqual(
Author.objects.none().aggregate(Avg('age')),
{'age__avg': None}
)
def test_annotate_and_join(self):
self.assertEqual(
Author.objects.annotate(c=Count("friends__name")).exclude(friends__name="Joe").count(),
Author.objects.count()
)
def test_f_expression_annotation(self):
# Books with less than 200 pages per author.
qs = Book.objects.values("name").annotate(
n_authors=Count("authors")
).filter(
pages__lt=F("n_authors") * 200
).values_list("pk")
self.assertQuerysetEqual(
Book.objects.filter(pk__in=qs), [
"Python Web Development with Django"
],
attrgetter("name")
)
def test_values_annotate_values(self):
qs = Book.objects.values("name").annotate(
n_authors=Count("authors")
).values_list("pk", flat=True)
self.assertEqual(list(qs), list(Book.objects.values_list("pk", flat=True)))
def test_having_group_by(self):
        # A field that occurs on the LHS of a HAVING clause must also appear
        # correctly in the GROUP BY clause
qs = Book.objects.values_list("name").annotate(
n_authors=Count("authors")
).filter(
pages__gt=F("n_authors")
).values_list("name", flat=True)
# Results should be the same, all Books have more pages than authors
self.assertEqual(
list(qs), list(Book.objects.values_list("name", flat=True))
)
def test_values_list_annotation_args_ordering(self):
"""
Annotate *args ordering should be preserved in values_list results.
**kwargs comes after *args.
Regression test for #23659.
"""
books = Book.objects.values_list("publisher__name").annotate(
Count("id"), Avg("price"), Avg("authors__age"), avg_pgs=Avg("pages")
).order_by("-publisher__name")
self.assertEqual(books[0], ('Sams', 1, 23.09, 45.0, 528.0))
def test_annotation_disjunction(self):
qs = Book.objects.annotate(n_authors=Count("authors")).filter(
Q(n_authors=2) | Q(name="Python Web Development with Django")
)
self.assertQuerysetEqual(
qs, [
"Artificial Intelligence: A Modern Approach",
"Python Web Development with Django",
"The Definitive Guide to Django: Web Development Done Right",
],
attrgetter("name")
)
qs = (
Book.objects
.annotate(n_authors=Count("authors"))
.filter(
Q(name="The Definitive Guide to Django: Web Development Done Right") |
(Q(name="Artificial Intelligence: A Modern Approach") & Q(n_authors=3))
)
)
self.assertQuerysetEqual(
qs,
[
"The Definitive Guide to Django: Web Development Done Right",
],
attrgetter("name")
)
qs = Publisher.objects.annotate(
rating_sum=Sum("book__rating"),
book_count=Count("book")
).filter(
Q(rating_sum__gt=5.5) | Q(rating_sum__isnull=True)
).order_by('pk')
self.assertQuerysetEqual(
qs, [
"Apress",
"Prentice Hall",
"Jonno's House of Books",
],
attrgetter("name")
)
qs = Publisher.objects.annotate(
rating_sum=Sum("book__rating"),
book_count=Count("book")
).filter(
Q(rating_sum__gt=F("book_count")) | Q(rating_sum=None)
).order_by("num_awards")
self.assertQuerysetEqual(
qs, [
"Jonno's House of Books",
"Sams",
"Apress",
"Prentice Hall",
"Morgan Kaufmann"
],
attrgetter("name")
)
def test_quoting_aggregate_order_by(self):
qs = Book.objects.filter(
name="Python Web Development with Django"
).annotate(
authorCount=Count("authors")
).order_by("authorCount")
self.assertQuerysetEqual(
qs, [
("Python Web Development with Django", 3),
],
lambda b: (b.name, b.authorCount)
)
@skipUnlessDBFeature('supports_stddev')
def test_stddev(self):
self.assertEqual(
Book.objects.aggregate(StdDev('pages')),
{'pages__stddev': Approximate(311.46, 1)}
)
self.assertEqual(
Book.objects.aggregate(StdDev('rating')),
{'rating__stddev': Approximate(0.60, 1)}
)
self.assertEqual(
Book.objects.aggregate(StdDev('price')),
{'price__stddev': Approximate(24.16, 2)}
)
self.assertEqual(
Book.objects.aggregate(StdDev('pages', sample=True)),
{'pages__stddev': Approximate(341.19, 2)}
)
self.assertEqual(
Book.objects.aggregate(StdDev('rating', sample=True)),
{'rating__stddev': Approximate(0.66, 2)}
)
self.assertEqual(
Book.objects.aggregate(StdDev('price', sample=True)),
{'price__stddev': Approximate(26.46, 1)}
)
self.assertEqual(
Book.objects.aggregate(Variance('pages')),
{'pages__variance': Approximate(97010.80, 1)}
)
self.assertEqual(
Book.objects.aggregate(Variance('rating')),
{'rating__variance': Approximate(0.36, 1)}
)
self.assertEqual(
Book.objects.aggregate(Variance('price')),
{'price__variance': Approximate(583.77, 1)}
)
self.assertEqual(
Book.objects.aggregate(Variance('pages', sample=True)),
{'pages__variance': Approximate(116412.96, 1)}
)
self.assertEqual(
Book.objects.aggregate(Variance('rating', sample=True)),
{'rating__variance': Approximate(0.44, 2)}
)
self.assertEqual(
Book.objects.aggregate(Variance('price', sample=True)),
{'price__variance': Approximate(700.53, 2)}
)
def test_filtering_by_annotation_name(self):
# Regression test for #14476
        # The explicitly provided annotation name in this case
        # poses no problem.
qs = Author.objects.annotate(book_cnt=Count('book')).filter(book_cnt=2).order_by('name')
self.assertQuerysetEqual(
qs,
['Peter Norvig'],
lambda b: b.name
)
# Neither in this case
qs = Author.objects.annotate(book_count=Count('book')).filter(book_count=2).order_by('name')
self.assertQuerysetEqual(
qs,
['Peter Norvig'],
lambda b: b.name
)
# This case used to fail because the ORM couldn't resolve the
# automatically generated annotation name `book__count`
qs = Author.objects.annotate(Count('book')).filter(book__count=2).order_by('name')
self.assertQuerysetEqual(
qs,
['Peter Norvig'],
lambda b: b.name
)
# Referencing the auto-generated name in an aggregate() also works.
self.assertEqual(
Author.objects.annotate(Count('book')).aggregate(Max('book__count')),
{'book__count__max': 2}
)
def test_annotate_joins(self):
"""
        The base table's join isn't promoted to LOUTER. This could
        cause the query generation to fail if there is an exclude() for an
        fk field in the query, too. Refs #19087.
"""
qs = Book.objects.annotate(n=Count('pk'))
self.assertIs(qs.query.alias_map['aggregation_regress_book'].join_type, None)
# The query executes without problems.
self.assertEqual(len(qs.exclude(publisher=-1)), 6)
@skipUnlessAnyDBFeature('allows_group_by_pk', 'allows_group_by_selected_pks')
def test_aggregate_duplicate_columns(self):
# Regression test for #17144
results = Author.objects.annotate(num_contacts=Count('book_contact_set'))
# There should only be one GROUP BY clause, for the `id` column.
# `name` and `age` should not be grouped on.
_, _, group_by = results.query.get_compiler(using='default').pre_sql_setup()
self.assertEqual(len(group_by), 1)
self.assertIn('id', group_by[0][0])
self.assertNotIn('name', group_by[0][0])
self.assertNotIn('age', group_by[0][0])
self.assertEqual(
[(a.name, a.num_contacts) for a in results.order_by('name')],
[
('Adrian Holovaty', 1),
('Brad Dayley', 1),
('Jacob Kaplan-Moss', 0),
('James Bennett', 1),
('Jeffrey Forcier', 1),
('Paul Bissex', 0),
('Peter Norvig', 2),
('Stuart Russell', 0),
('Wesley J. Chun', 0),
]
)
@skipUnlessAnyDBFeature('allows_group_by_pk', 'allows_group_by_selected_pks')
def test_aggregate_duplicate_columns_only(self):
# Works with only() too.
results = Author.objects.only('id', 'name').annotate(num_contacts=Count('book_contact_set'))
_, _, grouping = results.query.get_compiler(using='default').pre_sql_setup()
self.assertEqual(len(grouping), 1)
self.assertIn('id', grouping[0][0])
self.assertNotIn('name', grouping[0][0])
self.assertNotIn('age', grouping[0][0])
self.assertEqual(
[(a.name, a.num_contacts) for a in results.order_by('name')],
[
('Adrian Holovaty', 1),
('Brad Dayley', 1),
('Jacob Kaplan-Moss', 0),
('James Bennett', 1),
('Jeffrey Forcier', 1),
('Paul Bissex', 0),
('Peter Norvig', 2),
('Stuart Russell', 0),
('Wesley J. Chun', 0),
]
)
@skipUnlessAnyDBFeature('allows_group_by_pk', 'allows_group_by_selected_pks')
def test_aggregate_duplicate_columns_select_related(self):
# And select_related()
results = Book.objects.select_related('contact').annotate(
num_authors=Count('authors'))
_, _, grouping = results.query.get_compiler(using='default').pre_sql_setup()
# In the case of `group_by_selected_pks` we also group by contact.id because of the select_related.
self.assertEqual(len(grouping), 1 if connection.features.allows_group_by_pk else 2)
self.assertIn('id', grouping[0][0])
self.assertNotIn('name', grouping[0][0])
self.assertNotIn('contact', grouping[0][0])
self.assertEqual(
[(b.name, b.num_authors) for b in results.order_by('name')],
[
('Artificial Intelligence: A Modern Approach', 2),
('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
('Practical Django Projects', 1),
('Python Web Development with Django', 3),
('Sams Teach Yourself Django in 24 Hours', 1),
('The Definitive Guide to Django: Web Development Done Right', 2)
]
)
@skipUnlessDBFeature('allows_group_by_selected_pks')
    def test_aggregate_unmanaged_model_columns(self):
"""
Unmanaged models are sometimes used to represent database views which
may not allow grouping by selected primary key.
"""
def assertQuerysetResults(queryset):
self.assertEqual(
[(b.name, b.num_authors) for b in queryset.order_by('name')],
[
('Artificial Intelligence: A Modern Approach', 2),
('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
('Practical Django Projects', 1),
('Python Web Development with Django', 3),
('Sams Teach Yourself Django in 24 Hours', 1),
('The Definitive Guide to Django: Web Development Done Right', 2),
]
)
queryset = Book.objects.select_related('contact').annotate(num_authors=Count('authors'))
# Unmanaged origin model.
with mock.patch.object(Book._meta, 'managed', False):
_, _, grouping = queryset.query.get_compiler(using='default').pre_sql_setup()
self.assertEqual(len(grouping), len(Book._meta.fields) + 1)
for index, field in enumerate(Book._meta.fields):
self.assertIn(field.name, grouping[index][0])
self.assertIn(Author._meta.pk.name, grouping[-1][0])
assertQuerysetResults(queryset)
# Unmanaged related model.
with mock.patch.object(Author._meta, 'managed', False):
_, _, grouping = queryset.query.get_compiler(using='default').pre_sql_setup()
self.assertEqual(len(grouping), len(Author._meta.fields) + 1)
self.assertIn(Book._meta.pk.name, grouping[0][0])
for index, field in enumerate(Author._meta.fields):
self.assertIn(field.name, grouping[index + 1][0])
assertQuerysetResults(queryset)
def test_reverse_join_trimming(self):
qs = Author.objects.annotate(Count('book_contact_set__contact'))
self.assertIn(' JOIN ', str(qs.query))
def test_aggregation_with_generic_reverse_relation(self):
"""
Regression test for #10870: Aggregates with joins ignore extra
filters provided by setup_joins
tests aggregations with generic reverse relations
"""
django_book = Book.objects.get(name='Practical Django Projects')
ItemTag.objects.create(
object_id=django_book.id, tag='intermediate',
content_type=ContentType.objects.get_for_model(django_book),
)
ItemTag.objects.create(
object_id=django_book.id, tag='django',
content_type=ContentType.objects.get_for_model(django_book),
)
        # Assign a tag to a model with the same PK as the book above. If the JOIN
        # used in aggregation doesn't have the content type as part of the
        # condition, the annotation will also count the 'hi mom' tag for b.
wmpk = WithManualPK.objects.create(id=django_book.pk)
ItemTag.objects.create(
object_id=wmpk.id, tag='hi mom',
content_type=ContentType.objects.get_for_model(wmpk),
)
ai_book = Book.objects.get(name__startswith='Paradigms of Artificial Intelligence')
ItemTag.objects.create(
object_id=ai_book.id, tag='intermediate',
content_type=ContentType.objects.get_for_model(ai_book),
)
self.assertEqual(Book.objects.aggregate(Count('tags')), {'tags__count': 3})
results = Book.objects.annotate(Count('tags')).order_by('-tags__count', 'name')
self.assertEqual(
[(b.name, b.tags__count) for b in results],
[
('Practical Django Projects', 2),
('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
('Artificial Intelligence: A Modern Approach', 0),
('Python Web Development with Django', 0),
('Sams Teach Yourself Django in 24 Hours', 0),
('The Definitive Guide to Django: Web Development Done Right', 0)
]
)
def test_negated_aggregation(self):
expected_results = Author.objects.exclude(
pk__in=Author.objects.annotate(book_cnt=Count('book')).filter(book_cnt=2)
).order_by('name')
expected_results = [a.name for a in expected_results]
qs = Author.objects.annotate(book_cnt=Count('book')).exclude(
Q(book_cnt=2), Q(book_cnt=2)).order_by('name')
self.assertQuerysetEqual(
qs,
expected_results,
lambda b: b.name
)
expected_results = Author.objects.exclude(
pk__in=Author.objects.annotate(book_cnt=Count('book')).filter(book_cnt=2)
).order_by('name')
expected_results = [a.name for a in expected_results]
qs = Author.objects.annotate(book_cnt=Count('book')).exclude(Q(book_cnt=2) | Q(book_cnt=2)).order_by('name')
self.assertQuerysetEqual(
qs,
expected_results,
lambda b: b.name
)
def test_name_filters(self):
qs = Author.objects.annotate(Count('book')).filter(
Q(book__count__exact=2) | Q(name='Adrian Holovaty')
).order_by('name')
self.assertQuerysetEqual(
qs,
['Adrian Holovaty', 'Peter Norvig'],
lambda b: b.name
)
def test_name_expressions(self):
# Aggregates are spotted correctly from F objects.
# Note that Adrian's age is 34 in the fixtures, and he has one book
# so both conditions match one author.
qs = Author.objects.annotate(Count('book')).filter(
Q(name='Peter Norvig') | Q(age=F('book__count') + 33)
).order_by('name')
self.assertQuerysetEqual(
qs,
['Adrian Holovaty', 'Peter Norvig'],
lambda b: b.name
)
def test_ticket_11293(self):
q1 = Q(price__gt=50)
q2 = Q(authors__count__gt=1)
query = Book.objects.annotate(Count('authors')).filter(
q1 | q2).order_by('pk')
self.assertQuerysetEqual(
query, [1, 4, 5, 6],
lambda b: b.pk)
def test_ticket_11293_q_immutable(self):
"""
Splitting a q object to parts for where/having doesn't alter
the original q-object.
"""
q1 = Q(isbn='')
q2 = Q(authors__count__gt=1)
query = Book.objects.annotate(Count('authors'))
query.filter(q1 | q2)
self.assertEqual(len(q2.children), 1)
def test_fobj_group_by(self):
"""
An F() object referring to related column works correctly in group by.
"""
qs = Book.objects.annotate(
account=Count('authors')
).filter(
account=F('publisher__num_awards')
)
self.assertQuerysetEqual(
qs, ['Sams Teach Yourself Django in 24 Hours'],
lambda b: b.name)
def test_annotate_reserved_word(self):
"""
Regression #18333 - Ensure annotated column name is properly quoted.
"""
vals = Book.objects.annotate(select=Count('authors__id')).aggregate(Sum('select'), Avg('select'))
self.assertEqual(vals, {
'select__sum': 10,
'select__avg': Approximate(1.666, places=2),
})
def test_annotate_on_relation(self):
book = Book.objects.annotate(avg_price=Avg('price'), publisher_name=F('publisher__name')).get(pk=self.b1.pk)
self.assertEqual(book.avg_price, 30.00)
self.assertEqual(book.publisher_name, "Apress")
def test_aggregate_on_relation(self):
# A query with an existing annotation aggregation on a relation should
# succeed.
qs = Book.objects.annotate(avg_price=Avg('price')).aggregate(
publisher_awards=Sum('publisher__num_awards')
)
self.assertEqual(qs['publisher_awards'], 30)
def test_annotate_distinct_aggregate(self):
# There are three books with rating of 4.0 and two of the books have
# the same price. Hence, the distinct removes one rating of 4.0
# from the results.
vals1 = Book.objects.values('rating', 'price').distinct().aggregate(result=Sum('rating'))
vals2 = Book.objects.aggregate(result=Sum('rating') - Value(4.0))
self.assertEqual(vals1, vals2)
class JoinPromotionTests(TestCase):
def test_ticket_21150(self):
b = Bravo.objects.create()
c = Charlie.objects.create(bravo=b)
qs = Charlie.objects.select_related('alfa').annotate(Count('bravo__charlie'))
self.assertSequenceEqual(qs, [c])
self.assertIs(qs[0].alfa, None)
a = Alfa.objects.create()
c.alfa = a
c.save()
# Force re-evaluation
qs = qs.all()
self.assertSequenceEqual(qs, [c])
self.assertEqual(qs[0].alfa, a)
def test_existing_join_not_promoted(self):
# No promotion for existing joins
qs = Charlie.objects.filter(alfa__name__isnull=False).annotate(Count('alfa__name'))
self.assertIn(' INNER JOIN ', str(qs.query))
        # Also, the existing join is unpromoted when doing filtering for an
        # already promoted join.
qs = Charlie.objects.annotate(Count('alfa__name')).filter(alfa__name__isnull=False)
self.assertIn(' INNER JOIN ', str(qs.query))
        # But, as the join is nullable, the first use by annotate will be LOUTER.
qs = Charlie.objects.annotate(Count('alfa__name'))
self.assertIn(' LEFT OUTER JOIN ', str(qs.query))
def test_non_nullable_fk_not_promoted(self):
qs = Book.objects.annotate(Count('contact__name'))
self.assertIn(' INNER JOIN ', str(qs.query))
class SelfReferentialFKTests(TestCase):
def test_ticket_24748(self):
t1 = SelfRefFK.objects.create(name='t1')
SelfRefFK.objects.create(name='t2', parent=t1)
SelfRefFK.objects.create(name='t3', parent=t1)
self.assertQuerysetEqual(
SelfRefFK.objects.annotate(num_children=Count('children')).order_by('name'),
[('t1', 2), ('t2', 0), ('t3', 0)],
lambda x: (x.name, x.num_children)
)
| bsd-3-clause | -5,203,596,772,730,056,000 | 38.369507 | 116 | 0.558385 | false |
sabel83/metashell | 3rd/templight/libcxx/utils/libcxx/test/target_info.py | 1 | 10256 | #===----------------------------------------------------------------------===//
#
# The LLVM Compiler Infrastructure
#
# This file is dual licensed under the MIT and the University of Illinois Open
# Source Licenses. See LICENSE.TXT for details.
#
#===----------------------------------------------------------------------===//
import importlib
import locale
import os
import platform
import re
import subprocess
import sys
class DefaultTargetInfo(object):
def __init__(self, full_config):
self.full_config = full_config
def platform(self):
return sys.platform.lower().strip()
def add_locale_features(self, features):
self.full_config.lit_config.warning(
"No locales entry for target_system: %s" % self.platform())
def add_cxx_compile_flags(self, flags): pass
def add_cxx_link_flags(self, flags): pass
def configure_env(self, env): pass
def allow_cxxabi_link(self): return True
def add_sanitizer_features(self, sanitizer_type, features): pass
def use_lit_shell_default(self): return False
def test_locale(loc):
assert loc is not None
default_locale = locale.setlocale(locale.LC_ALL)
try:
locale.setlocale(locale.LC_ALL, loc)
return True
except locale.Error:
return False
finally:
locale.setlocale(locale.LC_ALL, default_locale)
def add_common_locales(features, lit_config, is_windows=False):
# A list of locales needed by the test-suite.
# The list uses the canonical name for the locale used in the test-suite
    # TODO: On Linux, ISO8859 *may* need to be hyphenated.
locales = [
('en_US.UTF-8', 'English_United States.1252'),
('fr_FR.UTF-8', 'French_France.1252'),
('ru_RU.UTF-8', 'Russian_Russia.1251'),
('zh_CN.UTF-8', 'Chinese_China.936'),
('fr_CA.ISO8859-1', 'French_Canada.1252'),
('cs_CZ.ISO8859-2', 'Czech_Czech Republic.1250')
]
for loc_id, windows_loc_name in locales:
loc_name = windows_loc_name if is_windows else loc_id
if test_locale(loc_name):
features.add('locale.{0}'.format(loc_id))
else:
lit_config.warning('The locale {0} is not supported by '
'your platform. Some tests will be '
'unsupported.'.format(loc_name))
class DarwinLocalTI(DefaultTargetInfo):
def __init__(self, full_config):
super(DarwinLocalTI, self).__init__(full_config)
def is_host_macosx(self):
name = subprocess.check_output(['sw_vers', '-productName']).strip()
return name == "Mac OS X"
def get_macosx_version(self):
assert self.is_host_macosx()
version = subprocess.check_output(
['sw_vers', '-productVersion']).strip()
version = re.sub(r'([0-9]+\.[0-9]+)(\..*)?', r'\1', version)
return version
def get_sdk_version(self, name):
assert self.is_host_macosx()
cmd = ['xcrun', '--sdk', name, '--show-sdk-path']
        out = None
        try:
            out = subprocess.check_output(cmd).strip()
        except OSError:
            pass
if not out:
self.full_config.lit_config.fatal(
"cannot infer sdk version with: %r" % cmd)
return re.sub(r'.*/[^0-9]+([0-9.]+)\.sdk', r'\1', out)
def get_platform(self):
platform = self.full_config.get_lit_conf('platform')
if platform:
platform = re.sub(r'([^0-9]+)([0-9\.]*)', r'\1-\2', platform)
name, version = tuple(platform.split('-', 1))
else:
name = 'macosx'
version = None
if version:
return (False, name, version)
# Infer the version, either from the SDK or the system itself. For
# macosx, ignore the SDK version; what matters is what's at
# /usr/lib/libc++.dylib.
if name == 'macosx':
version = self.get_macosx_version()
else:
version = self.get_sdk_version(name)
return (True, name, version)
def add_locale_features(self, features):
add_common_locales(features, self.full_config.lit_config)
def add_cxx_compile_flags(self, flags):
if self.full_config.use_deployment:
_, name, _ = self.full_config.config.deployment
cmd = ['xcrun', '--sdk', name, '--show-sdk-path']
else:
cmd = ['xcrun', '--show-sdk-path']
try:
out = subprocess.check_output(cmd).strip()
res = 0
except OSError:
res = -1
if res == 0 and out:
sdk_path = out
self.full_config.lit_config.note('using SDKROOT: %r' % sdk_path)
flags += ["-isysroot", sdk_path]
def add_cxx_link_flags(self, flags):
flags += ['-lSystem']
def configure_env(self, env):
library_paths = []
# Configure the library path for libc++
if self.full_config.use_system_cxx_lib:
if (os.path.isdir(str(self.full_config.use_system_cxx_lib))):
library_paths += [self.full_config.use_system_cxx_lib]
pass
elif self.full_config.cxx_runtime_root:
library_paths += [self.full_config.cxx_runtime_root]
# Configure the abi library path
if self.full_config.abi_library_root:
library_paths += [self.full_config.abi_library_root]
if library_paths:
env['DYLD_LIBRARY_PATH'] = ':'.join(library_paths)
def allow_cxxabi_link(self):
# FIXME: PR27405
# libc++ *should* export all of the symbols found in libc++abi on OS X.
# For this reason LibcxxConfiguration will not link libc++abi in OS X.
# However __cxa_throw_bad_new_array_length doesn't get exported into
# libc++ yet so we still need to explicitly link libc++abi when testing
# libc++abi
# See PR22654.
if(self.full_config.get_lit_conf('name', '') == 'libc++abi'):
return True
# Don't link libc++abi explicitly on OS X because the symbols
# should be available in libc++ directly.
return False
class FreeBSDLocalTI(DefaultTargetInfo):
def __init__(self, full_config):
super(FreeBSDLocalTI, self).__init__(full_config)
def add_locale_features(self, features):
add_common_locales(features, self.full_config.lit_config)
def add_cxx_link_flags(self, flags):
flags += ['-lc', '-lm', '-lpthread', '-lgcc_s', '-lcxxrt']
class LinuxLocalTI(DefaultTargetInfo):
def __init__(self, full_config):
super(LinuxLocalTI, self).__init__(full_config)
def platform(self):
return 'linux'
def platform_name(self):
name, _, _ = platform.linux_distribution()
# Some distros have spaces, e.g. 'SUSE Linux Enterprise Server'
# lit features can't have spaces
name = name.lower().strip().replace(' ', '-')
return name # Permitted to be None
def platform_ver(self):
_, ver, _ = platform.linux_distribution()
ver = ver.lower().strip().replace(' ', '-')
return ver # Permitted to be None.
def add_locale_features(self, features):
add_common_locales(features, self.full_config.lit_config)
# Some linux distributions have different locale data than others.
# Insert the distributions name and name-version into the available
# features to allow tests to XFAIL on them.
name = self.platform_name()
ver = self.platform_ver()
if name:
features.add(name)
if name and ver:
features.add('%s-%s' % (name, ver))
def add_cxx_compile_flags(self, flags):
flags += ['-D__STDC_FORMAT_MACROS',
'-D__STDC_LIMIT_MACROS',
'-D__STDC_CONSTANT_MACROS']
def add_cxx_link_flags(self, flags):
enable_threads = ('libcpp-has-no-threads' not in
self.full_config.config.available_features)
llvm_unwinder = self.full_config.get_lit_bool('llvm_unwinder', False)
shared_libcxx = self.full_config.get_lit_bool('enable_shared', True)
flags += ['-lm']
if not llvm_unwinder:
flags += ['-lgcc_s', '-lgcc']
if enable_threads:
flags += ['-lpthread']
if not shared_libcxx:
flags += ['-lrt']
flags += ['-lc']
if llvm_unwinder:
flags += ['-lunwind', '-ldl']
else:
flags += ['-lgcc_s']
flags += ['-lgcc']
use_libatomic = self.full_config.get_lit_bool('use_libatomic', False)
if use_libatomic:
flags += ['-latomic']
san = self.full_config.get_lit_conf('use_sanitizer', '').strip()
if san:
# The libraries and their order are taken from the
# linkSanitizerRuntimeDeps function in
# clang/lib/Driver/Tools.cpp
flags += ['-lpthread', '-lrt', '-lm', '-ldl']
class WindowsLocalTI(DefaultTargetInfo):
def __init__(self, full_config):
super(WindowsLocalTI, self).__init__(full_config)
def add_locale_features(self, features):
add_common_locales(features, self.full_config.lit_config,
is_windows=True)
def use_lit_shell_default(self):
# Default to the internal shell on Windows, as bash on Windows is
# usually very slow.
return True
def make_target_info(full_config):
default = "libcxx.test.target_info.LocalTI"
info_str = full_config.get_lit_conf('target_info', default)
if info_str != default:
mod_path, _, info = info_str.rpartition('.')
mod = importlib.import_module(mod_path)
target_info = getattr(mod, info)(full_config)
full_config.lit_config.note("inferred target_info as: %r" % info_str)
return target_info
target_system = platform.system()
if target_system == 'Darwin': return DarwinLocalTI(full_config)
if target_system == 'FreeBSD': return FreeBSDLocalTI(full_config)
if target_system == 'Linux': return LinuxLocalTI(full_config)
if target_system == 'Windows': return WindowsLocalTI(full_config)
return DefaultTargetInfo(full_config)
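# Illustrative usage (added note, not part of the original module): the lookup
# above means a test run can supply a custom TargetInfo class through the
# 'target_info' lit configuration value, e.g. (exact invocation is an assumption):
#
#   lit --param target_info=my_pkg.my_target_info.MyTargetInfo ...
#
# in which case the dotted module path is imported and the named class is
# instantiated with the full configuration object.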
| gpl-3.0 | 5,279,652,156,364,281,000 | 36.025271 | 79 | 0.580246 | false |
repotvsupertuga/tvsupertuga.repository | script.module.streamlink.base/resources/lib/streamlink/utils/isodate/isostrf.py | 1 | 9278 | ##############################################################################
# Copyright 2009, Gerhard Weis
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the authors nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT
##############################################################################
"""
This module provides an alternative strftime method.
The strftime method in this module allows only a subset of Python's strftime
format codes, plus a few additional ones. It supports the full range of date
values possible with standard Python date/time objects. Furthermore, several
pre-defined format strings are provided in this module to make it easy to
produce ISO 8601 conforming strings.
"""
from __future__ import absolute_import
import re
from datetime import date, timedelta
from .duration import Duration
from .isotzinfo import tz_isoformat
# Date specific format strings
DATE_BAS_COMPLETE = '%Y%m%d'
DATE_EXT_COMPLETE = '%Y-%m-%d'
DATE_BAS_WEEK_COMPLETE = '%YW%W%w'
DATE_EXT_WEEK_COMPLETE = '%Y-W%W-%w'
DATE_BAS_ORD_COMPLETE = '%Y%j'
DATE_EXT_ORD_COMPLETE = '%Y-%j'
DATE_BAS_WEEK = '%YW%W'
DATE_EXT_WEEK = '%Y-W%W'
DATE_BAS_MONTH = '%Y%m'
DATE_EXT_MONTH = '%Y-%m'
DATE_YEAR = '%Y'
DATE_CENTURY = '%C'
# Time specific format strings
TIME_BAS_COMPLETE = '%H%M%S'
TIME_EXT_COMPLETE = '%H:%M:%S'
TIME_BAS_MINUTE = '%H%M'
TIME_EXT_MINUTE = '%H:%M'
TIME_HOUR = '%H'
# Time zone formats
TZ_BAS = '%z'
TZ_EXT = '%Z'
TZ_HOUR = '%h'
# DateTime formats
DT_EXT_COMPLETE = DATE_EXT_COMPLETE + 'T' + TIME_EXT_COMPLETE + TZ_EXT
DT_BAS_COMPLETE = DATE_BAS_COMPLETE + 'T' + TIME_BAS_COMPLETE + TZ_BAS
DT_EXT_ORD_COMPLETE = DATE_EXT_ORD_COMPLETE + 'T' + TIME_EXT_COMPLETE + TZ_EXT
DT_BAS_ORD_COMPLETE = DATE_BAS_ORD_COMPLETE + 'T' + TIME_BAS_COMPLETE + TZ_BAS
DT_EXT_WEEK_COMPLETE = (DATE_EXT_WEEK_COMPLETE + 'T' +
TIME_EXT_COMPLETE + TZ_EXT)
DT_BAS_WEEK_COMPLETE = (DATE_BAS_WEEK_COMPLETE + 'T' +
TIME_BAS_COMPLETE + TZ_BAS)
# Duration formats
D_DEFAULT = 'P%P'
D_WEEK = 'P%p'
D_ALT_EXT = 'P' + DATE_EXT_COMPLETE + 'T' + TIME_EXT_COMPLETE
D_ALT_BAS = 'P' + DATE_BAS_COMPLETE + 'T' + TIME_BAS_COMPLETE
D_ALT_EXT_ORD = 'P' + DATE_EXT_ORD_COMPLETE + 'T' + TIME_EXT_COMPLETE
D_ALT_BAS_ORD = 'P' + DATE_BAS_ORD_COMPLETE + 'T' + TIME_BAS_COMPLETE
STRF_DT_MAP = {'%d': lambda tdt, yds: '%02d' % tdt.day,
'%f': lambda tdt, yds: '%06d' % tdt.microsecond,
'%H': lambda tdt, yds: '%02d' % tdt.hour,
'%j': lambda tdt, yds: '%03d' % (tdt.toordinal() -
date(tdt.year,
1, 1).toordinal() +
1),
'%m': lambda tdt, yds: '%02d' % tdt.month,
'%M': lambda tdt, yds: '%02d' % tdt.minute,
'%S': lambda tdt, yds: '%02d' % tdt.second,
'%w': lambda tdt, yds: '%1d' % tdt.isoweekday(),
'%W': lambda tdt, yds: '%02d' % tdt.isocalendar()[1],
'%Y': lambda tdt, yds: (((yds != 4) and '+') or '') +
(('%%0%dd' % yds) % tdt.year),
'%C': lambda tdt, yds: (((yds != 4) and '+') or '') +
(('%%0%dd' % (yds - 2)) %
(tdt.year / 100)),
'%h': lambda tdt, yds: tz_isoformat(tdt, '%h'),
'%Z': lambda tdt, yds: tz_isoformat(tdt, '%Z'),
'%z': lambda tdt, yds: tz_isoformat(tdt, '%z'),
'%%': lambda tdt, yds: '%'}
STRF_D_MAP = {'%d': lambda tdt, yds: '%02d' % tdt.days,
'%f': lambda tdt, yds: '%06d' % tdt.microseconds,
'%H': lambda tdt, yds: '%02d' % (tdt.seconds / 60 / 60),
'%m': lambda tdt, yds: '%02d' % tdt.months,
'%M': lambda tdt, yds: '%02d' % ((tdt.seconds / 60) % 60),
'%S': lambda tdt, yds: '%02d' % (tdt.seconds % 60),
'%W': lambda tdt, yds: '%02d' % (abs(tdt.days / 7)),
'%Y': lambda tdt, yds: (((yds != 4) and '+') or '') +
(('%%0%dd' % yds) % tdt.years),
'%C': lambda tdt, yds: (((yds != 4) and '+') or '') +
(('%%0%dd' % (yds - 2)) %
(tdt.years / 100)),
'%%': lambda tdt, yds: '%'}
def _strfduration(tdt, format, yeardigits=4):
'''
this is the work method for timedelta and Duration instances.
see strftime for more details.
'''
def repl(match):
'''
lookup format command and return corresponding replacement.
'''
if match.group(0) in STRF_D_MAP:
return STRF_D_MAP[match.group(0)](tdt, yeardigits)
elif match.group(0) == '%P':
ret = []
if isinstance(tdt, Duration):
if tdt.years:
ret.append('%sY' % abs(tdt.years))
if tdt.months:
ret.append('%sM' % abs(tdt.months))
usecs = abs((tdt.days * 24 * 60 * 60 + tdt.seconds) * 1000000 +
tdt.microseconds)
seconds, usecs = divmod(usecs, 1000000)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
if days:
ret.append('%sD' % days)
if hours or minutes or seconds or usecs:
ret.append('T')
if hours:
ret.append('%sH' % hours)
if minutes:
ret.append('%sM' % minutes)
if seconds or usecs:
if usecs:
ret.append(("%d.%06d" % (seconds, usecs)).rstrip('0'))
else:
ret.append("%d" % seconds)
ret.append('S')
# at least one component has to be there.
return ret and ''.join(ret) or '0D'
elif match.group(0) == '%p':
return str(abs(tdt.days // 7)) + 'W'
return match.group(0)
return re.sub('%d|%f|%H|%m|%M|%S|%W|%Y|%C|%%|%P|%p', repl,
format)
def _strfdt(tdt, format, yeardigits=4):
'''
this is the work method for time and date instances.
see strftime for more details.
'''
def repl(match):
'''
lookup format command and return corresponding replacement.
'''
if match.group(0) in STRF_DT_MAP:
return STRF_DT_MAP[match.group(0)](tdt, yeardigits)
return match.group(0)
return re.sub('%d|%f|%H|%j|%m|%M|%S|%w|%W|%Y|%C|%z|%Z|%h|%%', repl,
format)
def strftime(tdt, format, yeardigits=4):
'''Directive Meaning Notes
%d Day of the month as a decimal number [01,31].
%f Microsecond as a decimal number [0,999999], zero-padded
on the left (1)
%H Hour (24-hour clock) as a decimal number [00,23].
%j Day of the year as a decimal number [001,366].
%m Month as a decimal number [01,12].
%M Minute as a decimal number [00,59].
%S Second as a decimal number [00,61]. (3)
%w Weekday as a decimal number [0(Monday),6].
%W Week number of the year (Monday as the first day of the week)
as a decimal number [00,53]. All days in a new year preceding the
first Monday are considered to be in week 0. (4)
%Y Year with century as a decimal number. [0000,9999]
%C Century as a decimal number. [00,99]
%z UTC offset in the form +HHMM or -HHMM (empty string if the
object is naive). (5)
%Z Time zone name (empty string if the object is naive).
%P ISO8601 duration format.
%p ISO8601 duration format in weeks.
%% A literal '%' character.
'''
if isinstance(tdt, (timedelta, Duration)):
return _strfduration(tdt, format, yeardigits)
return _strfdt(tdt, format, yeardigits)
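# Illustrative examples (added note, not part of the original module), assuming a
# naive datetime and a plain timedelta:
#
#   >>> from datetime import datetime, timedelta
#   >>> strftime(datetime(2012, 12, 25, 13, 30, 0), DT_EXT_COMPLETE)
#   '2012-12-25T13:30:00'
#   >>> strftime(timedelta(hours=26, minutes=30), D_DEFAULT)
#   'P1DT2H30M'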
| gpl-2.0 | 9,058,615,639,992,064,000 | 41.953704 | 78 | 0.546023 | false |
sl2017/campos | campos_sms/models/sms_confirmed_number.py | 1 | 4752 | # -*- coding: utf-8 -*-
from openerp import models, fields, api, exceptions, _
import string, random
SMS_CONFIRM_MAX_TRIES = 3
SMS_CONFIRM_CODE_LENGTH = 6
class SmsConfirmedNumber(models.Model):
_name = 'sms.confirmed_number'
user_id = fields.Many2one('res.users', string='User', readonly=True)
number = fields.Char('Number', size=20)
state = fields.Selection([('draft', 'Draft'), ('waiting', 'Waiting'), ('confirmed', 'Confirmed')], default='draft', readonly=True)
confirm_code = fields.Char('Confirm code', size=SMS_CONFIRM_CODE_LENGTH, groups='base.erp_manager')
confirm_tries = fields.Integer('Confirm Tries left', readonly=True)
_sql_constraints = [('sms_confirm_number_unique', 'UNIQUE (user_id, number)', 'A confirmation for this user/number has already been created')]
@api.model
def default_get(self, fields):
result = super(SmsConfirmedNumber, self).default_get(fields)
result['user_id'] = self.env.uid
result['number'] = self.env.user.partner_id.mobile_clean
return result
@api.multi
def name_get(self):
result = []
for r in self:
result.append((r.id, '%s (%s)' % (r.number, _('Confirmed') if r.state == 'confirmed' else _('Not confirmed'))))
return result
def _generate_code(self):
'''
        Return a random string of SMS_CONFIRM_CODE_LENGTH uppercase letters
'''
return ''.join(random.choice(string.ascii_uppercase) for _ in range(SMS_CONFIRM_CODE_LENGTH))
@api.model
def _clean(self, number):
# Clean number
allowed_chars = '+0123456789'
delete_table = string.maketrans(allowed_chars, ' ' * len(allowed_chars))
# Delete illegal chars
mobile_clean = str(number).translate(None, delete_table)
# Make sure number starts with country code
if len(mobile_clean) > 0 and mobile_clean[0] != '+':
# @todo: Use country prefix from partner's country setting
mobile_clean = '+45' + mobile_clean
# Number can only have '+' as the first char - and length must be less than 18 chars
        # (two concatenated numbers would be at least 2 * 8 + 3 = 19 chars, 3 of them for '+45')
if '+' in mobile_clean[1:] or len(mobile_clean) > 18:
mobile_clean = False
return mobile_clean
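    # Illustrative behaviour of _clean (added comment, not part of the original
    # model), assuming the hard-coded Danish prefix above:
    #   self._clean('12 34 56 78')   -> '+4512345678'
    #   self._clean('+45 1234-5678') -> '+4512345678'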
@api.model
def find_or_create(self, number):
'''
Find a number - or create an new confirm record if it does not exist
'''
number = self._clean(number)
nr_id = self.search([('number', '=', number), ('user_id', '=', self.env.uid)])
if nr_id:
return nr_id[0]
return self.create({'user_id': self.env.uid,
'number': number})
def send_confirm_code(self, data):
if self.state == 'confirmed':
raise exceptions.Warning(_('You have already confirmed the number %s') % self.number)
self.number = self._clean(self.number)
# Change state and reset tries counter
self.sudo().state = 'waiting'
code = self._generate_code()
self.sudo().confirm_code = code
self.sudo().confirm_tries = SMS_CONFIRM_MAX_TRIES
# Send the code by SMS
data.mobile_to = self.number
old_text = data.text
data.text = _('Enter this code to confirm your mobile number: %s') % code
log_text = _('Enter this code to confirm your mobile number: %s') % ('*' * SMS_CONFIRM_CODE_LENGTH)
client = self.env['sms.smsclient']
client.with_context(sms_send_direct=True, sms_log_overwrite=log_text)._send_message(data)
data.text = old_text
def check_code(self, code):
if not code:
raise exceptions.Warning(_('Please enter the verification code that was send to your phone by SMS.'))
# Check if user has tried too many times...
        self.confirm_tries = self.confirm_tries - 1
if self.confirm_tries < 0:
raise exceptions.Warning(_('The confirmation code has expired, because you have tried more than %d times. You may request a new code and try again.') % SMS_CONFIRM_MAX_TRIES)
# Commit the count before we continue!
self.env.cr.commit()
# Cannot read directly with "code = self.sudo().confirm_code" !
confirm_code = self.sudo().read(fields=['confirm_code'])[0]['confirm_code']
if str(code).upper() != str(confirm_code):
# @TODO: "confirm_tries" is not updated in the user interface, when we throw an exception...
raise exceptions.Warning(_('The code was not entered correct. Please note that the code contains only letters (no digits).'))
self.state = 'confirmed'
self.env.user.sms_last_sender = self
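    # Illustrative flow (added comment, not part of the original model), assuming
    # `data` is the sms.smsclient record normally passed in by the sending wizard:
    #   rec = self.env['sms.confirmed_number'].find_or_create('12345678')
    #   rec.send_confirm_code(data)   # SMSes a random letter code, state -> 'waiting'
    #   rec.check_code('ABCDEF')      # state -> 'confirmed' when the code matches,
    #                                 # with at most SMS_CONFIRM_MAX_TRIES attempts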
| agpl-3.0 | -5,940,723,224,474,992,000 | 43 | 186 | 0.615951 | false |
ken-muturi/pombola | pombola/interests_register/admin.py | 4 | 1060 | from django.contrib import admin
from . import models
from pombola.slug_helpers.admin import StricterSlugFieldMixin
class CategoryAdmin(StricterSlugFieldMixin, admin.ModelAdmin):
prepopulated_fields = {"slug": ["name"]}
list_display = ['slug', 'name', 'sort_order']
search_fields = ['name']
class ReleaseAdmin(StricterSlugFieldMixin, admin.ModelAdmin):
prepopulated_fields = {"slug": ["name"]}
list_display = ['slug', 'name', 'date']
search_fields = ['name']
date_hierarchy = 'date'
class LineItemInlineAdmin(admin.TabularInline):
model = models.EntryLineItem
# extra = 2
fields = [ 'key', 'value' ]
class EntryAdmin(admin.ModelAdmin):
inlines = [LineItemInlineAdmin]
list_display = ['id', 'person', 'category', 'release', 'sort_order']
list_filter = [ 'release', 'category' ]
search_fields = ['person__legal_name']
# Add these to the admin
admin.site.register( models.Category, CategoryAdmin)
admin.site.register( models.Release, ReleaseAdmin)
admin.site.register( models.Entry, EntryAdmin)
| agpl-3.0 | -7,271,131,099,272,014,000 | 27.648649 | 72 | 0.69717 | false |
joehakimrahme/thawra | thawra/action.py | 1 | 1614 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
class NotEnoughMana(Exception):
pass
def attack(actor, target, cost, weather):
for enemy in target:
damage = (actor.stats['ATK'] - enemy.stats['DEF']) * 3
enemy.hp -= damage
def magic(actor, target, cost, weather):
if actor.mp < cost:
raise NotEnoughMana
else:
actor.mp -= cost
for enemy in target:
damage = (actor.stats['MAG'] - enemy.stats['MDE']) * 3
enemy.hp -= damage
class Action(object):
effects = {
'ATK': attack,
'MAG': magic,
}
def __init__(self, actor, effect, target, cost):
self.actor = actor
# TODO(rahmu): Needs further validation
self._effect = effect
self.target = target
self.cost = cost
@property
def effect(self):
return functools.partial(self.effects[self._effect],
self.actor,
self.target,
self.cost)
def __call__(self, weather):
self.effect(weather)
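# Illustrative sketch (added comment, not part of the original module): wiring
# an Action to hypothetical combatants. The `Fighter` stub is an assumption;
# real actors only need `stats`, `hp` and `mp` attributes as used above.
#
#   class Fighter(object):
#       def __init__(self, stats, hp, mp):
#           self.stats, self.hp, self.mp = stats, hp, mp
#
#   hero = Fighter({'ATK': 10, 'MAG': 8}, hp=100, mp=20)
#   slime = Fighter({'DEF': 2, 'MDE': 1}, hp=30, mp=0)
#   Action(hero, 'ATK', [slime], cost=0)(weather=None)   # slime.hp: 30 -> 6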
| apache-2.0 | -2,315,298,181,632,022,500 | 25.459016 | 75 | 0.607807 | false |
fibbo/DIRAC | StorageManagementSystem/scripts/dirac-stager-monitor-file.py | 11 | 5430 | #! /usr/bin/env python
########################################################################
# $HeadURL$
# File : dirac-stager-monitor-file
# Author : Daniela Remenska
########################################################################
"""
-gives monitoring information regarding a staging file uniquely identified with (LFN,SE):
- - status
- - last update
- - jobs requesting this file to be staged
- - SRM requestID
- - pin expiry time
- - pin length
"""
__RCSID__ = "$Id$"
from DIRAC.Core.Base import Script
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
'Usage:',
' %s LFN SE ...' % Script.scriptName,
'Arguments:',
' LFN: LFN of the staging file \n',
' SE: Storage Element for the staging file \n'
] ) )
Script.parseCommandLine( ignoreErrors = True )
args = Script.getPositionalArgs()
if len( args ) < 2:
Script.showHelp()
from DIRAC import exit as DIRACExit
lfn = args[0]
se = args[1]
from DIRAC.StorageManagementSystem.Client.StorageManagerClient import StorageManagerClient
client = StorageManagerClient()
res = client.getCacheReplicas( {'LFN':lfn,'SE':se} )
if not res['OK']:
print res['Message']
cacheReplicaInfo = res['Value']
if cacheReplicaInfo:
replicaID = cacheReplicaInfo.keys()[0]
outStr = "\n--------------------"
outStr = "%s\n%s: %s" % ( outStr, 'LFN'.ljust( 8 ), cacheReplicaInfo[replicaID]['LFN'].ljust( 100 ) )
outStr = "%s\n%s: %s" % ( outStr, 'SE'.ljust( 8 ), cacheReplicaInfo[replicaID]['SE'].ljust( 100 ) )
outStr = "%s\n%s: %s" % ( outStr, 'PFN'.ljust( 8 ), cacheReplicaInfo[replicaID]['PFN'].ljust( 100 ) )
outStr = "%s\n%s: %s" % ( outStr, 'Status'.ljust( 8 ), cacheReplicaInfo[replicaID]['Status'].ljust( 100 ) )
outStr = "%s\n%s: %s" % ( outStr, 'LastUpdate'.ljust( 8 ), str(cacheReplicaInfo[replicaID]['LastUpdate']).ljust( 100 ) )
outStr = "%s\n%s: %s" % ( outStr, 'Reason'.ljust( 8 ), str( cacheReplicaInfo[replicaID]['Reason']).ljust( 100 ) )
resTasks = client.getTasks({'ReplicaID':replicaID})
if resTasks['OK']:
#print resTasks['Message']
outStr = '%s\nJob IDs requesting this file to be staged:'.ljust( 8) % outStr
tasks = resTasks['Value']
for tid in tasks.keys():
outStr = '%s %s ' % (outStr, tasks[tid]['SourceTaskID'])
resStageRequests = client.getStageRequests({'ReplicaID':replicaID})
if not resStageRequests['OK']:
print resStageRequests['Message']
if resStageRequests['Records']:
stageRequests = resStageRequests['Value']
outStr = "%s\n------SRM staging request info--------------" % outStr
for srid in stageRequests.keys():
outStr = "%s\n%s: %s" % ( outStr, 'SRM RequestID'.ljust( 8 ), stageRequests[srid]['RequestID'].ljust( 100 ) )
outStr = "%s\n%s: %s" % ( outStr, 'SRM StageStatus'.ljust( 8 ), stageRequests[srid]['StageStatus'].ljust( 100 ) )
outStr = "%s\n%s: %s" % ( outStr, 'SRM StageRequestSubmitTime'.ljust( 8 ), str(stageRequests[srid]['StageRequestSubmitTime']).ljust( 100 ) )
outStr = "%s\n%s: %s" % ( outStr, 'SRM StageRequestCompletedTime'.ljust( 8 ), str(stageRequests[srid]['StageRequestCompletedTime']).ljust( 100 ) )
outStr = "%s\n%s: %s" % ( outStr, 'SRM PinExpiryTime'.ljust( 8 ), str(stageRequests[srid]['PinExpiryTime']).ljust( 100 ) )
outStr = "%s\n%s: %s sec" % ( outStr, 'SRM PinLength'.ljust( 8 ), str(stageRequests[srid]['PinLength']).ljust( 100 ) )
else:
outStr = '%s\nThere are no staging requests submitted to the site yet.'.ljust( 8) % outStr
else:
outStr = "\nThere is no such file requested for staging. Check for typo's!"
#Script.showHelp()
print outStr
DIRACExit( 0 )
''' Example:
dirac-stager-monitor-file.py /lhcb/LHCb/Collision12/FULL.DST/00020846/0005/00020846_00056603_1.full.dst GRIDKA-RDST
--------------------
LFN : /lhcb/LHCb/Collision12/FULL.DST/00020846/0005/00020846_00056603_1.full.dst
SE : GRIDKA-RDST
PFN : srm://gridka-dCache.fzk.de/pnfs/gridka.de/lhcb/LHCb/Collision12/FULL.DST/00020846/0005/00020846_00056603_1.full.dst
Status : StageSubmitted
LastUpdate: 2013-06-11 18:13:40
Reason : None
Jobs requesting this file to be staged: 48518896
------SRM staging request info--------------
SRM RequestID: -1768636375
SRM StageStatus: StageSubmitted
SRM StageRequestSubmitTime: 2013-06-11 18:13:38
SRM StageRequestCompletedTime: None
SRM PinExpiryTime: None
SRM PinLength: 43200
'''
| gpl-3.0 | -866,084,695,327,064,400 | 52.235294 | 152 | 0.51547 | false |
marco-c/pluotsorbet | tests/runtests.py | 4 | 4918 | #!/usr/bin/env python
import os
import select
import socket
import subprocess
import sys
import time
import platform
(system, node, release, version, machine, processor) = platform.uname()
httpsServer = ['node', 'httpsServer.js']
sslEchoServer = ['node', 'sslEchoServer.js']
if system == "Darwin":
httpsServer = './httpsServer.py'
sslEchoServer = './sslEchoServer.py'
# The test automation scripts to run via casperjs/slimerjs.
automation_scripts = [
['tests', 'automation.js'],
['tests', 'fs', 'automation.js'],
]
# The exit code to return. We set this to 1 if an automation script outputs
# "FAIL" (case-sensitive) at any point in the test run. Ideally, we'd determine
# the success/failure based on the exit code of the test run, but casperjs
# with slimerjs always returns 0, so instead we must look for the string "FAIL",
# which only occurs on test failure.
#
# See https://github.com/laurentj/slimerjs/issues/50 for more information.
#
exit_code = 0
# Open the server processes that handle HTTP/S requests and socket connections.
# We pipe their standard output/error back to the parent process to print it
# to the parent's standard output interspersed with the output produced by
# the automation scripts (which we have to pipe, instead of letting them print
# their output directly, so we can look for "FAIL", as described above).
#
# We don't actually pipe anything into standard input, but we have to specify
# that a new pipe should be created for it, otherwise for some reason it causes
# print/sys.stdout.write in the parent process to throw the exception "IOError:
# [Errno 35] Resource temporarily unavailable" on large amounts of output.
#
server_processes = [
subprocess.Popen('tests/httpServer.py', stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
bufsize=1),
subprocess.Popen('tests/echoServer.py', stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
bufsize=1),
# The SSL-based servers need to have their current working directory set
# to the tests/ subdirectory, since they load cert/key files relative to it.
subprocess.Popen(httpsServer, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
bufsize=1, cwd='tests'),
subprocess.Popen(sslEchoServer, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
bufsize=1, cwd='tests'),
]
# The output streams for the servers.
server_output_streams = [p.stdout for p in server_processes]
def wait_server(port):
end = time.time() + 30 # Timeout of 30 seconds
while True:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('', port))
s.close()
return
except:
if end < time.time():
raise Exception("Can't connect to " + str(port))
else:
time.sleep(1)
def run_test(script_path):
global exit_code
args = ['casperjs', '--engine=slimerjs']
if 'VERBOSE' in os.environ and os.environ['VERBOSE'] != '0':
args.append('--log-level=debug')
args.extend(['test', script_path])
script_process = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
bufsize=1)
output_streams = list(server_output_streams)
output_streams.append(script_process.stdout)
while True:
readable_streams, _, _ = select.select(output_streams, [], [])
for stream in readable_streams:
line = stream.readline()
if stream is script_process.stdout and "FAIL" in line:
exit_code = 1
sys.stdout.write(line)
if script_process.poll() is not None:
# Print any famous last words the process wrote to its output stream
# between the last time we polled it and its termination.
sys.stdout.write(script_process.stdout.read())
break
# Wait for the servers to become ready for connections.
wait_server(8000)
wait_server(50003)
wait_server(4443)
wait_server(54443)
# Run each test automation script in turn.
for scriptParts in automation_scripts:
run_test(os.path.join(os.getcwd(), *scriptParts))
# Terminate all the server processes.
for process in server_processes:
process.terminate()
# Print any famous last words the processes wrote to their output streams
# between the last time we polled them and their termination.
for stream in server_output_streams:
sys.stdout.write(stream.read())
p = subprocess.Popen(['js', 'jsshell.js', 'Basic'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
shell_success = False
for line in iter(p.stdout.readline, b''):
if "The end" in line:
shell_success = True
sys.stdout.write(line)
if exit_code == 0 and not shell_success:
exit_code = 1
sys.exit(exit_code)
| gpl-2.0 | 2,934,052,381,832,252,400 | 34.381295 | 116 | 0.687271 | false |
miltonruelas/cursotecnico | branch/account_move_template/account_document_template.py | 3 | 4267 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2011 Agile Business Group sagl (<http://www.agilebg.com>)
# Copyright (C) 2011 Domsense srl (<http://www.domsense.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, orm
from openerp.tools.translate import _
import re
class account_document_template(orm.Model):
_computed_lines = {}
_current_template_id = 0
_cr = None
_uid = None
_name = 'account.document.template'
_columns = {
'name': fields.char('Name', size=64, required=True),
}
def _input_lines(self, cr, uid, template):
count = 0
for line in template.template_line_ids:
if line.type == 'input':
count += 1
return count
def _get_template_line(self, cr, uid, template_id, line_number):
for line in self.browse(cr, uid, template_id).template_line_ids:
if line.sequence == line_number:
return line
return False
def _generate_empty_lines(self, cr, uid, template_id):
lines = {}
for template_line in self.browse(cr, uid, template_id).template_line_ids:
lines[template_line.sequence] = None
return lines
def lines(self, line_number):
if self._computed_lines[line_number] is not None:
return self._computed_lines[line_number]
line = self._get_template_line(self._cr, self._uid, self._current_template_id, line_number)
if re.match('L\( *'+str(line_number)+' *\)',line.python_code):
raise orm.except_orm(_('Error'),
_('Line %s can\'t refer to itself') % str(line_number))
try:
self._computed_lines[line_number] = eval(line.python_code.replace('L', 'self.lines'))
except KeyError:
raise orm.except_orm(_('Error'),
_('Code "%s" refers to non existing line') % line.python_code)
return self._computed_lines[line_number]
def compute_lines(self, cr, uid, template_id, input_lines):
# input_lines: dictionary in the form {line_number: line_amount}
# returns all the lines (included input lines) in the form {line_number: line_amount}
template = self.browse(cr, uid, template_id)
if len(input_lines) != self._input_lines(cr, uid, template):
raise orm.except_orm(_('Error'),
_('Inconsistency between input lines and filled lines for template %s') % template.name)
self._current_template_id = template.id
self._cr = cr
self._uid = uid
self._computed_lines = self._generate_empty_lines(cr, uid, template_id)
self._computed_lines.update(input_lines)
for line_number in self._computed_lines:
self.lines(line_number)
return self._computed_lines
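    # Illustrative example (added comment, not part of the original model): for a
    # template whose third line has python_code 'L(1) + L(2)' and two input lines,
    # compute_lines(cr, uid, template_id, {1: 100.0, 2: 50.0}) would return
    # {1: 100.0, 2: 50.0, 3: 150.0}.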
def check_zero_lines(self, cr, uid, wizard):
if not wizard.line_ids:
return True
for template_line in wizard.line_ids:
if template_line.amount:
return True
return False
class account_document_template_line(orm.Model):
_name = 'account.document.template.line'
_columns = {
'name': fields.char('Name', size=64, required=True),
'sequence': fields.integer('Sequence', required=True),
'type': fields.selection([('computed', 'Computed'),('input', 'User input')], 'Type', required=True),
'python_code':fields.text('Python Code'),
}
| agpl-3.0 | -9,197,199,709,438,660,000 | 40.028846 | 108 | 0.598781 | false |
araichev/make_gtfs | make_gtfs/cli.py | 1 | 1577 | """
The command-line-interface module.
"""
import click
import gtfs_kit as gk
from . import protofeed as pf
from . import constants as cs
from . import main as m
@click.command(short_help="Create a GTFS feed from simpler files")
@click.argument("source_path", type=click.Path())
@click.argument("target_path", type=click.Path())
@click.option(
"-b",
"--buffer",
default=cs.BUFFER,
type=float,
show_default=True,
help="Meters to buffer trip paths to find stops",
)
@click.option(
"-n",
"--ndigits",
default=6,
type=int,
show_default=True,
help="Number of decimal places to round float values in the output " "GTFS feed",
)
def make_gtfs(source_path, target_path, buffer, ndigits):
"""
Create a GTFS feed from the files in the directory SOURCE_PATH.
See the project README for a description of the required source
files.
Save the feed to the file or directory TARGET_PATH.
If the target path ends in '.zip', then write the feed as a zip
archive.
Otherwise assume the path is a directory, and write the feed as a
collection of CSV files to that directory, creating the directory
if it does not exist.
If a stops file is present, then search within ``buffer`` meters
on the traffic side of trip paths for stops.
Round all decimals to ndigits decimal places.
All distances in the resulting GTFS feed will be in kilometers.
"""
pfeed = pf.read_protofeed(source_path)
feed = m.build_feed(pfeed, buffer=buffer)
gk.write_gtfs(feed, target_path, ndigits=ndigits)
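# Illustrative invocation (added note, not part of the original module); the
# console-script name `make_gtfs` is an assumption based on the command above:
#
#   make_gtfs path/to/protofeed_dir out_gtfs.zip --buffer 10 --ndigits 5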
| mit | 3,882,843,729,302,898,700 | 30.54 | 85 | 0.693722 | false |
thaim/ansible | lib/ansible/module_utils/database.py | 13 | 5936 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class SQLParseError(Exception):
pass
class UnclosedQuoteError(SQLParseError):
pass
# maps a type of identifier to the maximum number of dot levels that are
# allowed to specify that identifier. For example, a database column can be
# specified by up to 4 levels: database.schema.table.column
_PG_IDENTIFIER_TO_DOT_LEVEL = dict(
database=1,
schema=2,
table=3,
column=4,
role=1,
tablespace=1,
sequence=3,
publication=1,
)
_MYSQL_IDENTIFIER_TO_DOT_LEVEL = dict(database=1, table=2, column=3, role=1, vars=1)
def _find_end_quote(identifier, quote_char):
accumulate = 0
while True:
try:
quote = identifier.index(quote_char)
except ValueError:
raise UnclosedQuoteError
accumulate = accumulate + quote
try:
next_char = identifier[quote + 1]
except IndexError:
return accumulate
if next_char == quote_char:
try:
identifier = identifier[quote + 2:]
accumulate = accumulate + 2
except IndexError:
raise UnclosedQuoteError
else:
return accumulate
def _identifier_parse(identifier, quote_char):
if not identifier:
raise SQLParseError('Identifier name unspecified or unquoted trailing dot')
already_quoted = False
if identifier.startswith(quote_char):
already_quoted = True
try:
end_quote = _find_end_quote(identifier[1:], quote_char=quote_char) + 1
except UnclosedQuoteError:
already_quoted = False
else:
if end_quote < len(identifier) - 1:
if identifier[end_quote + 1] == '.':
dot = end_quote + 1
first_identifier = identifier[:dot]
next_identifier = identifier[dot + 1:]
further_identifiers = _identifier_parse(next_identifier, quote_char)
further_identifiers.insert(0, first_identifier)
else:
raise SQLParseError('User escaped identifiers must escape extra quotes')
else:
further_identifiers = [identifier]
if not already_quoted:
try:
dot = identifier.index('.')
except ValueError:
identifier = identifier.replace(quote_char, quote_char * 2)
identifier = ''.join((quote_char, identifier, quote_char))
further_identifiers = [identifier]
else:
if dot == 0 or dot >= len(identifier) - 1:
identifier = identifier.replace(quote_char, quote_char * 2)
identifier = ''.join((quote_char, identifier, quote_char))
further_identifiers = [identifier]
else:
first_identifier = identifier[:dot]
next_identifier = identifier[dot + 1:]
further_identifiers = _identifier_parse(next_identifier, quote_char)
first_identifier = first_identifier.replace(quote_char, quote_char * 2)
first_identifier = ''.join((quote_char, first_identifier, quote_char))
further_identifiers.insert(0, first_identifier)
return further_identifiers
def pg_quote_identifier(identifier, id_type):
identifier_fragments = _identifier_parse(identifier, quote_char='"')
if len(identifier_fragments) > _PG_IDENTIFIER_TO_DOT_LEVEL[id_type]:
raise SQLParseError('PostgreSQL does not support %s with more than %i dots' % (id_type, _PG_IDENTIFIER_TO_DOT_LEVEL[id_type]))
return '.'.join(identifier_fragments)
def mysql_quote_identifier(identifier, id_type):
identifier_fragments = _identifier_parse(identifier, quote_char='`')
if len(identifier_fragments) > _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]:
raise SQLParseError('MySQL does not support %s with more than %i dots' % (id_type, _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]))
special_cased_fragments = []
for fragment in identifier_fragments:
if fragment == '`*`':
special_cased_fragments.append('*')
else:
special_cased_fragments.append(fragment)
return '.'.join(special_cased_fragments)
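# A minimal usage sketch (not part of the original module); the identifier strings
# below are made-up examples, and the expected values follow from the parsing rules above.
def _demo_quote_identifiers():
    # Each dot-separated level is quoted; embedded quote characters are doubled.
    assert pg_quote_identifier('public.users', 'table') == '"public"."users"'
    assert mysql_quote_identifier('mydb.my`tbl', 'table') == '`mydb`.`my``tbl`'
    # Asking for more dot levels than the identifier type allows raises SQLParseError.
    try:
        pg_quote_identifier('db.schema.table.column.extra', 'column')
    except SQLParseError:
        pass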
| mit | 9,138,396,885,790,706,000 | 40.802817 | 134 | 0.658019 | false |
kkdang/synapsePythonClient | synapseclient/cache.py | 1 | 11025 | # Note: Even though this has Sphinx format, this is not meant to be part of the public docs
"""
************
File Caching
************
Implements a cache on local disk for Synapse file entities and other objects
with a `FileHandle <https://rest.synapse.org/org/sagebionetworks/repo/model/file/FileHandle.html>`_.
This is part of the internal implementation of the client and should not be
accessed directly by users of the client.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import str
import collections
import datetime
import json
import operator
import os
import re
import shutil
import six
from math import floor
import synapseclient.utils as utils
from synapseclient.lock import Lock
from synapseclient.exceptions import *
CACHE_ROOT_DIR = os.path.join('~', '.synapseCache')
def epoch_time_to_iso(epoch_time):
"""
Convert seconds since unix epoch to a string in ISO format
"""
return None if epoch_time is None else utils.datetime_to_iso(utils.from_unix_epoch_time_secs(epoch_time))
def iso_time_to_epoch(iso_time):
"""
Convert an ISO formatted time into seconds since unix epoch
"""
return None if iso_time is None else utils.to_unix_epoch_time_secs(utils.iso_to_datetime(iso_time))
def compare_timestamps(modified_time, cached_time):
"""
Compare two ISO formatted timestamps, with a special case when cached_time
ends in .000Z.
For backward compatibility, we always write .000 for milliseconds into the cache.
We then match a cached time ending in .000Z, meaning zero milliseconds
with a modified time with any number of milliseconds.
:param modified_time: float representing seconds since unix epoch
:param cached_time: string holding a ISO formatted time
"""
if cached_time is None or modified_time is None:
return False
if cached_time.endswith(".000Z"):
return cached_time == epoch_time_to_iso(floor(modified_time))
else:
return cached_time == epoch_time_to_iso(modified_time)
def _get_modified_time(path):
if os.path.exists(path):
return os.path.getmtime(path)
return None
class Cache():
"""
Represent a cache in which files are accessed by file handle ID.
"""
def __init__(self, cache_root_dir=CACHE_ROOT_DIR, fanout=1000):
## set root dir of cache in which meta data will be stored and files
## will be stored here by default, but other locations can be specified
cache_root_dir = os.path.expanduser(cache_root_dir)
if not os.path.exists(cache_root_dir):
os.makedirs(cache_root_dir)
self.cache_root_dir = cache_root_dir
self.fanout = fanout
self.cache_map_file_name = ".cacheMap"
def get_cache_dir(self, file_handle_id):
if isinstance(file_handle_id, collections.Mapping):
if 'dataFileHandleId' in file_handle_id:
file_handle_id = file_handle_id['dataFileHandleId']
elif 'concreteType' in file_handle_id and 'id' in file_handle_id and file_handle_id['concreteType'].startswith('org.sagebionetworks.repo.model.file'):
file_handle_id = file_handle_id['id']
return os.path.join(self.cache_root_dir, str(int(file_handle_id) % self.fanout), str(file_handle_id))
def _read_cache_map(self, cache_dir):
cache_map_file = os.path.join(cache_dir, self.cache_map_file_name)
if not os.path.exists(cache_map_file):
return {}
with open(cache_map_file, 'r') as f:
cache_map = json.load(f)
return cache_map
def _write_cache_map(self, cache_dir, cache_map):
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
cache_map_file = os.path.join(cache_dir, self.cache_map_file_name)
with open(cache_map_file, 'w') as f:
json.dump(cache_map, f)
f.write('\n') # For compatibility with R's JSON parser
def contains(self, file_handle_id, path):
"""
Given a file and file_handle_id, return True if an unmodified cached
copy of the file exists at the exact path given or False otherwise.
:param file_handle_id:
:param path: file path at which to look for a cached copy
"""
cache_dir = self.get_cache_dir(file_handle_id)
if not os.path.exists(cache_dir):
return False
with Lock(self.cache_map_file_name, dir=cache_dir):
cache_map = self._read_cache_map(cache_dir)
path = utils.normalize_path(path)
cached_time = cache_map.get(path, None)
if cached_time:
return True if compare_timestamps(_get_modified_time(path), cached_time) else False
def get(self, file_handle_id, path=None):
"""
Retrieve a file with the given file handle from the cache.
:param file_handle_id:
:param path: If the given path is None, look for a cached copy of the
file in the cache directory. If the path is a directory,
look there for a cached copy. If a full file-path is
given, only check whether that exact file exists and is
unmodified since it was cached.
:returns: Either a file path, if an unmodified cached copy of the file
exists in the specified location or None if it does not
"""
cache_dir = self.get_cache_dir(file_handle_id)
if not os.path.exists(cache_dir):
return None
with Lock(self.cache_map_file_name, dir=cache_dir):
cache_map = self._read_cache_map(cache_dir)
path = utils.normalize_path(path)
## If the caller specifies a path and that path exists in the cache
## but has been modified, we need to indicate no match by returning
## None. The logic for updating a synapse entity depends on this to
## determine the need to upload a new file.
if path is not None:
## If we're given a path to a directory, look for a cached file in that directory
if os.path.isdir(path):
for cached_file_path, cached_time in six.iteritems(cache_map):
if path == os.path.dirname(cached_file_path):
return cached_file_path if compare_timestamps(_get_modified_time(cached_file_path), cached_time) else None
## if we're given a full file path, look up a matching file in the cache
else:
cached_time = cache_map.get(path, None)
if cached_time:
return path if compare_timestamps(_get_modified_time(path), cached_time) else None
## return most recently cached and unmodified file OR
## None if there are no unmodified files
for cached_file_path, cached_time in sorted(cache_map.items(), key=operator.itemgetter(1), reverse=True):
if compare_timestamps(_get_modified_time(cached_file_path), cached_time):
return cached_file_path
return None
def add(self, file_handle_id, path):
"""
Add a file to the cache
"""
if not path or not os.path.exists(path):
raise ValueError("Can't find file \"%s\"" % path)
cache_dir = self.get_cache_dir(file_handle_id)
with Lock(self.cache_map_file_name, dir=cache_dir):
cache_map = self._read_cache_map(cache_dir)
path = utils.normalize_path(path)
## write .000 milliseconds for backward compatibility
cache_map[path] = epoch_time_to_iso(floor(_get_modified_time(path)))
self._write_cache_map(cache_dir, cache_map)
return cache_map
def remove(self, file_handle_id, path=None, delete=None):
"""
Remove a file from the cache.
:param file_handle_id: Will also extract file handle id from either a File or file handle
:param path: If the given path is None, remove (and potentially delete)
all cached copies. If the path is that of a file in the
.cacheMap file, remove it.
:returns: A list of files removed
"""
removed = []
cache_dir = self.get_cache_dir(file_handle_id)
## if we've passed an entity and not a path, get path from entity
if path is None and isinstance(file_handle_id, collections.Mapping) and 'path' in file_handle_id:
path = file_handle_id['path']
with Lock(self.cache_map_file_name, dir=cache_dir):
cache_map = self._read_cache_map(cache_dir)
if path is None:
for path in cache_map:
if delete is True and os.path.exists(path):
os.remove(path)
removed.append(path)
cache_map = {}
else:
path = utils.normalize_path(path)
if path in cache_map:
if delete is True and os.path.exists(path):
os.remove(path)
del cache_map[path]
removed.append(path)
self._write_cache_map(cache_dir, cache_map)
return removed
def _cache_dirs(self):
"""
Generate a list of all cache dirs, directories of the form:
[cache.cache_root_dir]/949/59949
"""
for item1 in os.listdir(self.cache_root_dir):
path1 = os.path.join(self.cache_root_dir, item1)
if os.path.isdir(path1) and re.match('\d+', item1):
for item2 in os.listdir(path1):
path2 = os.path.join(path1, item2)
if os.path.isdir(path2) and re.match('\d+', item2):
yield path2
def purge(self, before_date, dry_run=False):
"""
Purge the cache. Use with caution. Delete files whose cache maps were last updated prior to the given date.
Deletes .cacheMap files and files stored in the cache.cache_root_dir, but does not delete
files stored outside the cache.
"""
if isinstance(before_date, datetime.datetime):
            before_date = utils.to_unix_epoch_time_secs(before_date)
count = 0
for cache_dir in self._cache_dirs():
## _get_modified_time returns None if the cache map file doesn't
## exist and n > None evaluates to True (wtf?). I'm guessing it's
## OK to purge directories in the cache that have no .cacheMap file
if before_date > _get_modified_time(os.path.join(cache_dir, self.cache_map_file_name)):
if dry_run:
print(cache_dir)
else:
shutil.rmtree(cache_dir)
count += 1
return count
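# A minimal usage sketch (not part of the original module), assuming a writable cache
# directory and an existing file; the file handle id 1234 is a made-up example value.
def _demo_cache_roundtrip(tmp_cache_dir, some_file_path):
    cache = Cache(cache_root_dir=tmp_cache_dir)
    cache.add(1234, some_file_path)               # record the file's mtime in .cacheMap
    assert cache.contains(1234, some_file_path)   # unmodified copy is recognized
    cached_path = cache.get(1234)                 # most recently cached unmodified copy
    cache.remove(1234, some_file_path)            # drop the entry; delete=True would also unlink
    return cached_path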
| apache-2.0 | 3,364,944,807,029,795,300 | 37.148789 | 162 | 0.609161 | false |
mansonul/events | events/contrib/plugins/form_elements/fields/regex/base.py | 1 | 1508 | from __future__ import absolute_import
from django.forms.fields import RegexField
from django.forms.widgets import TextInput
from django.utils.translation import ugettext_lazy as _
from fobi.base import FormFieldPlugin, get_theme
from . import UID
from .forms import RegexInputForm
__title__ = 'fobi.contrib.plugins.form_elements.fields.regex.base'
__author__ = 'Artur Barseghyan <artur.barseghyan@gmail.com>'
__copyright__ = '2014-2017 Artur Barseghyan'
__license__ = 'GPL 2.0/LGPL 2.1'
__all__ = ('RegexInputPlugin',)
theme = get_theme(request=None, as_instance=True)
class RegexInputPlugin(FormFieldPlugin):
"""Regex field plugin."""
uid = UID
name = _("Regex")
group = _("Fields")
form = RegexInputForm
def get_form_field_instances(self, request=None, form_entry=None,
form_element_entries=None, **kwargs):
"""Get form field instances."""
widget_attrs = {
'class': theme.form_element_html_class,
'placeholder': self.data.placeholder,
}
field_kwargs = {
'label': self.data.label,
'help_text': self.data.help_text,
'regex': self.data.regex,
'initial': self.data.initial,
'required': self.data.required,
'widget': TextInput(attrs=widget_attrs),
}
if self.data.max_length:
field_kwargs['max_length'] = self.data.max_length
return [(self.data.name, RegexField, field_kwargs)]
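# A simplified, hypothetical sketch of how the returned triples are consumed; the real
# form-building step lives in fobi itself and may differ in detail.
def _demo_build_fields(field_triples):
    # Each triple is (field name, Django field class, constructor kwargs).
    return dict((name, field_cls(**kwargs)) for name, field_cls, kwargs in field_triples)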
| mit | -6,803,810,622,282,764,000 | 29.77551 | 70 | 0.626658 | false |
viralpandey/kivy | examples/canvas/bezier.py | 30 | 3688 | #!/usr/bin/env python
'''
Bezier Example
==============
This example shows a closed Bezier curve computed from a polygon. You
should see a purple polygon, a red bezier curve computed from the polygon,
and two sliders. You can drag points on the polygon to recompute the curve.
The two sliders control the dash length of the dashed lines making up the two
shapes.
'''
from kivy.app import App
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.slider import Slider
from kivy.graphics import Color, Bezier, Line
class BezierTest(FloatLayout):
def __init__(self, points=[], loop=False, *args, **kwargs):
super(BezierTest, self).__init__(*args, **kwargs)
self.d = 10 # pixel tolerance when clicking on a point
self.points = points
self.loop = loop
self.current_point = None # index of point being dragged
with self.canvas:
Color(1.0, 0.0, 0.0)
self.bezier = Bezier(
points=self.points,
segments=150,
loop=self.loop,
dash_length=100,
dash_offset=10)
Color(1.0, 0.0, 1.0)
self.line = Line(
points=self.points + self.points[:2],
dash_offset=10,
dash_length=100)
s = Slider(y=0, pos_hint={'x': .3}, size_hint=(.7, None), height=50)
s.bind(value=self._set_bezier_dash_offset)
self.add_widget(s)
s = Slider(y=50, pos_hint={'x': .3}, size_hint=(.7, None), height=50)
s.bind(value=self._set_line_dash_offset)
self.add_widget(s)
def _set_bezier_dash_offset(self, instance, value):
# effect to reduce length while increase offset
self.bezier.dash_length = 100 - value
self.bezier.dash_offset = value
def _set_line_dash_offset(self, instance, value):
# effect to reduce length while increase offset
self.line.dash_length = 100 - value
self.line.dash_offset = value
def on_touch_down(self, touch):
if self.collide_point(touch.pos[0], touch.pos[1]):
for i, p in enumerate(list(zip(self.points[::2],
self.points[1::2]))):
if (abs(touch.pos[0] - self.pos[0] - p[0]) < self.d and
abs(touch.pos[1] - self.pos[1] - p[1]) < self.d):
self.current_point = i + 1
return True
return super(BezierTest, self).on_touch_down(touch)
def on_touch_up(self, touch):
if self.collide_point(touch.pos[0], touch.pos[1]):
if self.current_point:
self.current_point = None
return True
return super(BezierTest, self).on_touch_up(touch)
def on_touch_move(self, touch):
if self.collide_point(touch.pos[0], touch.pos[1]):
c = self.current_point
if c:
self.points[(c - 1) * 2] = touch.pos[0] - self.pos[0]
self.points[(c - 1) * 2 + 1] = touch.pos[1] - self.pos[1]
self.bezier.points = self.points
self.line.points = self.points + self.points[:2]
return True
return super(BezierTest, self).on_touch_move(touch)
class Main(App):
def build(self):
from math import cos, sin, radians
x = y = 150
l = 100
# Pacman !
points = [x, y]
for i in range(45, 360, 45):
i = radians(i)
points.extend([x + cos(i) * l, y + sin(i) * l])
return BezierTest(points=points, loop=True)
if __name__ == '__main__':
Main().run()
| mit | -536,426,628,712,736,600 | 33.792453 | 77 | 0.547993 | false |
t3dev/odoo | doc/_extensions/autojsdoc/parser/tests/test_crap.py | 33 | 2124 | # -*- coding: utf-8 -*-
"""
Test various crap patterns found in Odoo code to ensure they don't blow up
the parser thingie
"""
from autojsdoc.parser import jsdoc
from support import parse
def test_export_external():
[mod] = parse("""
odoo.define('module', function () {
return $.Deferred().reject();
});
""")
assert isinstance(mod.exports, jsdoc.CommentDoc)
assert mod.exports.doc == ''
def test_extend_jq():
parse("""
odoo.define('a', function (r) {
$.extend($.expr[':'], { a: function () {} });
$.fn.extend({ a: function () {} });
});
""")
def test_extend_dynamic():
parse("""
odoo.define('a', function () {
foo.bar.baz[qux + '_external'] = function () {};
});
""")
def test_extend_deep():
parse("""
odoo.define('a', function () {
var eventHandler = $.summernote.eventHandler;
var dom = $.summernote.core.dom;
dom.thing = function () {};
var fn_editor_currentstyle = eventHandler.modules.editor.currentStyle;
eventHandler.modules.editor.currentStyle = function () {}
});
""")
def test_arbitrary():
parse("""
odoo.define('bob', function () {
var page = window.location.href.replace(/^.*\/\/[^\/]+/, '');
var mailWidgets = ['mail_followers', 'mail_thread', 'mail_activity', 'kanban_activity'];
var bob;
var fldj = foo.getTemplate().baz;
});
""")
def test_prototype():
[A, B] = parse("""
odoo.define('mod1', function () {
var exports = {};
exports.Foo = Backbone.Model.extend({});
exports.Bar = Backbone.Model.extend({});
var BarCollection = Backbone.Collection.extend({
model: exports.Bar,
});
exports.Baz = Backbone.Model.extend({});
return exports;
});
odoo.define('mod2', function (require) {
var models = require('mod1');
var _super_orderline = models.Bar.prototype;
models.Foo = models.Bar.extend({});
var _super_order = models.Baz.prototype;
models.Bar = models.Baz.extend({});
});
""")
| gpl-3.0 | -4,738,835,734,835,197,000 | 27.32 | 96 | 0.556026 | false |
ZLLab-Mooc/edx-platform | lms/djangoapps/verify_student/tests/test_ssencrypt.py | 63 | 3638 | import base64
from nose.tools import assert_equals
from lms.djangoapps.verify_student.ssencrypt import (
aes_decrypt, aes_encrypt, encrypt_and_encode, decode_and_decrypt,
rsa_decrypt, rsa_encrypt
)
def test_aes():
key_str = "32fe72aaf2abb44de9e161131b5435c8d37cbdb6f5df242ae860b283115f2dae"
key = key_str.decode("hex")
def assert_roundtrip(text):
assert_equals(text, aes_decrypt(aes_encrypt(text, key), key))
assert_equals(
text,
decode_and_decrypt(
encrypt_and_encode(text, key),
key
)
)
assert_roundtrip("Hello World!")
assert_roundtrip("1234567890123456") # AES block size, padding corner case
# Longer string
assert_roundtrip("12345678901234561234567890123456123456789012345601")
assert_roundtrip("")
assert_roundtrip("\xe9\xe1a\x13\x1bT5\xc8") # Random, non-ASCII text
def test_rsa():
# Make up some garbage keys for testing purposes.
pub_key_str = """-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1hLVjP0oV0Uy/+jQ+Upz
c+eYc4Pyflb/WpfgYATggkoQdnsdplmvPtQr85+utgqKPxOh+PvYGW8QNUzjLIu4
5/GlmvBa82i1jRMgEAxGI95bz7j9DtH+7mnj+06zR5xHwT49jK0zMs5MjMaz5WRq
BUNkz7dxWzDrYJZQx230sPp6upy1Y5H5O8SnJVdghsh8sNciS4Bo4ZONQ3giBwxz
h5svjspz1MIsOoShjbAdfG+4VX7sVwYlw2rnQeRsMH5/xpnNeqtScyOMoz0N9UDG
dtRMNGa2MihAg7zh7/zckbUrtf+o5wQtlCJL1Kdj4EjshqYvCxzWnSM+MaYAjb3M
EQIDAQAB
-----END PUBLIC KEY-----"""
priv_key_str = """-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEA1hLVjP0oV0Uy/+jQ+Upzc+eYc4Pyflb/WpfgYATggkoQdnsd
plmvPtQr85+utgqKPxOh+PvYGW8QNUzjLIu45/GlmvBa82i1jRMgEAxGI95bz7j9
DtH+7mnj+06zR5xHwT49jK0zMs5MjMaz5WRqBUNkz7dxWzDrYJZQx230sPp6upy1
Y5H5O8SnJVdghsh8sNciS4Bo4ZONQ3giBwxzh5svjspz1MIsOoShjbAdfG+4VX7s
VwYlw2rnQeRsMH5/xpnNeqtScyOMoz0N9UDGdtRMNGa2MihAg7zh7/zckbUrtf+o
5wQtlCJL1Kdj4EjshqYvCxzWnSM+MaYAjb3MEQIDAQABAoIBAQCviuA87fdfoOoS
OerrEacc20QDLaby/QoGUtZ2RmmHzY40af7FQ3PWFIw6Ca5trrTwxnuivXnWWWG0
I2mCRM0Kvfgr1n7ubOW7WnyHTFlT3mnxK2Ov/HmNLZ36nO2cgkXA6/Xy3rBGMC9L
nUE1kSLzT/Fh965ntfS9zmVNNBhb6no0rVkGx5nK3vTI6kUmaa0m+E7KL/HweO4c
JodhN8CX4gpxSrkuwJ7IHEPYspqc0jInMYKLmD3d2g3BiOctjzFmaj3lV5AUlujW
z7/LVe5WAEaaxjwaMvwqrJLv9ogxWU3etJf22+Yy7r5gbPtqpqJrCZ5+WpGnUHws
3mMGP2QBAoGBAOc3pzLFgGUREVPSFQlJ06QFtfKYqg9fFHJCgWu/2B2aVZc2aO/t
Zhuoz+AgOdzsw+CWv7K0FH9sUkffk2VKPzwwwufLK3avD9gI0bhmBAYvdhS6A3nO
YM3W+lvmaJtFL00K6kdd+CzgRnBS9cZ70WbcbtqjdXI6+mV1WdGUTLhBAoGBAO0E
xhD4z+GjubSgfHYEZPgRJPqyUIfDH+5UmFGpr6zlvNN/depaGxsbhW8t/V6xkxsG
MCgic7GLMihEiUMx1+/snVs5bBUx7OT9API0d+vStHCFlTTe6aTdmiduFD4PbDsq
6E4DElVRqZhpIYusdDh7Z3fO2hm5ad4FfMlx65/RAoGAPYEfV7ETs06z9kEG2X6q
7pGaUZrsecRH8xDfzmKswUshg2S0y0WyCJ+CFFNeMPdGL4LKIWYnobGVvYqqcaIr
af5qijAQMrTkmQnXh56TaXXMijzk2czdEUQjOrjykIL5zxudMDi94GoUMqLOv+qF
zD/MuRoMDsPDgaOSrd4t/kECgYEAzwBNT8NOIz3P0Z4cNSJPYIvwpPaY+IkE2SyO
vzuYj0Mx7/Ew9ZTueXVGyzv6PfqOhJqZ8mNscZIlIyAAVWwxsHwRTfvPlo882xzP
97i1R4OFTYSNNFi+69sSZ/9utGjZ2K73pjJuj487tD2VK5xZAH9edTd2KeNSP7LB
MlpJNBECgYAmIswPdldm+G8SJd5j9O2fcDVTURjKAoSXCv2j4gEZzzfudpLWNHYu
l8N6+LEIVTMAytPk+/bImHvGHKZkCz5rEMSuYJWOmqKI92rUtI6fz5DUb3XSbrwT
3W+sdGFUK3GH1NAX71VxbAlFVLUetcMwai1+wXmGkRw6A7YezVFnhw==
-----END RSA PRIVATE KEY-----"""
aes_key_str = "32fe72aaf2abb44de9e161131b5435c8d37cbdb6f5df242ae860b283115f2dae"
aes_key = aes_key_str.decode('hex')
encrypted_aes_key = rsa_encrypt(aes_key, pub_key_str)
assert_equals(aes_key, rsa_decrypt(encrypted_aes_key, priv_key_str))
# Even though our AES key is only 32 bytes, RSA encryption will make it 256
# bytes, and base64 encoding will blow that up to 344
assert_equals(len(base64.urlsafe_b64encode(encrypted_aes_key)), 344)
| agpl-3.0 | -8,612,998,956,430,498,000 | 45.050633 | 84 | 0.824079 | false |
evilhero/mylar | lib/cherrypy/test/test_request_obj.py | 6 | 31327 | """Basic tests for the cherrypy.Request object."""
import os
localDir = os.path.dirname(__file__)
import sys
import types
from cherrypy._cpcompat import IncompleteRead, ntob, unicodestr
import cherrypy
from cherrypy import _cptools, tools
from cherrypy.lib import httputil
defined_http_methods = ("OPTIONS", "GET", "HEAD", "POST", "PUT", "DELETE",
"TRACE", "PROPFIND")
# Client-side code #
from cherrypy.test import helper
class RequestObjectTests(helper.CPWebCase):
def setup_server():
class Root:
def index(self):
return "hello"
index.exposed = True
def scheme(self):
return cherrypy.request.scheme
scheme.exposed = True
root = Root()
class TestType(type):
"""Metaclass which automatically exposes all functions in each subclass,
and adds an instance of the subclass as an attribute of root.
"""
def __init__(cls, name, bases, dct):
type.__init__(cls, name, bases, dct)
for value in dct.values():
if isinstance(value, types.FunctionType):
value.exposed = True
setattr(root, name.lower(), cls())
class Test(object):
__metaclass__ = TestType
class Params(Test):
def index(self, thing):
return repr(thing)
def ismap(self, x, y):
return "Coordinates: %s, %s" % (x, y)
def default(self, *args, **kwargs):
return "args: %s kwargs: %s" % (args, kwargs)
default._cp_config = {'request.query_string_encoding': 'latin1'}
class ParamErrorsCallable(object):
exposed = True
def __call__(self):
return "data"
class ParamErrors(Test):
def one_positional(self, param1):
return "data"
one_positional.exposed = True
def one_positional_args(self, param1, *args):
return "data"
one_positional_args.exposed = True
def one_positional_args_kwargs(self, param1, *args, **kwargs):
return "data"
one_positional_args_kwargs.exposed = True
def one_positional_kwargs(self, param1, **kwargs):
return "data"
one_positional_kwargs.exposed = True
def no_positional(self):
return "data"
no_positional.exposed = True
def no_positional_args(self, *args):
return "data"
no_positional_args.exposed = True
def no_positional_args_kwargs(self, *args, **kwargs):
return "data"
no_positional_args_kwargs.exposed = True
def no_positional_kwargs(self, **kwargs):
return "data"
no_positional_kwargs.exposed = True
callable_object = ParamErrorsCallable()
def raise_type_error(self, **kwargs):
raise TypeError("Client Error")
raise_type_error.exposed = True
def raise_type_error_with_default_param(self, x, y=None):
return '%d' % 'a' # throw an exception
raise_type_error_with_default_param.exposed = True
def callable_error_page(status, **kwargs):
return "Error %s - Well, I'm very sorry but you haven't paid!" % status
class Error(Test):
_cp_config = {'tools.log_tracebacks.on': True,
}
def reason_phrase(self):
raise cherrypy.HTTPError("410 Gone fishin'")
def custom(self, err='404'):
raise cherrypy.HTTPError(int(err), "No, <b>really</b>, not found!")
custom._cp_config = {'error_page.404': os.path.join(localDir, "static/index.html"),
'error_page.401': callable_error_page,
}
def custom_default(self):
return 1 + 'a' # raise an unexpected error
custom_default._cp_config = {'error_page.default': callable_error_page}
def noexist(self):
raise cherrypy.HTTPError(404, "No, <b>really</b>, not found!")
noexist._cp_config = {'error_page.404': "nonexistent.html"}
def page_method(self):
raise ValueError()
def page_yield(self):
yield "howdy"
raise ValueError()
def page_streamed(self):
yield "word up"
raise ValueError()
yield "very oops"
page_streamed._cp_config = {"response.stream": True}
def cause_err_in_finalize(self):
# Since status must start with an int, this should error.
cherrypy.response.status = "ZOO OK"
cause_err_in_finalize._cp_config = {'request.show_tracebacks': False}
def rethrow(self):
"""Test that an error raised here will be thrown out to the server."""
raise ValueError()
rethrow._cp_config = {'request.throw_errors': True}
class Expect(Test):
def expectation_failed(self):
expect = cherrypy.request.headers.elements("Expect")
if expect and expect[0].value != '100-continue':
raise cherrypy.HTTPError(400)
raise cherrypy.HTTPError(417, 'Expectation Failed')
class Headers(Test):
def default(self, headername):
"""Spit back out the value for the requested header."""
return cherrypy.request.headers[headername]
def doubledheaders(self):
# From http://www.cherrypy.org/ticket/165:
# "header field names should not be case sensitive sayes the rfc.
# if i set a headerfield in complete lowercase i end up with two
# header fields, one in lowercase, the other in mixed-case."
# Set the most common headers
hMap = cherrypy.response.headers
hMap['content-type'] = "text/html"
hMap['content-length'] = 18
hMap['server'] = 'CherryPy headertest'
hMap['location'] = ('%s://%s:%s/headers/'
% (cherrypy.request.local.ip,
cherrypy.request.local.port,
cherrypy.request.scheme))
# Set a rare header for fun
hMap['Expires'] = 'Thu, 01 Dec 2194 16:00:00 GMT'
return "double header test"
def ifmatch(self):
val = cherrypy.request.headers['If-Match']
assert isinstance(val, unicodestr)
cherrypy.response.headers['ETag'] = val
return val
class HeaderElements(Test):
def get_elements(self, headername):
e = cherrypy.request.headers.elements(headername)
return "\n".join([unicodestr(x) for x in e])
class Method(Test):
def index(self):
m = cherrypy.request.method
if m in defined_http_methods or m == "CONNECT":
return m
if m == "LINK":
raise cherrypy.HTTPError(405)
else:
raise cherrypy.HTTPError(501)
def parameterized(self, data):
return data
def request_body(self):
# This should be a file object (temp file),
# which CP will just pipe back out if we tell it to.
return cherrypy.request.body
def reachable(self):
return "success"
class Divorce:
"""HTTP Method handlers shouldn't collide with normal method names.
For example, a GET-handler shouldn't collide with a method named 'get'.
If you build HTTP method dispatching into CherryPy, rewrite this class
to use your new dispatch mechanism and make sure that:
"GET /divorce HTTP/1.1" maps to divorce.index() and
"GET /divorce/get?ID=13 HTTP/1.1" maps to divorce.get()
"""
documents = {}
def index(self):
yield "<h1>Choose your document</h1>\n"
yield "<ul>\n"
for id, contents in self.documents.items():
yield (" <li><a href='/divorce/get?ID=%s'>%s</a>: %s</li>\n"
% (id, id, contents))
yield "</ul>"
index.exposed = True
def get(self, ID):
return ("Divorce document %s: %s" %
(ID, self.documents.get(ID, "empty")))
get.exposed = True
root.divorce = Divorce()
class ThreadLocal(Test):
def index(self):
existing = repr(getattr(cherrypy.request, "asdf", None))
cherrypy.request.asdf = "rassfrassin"
return existing
appconf = {
'/method': {'request.methods_with_bodies': ("POST", "PUT", "PROPFIND")},
}
cherrypy.tree.mount(root, config=appconf)
setup_server = staticmethod(setup_server)
def test_scheme(self):
self.getPage("/scheme")
self.assertBody(self.scheme)
def testParams(self):
self.getPage("/params/?thing=a")
self.assertBody("u'a'")
self.getPage("/params/?thing=a&thing=b&thing=c")
self.assertBody("[u'a', u'b', u'c']")
# Test friendly error message when given params are not accepted.
cherrypy.config.update({"request.show_mismatched_params": True})
self.getPage("/params/?notathing=meeting")
self.assertInBody("Missing parameters: thing")
        self.getPage("/params/?thing=meeting&notathing=meeting")
self.assertInBody("Unexpected query string parameters: notathing")
# Test ability to turn off friendly error messages
cherrypy.config.update({"request.show_mismatched_params": False})
self.getPage("/params/?notathing=meeting")
self.assertInBody("Not Found")
        self.getPage("/params/?thing=meeting&notathing=meeting")
self.assertInBody("Not Found")
# Test "% HEX HEX"-encoded URL, param keys, and values
self.getPage("/params/%d4%20%e3/cheese?Gruy%E8re=Bulgn%e9ville")
self.assertBody(r"args: ('\xd4 \xe3', 'cheese') "
r"kwargs: {'Gruy\xe8re': u'Bulgn\xe9ville'}")
# Make sure that encoded = and & get parsed correctly
self.getPage("/params/code?url=http%3A//cherrypy.org/index%3Fa%3D1%26b%3D2")
self.assertBody(r"args: ('code',) "
r"kwargs: {'url': u'http://cherrypy.org/index?a=1&b=2'}")
# Test coordinates sent by <img ismap>
self.getPage("/params/ismap?223,114")
self.assertBody("Coordinates: 223, 114")
# Test "name[key]" dict-like params
self.getPage("/params/dictlike?a[1]=1&a[2]=2&b=foo&b[bar]=baz")
self.assertBody(
"args: ('dictlike',) "
"kwargs: {'a[1]': u'1', 'b[bar]': u'baz', 'b': u'foo', 'a[2]': u'2'}")
def testParamErrors(self):
# test that all of the handlers work when given
# the correct parameters in order to ensure that the
# errors below aren't coming from some other source.
for uri in (
'/paramerrors/one_positional?param1=foo',
'/paramerrors/one_positional_args?param1=foo',
'/paramerrors/one_positional_args/foo',
'/paramerrors/one_positional_args/foo/bar/baz',
            '/paramerrors/one_positional_args_kwargs?param1=foo&param2=bar',
            '/paramerrors/one_positional_args_kwargs/foo?param2=bar&param3=baz',
            '/paramerrors/one_positional_args_kwargs/foo/bar/baz?param2=bar&param3=baz',
            '/paramerrors/one_positional_kwargs?param1=foo&param2=bar&param3=baz',
            '/paramerrors/one_positional_kwargs/foo?param4=foo&param2=bar&param3=baz',
'/paramerrors/no_positional',
'/paramerrors/no_positional_args/foo',
'/paramerrors/no_positional_args/foo/bar/baz',
            '/paramerrors/no_positional_args_kwargs?param1=foo&param2=bar',
'/paramerrors/no_positional_args_kwargs/foo?param2=bar',
            '/paramerrors/no_positional_args_kwargs/foo/bar/baz?param2=bar&param3=baz',
            '/paramerrors/no_positional_kwargs?param1=foo&param2=bar',
'/paramerrors/callable_object',
):
self.getPage(uri)
self.assertStatus(200)
# query string parameters are part of the URI, so if they are wrong
# for a particular handler, the status MUST be a 404.
error_msgs = [
'Missing parameters',
'Nothing matches the given URI',
'Multiple values for parameters',
'Unexpected query string parameters',
'Unexpected body parameters',
]
for uri, msg in (
('/paramerrors/one_positional', error_msgs[0]),
('/paramerrors/one_positional?foo=foo', error_msgs[0]),
('/paramerrors/one_positional/foo/bar/baz', error_msgs[1]),
('/paramerrors/one_positional/foo?param1=foo', error_msgs[2]),
            ('/paramerrors/one_positional/foo?param1=foo&param2=foo', error_msgs[2]),
            ('/paramerrors/one_positional_args/foo?param1=foo&param2=foo', error_msgs[2]),
('/paramerrors/one_positional_args/foo/bar/baz?param2=foo', error_msgs[3]),
            ('/paramerrors/one_positional_args_kwargs/foo/bar/baz?param1=bar&param3=baz', error_msgs[2]),
            ('/paramerrors/one_positional_kwargs/foo?param1=foo&param2=bar&param3=baz', error_msgs[2]),
('/paramerrors/no_positional/boo', error_msgs[1]),
('/paramerrors/no_positional?param1=foo', error_msgs[3]),
('/paramerrors/no_positional_args/boo?param1=foo', error_msgs[3]),
('/paramerrors/no_positional_kwargs/boo?param1=foo', error_msgs[1]),
('/paramerrors/callable_object?param1=foo', error_msgs[3]),
('/paramerrors/callable_object/boo', error_msgs[1]),
):
for show_mismatched_params in (True, False):
cherrypy.config.update({'request.show_mismatched_params': show_mismatched_params})
self.getPage(uri)
self.assertStatus(404)
if show_mismatched_params:
self.assertInBody(msg)
else:
self.assertInBody("Not Found")
# if body parameters are wrong, a 400 must be returned.
for uri, body, msg in (
('/paramerrors/one_positional/foo', 'param1=foo', error_msgs[2]),
            ('/paramerrors/one_positional/foo', 'param1=foo&param2=foo', error_msgs[2]),
            ('/paramerrors/one_positional_args/foo', 'param1=foo&param2=foo', error_msgs[2]),
('/paramerrors/one_positional_args/foo/bar/baz', 'param2=foo', error_msgs[4]),
            ('/paramerrors/one_positional_args_kwargs/foo/bar/baz', 'param1=bar&param3=baz', error_msgs[2]),
            ('/paramerrors/one_positional_kwargs/foo', 'param1=foo&param2=bar&param3=baz', error_msgs[2]),
('/paramerrors/no_positional', 'param1=foo', error_msgs[4]),
('/paramerrors/no_positional_args/boo', 'param1=foo', error_msgs[4]),
('/paramerrors/callable_object', 'param1=foo', error_msgs[4]),
):
for show_mismatched_params in (True, False):
cherrypy.config.update({'request.show_mismatched_params': show_mismatched_params})
self.getPage(uri, method='POST', body=body)
self.assertStatus(400)
if show_mismatched_params:
self.assertInBody(msg)
else:
self.assertInBody("Bad Request")
# even if body parameters are wrong, if we get the uri wrong, then
# it's a 404
for uri, body, msg in (
('/paramerrors/one_positional?param2=foo', 'param1=foo', error_msgs[3]),
('/paramerrors/one_positional/foo/bar', 'param2=foo', error_msgs[1]),
('/paramerrors/one_positional_args/foo/bar?param2=foo', 'param3=foo', error_msgs[3]),
            ('/paramerrors/one_positional_kwargs/foo/bar', 'param2=bar&param3=baz', error_msgs[1]),
('/paramerrors/no_positional?param1=foo', 'param2=foo', error_msgs[3]),
('/paramerrors/no_positional_args/boo?param2=foo', 'param1=foo', error_msgs[3]),
('/paramerrors/callable_object?param2=bar', 'param1=foo', error_msgs[3]),
):
for show_mismatched_params in (True, False):
cherrypy.config.update({'request.show_mismatched_params': show_mismatched_params})
self.getPage(uri, method='POST', body=body)
self.assertStatus(404)
if show_mismatched_params:
self.assertInBody(msg)
else:
self.assertInBody("Not Found")
# In the case that a handler raises a TypeError we should
# let that type error through.
for uri in (
'/paramerrors/raise_type_error',
'/paramerrors/raise_type_error_with_default_param?x=0',
'/paramerrors/raise_type_error_with_default_param?x=0&y=0',
):
self.getPage(uri, method='GET')
self.assertStatus(500)
self.assertTrue('Client Error', self.body)
def testErrorHandling(self):
self.getPage("/error/missing")
self.assertStatus(404)
self.assertErrorPage(404, "The path '/error/missing' was not found.")
ignore = helper.webtest.ignored_exceptions
ignore.append(ValueError)
try:
valerr = '\n raise ValueError()\nValueError'
self.getPage("/error/page_method")
self.assertErrorPage(500, pattern=valerr)
self.getPage("/error/page_yield")
self.assertErrorPage(500, pattern=valerr)
if (cherrypy.server.protocol_version == "HTTP/1.0" or
getattr(cherrypy.server, "using_apache", False)):
self.getPage("/error/page_streamed")
# Because this error is raised after the response body has
# started, the status should not change to an error status.
self.assertStatus(200)
self.assertBody("word up")
else:
# Under HTTP/1.1, the chunked transfer-coding is used.
# The HTTP client will choke when the output is incomplete.
self.assertRaises((ValueError, IncompleteRead), self.getPage,
"/error/page_streamed")
# No traceback should be present
self.getPage("/error/cause_err_in_finalize")
msg = "Illegal response status from server ('ZOO' is non-numeric)."
self.assertErrorPage(500, msg, None)
finally:
ignore.pop()
# Test HTTPError with a reason-phrase in the status arg.
self.getPage('/error/reason_phrase')
self.assertStatus("410 Gone fishin'")
# Test custom error page for a specific error.
self.getPage("/error/custom")
self.assertStatus(404)
self.assertBody("Hello, world\r\n" + (" " * 499))
# Test custom error page for a specific error.
self.getPage("/error/custom?err=401")
self.assertStatus(401)
self.assertBody("Error 401 Unauthorized - Well, I'm very sorry but you haven't paid!")
# Test default custom error page.
self.getPage("/error/custom_default")
self.assertStatus(500)
self.assertBody("Error 500 Internal Server Error - Well, I'm very sorry but you haven't paid!".ljust(513))
# Test error in custom error page (ticket #305).
# Note that the message is escaped for HTML (ticket #310).
self.getPage("/error/noexist")
self.assertStatus(404)
msg = ("No, <b>really</b>, not found!<br />"
"In addition, the custom error page failed:\n<br />"
"IOError: [Errno 2] No such file or directory: 'nonexistent.html'")
self.assertInBody(msg)
if getattr(cherrypy.server, "using_apache", False):
pass
else:
# Test throw_errors (ticket #186).
self.getPage("/error/rethrow")
self.assertInBody("raise ValueError()")
def testExpect(self):
e = ('Expect', '100-continue')
self.getPage("/headerelements/get_elements?headername=Expect", [e])
self.assertBody('100-continue')
self.getPage("/expect/expectation_failed", [e])
self.assertStatus(417)
def testHeaderElements(self):
# Accept-* header elements should be sorted, with most preferred first.
h = [('Accept', 'audio/*; q=0.2, audio/basic')]
self.getPage("/headerelements/get_elements?headername=Accept", h)
self.assertStatus(200)
self.assertBody("audio/basic\n"
"audio/*;q=0.2")
h = [('Accept', 'text/plain; q=0.5, text/html, text/x-dvi; q=0.8, text/x-c')]
self.getPage("/headerelements/get_elements?headername=Accept", h)
self.assertStatus(200)
self.assertBody("text/x-c\n"
"text/html\n"
"text/x-dvi;q=0.8\n"
"text/plain;q=0.5")
# Test that more specific media ranges get priority.
h = [('Accept', 'text/*, text/html, text/html;level=1, */*')]
self.getPage("/headerelements/get_elements?headername=Accept", h)
self.assertStatus(200)
self.assertBody("text/html;level=1\n"
"text/html\n"
"text/*\n"
"*/*")
# Test Accept-Charset
h = [('Accept-Charset', 'iso-8859-5, unicode-1-1;q=0.8')]
self.getPage("/headerelements/get_elements?headername=Accept-Charset", h)
self.assertStatus("200 OK")
self.assertBody("iso-8859-5\n"
"unicode-1-1;q=0.8")
# Test Accept-Encoding
h = [('Accept-Encoding', 'gzip;q=1.0, identity; q=0.5, *;q=0')]
self.getPage("/headerelements/get_elements?headername=Accept-Encoding", h)
self.assertStatus("200 OK")
self.assertBody("gzip;q=1.0\n"
"identity;q=0.5\n"
"*;q=0")
# Test Accept-Language
h = [('Accept-Language', 'da, en-gb;q=0.8, en;q=0.7')]
self.getPage("/headerelements/get_elements?headername=Accept-Language", h)
self.assertStatus("200 OK")
self.assertBody("da\n"
"en-gb;q=0.8\n"
"en;q=0.7")
# Test malformed header parsing. See http://www.cherrypy.org/ticket/763.
self.getPage("/headerelements/get_elements?headername=Content-Type",
# Note the illegal trailing ";"
headers=[('Content-Type', 'text/html; charset=utf-8;')])
self.assertStatus(200)
self.assertBody("text/html;charset=utf-8")
def test_repeated_headers(self):
# Test that two request headers are collapsed into one.
# See http://www.cherrypy.org/ticket/542.
self.getPage("/headers/Accept-Charset",
headers=[("Accept-Charset", "iso-8859-5"),
("Accept-Charset", "unicode-1-1;q=0.8")])
self.assertBody("iso-8859-5, unicode-1-1;q=0.8")
# Tests that each header only appears once, regardless of case.
self.getPage("/headers/doubledheaders")
self.assertBody("double header test")
hnames = [name.title() for name, val in self.headers]
for key in ['Content-Length', 'Content-Type', 'Date',
'Expires', 'Location', 'Server']:
self.assertEqual(hnames.count(key), 1, self.headers)
def test_encoded_headers(self):
# First, make sure the innards work like expected.
self.assertEqual(httputil.decode_TEXT(u"=?utf-8?q?f=C3=BCr?="), u"f\xfcr")
if cherrypy.server.protocol_version == "HTTP/1.1":
# Test RFC-2047-encoded request and response header values
u = u'\u212bngstr\xf6m'
c = u"=E2=84=ABngstr=C3=B6m"
self.getPage("/headers/ifmatch", [('If-Match', u'=?utf-8?q?%s?=' % c)])
# The body should be utf-8 encoded.
self.assertBody("\xe2\x84\xabngstr\xc3\xb6m")
# But the Etag header should be RFC-2047 encoded (binary)
self.assertHeader("ETag", u'=?utf-8?b?4oSrbmdzdHLDtm0=?=')
# Test a *LONG* RFC-2047-encoded request and response header value
self.getPage("/headers/ifmatch",
[('If-Match', u'=?utf-8?q?%s?=' % (c * 10))])
self.assertBody("\xe2\x84\xabngstr\xc3\xb6m" * 10)
# Note: this is different output for Python3, but it decodes fine.
etag = self.assertHeader("ETag",
'=?utf-8?b?4oSrbmdzdHLDtm3ihKtuZ3N0csO2beKEq25nc3Ryw7Zt'
'4oSrbmdzdHLDtm3ihKtuZ3N0csO2beKEq25nc3Ryw7Zt'
'4oSrbmdzdHLDtm3ihKtuZ3N0csO2beKEq25nc3Ryw7Zt'
'4oSrbmdzdHLDtm0=?=')
self.assertEqual(httputil.decode_TEXT(etag), u * 10)
def test_header_presence(self):
# If we don't pass a Content-Type header, it should not be present
# in cherrypy.request.headers
self.getPage("/headers/Content-Type",
headers=[])
self.assertStatus(500)
# If Content-Type is present in the request, it should be present in
# cherrypy.request.headers
self.getPage("/headers/Content-Type",
headers=[("Content-type", "application/json")])
self.assertBody("application/json")
def test_basic_HTTPMethods(self):
helper.webtest.methods_with_bodies = ("POST", "PUT", "PROPFIND")
# Test that all defined HTTP methods work.
for m in defined_http_methods:
self.getPage("/method/", method=m)
# HEAD requests should not return any body.
if m == "HEAD":
self.assertBody("")
elif m == "TRACE":
# Some HTTP servers (like modpy) have their own TRACE support
self.assertEqual(self.body[:5], ntob("TRACE"))
else:
self.assertBody(m)
# Request a PUT method with a form-urlencoded body
self.getPage("/method/parameterized", method="PUT",
body="data=on+top+of+other+things")
self.assertBody("on top of other things")
# Request a PUT method with a file body
b = "one thing on top of another"
h = [("Content-Type", "text/plain"),
("Content-Length", str(len(b)))]
self.getPage("/method/request_body", headers=h, method="PUT", body=b)
self.assertStatus(200)
self.assertBody(b)
# Request a PUT method with a file body but no Content-Type.
# See http://www.cherrypy.org/ticket/790.
b = ntob("one thing on top of another")
self.persistent = True
try:
conn = self.HTTP_CONN
conn.putrequest("PUT", "/method/request_body", skip_host=True)
conn.putheader("Host", self.HOST)
conn.putheader('Content-Length', str(len(b)))
conn.endheaders()
conn.send(b)
response = conn.response_class(conn.sock, method="PUT")
response.begin()
self.assertEqual(response.status, 200)
self.body = response.read()
self.assertBody(b)
finally:
self.persistent = False
# Request a PUT method with no body whatsoever (not an empty one).
# See http://www.cherrypy.org/ticket/650.
# Provide a C-T or webtest will provide one (and a C-L) for us.
h = [("Content-Type", "text/plain")]
self.getPage("/method/reachable", headers=h, method="PUT")
self.assertStatus(411)
# Request a custom method with a request body
b = ('<?xml version="1.0" encoding="utf-8" ?>\n\n'
'<propfind xmlns="DAV:"><prop><getlastmodified/>'
'</prop></propfind>')
h = [('Content-Type', 'text/xml'),
('Content-Length', str(len(b)))]
self.getPage("/method/request_body", headers=h, method="PROPFIND", body=b)
self.assertStatus(200)
self.assertBody(b)
# Request a disallowed method
self.getPage("/method/", method="LINK")
self.assertStatus(405)
# Request an unknown method
self.getPage("/method/", method="SEARCH")
self.assertStatus(501)
# For method dispatchers: make sure that an HTTP method doesn't
# collide with a virtual path atom. If you build HTTP-method
# dispatching into the core, rewrite these handlers to use
# your dispatch idioms.
self.getPage("/divorce/get?ID=13")
self.assertBody('Divorce document 13: empty')
self.assertStatus(200)
self.getPage("/divorce/", method="GET")
self.assertBody('<h1>Choose your document</h1>\n<ul>\n</ul>')
self.assertStatus(200)
def test_CONNECT_method(self):
if getattr(cherrypy.server, "using_apache", False):
return self.skip("skipped due to known Apache differences... ")
self.getPage("/method/", method="CONNECT")
self.assertBody("CONNECT")
def testEmptyThreadlocals(self):
results = []
for x in range(20):
self.getPage("/threadlocal/")
results.append(self.body)
self.assertEqual(results, [ntob("None")] * 20)
| gpl-3.0 | 3,029,887,268,917,854,700 | 42.389197 | 114 | 0.542599 | false |
freedesktop-unofficial-mirror/packagekit | backends/conary/conaryBackend.py | 6 | 17671 | #!/usr/bin/python
# Licensed under the GNU General Public License Version 2
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright (C) 2007 Ken VanDine <ken@vandine.org>
# Copyright (C) 2008 Richard Hughes <richard@hughsie.com>
# Copyright (C) 2009-2010 Andres Vargas <zodman@foresightlinux.org>
# Scott Parkerson <scott.parkerson@gmail.com>
import sys
from conary.conaryclient import DepResolutionFailure
from conary.errors import InternalConaryError
from conary.trove import TroveIntegrityError
from packagekit.backend import get_package_id, split_package_id, \
PackageKitBaseBackend
from packagekit.enums import (ERROR_DEP_RESOLUTION_FAILED, ERROR_NO_CACHE,
ERROR_NO_PACKAGES_TO_UPDATE, ERROR_UNKNOWN, FILTER_INSTALLED,
FILTER_NOT_INSTALLED, INFO_INSTALLING, INFO_NORMAL, INFO_REMOVING,
INFO_SECURITY, INFO_UPDATING, MESSAGE_COULD_NOT_FIND_PACKAGE,
RESTART_APPLICATION, RESTART_NONE, RESTART_SYSTEM, STATUS_INFO,
STATUS_QUERY, STATUS_REFRESH_CACHE, STATUS_RUNNING, STATUS_UPDATE,
UPDATE_STATE_STABLE, UPDATE_STATE_TESTING, UPDATE_STATE_UNSTABLE)
from conaryCallback import UpdateCallback, GetUpdateCallback
from conaryCallback import RemoveCallback, UpdateSystemCallback
from conaryFilter import ConaryFilter
from XMLCache import XMLCache
import conarypk
# To use the logger, uncomment this line:
# from pkConaryLog import log
def ConaryExceptionHandler(func):
'''Centralized handler for conary Exceptions
Currently only considers conary install/erase/updateall.
'''
def wrapper(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except DepResolutionFailure as e:
deps = [str(i[0][0]).split(":")[0] for i in e.cannotResolve]
self.error(ERROR_DEP_RESOLUTION_FAILED, ", ".join(set(deps)))
except InternalConaryError as e:
if str(e) == "Stale update job":
self.conary.clear_job_cache()
# The UpdateJob can be invalid. It's probably because after the
# update job is fozen, the state of the database has changed.
self.error(ERROR_NO_CACHE,
"The previously cached update job is broken. Please try again.")
except TroveIntegrityError:
self.error(ERROR_NO_PACKAGES_TO_UPDATE, "Network error. Try again")
return wrapper
def _get_trovespec_from_ids(package_ids):
ret = []
for p in package_ids:
name, version, arch, data = split_package_id(p)
trovespec = name
# Omitting version and label. Depend on conary to find the proper package.
# This may be problematic.
# Also omitting flavor for now.
#if arch:
# trovespec = '%s[is: %s]' % (trovespec, arch)
ret.append(trovespec)
return ret
def _get_fits(branch):
if "conary.rpath.com" in branch:
return "http://issues.rpath.com; rPath Issue Tracking System"
elif "foresight.rpath.org" in branch:
return "http://issues.foresightlinux.org; Foresight Issue Tracking System"
else:
return ""
def _get_license(license_list):
if license_list == "":
return ""
# license_list is a list of licenses in the format of
# 'rpath.com/licenses/copyright/GPL-2'.
return " ".join([i.split("/")[-1] for i in license_list])
def _get_branch(branch):
branchList = branch.split("@")
if "2-qa" in branchList[1]:
return UPDATE_STATE_TESTING
elif "2-devel" in branchList[1]:
return UPDATE_STATE_UNSTABLE
else:
return UPDATE_STATE_STABLE
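# A minimal sketch of the module-level helpers above, not part of the backend proper.
# It assumes split_package_id() splits the standard "name;version;arch;data" PackageKit id;
# the package id, license path and label below are made-up examples.
def _demo_id_helpers():
    assert _get_trovespec_from_ids(["gimp;2.8.2-1-1;x86;foresight.rpath.org@fl:2"]) == ["gimp"]
    assert _get_license(["rpath.com/licenses/copyright/GPL-2"]) == "GPL-2"
    assert _get_branch("foresight.rpath.org@fl:2-devel") == UPDATE_STATE_UNSTABLE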
class PackageKitConaryBackend(PackageKitBaseBackend):
# Packages that require a reboot
rebootpkgs = ("kernel", "glibc", "hal", "dbus")
restartpkgs = ("PackageKit","gnome-packagekit")
def __init__(self, args):
PackageKitBaseBackend.__init__(self, args)
# conary configurations
conary = conarypk.ConaryPk()
self.cfg = conary.cfg
self.client = conary.cli
self.conary = conary
self.xmlcache = XMLCache(self.conary.get_labels())
def _get_package_name_from_ids(self, package_ids):
return [split_package_id(x)[0] for x in package_ids]
def _format_package_summary(self, name, short_desc):
data = short_desc
if data == "." or data == "":
data = name.replace("-",' ').capitalize()
return data
def _search_package(self, pkg_list, name):
for pkg in pkg_list:
if pkg["name"] == name:
return pkg
return None
def _convert_package(self, trovetuple, metadata):
return dict(
trove = trovetuple,
metadata = metadata
)
def _do_search(self, filters, searchlist, where = "name"):
"""
        searchlist (str): the package name or term to search for
        filters (str): the filter to apply
"""
if where not in ("name", "details", "group", "all"):
self.error(ERROR_UNKNOWN, "DORK---- search where not found")
pkgList = self.xmlcache.search(searchlist, where )
if len(pkgList) > 0 :
pkgs = self._resolve_list(pkgList, filters)
self._show_package_list(pkgs)
else:
self.message(MESSAGE_COULD_NOT_FIND_PACKAGE,"search not found")
def _resolve_list(self, pkg_list, filters):
pkgFilter = ConaryFilter()
installed = []
if FILTER_NOT_INSTALLED not in filters:
installed = self._resolve_local(pkgFilter, pkg_list)
if FILTER_INSTALLED not in filters:
pkg_list = [x for x in pkg_list if x not in installed]
self._resolve_repo(pkgFilter, pkg_list)
package_list = pkgFilter.post_process()
return package_list
def _resolve_local(self, pkgFilter, pkg_list):
'''Find out installed packages from pkg_list
If a package from pkg_list can be found locally, add it (after some
        conversion) to pkgFilter.
Returns the list of installed packages.
'''
ret = []
troves_all = [(p["name"], None, None) for p in pkg_list]
troves_local = self.client.db.findTroves(None, troves_all,
allowMissing=True)
for trv in troves_local:
pkg = self._search_package(pkg_list, trv[0])
ret.append(pkg)
# A package may have different versions/flavors installed.
for t in troves_local[trv]:
pkgFilter.add_installed([self._convert_package(t, pkg)])
return ret
def _resolve_repo(self, pkgFilter, pkg_list):
'''Find out packages from pkg_list that are available in the repository
If a package from pkg_list can be found in the repo, add it (after some
        conversion) to pkgFilter.
No return value.
'''
troves_all = [(pkg["name"], None, self.conary.flavor) for pkg in
pkg_list]
troves_repo = self.client.repos.findTroves(self.conary.default_label,
troves_all, allowMissing=True)
for trv in troves_repo:
# only use the first trove in the list
t = troves_repo[trv][0]
pkg = self._search_package(pkg_list, t[0])
pkgFilter.add_available([self._convert_package(t, pkg)])
def resolve(self, filters, package ):
"""
@filters (list) list of filters
        @package (list) list of package names to resolve
"""
self.allow_cancel(True)
self.percentage(None)
self.status(STATUS_INFO)
pkg_dict = self.xmlcache.resolve( package[0] )
if pkg_dict is None:
return None
pkgs = self._resolve_list([pkg_dict], filters)
self._show_package_list(pkgs)
def _show_package_list(self, lst):
'''Emit Package signals for a list of packages
pkgs should be a list of (trove, status) tuples.
Trove is a dict of {(name, version, flavor), metadata}, as constructed
by _convert_package.
'''
def is_redirected_package(version):
# The format of a revision string is
# "<upstream version>-<source count>-<build count>".
# If upstream version is 0, the package has become nil.
return version.split("-")[0] == "0"
for pkg, status in lst:
name, v, f = pkg["trove"]
version = str(v.trailingRevision())
if is_redirected_package(version):
continue
label = str(v.trailingLabel())
arch = conarypk.get_arch(f)
pkg_id = get_package_id(name, version, arch, label)
summary = self._format_package_summary(name,
pkg["metadata"].get("shortDesc", "").decode("UTF"))
self.package(pkg_id, status, summary)
def search_group(self, options, searchlist):
self.allow_cancel(True)
self.percentage(None)
self.status(STATUS_QUERY)
self._do_search(options, searchlist, 'group')
def search_file(self, filters, search ):
self.allow_cancel(True)
self.percentage(None)
self.status(STATUS_QUERY)
name = self.conary.search_path( search )
if name:
if ":" in name:
name = name.split(":")[0]
self.resolve( filters, [name])
def search_name(self, options, searchlist):
self.allow_cancel(True)
self.percentage(None)
self.status(STATUS_QUERY)
self._do_search(options, searchlist, 'name')
def search_details(self, options, search):
self.allow_cancel(True)
#self.percentage(None)
self.status(STATUS_QUERY)
self._do_search(options, search, 'details' )
def get_packages(self, filters):
self.allow_cancel(False)
self.status(STATUS_QUERY)
self._do_search(filters, "", 'all' )
def get_files(self, package_ids):
self.allow_cancel(True)
self.percentage(None)
self.status(STATUS_INFO)
for package_id in package_ids:
name, version, arch, data = split_package_id(package_id)
files = self.conary.list_files('%s=%s[is: %s]' %
(name, version, arch))
self.files(package_id, ';'.join(files))
@ConaryExceptionHandler
def update_system(self, only_trusted):
# FIXME: use only_trusted
self.allow_cancel(False)
self.status(STATUS_UPDATE)
cb = UpdateSystemCallback(self, self.cfg)
self.conary.updateall(cb, dry_run=False)
def refresh_cache(self, force):
# TODO: use force ?
self.percentage(None)
self.status(STATUS_REFRESH_CACHE)
self.percentage(None)
self.xmlcache.refresh()
def _display_update_jobs(self, install_jobs, erase_jobs, update_jobs):
'''Emit package status for a list of installing/erasing/updating jobs
'''
ret = []
for (name, (oldVer, oldFla), (newVer, newFla)) in install_jobs:
ret.append((name, newVer, newFla, INFO_INSTALLING))
for (name, (oldVer, oldFla), (newVer, newFla)) in erase_jobs:
ret.append((name, oldVer, oldFla, INFO_REMOVING))
for (name, (oldVer, oldFla), (newVer, newFla)) in update_jobs:
ret.append((name, oldVer, oldFla, INFO_UPDATING))
pkgs = [(self._convert_package((n, v, f), {}), info)
for (n, v, f, info) in ret]
self._show_package_list(pkgs)
def install_packages(self, only_trusted, package_ids):
self._install_packages(only_trusted, package_ids)
def simulate_install_packages(self, package_ids):
return self._install_packages(False, package_ids, simulate=True)
@ConaryExceptionHandler
def _install_packages(self, only_trusted, package_ids, simulate=False):
self.allow_cancel(False)
self.percentage(0)
self.status(STATUS_RUNNING)
pkglist = _get_trovespec_from_ids(package_ids)
cb = UpdateCallback(self, self.cfg)
updJob, suggMap = self.conary.install(pkglist, cb, simulate)
if simulate:
pkgs = self._get_package_name_from_ids(package_ids)
installs, erases, updates = conarypk.parse_jobs(updJob,
excludes=pkgs, show_components=False)
self._display_update_jobs(installs, erases, updates)
def remove_packages(self, allowDeps, autoremove, package_ids):
self. _remove_packages(allowDeps, autoremove, package_ids)
def simulate_remove_packages(self, package_ids):
return self._remove_packages(False, False, package_ids, simulate=True)
@ConaryExceptionHandler
def _remove_packages(self, allowDeps, autoremove, package_ids, simulate=False):
# TODO: use autoremove
self.allow_cancel(False)
self.percentage(0)
self.status(STATUS_RUNNING)
pkglist = _get_trovespec_from_ids(package_ids)
cb = RemoveCallback(self, self.cfg)
updJob, suggMap = self.conary.erase(pkglist, cb, simulate)
if simulate:
pkgs = self._get_package_name_from_ids(package_ids)
installs, erases, updates = conarypk.parse_jobs(updJob,
excludes=pkgs, show_components=False)
self._display_update_jobs(installs, erases, updates)
def _check_for_reboot(self, name):
if name in self.rebootpkgs:
self.require_restart(RESTART_SYSTEM, "")
def get_update_detail(self, package_ids):
self.allow_cancel(True)
self.percentage(None)
self.status(STATUS_INFO)
for package_id in package_ids:
name, version, arch, label = split_package_id(package_id)
pkgDict = self.xmlcache.resolve(name)
update = ""
obsolete = ""
cve_url = ""
if pkgDict:
vendor_url = pkgDict.get("url","")
desc = pkgDict.get("longDesc","")
reboot = self._get_restart(name)
state = _get_branch(label)
bz_url = _get_fits(label)
self.update_detail(package_id, update, obsolete, vendor_url, bz_url, cve_url,
reboot, desc, changelog="", state= state, issued="", updated = "")
def get_details(self, package_ids):
'''
Print a detailed description for a given package
'''
self.allow_cancel(True)
self.percentage(None)
self.status(STATUS_INFO)
for package_id in package_ids:
name, version, arch, data = split_package_id(package_id)
pkgDict = self.xmlcache.resolve(name)
if name and pkgDict:
longDesc = ""
url = ""
categories = None
licenses = ""
longDesc = pkgDict.get("longDesc", "")
url = pkgDict.get("url", "")
categories = self.xmlcache.getGroup(pkgDict.get("category",""))
licenses = _get_license(pkgDict.get("licenses",""))
size = pkgDict.get("size", 0)
self.details(package_id, licenses, categories, longDesc, url, size)
def _get_restart(self, name):
if name in self.rebootpkgs:
return RESTART_SYSTEM
elif name in self.restartpkgs:
return RESTART_APPLICATION
else:
return RESTART_NONE
def _get_update_priority(self, name):
if name in self.rebootpkgs:
return INFO_SECURITY
elif name in self.restartpkgs:
return INFO_SECURITY
else:
return INFO_NORMAL
def _display_updates(self, jobs):
'''Emit Package signals for a list of update jobs
jobs should only contain installs and updates. Shouldn't get any erase
jobs.
'''
ret = []
for (name, (oldVer, oldFla), (newVer, newFla)) in jobs:
info = self._get_update_priority(name)
ret.append((name, newVer, newFla, info))
pkgs = [(self._convert_package((n, v, f), {}), info)
for (n, v, f, info) in ret]
self._show_package_list(pkgs)
@ConaryExceptionHandler
def get_updates(self, filters):
self.allow_cancel(True)
self.percentage(0)
self.status(STATUS_INFO)
cb = GetUpdateCallback(self, self.cfg)
updJob, suggMap = self.conary.updateall(cb, dry_run=True)
installs, erases, updates = conarypk.parse_jobs(updJob,
show_components=False)
self._display_updates(installs + updates)
def get_repo_list(self, filters):
labels = self.conary.get_labels()
self.status(STATUS_QUERY)
for repo in labels:
self.repo_detail(repo, repo, True)
def main():
backend = PackageKitConaryBackend('')
backend.dispatcher(sys.argv[1:])
if __name__ == "__main__":
main()
| gpl-2.0 | 2,910,521,279,346,639,000 | 35.738046 | 93 | 0.608285 | false |
emrah-b/oclapi | django-nonrel/ocl/concepts/search_indexes.py | 4 | 2823 | from django.contrib.contenttypes.models import ContentType
from haystack import indexes
from concepts.models import ConceptVersion
from oclapi.search_backends import SortOrFilterField, FilterField
from oclapi.search_indexes import OCLSearchIndex
from sources.models import SourceVersion, Source
__author__ = 'misternando'
class ConceptVersionIndex(OCLSearchIndex, indexes.Indexable):
text = indexes.CharField(
document=True, use_template=True)
name = SortOrFilterField(
model_attr='display_name', indexed=True, stored=True, default="")
lastUpdate = indexes.DateTimeField(
model_attr='updated_at', indexed=True, stored=True)
num_stars = indexes.IntegerField(
model_attr='versioned_object__num_stars', indexed=True, stored=True)
conceptClass = SortOrFilterField(
model_attr='concept_class', indexed=True, stored=True, faceted=True)
datatype = SortOrFilterField(
model_attr='datatype', null=True, indexed=True, stored=True, faceted=True)
locale = FilterField(
indexed=True, stored=True, faceted=True)
is_latest_version = indexes.BooleanField(
model_attr='is_latest_version', indexed=True, stored=True)
public_can_view = indexes.BooleanField(
model_attr='public_can_view', indexed=True, stored=True)
retired = indexes.BooleanField(
model_attr='retired', indexed=True, stored=True, faceted=True)
source = SortOrFilterField(model_attr='parent_resource', indexed=True, stored=True, faceted=True)
owner = SortOrFilterField(
model_attr='owner_name', indexed=True, stored=True, faceted=True)
ownerType = SortOrFilterField(
model_attr='owner_type', indexed=True, stored=True, faceted=True)
source_version = FilterField()
collection = FilterField()
collection_version = FilterField()
is_active = indexes.BooleanField(model_attr='is_active', indexed=True, stored=True)
def get_model(self):
return ConceptVersion
def prepare_locale(self, obj):
locales = set()
if obj.names:
for name in obj.names:
if name.locale is not None:
locales.add(name.locale)
return list(locales)
def prepare_source_version(self, obj):
source_version_ids = []
source = obj.source
source_versions = SourceVersion.objects.filter(
versioned_object_id=source.id,
versioned_object_type=ContentType.objects.get_for_model(Source)
)
for sv in source_versions:
if obj.id in sv.concepts:
source_version_ids.append(sv.id)
return source_version_ids
def prepare_collection_version(self, obj):
return obj.collection_version_ids
def prepare_collection(self, obj):
return obj.collection_ids
| mpl-2.0 | 3,141,436,831,198,775,300 | 39.913043 | 101 | 0.684378 | false |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/scipy/io/netcdf.py | 6 | 37683 | """
NetCDF reader/writer module.
This module is used to read and create NetCDF files. NetCDF files are
accessed through the `netcdf_file` object. Data written to and from NetCDF
files are contained in `netcdf_variable` objects. Attributes are given
as member variables of the `netcdf_file` and `netcdf_variable` objects.
This module implements the Scientific.IO.NetCDF API to read and create
NetCDF files. The same API is also used in the PyNIO and pynetcdf
modules, allowing these modules to be used interchangeably when working
with NetCDF files.
Only NetCDF3 is supported here; for NetCDF4 see
`netCDF4-python <http://unidata.github.io/netcdf4-python/>`__,
which has a similar API.
"""
from __future__ import division, print_function, absolute_import
# TODO:
# * properly implement ``_FillValue``.
# * fix character variables.
# * implement PAGESIZE for Python 2.6?
# The Scientific.IO.NetCDF API allows attributes to be added directly to
# instances of ``netcdf_file`` and ``netcdf_variable``. To differentiate
# between user-set attributes and instance attributes, user-set attributes
# are automatically stored in the ``_attributes`` attribute by overloading
#``__setattr__``. This is the reason why the code sometimes uses
#``obj.__dict__['key'] = value``, instead of simply ``obj.key = value``;
# otherwise the key would be inserted into userspace attributes.
__all__ = ['netcdf_file']
import warnings
import weakref
from operator import mul
from collections import OrderedDict
import mmap as mm
import numpy as np
from numpy.compat import asbytes, asstr
from numpy import fromstring, dtype, empty, array, asarray
from numpy import little_endian as LITTLE_ENDIAN
from functools import reduce
from scipy._lib.six import integer_types, text_type, binary_type
ABSENT = b'\x00\x00\x00\x00\x00\x00\x00\x00'
ZERO = b'\x00\x00\x00\x00'
NC_BYTE = b'\x00\x00\x00\x01'
NC_CHAR = b'\x00\x00\x00\x02'
NC_SHORT = b'\x00\x00\x00\x03'
NC_INT = b'\x00\x00\x00\x04'
NC_FLOAT = b'\x00\x00\x00\x05'
NC_DOUBLE = b'\x00\x00\x00\x06'
NC_DIMENSION = b'\x00\x00\x00\n'
NC_VARIABLE = b'\x00\x00\x00\x0b'
NC_ATTRIBUTE = b'\x00\x00\x00\x0c'
TYPEMAP = {NC_BYTE: ('b', 1),
NC_CHAR: ('c', 1),
NC_SHORT: ('h', 2),
NC_INT: ('i', 4),
NC_FLOAT: ('f', 4),
NC_DOUBLE: ('d', 8)}
REVERSE = {('b', 1): NC_BYTE,
('B', 1): NC_CHAR,
('c', 1): NC_CHAR,
('h', 2): NC_SHORT,
('i', 4): NC_INT,
('f', 4): NC_FLOAT,
('d', 8): NC_DOUBLE,
# these come from asarray(1).dtype.char and asarray('foo').dtype.char,
# used when getting the types from generic attributes.
('l', 4): NC_INT,
('S', 1): NC_CHAR}
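# A quick note on how these two tables relate (values taken directly from the
# literals above): TYPEMAP[NC_FLOAT] == ('f', 4) maps an on-disk type tag to a
# (typecode, size) pair when reading, while REVERSE[('f', 4)] == NC_FLOAT goes
# the other way when writing variable headers and attributes.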
class netcdf_file(object):
"""
A file object for NetCDF data.
A `netcdf_file` object has two standard attributes: `dimensions` and
`variables`. The values of both are dictionaries, mapping dimension
names to their associated lengths and variable names to variables,
respectively. Application programs should never modify these
dictionaries.
All other attributes correspond to global attributes defined in the
NetCDF file. Global file attributes are created by assigning to an
attribute of the `netcdf_file` object.
Parameters
----------
filename : string or file-like
string -> filename
mode : {'r', 'w', 'a'}, optional
read-write-append mode, default is 'r'
mmap : None or bool, optional
Whether to mmap `filename` when reading. Default is True
when `filename` is a file name, False when `filename` is a
file-like object. Note that when mmap is in use, data arrays
returned refer directly to the mmapped data on disk, and the
file cannot be closed as long as references to it exist.
version : {1, 2}, optional
version of netcdf to read / write, where 1 means *Classic
format* and 2 means *64-bit offset format*. Default is 1. See
`here <https://www.unidata.ucar.edu/software/netcdf/docs/netcdf_introduction.html#select_format>`__
for more info.
maskandscale : bool, optional
Whether to automatically scale and/or mask data based on attributes.
Default is False.
Notes
-----
The major advantage of this module over other modules is that it doesn't
require the code to be linked to the NetCDF libraries. This module is
derived from `pupynere <https://bitbucket.org/robertodealmeida/pupynere/>`_.
NetCDF files are a self-describing binary data format. The file contains
metadata that describes the dimensions and variables in the file. More
details about NetCDF files can be found `here
<https://www.unidata.ucar.edu/software/netcdf/docs/user_guide.html>`__. There
are three main sections to a NetCDF data structure:
1. Dimensions
2. Variables
3. Attributes
The dimensions section records the name and length of each dimension used
by the variables. The variables would then indicate which dimensions it
uses and any attributes such as data units, along with containing the data
values for the variable. It is good practice to include a
variable that is the same name as a dimension to provide the values for
that axes. Lastly, the attributes section would contain additional
information such as the name of the file creator or the instrument used to
collect the data.
When writing data to a NetCDF file, there is often the need to indicate the
'record dimension'. A record dimension is the unbounded dimension for a
variable. For example, a temperature variable may have dimensions of
latitude, longitude and time. If one wants to add more temperature data to
the NetCDF file as time progresses, then the temperature variable should
have the time dimension flagged as the record dimension.
In addition, the NetCDF file header contains the position of the data in
the file, so access can be done in an efficient manner without loading
unnecessary data into memory. It uses the ``mmap`` module to create
Numpy arrays mapped to the data on disk, for the same purpose.
Note that when `netcdf_file` is used to open a file with mmap=True
(default for read-only), arrays returned by it refer to data
directly on the disk. The file should not be closed, and cannot be cleanly
closed when asked, if such arrays are alive. You may want to copy data arrays
obtained from mmapped Netcdf file if they are to be processed after the file
is closed, see the example below.
Examples
--------
To create a NetCDF file:
>>> from scipy.io import netcdf
>>> f = netcdf.netcdf_file('simple.nc', 'w')
>>> f.history = 'Created for a test'
>>> f.createDimension('time', 10)
>>> time = f.createVariable('time', 'i', ('time',))
>>> time[:] = np.arange(10)
>>> time.units = 'days since 2008-01-01'
>>> f.close()
    Note the assignment of ``np.arange(10)`` to ``time[:]``. Exposing the slice
    of the time variable allows for the data to be set in the object, rather
    than letting ``np.arange(10)`` overwrite the ``time`` variable.
To read the NetCDF file we just created:
>>> from scipy.io import netcdf
>>> f = netcdf.netcdf_file('simple.nc', 'r')
>>> print(f.history)
b'Created for a test'
>>> time = f.variables['time']
>>> print(time.units)
b'days since 2008-01-01'
>>> print(time.shape)
(10,)
>>> print(time[-1])
9
NetCDF files, when opened read-only, return arrays that refer
directly to memory-mapped data on disk:
>>> data = time[:]
>>> data.base.base
<mmap.mmap object at 0x7fe753763180>
If the data is to be processed after the file is closed, it needs
to be copied to main memory:
>>> data = time[:].copy()
>>> f.close()
>>> data.mean()
4.5
A NetCDF file can also be used as context manager:
>>> from scipy.io import netcdf
>>> with netcdf.netcdf_file('simple.nc', 'r') as f:
... print(f.history)
b'Created for a test'
"""
def __init__(self, filename, mode='r', mmap=None, version=1,
maskandscale=False):
"""Initialize netcdf_file from fileobj (str or file-like)."""
if mode not in 'rwa':
raise ValueError("Mode must be either 'r', 'w' or 'a'.")
if hasattr(filename, 'seek'): # file-like
self.fp = filename
self.filename = 'None'
if mmap is None:
mmap = False
elif mmap and not hasattr(filename, 'fileno'):
raise ValueError('Cannot use file object for mmap')
else: # maybe it's a string
self.filename = filename
omode = 'r+' if mode == 'a' else mode
self.fp = open(self.filename, '%sb' % omode)
if mmap is None:
mmap = True
if mode != 'r':
# Cannot read write-only files
mmap = False
self.use_mmap = mmap
self.mode = mode
self.version_byte = version
self.maskandscale = maskandscale
self.dimensions = OrderedDict()
self.variables = OrderedDict()
self._dims = []
self._recs = 0
self._recsize = 0
self._mm = None
self._mm_buf = None
if self.use_mmap:
self._mm = mm.mmap(self.fp.fileno(), 0, access=mm.ACCESS_READ)
self._mm_buf = np.frombuffer(self._mm, dtype=np.int8)
self._attributes = OrderedDict()
if mode in 'ra':
self._read()
def __setattr__(self, attr, value):
# Store user defined attributes in a separate dict,
# so we can save them to file later.
try:
self._attributes[attr] = value
except AttributeError:
pass
self.__dict__[attr] = value
def close(self):
"""Closes the NetCDF file."""
if hasattr(self, 'fp') and not self.fp.closed:
try:
self.flush()
finally:
self.variables = OrderedDict()
if self._mm_buf is not None:
ref = weakref.ref(self._mm_buf)
self._mm_buf = None
if ref() is None:
# self._mm_buf is gc'd, and we can close the mmap
self._mm.close()
else:
# we cannot close self._mm, since self._mm_buf is
# alive and there may still be arrays referring to it
warnings.warn((
"Cannot close a netcdf_file opened with mmap=True, when "
"netcdf_variables or arrays referring to its data still exist. "
"All data arrays obtained from such files refer directly to "
"data on disk, and must be copied before the file can be cleanly "
"closed. (See netcdf_file docstring for more information on mmap.)"
), category=RuntimeWarning)
self._mm = None
self.fp.close()
__del__ = close
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def createDimension(self, name, length):
"""
Adds a dimension to the Dimension section of the NetCDF data structure.
Note that this function merely adds a new dimension that the variables can
reference. The values for the dimension, if desired, should be added as
a variable using `createVariable`, referring to this dimension.
Parameters
----------
name : str
Name of the dimension (Eg, 'lat' or 'time').
length : int
Length of the dimension.
See Also
--------
createVariable
"""
if length is None and self._dims:
raise ValueError("Only first dimension may be unlimited!")
self.dimensions[name] = length
self._dims.append(name)
def createVariable(self, name, type, dimensions):
"""
Create an empty variable for the `netcdf_file` object, specifying its data
type and the dimensions it uses.
Parameters
----------
name : str
Name of the new variable.
type : dtype or str
Data type of the variable.
dimensions : sequence of str
List of the dimension names used by the variable, in the desired order.
Returns
-------
variable : netcdf_variable
The newly created ``netcdf_variable`` object.
This object has also been added to the `netcdf_file` object as well.
See Also
--------
createDimension
Notes
-----
Any dimensions to be used by the variable should already exist in the
NetCDF data structure or should be created by `createDimension` prior to
creating the NetCDF variable.
"""
shape = tuple([self.dimensions[dim] for dim in dimensions])
shape_ = tuple([dim or 0 for dim in shape]) # replace None with 0 for numpy
type = dtype(type)
typecode, size = type.char, type.itemsize
if (typecode, size) not in REVERSE:
raise ValueError("NetCDF 3 does not support type %s" % type)
data = empty(shape_, dtype=type.newbyteorder("B")) # convert to big endian always for NetCDF 3
self.variables[name] = netcdf_variable(
data, typecode, size, shape, dimensions,
maskandscale=self.maskandscale)
return self.variables[name]
def flush(self):
"""
Perform a sync-to-disk flush if the `netcdf_file` object is in write mode.
See Also
--------
sync : Identical function
"""
if hasattr(self, 'mode') and self.mode in 'wa':
self._write()
sync = flush
def _write(self):
self.fp.seek(0)
self.fp.write(b'CDF')
self.fp.write(array(self.version_byte, '>b').tostring())
# Write headers and data.
self._write_numrecs()
self._write_dim_array()
self._write_gatt_array()
self._write_var_array()
def _write_numrecs(self):
# Get highest record count from all record variables.
for var in self.variables.values():
if var.isrec and len(var.data) > self._recs:
self.__dict__['_recs'] = len(var.data)
self._pack_int(self._recs)
def _write_dim_array(self):
if self.dimensions:
self.fp.write(NC_DIMENSION)
self._pack_int(len(self.dimensions))
for name in self._dims:
self._pack_string(name)
length = self.dimensions[name]
self._pack_int(length or 0) # replace None with 0 for record dimension
else:
self.fp.write(ABSENT)
def _write_gatt_array(self):
self._write_att_array(self._attributes)
def _write_att_array(self, attributes):
if attributes:
self.fp.write(NC_ATTRIBUTE)
self._pack_int(len(attributes))
for name, values in attributes.items():
self._pack_string(name)
self._write_values(values)
else:
self.fp.write(ABSENT)
def _write_var_array(self):
if self.variables:
self.fp.write(NC_VARIABLE)
self._pack_int(len(self.variables))
# Sort variable names non-recs first, then recs.
def sortkey(n):
v = self.variables[n]
if v.isrec:
return (-1,)
return v._shape
variables = sorted(self.variables, key=sortkey, reverse=True)
# Set the metadata for all variables.
for name in variables:
self._write_var_metadata(name)
# Now that we have the metadata, we know the vsize of
# each record variable, so we can calculate recsize.
self.__dict__['_recsize'] = sum([
var._vsize for var in self.variables.values()
if var.isrec])
# Set the data for all variables.
for name in variables:
self._write_var_data(name)
else:
self.fp.write(ABSENT)
def _write_var_metadata(self, name):
var = self.variables[name]
self._pack_string(name)
self._pack_int(len(var.dimensions))
for dimname in var.dimensions:
dimid = self._dims.index(dimname)
self._pack_int(dimid)
self._write_att_array(var._attributes)
nc_type = REVERSE[var.typecode(), var.itemsize()]
self.fp.write(asbytes(nc_type))
if not var.isrec:
vsize = var.data.size * var.data.itemsize
vsize += -vsize % 4
else: # record variable
try:
vsize = var.data[0].size * var.data.itemsize
except IndexError:
vsize = 0
rec_vars = len([v for v in self.variables.values()
if v.isrec])
if rec_vars > 1:
vsize += -vsize % 4
self.variables[name].__dict__['_vsize'] = vsize
self._pack_int(vsize)
# Pack a bogus begin, and set the real value later.
self.variables[name].__dict__['_begin'] = self.fp.tell()
self._pack_begin(0)
def _write_var_data(self, name):
var = self.variables[name]
# Set begin in file header.
the_beguine = self.fp.tell()
self.fp.seek(var._begin)
self._pack_begin(the_beguine)
self.fp.seek(the_beguine)
# Write data.
if not var.isrec:
self.fp.write(var.data.tostring())
count = var.data.size * var.data.itemsize
self.fp.write(b'0' * (var._vsize - count))
else: # record variable
# Handle rec vars with shape[0] < nrecs.
if self._recs > len(var.data):
shape = (self._recs,) + var.data.shape[1:]
# Resize in-place does not always work since
# the array might not be single-segment
try:
var.data.resize(shape)
except ValueError:
var.__dict__['data'] = np.resize(var.data, shape).astype(var.data.dtype)
pos0 = pos = self.fp.tell()
for rec in var.data:
# Apparently scalars cannot be converted to big endian. If we
# try to convert a ``=i4`` scalar to, say, '>i4' the dtype
# will remain as ``=i4``.
if not rec.shape and (rec.dtype.byteorder == '<' or
(rec.dtype.byteorder == '=' and LITTLE_ENDIAN)):
rec = rec.byteswap()
self.fp.write(rec.tostring())
# Padding
count = rec.size * rec.itemsize
self.fp.write(b'0' * (var._vsize - count))
pos += self._recsize
self.fp.seek(pos)
self.fp.seek(pos0 + var._vsize)
def _write_values(self, values):
if hasattr(values, 'dtype'):
nc_type = REVERSE[values.dtype.char, values.dtype.itemsize]
else:
types = [(t, NC_INT) for t in integer_types]
types += [
(float, NC_FLOAT),
(str, NC_CHAR)
]
# bytes index into scalars in py3k. Check for "string" types
if isinstance(values, text_type) or isinstance(values, binary_type):
sample = values
else:
try:
sample = values[0] # subscriptable?
except TypeError:
sample = values # scalar
for class_, nc_type in types:
if isinstance(sample, class_):
break
typecode, size = TYPEMAP[nc_type]
dtype_ = '>%s' % typecode
# asarray() dies with bytes and '>c' in py3k. Change to 'S'
dtype_ = 'S' if dtype_ == '>c' else dtype_
values = asarray(values, dtype=dtype_)
self.fp.write(asbytes(nc_type))
if values.dtype.char == 'S':
nelems = values.itemsize
else:
nelems = values.size
self._pack_int(nelems)
if not values.shape and (values.dtype.byteorder == '<' or
(values.dtype.byteorder == '=' and LITTLE_ENDIAN)):
values = values.byteswap()
self.fp.write(values.tostring())
count = values.size * values.itemsize
self.fp.write(b'0' * (-count % 4)) # pad
def _read(self):
# Check magic bytes and version
magic = self.fp.read(3)
if not magic == b'CDF':
raise TypeError("Error: %s is not a valid NetCDF 3 file" %
self.filename)
self.__dict__['version_byte'] = fromstring(self.fp.read(1), '>b')[0]
# Read file headers and set data.
self._read_numrecs()
self._read_dim_array()
self._read_gatt_array()
self._read_var_array()
def _read_numrecs(self):
self.__dict__['_recs'] = self._unpack_int()
def _read_dim_array(self):
header = self.fp.read(4)
if header not in [ZERO, NC_DIMENSION]:
raise ValueError("Unexpected header.")
count = self._unpack_int()
for dim in range(count):
name = asstr(self._unpack_string())
length = self._unpack_int() or None # None for record dimension
self.dimensions[name] = length
self._dims.append(name) # preserve order
def _read_gatt_array(self):
for k, v in self._read_att_array().items():
self.__setattr__(k, v)
def _read_att_array(self):
header = self.fp.read(4)
if header not in [ZERO, NC_ATTRIBUTE]:
raise ValueError("Unexpected header.")
count = self._unpack_int()
attributes = OrderedDict()
for attr in range(count):
name = asstr(self._unpack_string())
attributes[name] = self._read_values()
return attributes
def _read_var_array(self):
header = self.fp.read(4)
if header not in [ZERO, NC_VARIABLE]:
raise ValueError("Unexpected header.")
begin = 0
dtypes = {'names': [], 'formats': []}
rec_vars = []
count = self._unpack_int()
for var in range(count):
(name, dimensions, shape, attributes,
typecode, size, dtype_, begin_, vsize) = self._read_var()
# https://www.unidata.ucar.edu/software/netcdf/docs/user_guide.html
# Note that vsize is the product of the dimension lengths
# (omitting the record dimension) and the number of bytes
# per value (determined from the type), increased to the
# next multiple of 4, for each variable. If a record
# variable, this is the amount of space per record. The
# netCDF "record size" is calculated as the sum of the
# vsize's of all the record variables.
#
# The vsize field is actually redundant, because its value
# may be computed from other information in the header. The
# 32-bit vsize field is not large enough to contain the size
# of variables that require more than 2^32 - 4 bytes, so
# 2^32 - 1 is used in the vsize field for such variables.
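            # Worked example (illustrative values, not taken from any
            # particular file): a record variable with per-record shape (3,)
            # and typecode 'h' (2 bytes) needs 3 * 2 = 6 bytes per record,
            # rounded up to vsize = 8; a non-record 'd' variable of shape
            # (2, 5) needs 2 * 5 * 8 = 80 bytes, already a multiple of 4.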
if shape and shape[0] is None: # record variable
rec_vars.append(name)
# The netCDF "record size" is calculated as the sum of
# the vsize's of all the record variables.
self.__dict__['_recsize'] += vsize
if begin == 0:
begin = begin_
dtypes['names'].append(name)
dtypes['formats'].append(str(shape[1:]) + dtype_)
# Handle padding with a virtual variable.
if typecode in 'bch':
actual_size = reduce(mul, (1,) + shape[1:]) * size
padding = -actual_size % 4
if padding:
dtypes['names'].append('_padding_%d' % var)
dtypes['formats'].append('(%d,)>b' % padding)
# Data will be set later.
data = None
else: # not a record variable
# Calculate size to avoid problems with vsize (above)
a_size = reduce(mul, shape, 1) * size
if self.use_mmap:
data = self._mm_buf[begin_:begin_+a_size].view(dtype=dtype_)
data.shape = shape
else:
pos = self.fp.tell()
self.fp.seek(begin_)
data = fromstring(self.fp.read(a_size), dtype=dtype_)
data.shape = shape
self.fp.seek(pos)
# Add variable.
self.variables[name] = netcdf_variable(
data, typecode, size, shape, dimensions, attributes,
maskandscale=self.maskandscale)
if rec_vars:
# Remove padding when only one record variable.
if len(rec_vars) == 1:
dtypes['names'] = dtypes['names'][:1]
dtypes['formats'] = dtypes['formats'][:1]
# Build rec array.
if self.use_mmap:
rec_array = self._mm_buf[begin:begin+self._recs*self._recsize].view(dtype=dtypes)
rec_array.shape = (self._recs,)
else:
pos = self.fp.tell()
self.fp.seek(begin)
rec_array = fromstring(self.fp.read(self._recs*self._recsize), dtype=dtypes)
rec_array.shape = (self._recs,)
self.fp.seek(pos)
for var in rec_vars:
self.variables[var].__dict__['data'] = rec_array[var]
def _read_var(self):
name = asstr(self._unpack_string())
dimensions = []
shape = []
dims = self._unpack_int()
for i in range(dims):
dimid = self._unpack_int()
dimname = self._dims[dimid]
dimensions.append(dimname)
dim = self.dimensions[dimname]
shape.append(dim)
dimensions = tuple(dimensions)
shape = tuple(shape)
attributes = self._read_att_array()
nc_type = self.fp.read(4)
vsize = self._unpack_int()
begin = [self._unpack_int, self._unpack_int64][self.version_byte-1]()
typecode, size = TYPEMAP[nc_type]
dtype_ = '>%s' % typecode
return name, dimensions, shape, attributes, typecode, size, dtype_, begin, vsize
def _read_values(self):
nc_type = self.fp.read(4)
n = self._unpack_int()
typecode, size = TYPEMAP[nc_type]
count = n*size
values = self.fp.read(int(count))
self.fp.read(-count % 4) # read padding
        if typecode != 'c':
values = fromstring(values, dtype='>%s' % typecode)
if values.shape == (1,):
values = values[0]
else:
values = values.rstrip(b'\x00')
return values
def _pack_begin(self, begin):
if self.version_byte == 1:
self._pack_int(begin)
elif self.version_byte == 2:
self._pack_int64(begin)
def _pack_int(self, value):
self.fp.write(array(value, '>i').tostring())
_pack_int32 = _pack_int
def _unpack_int(self):
return int(fromstring(self.fp.read(4), '>i')[0])
_unpack_int32 = _unpack_int
def _pack_int64(self, value):
self.fp.write(array(value, '>q').tostring())
def _unpack_int64(self):
return fromstring(self.fp.read(8), '>q')[0]
def _pack_string(self, s):
count = len(s)
self._pack_int(count)
self.fp.write(asbytes(s))
self.fp.write(b'0' * (-count % 4)) # pad
def _unpack_string(self):
count = self._unpack_int()
s = self.fp.read(count).rstrip(b'\x00')
self.fp.read(-count % 4) # read padding
return s
class netcdf_variable(object):
"""
A data object for the `netcdf` module.
`netcdf_variable` objects are constructed by calling the method
`netcdf_file.createVariable` on the `netcdf_file` object. `netcdf_variable`
objects behave much like array objects defined in numpy, except that their
data resides in a file. Data is read by indexing and written by assigning
to an indexed subset; the entire array can be accessed by the index ``[:]``
or (for scalars) by using the methods `getValue` and `assignValue`.
`netcdf_variable` objects also have attribute `shape` with the same meaning
as for arrays, but the shape cannot be modified. There is another read-only
attribute `dimensions`, whose value is the tuple of dimension names.
All other attributes correspond to variable attributes defined in
the NetCDF file. Variable attributes are created by assigning to an
attribute of the `netcdf_variable` object.
Parameters
----------
data : array_like
The data array that holds the values for the variable.
Typically, this is initialized as empty, but with the proper shape.
typecode : dtype character code
Desired data-type for the data array.
size : int
Desired element size for the data array.
shape : sequence of ints
The shape of the array. This should match the lengths of the
variable's dimensions.
dimensions : sequence of strings
The names of the dimensions used by the variable. Must be in the
same order of the dimension lengths given by `shape`.
attributes : dict, optional
Attribute values (any type) keyed by string names. These attributes
become attributes for the netcdf_variable object.
maskandscale : bool, optional
Whether to automatically scale and/or mask data based on attributes.
Default is False.
Attributes
----------
dimensions : list of str
List of names of dimensions used by the variable object.
isrec, shape
Properties
See also
--------
isrec, shape
"""
def __init__(self, data, typecode, size, shape, dimensions,
attributes=None,
maskandscale=False):
self.data = data
self._typecode = typecode
self._size = size
self._shape = shape
self.dimensions = dimensions
self.maskandscale = maskandscale
self._attributes = attributes or OrderedDict()
for k, v in self._attributes.items():
self.__dict__[k] = v
def __setattr__(self, attr, value):
# Store user defined attributes in a separate dict,
# so we can save them to file later.
try:
self._attributes[attr] = value
except AttributeError:
pass
self.__dict__[attr] = value
def isrec(self):
"""Returns whether the variable has a record dimension or not.
A record dimension is a dimension along which additional data could be
easily appended in the netcdf data structure without much rewriting of
the data file. This attribute is a read-only property of the
`netcdf_variable`.
"""
return bool(self.data.shape) and not self._shape[0]
isrec = property(isrec)
def shape(self):
"""Returns the shape tuple of the data variable.
This is a read-only attribute and can not be modified in the
same manner of other numpy arrays.
"""
return self.data.shape
shape = property(shape)
def getValue(self):
"""
Retrieve a scalar value from a `netcdf_variable` of length one.
Raises
------
ValueError
If the netcdf variable is an array of length greater than one,
this exception will be raised.
"""
return self.data.item()
def assignValue(self, value):
"""
Assign a scalar value to a `netcdf_variable` of length one.
Parameters
----------
value : scalar
Scalar value (of compatible type) to assign to a length-one netcdf
variable. This value will be written to file.
Raises
------
ValueError
If the input is not a scalar, or if the destination is not a length-one
netcdf variable.
"""
if not self.data.flags.writeable:
# Work-around for a bug in NumPy. Calling itemset() on a read-only
# memory-mapped array causes a seg. fault.
# See NumPy ticket #1622, and SciPy ticket #1202.
# This check for `writeable` can be removed when the oldest version
# of numpy still supported by scipy contains the fix for #1622.
raise RuntimeError("variable is not writeable")
self.data.itemset(value)
def typecode(self):
"""
Return the typecode of the variable.
Returns
-------
typecode : char
The character typecode of the variable (eg, 'i' for int).
"""
return self._typecode
def itemsize(self):
"""
Return the itemsize of the variable.
Returns
-------
itemsize : int
The element size of the variable (eg, 8 for float64).
"""
return self._size
def __getitem__(self, index):
if not self.maskandscale:
return self.data[index]
data = self.data[index].copy()
missing_value = self._get_missing_value()
data = self._apply_missing_value(data, missing_value)
scale_factor = self._attributes.get('scale_factor')
add_offset = self._attributes.get('add_offset')
if add_offset is not None or scale_factor is not None:
data = data.astype(np.float64)
if scale_factor is not None:
data = data * scale_factor
if add_offset is not None:
data += add_offset
return data
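    # A minimal sketch of the maskandscale round trip implemented by
    # __getitem__ above and __setitem__ below (the attribute values here are
    # made up for illustration):
    #
    #   var.scale_factor = 0.5
    #   var.add_offset = 10.0
    #   var[0] = 11.0   # stored as round((11.0 - 10.0) / 0.5) = 2 for int types
    #   var[0]          # read back as 2 * 0.5 + 10.0 = 11.0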
def __setitem__(self, index, data):
if self.maskandscale:
missing_value = (
self._get_missing_value() or
getattr(data, 'fill_value', 999999))
self._attributes.setdefault('missing_value', missing_value)
self._attributes.setdefault('_FillValue', missing_value)
data = ((data - self._attributes.get('add_offset', 0.0)) /
self._attributes.get('scale_factor', 1.0))
data = np.ma.asarray(data).filled(missing_value)
if self._typecode not in 'fd' and data.dtype.kind == 'f':
data = np.round(data)
# Expand data for record vars?
if self.isrec:
if isinstance(index, tuple):
rec_index = index[0]
else:
rec_index = index
if isinstance(rec_index, slice):
recs = (rec_index.start or 0) + len(data)
else:
recs = rec_index + 1
if recs > len(self.data):
shape = (recs,) + self._shape[1:]
# Resize in-place does not always work since
# the array might not be single-segment
try:
self.data.resize(shape)
except ValueError:
self.__dict__['data'] = np.resize(self.data, shape).astype(self.data.dtype)
self.data[index] = data
def _get_missing_value(self):
"""
Returns the value denoting "no data" for this variable.
If this variable does not have a missing/fill value, returns None.
If both _FillValue and missing_value are given, give precedence to
_FillValue. The netCDF standard gives special meaning to _FillValue;
missing_value is just used for compatibility with old datasets.
"""
if '_FillValue' in self._attributes:
missing_value = self._attributes['_FillValue']
elif 'missing_value' in self._attributes:
missing_value = self._attributes['missing_value']
else:
missing_value = None
return missing_value
@staticmethod
def _apply_missing_value(data, missing_value):
"""
Applies the given missing value to the data array.
Returns a numpy.ma array, with any value equal to missing_value masked
out (unless missing_value is None, in which case the original array is
returned).
"""
if missing_value is None:
newdata = data
else:
try:
missing_value_isnan = np.isnan(missing_value)
except (TypeError, NotImplementedError):
# some data types (e.g., characters) cannot be tested for NaN
missing_value_isnan = False
if missing_value_isnan:
mymask = np.isnan(data)
else:
mymask = (data == missing_value)
newdata = np.ma.masked_where(mymask, data)
return newdata
NetCDFFile = netcdf_file
NetCDFVariable = netcdf_variable
| mit | 781,949,127,952,069,200 | 34.922784 | 107 | 0.572274 | false |
dubourg/openturns | python/doc/pyplots/StationaryCovarianceModelFactory.py | 1 | 1141 | import openturns as ot
from math import exp
from matplotlib import pyplot as plt
from openturns.viewer import View
N = 512
a = 20.0
# myMesh = ot.IntervalMesher([N]).build(ot.Interval(-a, a))
myMesh = ot.RegularGrid(0.0, 2 * a / N, N + 1)
covarianceModel = ot.ExponentialModel(1, [1.0], [1.0])
myProcess = ot.TemporalNormalProcess(covarianceModel, myMesh)
mySample = myProcess.getSample(1000)
myCovarianceFactory = ot.StationaryCovarianceModelFactory()
myEstimatedModel = myCovarianceFactory.build(mySample)
def f(x):
res = covarianceModel(x)[0, 0]
return [res]
func = ot.PythonFunction(1, 1, f)
func.setDescription(['$t$', '$cov$'])
def fEst(X):
res = myEstimatedModel(X)[0, 0]
return [res]
funcEst = ot.PythonFunction(1, 1, fEst)
funcEst.setDescription(['$t$', '$cov_{est}$'])
cov_graph = func.draw(-a / 4, a / 4, 1024)
cov_graph.add(funcEst.draw(-a / 4, a / 4, 1024))
cov_graph.setColors(['blue', 'red'])
fig = plt.figure(figsize=(10, 4))
plt.suptitle('Stationary covariance model estimation')
cov_axis = fig.add_subplot(111)
view = View(cov_graph, figure=fig, axes=[cov_axis], add_legend=False)
view.show()
| gpl-3.0 | 4,922,804,402,480,347,000 | 24.355556 | 69 | 0.696757 | false |
SnappleCap/oh-mainline | vendor/packages/scrapy/scrapy/tests/test_utils_url.py | 16 | 9226 | import unittest
from scrapy.spider import BaseSpider
from scrapy.utils.url import url_is_from_any_domain, url_is_from_spider, canonicalize_url
__doctests__ = ['scrapy.utils.url']
class UrlUtilsTest(unittest.TestCase):
def test_url_is_from_any_domain(self):
url = 'http://www.wheele-bin-art.co.uk/get/product/123'
self.assertTrue(url_is_from_any_domain(url, ['wheele-bin-art.co.uk']))
self.assertFalse(url_is_from_any_domain(url, ['art.co.uk']))
url = 'http://wheele-bin-art.co.uk/get/product/123'
self.assertTrue(url_is_from_any_domain(url, ['wheele-bin-art.co.uk']))
self.assertFalse(url_is_from_any_domain(url, ['art.co.uk']))
url = 'javascript:%20document.orderform_2581_1190810811.mode.value=%27add%27;%20javascript:%20document.orderform_2581_1190810811.submit%28%29'
self.assertFalse(url_is_from_any_domain(url, ['testdomain.com']))
self.assertFalse(url_is_from_any_domain(url+'.testdomain.com', ['testdomain.com']))
def test_url_is_from_spider(self):
spider = BaseSpider(name='example.com')
self.assertTrue(url_is_from_spider('http://www.example.com/some/page.html', spider))
self.assertTrue(url_is_from_spider('http://sub.example.com/some/page.html', spider))
self.assertFalse(url_is_from_spider('http://www.example.org/some/page.html', spider))
self.assertFalse(url_is_from_spider('http://www.example.net/some/page.html', spider))
def test_url_is_from_spider_class_attributes(self):
class MySpider(BaseSpider):
name = 'example.com'
self.assertTrue(url_is_from_spider('http://www.example.com/some/page.html', MySpider))
self.assertTrue(url_is_from_spider('http://sub.example.com/some/page.html', MySpider))
self.assertFalse(url_is_from_spider('http://www.example.org/some/page.html', MySpider))
self.assertFalse(url_is_from_spider('http://www.example.net/some/page.html', MySpider))
def test_url_is_from_spider_with_allowed_domains(self):
spider = BaseSpider(name='example.com', allowed_domains=['example.org', 'example.net'])
self.assertTrue(url_is_from_spider('http://www.example.com/some/page.html', spider))
self.assertTrue(url_is_from_spider('http://sub.example.com/some/page.html', spider))
self.assertTrue(url_is_from_spider('http://example.com/some/page.html', spider))
self.assertTrue(url_is_from_spider('http://www.example.org/some/page.html', spider))
self.assertTrue(url_is_from_spider('http://www.example.net/some/page.html', spider))
self.assertFalse(url_is_from_spider('http://www.example.us/some/page.html', spider))
def test_url_is_from_spider_with_allowed_domains_class_attributes(self):
class MySpider(BaseSpider):
name = 'example.com'
allowed_domains = ['example.org', 'example.net']
self.assertTrue(url_is_from_spider('http://www.example.com/some/page.html', MySpider))
self.assertTrue(url_is_from_spider('http://sub.example.com/some/page.html', MySpider))
self.assertTrue(url_is_from_spider('http://example.com/some/page.html', MySpider))
self.assertTrue(url_is_from_spider('http://www.example.org/some/page.html', MySpider))
self.assertTrue(url_is_from_spider('http://www.example.net/some/page.html', MySpider))
self.assertFalse(url_is_from_spider('http://www.example.us/some/page.html', MySpider))
def test_canonicalize_url(self):
# simplest case
self.assertEqual(canonicalize_url("http://www.example.com"),
"http://www.example.com")
# always return a str
assert isinstance(canonicalize_url(u"http://www.example.com"), str)
# typical usage
self.assertEqual(canonicalize_url("http://www.example.com/do?a=1&b=2&c=3"),
"http://www.example.com/do?a=1&b=2&c=3")
self.assertEqual(canonicalize_url("http://www.example.com/do?c=1&b=2&a=3"),
"http://www.example.com/do?a=3&b=2&c=1")
self.assertEqual(canonicalize_url("http://www.example.com/do?&a=1"),
"http://www.example.com/do?a=1")
# sorting by argument values
self.assertEqual(canonicalize_url("http://www.example.com/do?c=3&b=5&b=2&a=50"),
"http://www.example.com/do?a=50&b=2&b=5&c=3")
# using keep_blank_values
self.assertEqual(canonicalize_url("http://www.example.com/do?b=&a=2", keep_blank_values=False),
"http://www.example.com/do?a=2")
self.assertEqual(canonicalize_url("http://www.example.com/do?b=&a=2"),
"http://www.example.com/do?a=2&b=")
self.assertEqual(canonicalize_url("http://www.example.com/do?b=&c&a=2", keep_blank_values=False),
"http://www.example.com/do?a=2")
self.assertEqual(canonicalize_url("http://www.example.com/do?b=&c&a=2"),
"http://www.example.com/do?a=2&b=&c=")
self.assertEqual(canonicalize_url(u'http://www.example.com/do?1750,4'),
'http://www.example.com/do?1750%2C4=')
# spaces
self.assertEqual(canonicalize_url("http://www.example.com/do?q=a space&a=1"),
"http://www.example.com/do?a=1&q=a+space")
self.assertEqual(canonicalize_url("http://www.example.com/do?q=a+space&a=1"),
"http://www.example.com/do?a=1&q=a+space")
self.assertEqual(canonicalize_url("http://www.example.com/do?q=a%20space&a=1"),
"http://www.example.com/do?a=1&q=a+space")
# normalize percent-encoding case (in paths)
self.assertEqual(canonicalize_url("http://www.example.com/a%a3do"),
"http://www.example.com/a%A3do"),
# normalize percent-encoding case (in query arguments)
self.assertEqual(canonicalize_url("http://www.example.com/do?k=b%a3"),
"http://www.example.com/do?k=b%A3")
# non-ASCII percent-encoding in paths
self.assertEqual(canonicalize_url("http://www.example.com/a do?a=1"),
"http://www.example.com/a%20do?a=1"),
self.assertEqual(canonicalize_url("http://www.example.com/a %20do?a=1"),
"http://www.example.com/a%20%20do?a=1"),
self.assertEqual(canonicalize_url("http://www.example.com/a do\xc2\xa3.html?a=1"),
"http://www.example.com/a%20do%C2%A3.html?a=1")
# non-ASCII percent-encoding in query arguments
self.assertEqual(canonicalize_url(u"http://www.example.com/do?price=\xa3500&a=5&z=3"),
u"http://www.example.com/do?a=5&price=%C2%A3500&z=3")
self.assertEqual(canonicalize_url("http://www.example.com/do?price=\xc2\xa3500&a=5&z=3"),
"http://www.example.com/do?a=5&price=%C2%A3500&z=3")
self.assertEqual(canonicalize_url("http://www.example.com/do?price(\xc2\xa3)=500&a=1"),
"http://www.example.com/do?a=1&price%28%C2%A3%29=500")
# urls containing auth and ports
self.assertEqual(canonicalize_url(u"http://user:pass@www.example.com:81/do?now=1"),
u"http://user:pass@www.example.com:81/do?now=1")
# remove fragments
self.assertEqual(canonicalize_url(u"http://user:pass@www.example.com/do?a=1#frag"),
u"http://user:pass@www.example.com/do?a=1")
self.assertEqual(canonicalize_url(u"http://user:pass@www.example.com/do?a=1#frag", keep_fragments=True),
u"http://user:pass@www.example.com/do?a=1#frag")
# dont convert safe characters to percent encoding representation
self.assertEqual(canonicalize_url(
"http://www.simplybedrooms.com/White-Bedroom-Furniture/Bedroom-Mirror:-Josephine-Cheval-Mirror.html"),
"http://www.simplybedrooms.com/White-Bedroom-Furniture/Bedroom-Mirror:-Josephine-Cheval-Mirror.html")
# urllib.quote uses a mapping cache of encoded characters. when parsing
# an already percent-encoded url, it will fail if that url was not
# percent-encoded as utf-8, that's why canonicalize_url must always
# convert the urls to string. the following test asserts that
# functionality.
self.assertEqual(canonicalize_url(u'http://www.example.com/caf%E9-con-leche.htm'),
'http://www.example.com/caf%E9-con-leche.htm')
# domains are case insensitive
self.assertEqual(canonicalize_url("http://www.EXAMPLE.com"),
"http://www.example.com")
if __name__ == "__main__":
unittest.main()
| agpl-3.0 | 615,387,961,333,769,100 | 60.506667 | 150 | 0.593432 | false |
justzx2011/openyoudao | webshot.py | 4 | 2356 | #!/usr/bin/python
#-*- coding: utf-8 -*-
import sys
import gl
import os
import gtk
import time
import webkit
class OutputView(webkit.WebView):
    '''a WebView widget that renders the page output
'''
def __init__(self):
webkit.WebView.__init__(self)
self.load_finish_flag = False
self.set_property('can-focus', True)
self.set_property('can-default', True)
self.set_full_content_zoom(1)
# self.clipbord = gtk.Clipboard()
settings = self.get_settings()
#try:
# settings.set_property('enable-universal-access-from-file-uris', True)
# settings.set_property('javascript-can-access-clipboard', False)
settings.set_property('enable-default-context-menu', False)
# settings.set_property('enable-page-cache', True)
# settings.set_property('tab-key-cycles-through-elements', True)
# settings.set_property('enable-file-access-from-file-uris', True)
# settings.set_property('enable-spell-checking', False)
# settings.set_property('enable-caret-browsing', False)
# try:
# # Since 1.7.5
# settings.set_property('enable-accelerated-compositing', True)
# except TypeError:
# pass
#except:
# print 'Error: settings property was not set.'
class Window(gtk.Window):
def __init__(self):
gtk.Window.__init__(self)
self.set_resizable(True)
        self.set_title("有道首页")  # "Youdao homepage"
self.set_default_size(800, 280)
self.set_icon_from_file("/usr/share/openyoudao/images/icon/icon.jpg")
self.scroll = gtk.ScrolledWindow()
self.scroll.props.hscrollbar_policy = gtk.POLICY_NEVER
self.scroll.props.vscrollbar_policy = gtk.POLICY_NEVER
self.output = OutputView()
self.scroll.add(self.output)
self.add(self.scroll)
self.scroll.show_all()
self.connect('delete-event', gtk.main_quit)
#self.is_fullscreen = False
def load(self, url):
print url
self.output.load_uri(url)
def reload(self):
self.output.reload()
def settitle(self,title):
self.set_title(title)
#window = Window()
#window.load(sys.argv[1])
#window.load("http://dict.youdao.com/")
#window.show()
#gtk.main()
| mit | -2,980,305,795,906,689,000 | 34.044776 | 82 | 0.611158 | false |
Conchylicultor/MusicGenerator | deepmusic/modules/encoder.py | 1 | 4503 | # Copyright 2016 Conchylicultor. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
"""
import tensorflow as tf
import deepmusic.tfutils as tfutils
import deepmusic.songstruct as music
class EncoderNetwork:
""" From the previous keyboard configuration, prepare the state for the next one
Encode the keyboard configuration at a state t
    This abstract class has no effect but is here to be subclassed
Warning: To encapsulate the weights in the right tf scope, they should be defined
within the build function
"""
def __init__(self, args):
"""
Args:
args: parameters of the model
"""
self.args = args
def build(self):
""" Initialize the weights of the model
"""
def init_state(self):
""" Return the initial cell state
"""
return None
def get_cell(self, prev_keyboard, prev_state):
""" Predict the next keyboard state
Args:
prev_keyboard (tf.Tensor): the previous keyboard configuration
prev_state (Tuple): the previous decoder state
Return:
tf.Tensor: the final encoder state
"""
raise NotImplementedError('Abstract Class')
class Identity(EncoderNetwork):
""" Implement lookup for note embedding
"""
@staticmethod
def get_module_id():
return 'identity'
def __init__(self, args):
"""
Args:
args: parameters of the model
"""
super().__init__(args)
def get_cell(self, prev_keyboard, prev_state):
""" Predict the next keyboard state
Args:
prev_keyboard (tf.Tensor): the previous keyboard configuration
prev_state (Tuple): the previous decoder state
Return:
tf.Tensor: the final encoder state
"""
prev_state_enco, prev_state_deco = prev_state
        # This simple class just passes the previous state along
next_state_enco = prev_state_enco
return next_state_enco
class Rnn(EncoderNetwork):
""" Read each keyboard configuration note by note and encode it's configuration
"""
@staticmethod
def get_module_id():
return 'rnn'
def __init__(self, args):
"""
Args:
args: parameters of the model
"""
super().__init__(args)
self.rnn_cell = None
def build(self):
""" Initialize the weights of the model
"""
self.rnn_cell = tfutils.get_rnn_cell(self.args, "deco_cell")
def init_state(self):
""" Return the initial cell state
"""
return self.rnn_cell.zero_state(batch_size=self.args.batch_size, dtype=tf.float32)
def get_cell(self, prev_keyboard, prev_state):
""" a RNN encoder
See parent class for arguments details
"""
prev_state_enco, prev_state_deco = prev_state
axis = 1 # The first dimension is the batch, we split the keys
assert prev_keyboard.get_shape()[axis].value == music.NB_NOTES
inputs = tf.split(axis, music.NB_NOTES, prev_keyboard)
_, final_state = tf.nn.rnn(
self.rnn_cell,
inputs,
initial_state=prev_state_deco
)
return final_state
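# Shape sketch for Rnn.get_cell above (B is the batch size from args, N is
# music.NB_NOTES): prev_keyboard has shape (B, N); tf.split along axis 1
# yields N tensors of shape (B, 1) that are fed to tf.nn.rnn as a sequence,
# and only the final RNN state is returned as the encoder output.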
class Embedding(EncoderNetwork):
""" Implement lookup for note embedding
"""
@staticmethod
def get_module_id():
return 'embedding'
def __init__(self, args):
"""
Args:
args: parameters of the model
"""
super().__init__(args)
def build(self):
""" Initialize the weights of the model
"""
def init_state(self):
""" Return the initial cell state
"""
def get_cell(self, prev_keyboard, prev_state):
""" a RNN encoder
See parent class for arguments details
"""
# TODO:
return
| apache-2.0 | 3,511,202,845,000,962,000 | 26.796296 | 90 | 0.594048 | false |
bablokb/nerd-alarmclock | files/usr/local/lib/python2.7/site-packages/nclock/WebThread.py | 1 | 1143 | # --------------------------------------------------------------------------
# Class definition of WebThread - this thread controls the REST API
#
# Author: Benjamin Fuchs
# License: GPL3
#
# Website: https://github.com/bablokb/nerd-alarmclock
#
# --------------------------------------------------------------------------
from BottleThread import BottleThread
from threading import Thread
class WebThread(Thread):
""" Web thread """
# initialize object ----------------------------------------------------
def __init__(self, settings):
""" Constructor """
super(WebThread,self).__init__(name="WebThread")
self._settings = settings
self._stop_event = settings.stop_event
# run the thread -------------------------------------------------------
def run(self):
""" run-method of thread """
self._settings.log.msg("WebThread: running WebThread")
bottleThread = BottleThread(self._settings)
bottleThread.start()
while not self._stop_event.wait(5):
self._settings.log.msg("WebThread: running WebThread")
self._settings.log.msg("WebThread: shutdown")
bottleThread.shutdown()
| gpl-3.0 | 1,997,278,657,393,778,700 | 29.078947 | 76 | 0.528434 | false |
vulcansteel/autorest | AutoRest/Generators/Python/Python.Tests/Expected/AcceptanceTests/Url/auto_rest_url_test_service/operations/paths.py | 1 | 35282 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from .. import models
class Paths(object):
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def get_boolean_true(
self, bool_path, custom_headers={}, raw=False, **operation_config):
"""
Get true Boolean value on path
:param bool_path: true boolean value
:type bool_path: bool
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: None or (None, requests.response) or concurrent.futures.Future
"""
# Construct URL
url = '/paths/bool/true/{boolPath}'
path_format_arguments = {
'boolPath': self._serialize.url("bool_path", bool_path, 'bool')
}
url = url.format(**path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
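    # Illustrative call pattern for the generated methods in this class (the
    # client construction and its `paths` attribute are assumptions, not
    # shown in this file):
    #
    #   client.paths.get_boolean_true(True)
    #   client.paths.get_boolean_true(True, raw=True)  # ClientRawResponse
    #
    # Every method raises models.ErrorException when the service does not
    # answer with HTTP 200.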
def get_boolean_false(
self, bool_path, custom_headers={}, raw=False, **operation_config):
"""
Get false Boolean value on path
:param bool_path: false boolean value
:type bool_path: bool
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: None or (None, requests.response) or concurrent.futures.Future
"""
# Construct URL
url = '/paths/bool/false/{boolPath}'
path_format_arguments = {
'boolPath': self._serialize.url("bool_path", bool_path, 'bool')
}
url = url.format(**path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_int_one_million(
self, int_path, custom_headers={}, raw=False, **operation_config):
"""
Get '1000000' integer value
:param int_path: '1000000' integer value
:type int_path: int
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: None or (None, requests.response) or concurrent.futures.Future
"""
# Construct URL
url = '/paths/int/1000000/{intPath}'
path_format_arguments = {
'intPath': self._serialize.url("int_path", int_path, 'int')
}
url = url.format(**path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_int_negative_one_million(
self, int_path, custom_headers={}, raw=False, **operation_config):
"""
Get '-1000000' integer value
:param int_path: '-1000000' integer value
:type int_path: int
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: None or (None, requests.response) or concurrent.futures.Future
"""
# Construct URL
url = '/paths/int/-1000000/{intPath}'
path_format_arguments = {
'intPath': self._serialize.url("int_path", int_path, 'int')
}
url = url.format(**path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_ten_billion(
self, long_path, custom_headers={}, raw=False, **operation_config):
"""
Get '10000000000' 64 bit integer value
:param long_path: '10000000000' 64 bit integer value
:type long_path: long
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: None or (None, requests.response) or concurrent.futures.Future
"""
# Construct URL
url = '/paths/long/10000000000/{longPath}'
path_format_arguments = {
'longPath': self._serialize.url("long_path", long_path, 'long')
}
url = url.format(**path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_negative_ten_billion(
self, long_path, custom_headers={}, raw=False, **operation_config):
"""
Get '-10000000000' 64 bit integer value
:param long_path: '-10000000000' 64 bit integer value
:type long_path: long
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: None or (None, requests.response) or concurrent.futures.Future
"""
# Construct URL
url = '/paths/long/-10000000000/{longPath}'
path_format_arguments = {
'longPath': self._serialize.url("long_path", long_path, 'long')
}
url = url.format(**path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def float_scientific_positive(
self, float_path, custom_headers={}, raw=False, **operation_config):
"""
Get '1.034E+20' numeric value
        :param float_path: '1.034E+20' numeric value
:type float_path: float
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: None or (None, requests.response) or concurrent.futures.Future
"""
# Construct URL
url = '/paths/float/1.034E+20/{floatPath}'
path_format_arguments = {
'floatPath': self._serialize.url("float_path", float_path, 'float')
}
url = url.format(**path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def float_scientific_negative(
self, float_path, custom_headers={}, raw=False, **operation_config):
"""
Get '-1.034E-20' numeric value
        :param float_path: '-1.034E-20' numeric value
:type float_path: float
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: None or (None, requests.response) or concurrent.futures.Future
"""
# Construct URL
url = '/paths/float/-1.034E-20/{floatPath}'
path_format_arguments = {
'floatPath': self._serialize.url("float_path", float_path, 'float')
}
url = url.format(**path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def double_decimal_positive(
self, double_path, custom_headers={}, raw=False, **operation_config):
"""
Get '9999999.999' numeric value
        :param double_path: '9999999.999' numeric value
:type double_path: float
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: None or (None, requests.response) or concurrent.futures.Future
"""
# Construct URL
url = '/paths/double/9999999.999/{doublePath}'
path_format_arguments = {
'doublePath': self._serialize.url("double_path", double_path, 'float')
}
url = url.format(**path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def double_decimal_negative(
self, double_path, custom_headers={}, raw=False, **operation_config):
"""
Get '-9999999.999' numeric value
        :param double_path: '-9999999.999' numeric value
:type double_path: float
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: None or (None, requests.response) or concurrent.futures.Future
"""
# Construct URL
url = '/paths/double/-9999999.999/{doublePath}'
path_format_arguments = {
'doublePath': self._serialize.url("double_path", double_path, 'float')
}
url = url.format(**path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def string_unicode(
self, string_path, custom_headers={}, raw=False, **operation_config):
"""
Get '啊齄丂狛狜隣郎隣兀﨩' multi-byte string value
        :param string_path: '啊齄丂狛狜隣郎隣兀﨩' multi-byte string value. Possible
values for this parameter include: '啊齄丂狛狜隣郎隣兀﨩'
:type string_path: str
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: None or (None, requests.response) or concurrent.futures.Future
"""
# Construct URL
url = '/paths/string/unicode/{stringPath}'
path_format_arguments = {
'stringPath': self._serialize.url("string_path", string_path, 'str')
}
url = url.format(**path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def string_url_encoded(
self, string_path, custom_headers={}, raw=False, **operation_config):
"""
Get 'begin!*'();:@ &=+$,/?#[]end
:param string_path: 'begin!*'();:@ &=+$,/?#[]end' url encoded string
value. Possible values for this parameter include: 'begin!*'();:@
&=+$,/?#[]end'
:type string_path: str
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: None or (None, requests.response) or concurrent.futures.Future
"""
# Construct URL
url = '/paths/string/begin%21%2A%27%28%29%3B%3A%40%20%26%3D%2B%24%2C%2F%3F%23%5B%5Dend/{stringPath}'
path_format_arguments = {
'stringPath': self._serialize.url("string_path", string_path, 'str')
}
url = url.format(**path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def string_empty(
self, string_path, custom_headers={}, raw=False, **operation_config):
"""
Get ''
:param string_path: '' string value. Possible values for this
parameter include: ''
:type string_path: str
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: None or (None, requests.response) or concurrent.futures.Future
"""
# Construct URL
url = '/paths/string/empty/{stringPath}'
path_format_arguments = {
'stringPath': self._serialize.url("string_path", string_path, 'str')
}
url = url.format(**path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def string_null(
self, string_path, custom_headers={}, raw=False, **operation_config):
"""
Get null (should throw)
:param string_path: null string value
:type string_path: str
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: None or (None, requests.response) or concurrent.futures.Future
"""
# Construct URL
url = '/paths/string/null/{stringPath}'
path_format_arguments = {
'stringPath': self._serialize.url("string_path", string_path, 'str')
}
url = url.format(**path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [400]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def enum_valid(
self, enum_path, custom_headers={}, raw=False, **operation_config):
"""
Get using uri with 'green color' in path parameter
:param enum_path: send the value green. Possible values for this
parameter include: 'red color', 'green color', 'blue color'
:type enum_path: str
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: None or (None, requests.response) or concurrent.futures.Future
"""
# Construct URL
url = '/paths/enum/green%20color/{enumPath}'
path_format_arguments = {
'enumPath': self._serialize.url("enum_path", enum_path, 'UriColor')
}
url = url.format(**path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def enum_null(
self, enum_path, custom_headers={}, raw=False, **operation_config):
"""
Get null (should throw on the client before the request is sent on
        the wire)
:param enum_path: send null should throw. Possible values for this
parameter include: 'red color', 'green color', 'blue color'
:type enum_path: str
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: None or (None, requests.response) or concurrent.futures.Future
"""
# Construct URL
url = '/paths/string/null/{enumPath}'
path_format_arguments = {
'enumPath': self._serialize.url("enum_path", enum_path, 'UriColor')
}
url = url.format(**path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [400]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def byte_multi_byte(
self, byte_path, custom_headers={}, raw=False, **operation_config):
"""
Get '啊齄丂狛狜隣郎隣兀﨩' multibyte value as utf-8 encoded byte array
:param byte_path: '啊齄丂狛狜隣郎隣兀﨩' multibyte value as utf-8 encoded byte
array
:type byte_path: bytearray
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: None or (None, requests.response) or concurrent.futures.Future
"""
# Construct URL
url = '/paths/byte/multibyte/{bytePath}'
path_format_arguments = {
'bytePath': self._serialize.url("byte_path", byte_path, 'bytearray')
}
url = url.format(**path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def byte_empty(
self, byte_path, custom_headers={}, raw=False, **operation_config):
"""
Get '' as byte array
:param byte_path: '' as byte array
:type byte_path: bytearray
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: None or (None, requests.response) or concurrent.futures.Future
"""
# Construct URL
url = '/paths/byte/empty/{bytePath}'
path_format_arguments = {
'bytePath': self._serialize.url("byte_path", byte_path, 'bytearray')
}
url = url.format(**path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def byte_null(
self, byte_path, custom_headers={}, raw=False, **operation_config):
"""
Get null as byte array (should throw)
:param byte_path: null as byte array (should throw)
:type byte_path: bytearray
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: None or (None, requests.response) or concurrent.futures.Future
"""
# Construct URL
url = '/paths/byte/null/{bytePath}'
path_format_arguments = {
'bytePath': self._serialize.url("byte_path", byte_path, 'bytearray')
}
url = url.format(**path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [400]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def date_valid(
self, date_path, custom_headers={}, raw=False, **operation_config):
"""
Get '2012-01-01' as date
:param date_path: '2012-01-01' as date
:type date_path: date
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: None or (None, requests.response) or concurrent.futures.Future
"""
# Construct URL
url = '/paths/date/2012-01-01/{datePath}'
path_format_arguments = {
'datePath': self._serialize.url("date_path", date_path, 'date')
}
url = url.format(**path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def date_null(
self, date_path, custom_headers={}, raw=False, **operation_config):
"""
Get null as date - this should throw or be unusable on the client
side, depending on date representation
:param date_path: null as date (should throw)
:type date_path: date
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: None or (None, requests.response) or concurrent.futures.Future
"""
# Construct URL
url = '/paths/date/null/{datePath}'
path_format_arguments = {
'datePath': self._serialize.url("date_path", date_path, 'date')
}
url = url.format(**path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [400]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def date_time_valid(
self, date_time_path, custom_headers={}, raw=False, **operation_config):
"""
Get '2012-01-01T01:01:01Z' as date-time
:param date_time_path: '2012-01-01T01:01:01Z' as date-time
:type date_time_path: datetime
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: None or (None, requests.response) or concurrent.futures.Future
"""
# Construct URL
url = '/paths/datetime/2012-01-01T01%3A01%3A01Z/{dateTimePath}'
path_format_arguments = {
'dateTimePath': self._serialize.url("date_time_path", date_time_path, 'iso-8601')
}
url = url.format(**path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def date_time_null(
self, date_time_path, custom_headers={}, raw=False, **operation_config):
"""
Get null as date-time, should be disallowed or throw depending on
representation of date-time
:param date_time_path: null as date-time
:type date_time_path: datetime
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: None or (None, requests.response) or concurrent.futures.Future
"""
# Construct URL
url = '/paths/datetime/null/{dateTimePath}'
path_format_arguments = {
'dateTimePath': self._serialize.url("date_time_path", date_time_path, 'iso-8601')
}
url = url.format(**path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [400]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
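# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the generated
# client). The service client name below is an assumption -- the actual entry
# point depends on the AutoRest-generated client that owns this operations
# class.
#
#     client = AutoRestUrlTestService(base_url='http://localhost:3000')
#     client.paths.get_ten_billion(10000000000)          # expects HTTP 200
#     client.paths.float_scientific_positive(1.034e+20)
#     client.paths.enum_valid('green color')             # serialized as UriColor
# ---------------------------------------------------------------------------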
| mit | 1,577,192,287,403,550,500 | 36.748927 | 108 | 0.617333 | false |
nburn42/tensorflow | tensorflow/contrib/meta_graph_transform/meta_graph_transform.py | 14 | 31627 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Apply graph_transforms tool to MetaGraphDefs.
@@meta_graph_transform
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re as _re
from tensorflow.core.framework import graph_pb2 as _graph_pb2
from tensorflow.core.protobuf import meta_graph_pb2 as _meta_graph_pb2
from tensorflow.python.client import session as _session
from tensorflow.python.framework import graph_util as _graph_util
from tensorflow.python.framework import importer as _importer
from tensorflow.python.framework import ops as _ops
from tensorflow.python.saved_model import constants as _saved_model_constants
from tensorflow.python.training import saver as _saver_lib
from tensorflow.python.util import compat as _compat
from tensorflow.tools import graph_transforms as _graph_transforms
_FREEZE_GRAPH_TRANSFORM = 'freeze_graph'
_SPARSIFY_GATHER_TRANSFORM = 'sparsify_gather'
def _op_name(tensor_name):
"""Get the op name from a tensor name."""
# control dependency inputs start with ^
if tensor_name[0] == '^':
tensor_name = tensor_name[1:]
if ':' in tensor_name:
op_name, _ = tensor_name.split(':')
return op_name
return tensor_name
def _get_shared_init_op(initializer_names):
"""Obtain the shared init op name, if it exists.
Args:
initializer_names: Dictionary of the "infrastructural" nodes (initializers,
save and restore ops, etc.). The keys in this dictionary
indicate the collection where these nodes were obtained from.
Returns:
    A string indicating the shared init op name, or None if none exists.
"""
return_value = initializer_names.get(_saved_model_constants.MAIN_OP_KEY, None)
if not return_value:
return_value = initializer_names.get(
_saved_model_constants.LEGACY_INIT_OP_KEY, None)
return str(return_value[0]) if return_value else None
def _gtt_transforms(graph_def, input_names, output_names, initializer_names,
transforms):
"""Pass through gtt transforms, applying them to the graph_def.
Args:
graph_def: A GraphDef proto to be transformed.
input_names: Names of input nodes.
output_names: Names of output nodes.
initializer_names: Dictionary of the "infrastructural" nodes (initializers,
save and restore ops, etc.) that should be retained even if they are not
transitively reachable from output nodes. The keys in this dictionary
indicate the collection where these nodes were obtained from.
transforms: A list of strings naming the graph transforms to be applied in
order.
Returns:
The transformed GraphDef.
"""
if not transforms:
transformed_graph_def = _graph_pb2.GraphDef()
transformed_graph_def.CopyFrom(graph_def)
return transformed_graph_def
initializer_names_flat = sorted(
[k for l in initializer_names.values() for k in l])
all_output_names = output_names + initializer_names_flat
return _graph_transforms.TransformGraph(graph_def, input_names,
all_output_names, transforms)
def _freeze_transform(graph_def, output_names, initializer_names, saver_def,
checkpoint_path):
"""Handle the freeze transform.
Determine which initializer nodes should be retained by the freeze transform.
Retain those nodes and return an updated dictionary containing them.
Args:
graph_def: A GraphDef proto to be transformed.
output_names: Names of output nodes.
initializer_names: Dictionary of the "infrastructural" nodes (initializers,
save and restore ops, etc.). The keys in this dictionary
indicate the collection where these nodes were obtained from.
saver_def: A SaverDef proto used for restoring a checkpoint during freezing,
if needed (default None).
checkpoint_path: A path to a checkpoint to restore during freezing,
if needed (default None).
Returns:
A tuple containing the GraphDef and a Dict of pruned initializer nodes.
"""
table_initializers = initializer_names.get(_ops.GraphKeys.TABLE_INITIALIZERS,
[])
shared_init_op = _get_shared_init_op(initializer_names)
graph_def = _freeze_graph_with_def_protos(graph_def, output_names,
table_initializers, shared_init_op,
saver_def, checkpoint_path)
pruned_initializer_names = {}
# Freeze graph prunes all initializers and shared init nodes that are not
# explicitly maintained. Create new initializer_names dictionary to reflect
# this.
if table_initializers:
pruned_initializer_names[_ops.GraphKeys.TABLE_INITIALIZERS] = (
table_initializers)
if _saved_model_constants.LEGACY_INIT_OP_KEY in initializer_names:
pruned_initializer_names[_saved_model_constants.LEGACY_INIT_OP_KEY] = (
initializer_names[_saved_model_constants.LEGACY_INIT_OP_KEY])
if _saved_model_constants.MAIN_OP_KEY in initializer_names:
pruned_initializer_names[_saved_model_constants.MAIN_OP_KEY] = (
initializer_names[_saved_model_constants.MAIN_OP_KEY])
return (graph_def, pruned_initializer_names)
def _clean_save_and_restore(graph_def, op, removed_op_names):
"""Clean the specified save and restore op.
Updates the dtypes attribute of the save / restore op and the associated name
and shape tensors to remove entries for variables that have been removed.
Args:
graph_def: A GraphDef proto to be transformed.
op: The save or restore op to update.
removed_op_names: List of op names that have been removed.
"""
name = op.name + '/tensor_names'
shape = op.name + '/shape_and_slices'
name_op = _find_op(graph_def, name)
shape_op = _find_op(graph_def, shape)
name_op_value_tensor = name_op.attr['value'].tensor
shape_op_value_tensor = shape_op.attr['value'].tensor
names = []
shapes = []
dtypes = []
for index, value in enumerate(name_op_value_tensor.string_val):
if not _is_removed(_compat.as_str(value), removed_op_names):
names.append(value)
shapes.append(shape_op_value_tensor.string_val[index])
dtypes.append(op.attr['dtypes'].list.type[index])
name_op_value_tensor.string_val[:] = names
name_op_value_tensor.tensor_shape.dim[0].size = len(names)
shape_op_value_tensor.string_val[:] = shapes
shape_op_value_tensor.tensor_shape.dim[0].size = len(shapes)
op.attr['dtypes'].list.type[:] = dtypes
if not name_op.attr['_output_shapes'].list.shape:
name_op.attr['_output_shapes'].list.shape.add()
name_op.attr['_output_shapes'].list.shape[0].dim.add()
name_op.attr['_output_shapes'].list.shape[0].dim[0].size = len(names)
if not shape_op.attr['_output_shapes'].list.shape:
shape_op.attr['_output_shapes'].list.shape.add()
shape_op.attr['_output_shapes'].list.shape[0].dim.add()
shape_op.attr['_output_shapes'].list.shape[0].dim[0].size = len(shapes)
def _sparsify_gather_transform(graph_def, input_names, output_names,
initializer_names, checkpoint_path):
"""Handle the sparsify gather transform.
Provides the transform the checkpoint and keeps track of the newly created
initializer nodes.
Args:
graph_def: A GraphDef proto to be transformed.
input_names: Names of input nodes.
output_names: Names of output nodes.
initializer_names: Dictionary of the "infrastructural" nodes (initializers,
save and restore ops, etc.). The keys in this dictionary
indicate the collection where these nodes were obtained from.
checkpoint_path: A path to a checkpoint.
Returns:
A tuple containing the GraphDef and a Dict of updated initializer nodes.
Raises:
ValueError: if the restore_op_name does not have the expected format.
"""
# Ensure that sparsify_shared_init_op is unique.
  sparsify_shared_init_op = 'sparsify_gather_init_op'
while _find_op(graph_def, sparsify_shared_init_op):
sparsify_shared_init_op += '_1'
input_flag = ''
if checkpoint_path:
input_flag = 'input_checkpoint="%s", ' % checkpoint_path
sparsify_cmd = [
'sparsify_gather(%sgroup_init_node="%s")' % (input_flag,
sparsify_shared_init_op)
]
starting_op_names = [node.name for node in graph_def.node]
graph_def = _gtt_transforms(graph_def, input_names, output_names,
initializer_names, sparsify_cmd)
ending_op_names = [node.name for node in graph_def.node]
removed_op_names = list(set(starting_op_names) - set(ending_op_names))
removed_op_names.sort()
for op_index, op_name in enumerate(removed_op_names):
op_name_parts = op_name.rsplit('/', 1)
# Remove part to get the checkpoint names used by the saver.
if len(op_name_parts) == 2 and op_name_parts[1].startswith('part_'):
removed_op_names[op_index] = op_name_parts[0]
else:
removed_op_names[op_index] = op_name
# Obtain newly created table inits from gtt sparsify transform.
added_table_inits = []
for index, node in enumerate(graph_def.node):
if node.name == sparsify_shared_init_op:
added_table_inits = [n.lstrip('^') for n in node.input]
table_initializers = initializer_names.get(
_ops.GraphKeys.TABLE_INITIALIZERS, [])
table_initializers.extend(added_table_inits)
initializer_names[_ops.GraphKeys.TABLE_INITIALIZERS] = table_initializers
del graph_def.node[index]
break
# Add inits to existing shared init op.
node = _find_op(graph_def, _get_shared_init_op(initializer_names))
for init in added_table_inits:
node.input.append('^' + init)
# Update saver.
for node in graph_def.node:
if node.name.endswith('SaveV2'):
_clean_save_and_restore(graph_def, node, removed_op_names)
return (graph_def, initializer_names)
def _do_transforms(graph_def,
input_names,
output_names,
initializer_names,
transforms,
saver_def=None,
checkpoint_path=None):
"""Apply requested transforms to a GraphDef, including freezing.
Args:
graph_def: A GraphDef proto to be transformed.
input_names: Names of input nodes.
output_names: Names of output nodes.
initializer_names: Dictionary of the "infrastructural" nodes (initializers,
save and restore ops, etc.) that should be retained even if they are not
transitively reachable from output nodes. The keys in this dictionary
indicate the collection where these nodes were obtained from.
transforms: A list of strings naming the graph transforms to be applied in
order. These transform names are exactly those supported by the Graph
Transform Tool, with the addition of the 'freeze_graph' and
'sparsify_gather' transforms.
saver_def: A SaverDef proto used for restoring a checkpoint during freezing,
if needed (default None).
checkpoint_path: A path to a checkpoint to restore during freezing,
if needed (default None).
Returns:
A tuple containing the GraphDef and a Dict of updated initializer nodes.
"""
transformed_graph_def = _graph_pb2.GraphDef()
transformed_graph_def.CopyFrom(graph_def)
transformed_initializer_names = initializer_names.copy()
if not transforms:
return transformed_graph_def, transformed_initializer_names
current_gtt_transforms = []
for t in transforms:
if t == _FREEZE_GRAPH_TRANSFORM:
transformed_graph_def = _gtt_transforms(
transformed_graph_def, input_names, output_names,
transformed_initializer_names, current_gtt_transforms)
output_node_names = [_op_name(x) for x in output_names]
transformed_graph_def, transformed_initializer_names = _freeze_transform(
transformed_graph_def, output_node_names,
transformed_initializer_names, saver_def, checkpoint_path)
current_gtt_transforms = []
elif t == _SPARSIFY_GATHER_TRANSFORM:
transformed_graph_def = _gtt_transforms(
transformed_graph_def, input_names, output_names,
transformed_initializer_names, current_gtt_transforms)
transformed_graph_def, transformed_initializer_names = (
_sparsify_gather_transform(
transformed_graph_def, input_names, output_names,
transformed_initializer_names, checkpoint_path))
current_gtt_transforms = []
else:
current_gtt_transforms.append(t)
transformed_graph_def = _gtt_transforms(
transformed_graph_def, input_names, output_names,
transformed_initializer_names, current_gtt_transforms)
return transformed_graph_def, transformed_initializer_names
def _connect_to_shared_init_op(graph_def, shared_init_op_name,
nodes_to_connect):
"""Creates a new shared init node that is connected to via control deps.
Args:
graph_def: The GraphDef proto to add the shared init node to.
shared_init_op_name: A string specifying the name of the shared init node to
create.
nodes_to_connect: A list of strings specifying the names of nodes to connect
to the shared node via control dependencies.
"""
if nodes_to_connect:
init_op = graph_def.node.add()
init_op.name = shared_init_op_name
init_op.op = 'NoOp'
init_op.input.extend(['^' + i for i in nodes_to_connect])
# forked and modified from freeze_graph.py
def _freeze_graph_with_def_protos(input_graph_def, output_node_names,
initializer_names, shared_init_op_name,
input_saver_def, input_checkpoint):
"""Converts all variables in a graph and checkpoint into constants.
During this process, we need to retain certain initializer nodes (e.g. table
initializer nodes). Instead of determining which dependencies
of the shared initializer node (e.g. group_deps) to keep, we
reconstruct the connections between the individual initializer nodes and
the shared node after freezing the graph.
Args:
input_graph_def: A GraphDef proto to be frozen.
output_node_names: Names of output nodes.
initializer_names: Names of initializer nodes to keep.
shared_init_op_name: The name of the shared initializer node to connect the
nodes in initializer names to.
input_saver_def: A SaverDef proto used for restoring a checkpoint.
input_checkpoint: A path to a checkpoint to restore.
Returns:
A frozen GraphDef.
"""
with _ops.Graph().as_default():
_ = _importer.import_graph_def(input_graph_def, name='')
with _session.Session() as sess:
saver = _saver_lib.Saver(saver_def=input_saver_def)
saver.restore(sess, input_checkpoint)
output_graph_def = _graph_util.convert_variables_to_constants(
sess, input_graph_def, output_node_names + initializer_names)
_connect_to_shared_init_op(output_graph_def, shared_init_op_name,
initializer_names)
return output_graph_def
def _find_all_mandatory_retain_ops(base_meta_graph_def):
"""Identify all infrastructural Ops, to ensure that they are retained.
We need to retain infrastructural Ops (init and saver stuff), in addition
to the desired outputs.
For now we retain *all* save and restore ops, variable initializers,
table initializers, and main init ops.
This means that strip_unused_nodes will not remove unused variables.
Args:
    base_meta_graph_def: a MetaGraphDef proto in which to identify nodes to
      retain.
Returns:
A dictionary corresponding to the nodes associated with each collection
that are to be retained.
"""
# TODO(b/63447631): implement variable stripping.
initializer_names = {}
# Primary SaverDef and SAVERS collection
saver_defs = []
if base_meta_graph_def.HasField('saver_def'):
saver_defs.append(base_meta_graph_def.saver_def)
saver_defs.extend(_get_all_protos_from_collection(
base_meta_graph_def, _ops.GraphKeys.SAVERS))
for saver_def in saver_defs:
savers = initializer_names.get(_ops.GraphKeys.SAVERS, [])
savers.extend([
saver_def.filename_tensor_name, saver_def.save_tensor_name,
saver_def.restore_op_name
])
initializer_names[_ops.GraphKeys.SAVERS] = savers
# Variable initializers
variable_collections = [
_ops.GraphKeys.GLOBAL_VARIABLES,
_ops.GraphKeys.TRAINABLE_VARIABLES,
_ops.GraphKeys.MOVING_AVERAGE_VARIABLES,
_ops.GraphKeys.LOCAL_VARIABLES,
_ops.GraphKeys.MODEL_VARIABLES]
for var_coll in variable_collections:
variables = _get_all_protos_from_collection(base_meta_graph_def, var_coll)
var_init_names = [v.initializer_name for v in variables]
if var_init_names:
# Sanity check to ensure we don't overwrite dictionary entries.
assert var_coll not in initializer_names
initializer_names[var_coll] = var_init_names
# Table initializers
op_names = _get_all_node_names_from_collection(
base_meta_graph_def, _ops.GraphKeys.TABLE_INITIALIZERS)
if op_names:
# Sanity check to ensure we don't overwrite dictionary entries.
assert _ops.GraphKeys.TABLE_INITIALIZERS not in initializer_names
table_initializers = [t for t in op_names]
initializer_names[_ops.GraphKeys.TABLE_INITIALIZERS] = table_initializers
# Various init ops
various_init_op_collections = [_saved_model_constants.LEGACY_INIT_OP_KEY,
_saved_model_constants.MAIN_OP_KEY,
_ops.GraphKeys.INIT_OP,
_ops.GraphKeys.LOCAL_INIT_OP,
_ops.GraphKeys.READY_OP,
_ops.GraphKeys.READY_FOR_LOCAL_INIT_OP]
for op_coll in various_init_op_collections:
op_name = _get_single_node_name_from_collection(
base_meta_graph_def, op_coll)
if op_name:
# Sanity check to ensure we don't overwrite dictionary entries.
assert op_coll not in initializer_names
initializer_names[op_coll] = [op_name]
return initializer_names
def _add_pruned_collection(base_meta_graph_def, meta_graph_def,
collection_name, removed_op_names):
"""Copy collection to the transformed MetaGraphDef, omitting removed items."""
base_collection = base_meta_graph_def.collection_def[collection_name]
collection = meta_graph_def.collection_def[collection_name]
if base_collection.HasField('any_list'):
for any_value in base_collection.any_list.value:
# just search the serialized proto as a string
if not _is_removed_mentioned(any_value.value, removed_op_names):
copied_any = collection.any_list.value.add()
copied_any.CopyFrom(any_value)
elif base_collection.HasField('bytes_list'):
collection.bytes_list.value[:] = [
s for s in base_collection.bytes_list.value
if not _is_removed_mentioned(s, removed_op_names)]
elif base_collection.HasField('node_list'):
collection.node_list.value[:] = [
s for s in base_collection.node_list.value
if not _is_removed(s, removed_op_names)]
else:
collection.CopyFrom(base_collection)
def _add_pruned_saver(base_meta_graph_def, meta_graph_def, removed_op_names):
"""Copy the Saver into the transformed MetaGraphDef, if valid.
Currently this copies the Saver as is, after verifying that none of the
referenced Save & Restore ops were removed. A future version will modify
the Save and Restore ops themselves as needed to account for removed
Variables.
Args:
base_meta_graph_def: The untransformed MetaGraphDef.
meta_graph_def: The transformed MetaGraphDef being built.
removed_op_names: An iterable of names of ops that were removed.
"""
# Note this does surgery on meta_graph_def.graph_def too, so that should have
# been copied already.
if base_meta_graph_def.HasField('saver_def'):
filename_tensor_name = base_meta_graph_def.saver_def.filename_tensor_name
save_tensor_name = base_meta_graph_def.saver_def.save_tensor_name
restore_op_name = base_meta_graph_def.saver_def.restore_op_name
_check_tensor_not_removed(filename_tensor_name, removed_op_names)
_check_tensor_not_removed(save_tensor_name, removed_op_names)
_check_tensor_not_removed(restore_op_name, removed_op_names)
# TODO(b/63447631): Once we strip unused variables, remove references to
# them from save and restore ops. Retain those ops only if they also refer
# to retained Variables. See if we can use _clean_save_and_restore() for
# this.
# saver_name, restore_all = restore_op_name.rsplit('/', 1)
# if restore_all != 'restore_all':
# raise ValueError(
# 'SaverDef restore_op_name did not have expected form */restore_all')
# save_tensor_names_op_name = '{}/SaveV2/tensor_names'.format(saver_name)
# restore_tensor_names_op_name = (
# '{}/RestoreV2/tensor_names'.format(saver_name))
# save_tensor_names_op = _find_op(meta_graph_def.graph_def,
# save_tensor_names_op_name)
# save_tensor_names_value_tensor = save_tensor_names_op.attr['value'].tensor
# save_tensor_names_value_tensor.string_val[:] = [
# s for s in save_tensor_names_value_tensor.string_val
# if not _is_removed(s, removed_op_names)]
# restore_tensor_names_op = _find_op(
# meta_graph_def.graph_def, restore_tensor_names_op_name)
# restore_tensor_names_value_tensor = (
# restore_tensor_names_op.attr['value'].tensor)
# restore_tensor_names_value_tensor.string_val[:] = [
# s for s in restore_tensor_names_value_tensor.string_val
# if not _is_removed(s, removed_op_names)]
# if (save_tensor_names_value_tensor.string_val
# or restore_tensor_names_value_tensor.string_val):
meta_graph_def.saver_def.CopyFrom(base_meta_graph_def.saver_def)
def _find_op(graph_def, op_name):
"""Fetch a node from a GraphDef proto by name."""
for node_def in graph_def.node:
if node_def.name == op_name:
return node_def
return None
def _add_pruned_signature(base_meta_graph_def, meta_graph_def,
signature_name, removed_op_names):
"""Copy the named signature into the transformed MetaGraphDef, if valid.
If any input or output mentioned in the signature was removed by the graph
transform, the signature is silently omitted from the transformed
MetaGraphDef.
Args:
base_meta_graph_def: The untransformed MetaGraphDef.
meta_graph_def: The transformed MetaGraphDef being built.
signature_name: The name of the signature to copy.
removed_op_names: An iterable of names of ops that were removed.
"""
try:
base_signature = base_meta_graph_def.signature_def[signature_name]
for key in base_signature.inputs:
_check_tensor_not_removed(base_signature.inputs[key].name,
removed_op_names)
for key in base_signature.outputs:
_check_tensor_not_removed(base_signature.outputs[key].name,
removed_op_names)
meta_graph_def.signature_def[signature_name].CopyFrom(base_signature)
except ValueError:
# exclude any signature that mentions a removed node
pass
def _get_single_node_name_from_collection(meta_graph_def, collection_key):
"""Obtain a node name that is the single element of a collection."""
if collection_key not in meta_graph_def.collection_def:
return None
collection = meta_graph_def.collection_def[collection_key]
if not collection.node_list.value:
raise ValueError(
'Collection {} is present but type is not node_list.'.format(
collection_key))
if len(collection.node_list.value) != 1:
raise ValueError(
        'Collection {} has {} elements; expected exactly one.'.format(
            collection_key, len(collection.node_list.value)))
return collection.node_list.value[0]
def _get_all_node_names_from_collection(meta_graph_def, collection_key):
"""Obtain node names from a collection."""
if collection_key not in meta_graph_def.collection_def:
return None
collection = meta_graph_def.collection_def[collection_key]
if not collection.node_list.value:
raise ValueError(
'Collection {} is present but type is not node_list.'.format(
collection_key))
return collection.node_list.value
def _get_all_protos_from_collection(meta_graph_def, collection_key):
"""Obtain node names from a collection."""
if collection_key not in meta_graph_def.collection_def:
return []
collection = meta_graph_def.collection_def[collection_key]
if not collection.bytes_list.value:
raise ValueError(
'Collection {} is present but type is not bytes_list.'.format(
collection_key))
proto_type = _ops.get_collection_proto_type(collection_key)
result = []
for value in collection.bytes_list.value:
proto = proto_type()
proto.ParseFromString(value)
result.append(proto)
return result
def _is_removed(tensor_name, removed_op_names):
"""Determine whether the named tensor is an output of a removed op."""
for removed_op_name in removed_op_names:
if tensor_name.split(':')[0] == removed_op_name:
return True
return False
def _is_removed_mentioned(s, removed_op_names):
"""Determine whether any removed op is mentioned in the given object.
This relies on the string representation of the object. This is used for
proto messages that may mention ops by name in nested fields. The string
representation of the proto includes those field values, so this string
search approach is sufficient.
Args:
s: an object to search for removed op names.
removed_op_names: An iterable of names of ops that were removed.
Returns:
True if any removed op is mentioned in the given object, False otherwise.
"""
# A common approach taken by some of the transforms in gtt is to add new nodes
# that have the same prefix as the node they are removing. For example, if
# the original node name was /foo, they may remove that node and add in
# /foo/bar. This regex ensures that we handle these two nodes
# as separate entities. It matches on nodes having names in the form of
# '/foo/bar_x' as well as nodes having names in the form of 'foo.'
s_names = _re.findall(r'((?:[\/]?[a-zA-Z0-9\_]*)*)', _compat.as_str_any(s))
for removed_op_name in removed_op_names:
for s_name in s_names:
if s_name.endswith(removed_op_name):
return True
return False
def _check_tensor_not_removed(tensor_name, removed_op_names):
"""Verify that the named tensor was not removed.
Args:
tensor_name: the name of a tensor to check.
removed_op_names: An iterable of names of ops that were removed.
Raises:
ValueError: if the tensor was removed.
"""
if not tensor_name:
raise ValueError('Tensor name should not be empty')
if _is_removed(tensor_name, removed_op_names):
raise ValueError(
'Expected Tensor, but it was removed: {}'.format(tensor_name))
def _add_new_inits_to_collection(meta_graph_def, updated_initializer_names):
"""Add new inits to collection.
Args:
meta_graph_def: The MetaGraphDef protocol buffer to update.
updated_initializer_names: Dictionary of the updated "infrastructural" nodes
(initializers, save and restore ops, etc.). The keys in this dictionary
indicate the collection where these nodes were obtained from.
"""
# TODO(dzats): Extend this to support all collections.
if _ops.GraphKeys.TABLE_INITIALIZERS in updated_initializer_names:
orig_table_inits = _get_all_node_names_from_collection(
meta_graph_def, _ops.GraphKeys.TABLE_INITIALIZERS)
orig_table_inits = orig_table_inits if orig_table_inits else []
updated_table_inits = updated_initializer_names[
_ops.GraphKeys.TABLE_INITIALIZERS]
new_table_inits = list(set(updated_table_inits) - set(orig_table_inits))
new_table_inits.sort()
meta_graph_def.collection_def[
_ops.GraphKeys.TABLE_INITIALIZERS].node_list.value.extend(
new_table_inits)
def meta_graph_transform(
base_meta_graph_def, input_names, output_names, transforms, tags,
checkpoint_path=None):
"""Apply the Graph Transform tool to a MetaGraphDef.
Args:
base_meta_graph_def: A MetaGraphDef protocol buffer to transform.
input_names: Names of input nodes.
output_names: Names of output nodes.
transforms: A list of strings naming the graph transforms to be applied in
order. These transform names are exactly those supported by the Graph
Transform Tool, with the addition of the 'freeze_graph' and
'sparsify_gather' transforms.
tags: A list of tags with which to annotate the transformed MetaGraphDef.
checkpoint_path: A path to a checkpoint to restore during freezing,
if needed (default None).
Returns:
A new transformed MetaGraphDef protocol buffer.
"""
meta_graph_def = _meta_graph_pb2.MetaGraphDef()
initializer_names = _find_all_mandatory_retain_ops(base_meta_graph_def)
transformed_graph_def, updated_initializer_names = _do_transforms(
base_meta_graph_def.graph_def, input_names, output_names,
initializer_names, transforms, base_meta_graph_def.saver_def,
checkpoint_path)
meta_graph_def.graph_def.CopyFrom(transformed_graph_def)
meta_graph_def.meta_info_def.CopyFrom(base_meta_graph_def.meta_info_def)
meta_graph_def.meta_info_def.ClearField('tags')
for tag in tags:
meta_graph_def.meta_info_def.tags.append(tag)
base_op_names = [_compat.as_str(node.name)
for node in base_meta_graph_def.graph_def.node]
retained_op_names = [_compat.as_str(node.name)
for node in meta_graph_def.graph_def.node]
removed_op_names = set(base_op_names) - set(retained_op_names)
# Copy saver, excluding any pruned nodes if graph was not frozen.
# TODO(b/63447631): Revisit this once the problem is addressed. Currently
# _add_pruned_saver assumes that the save and restore nodes have not been
# removed but freeze_graph (correctly) removes them.
if _FREEZE_GRAPH_TRANSFORM not in transforms:
_add_pruned_saver(base_meta_graph_def, meta_graph_def, removed_op_names)
# Copy collections, excluding any pruned nodes
for collection_name in base_meta_graph_def.collection_def:
_add_pruned_collection(
base_meta_graph_def, meta_graph_def, collection_name,
removed_op_names)
# Append newly added initializers to collection.
_add_new_inits_to_collection(meta_graph_def, updated_initializer_names)
# Copy signature_defs, excluding any pruned nodes
for signature_name in base_meta_graph_def.signature_def:
_add_pruned_signature(
base_meta_graph_def, meta_graph_def, signature_name,
removed_op_names)
return meta_graph_def
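# ---------------------------------------------------------------------------
# Hedged usage sketch (for illustration only; the checkpoint path, node names
# and transform list below are assumptions, not values from a real model).
if __name__ == '__main__':
  _base = _meta_graph_pb2.MetaGraphDef()
  with open('/tmp/model/model.ckpt.meta', 'rb') as _f:  # hypothetical path
    _base.ParseFromString(_f.read())
  _transformed = meta_graph_transform(
      _base,
      input_names=['input:0'],
      output_names=['probabilities:0'],
      transforms=['strip_unused_nodes', 'fold_constants(ignore_errors=true)',
                  'freeze_graph'],
      tags=['serve'],
      checkpoint_path='/tmp/model/model.ckpt')  # hypothetical checkpoint
  print(_transformed.meta_info_def.tags)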
| apache-2.0 | -7,278,445,487,876,727,000 | 40.020752 | 80 | 0.693932 | false |
lbt/boss-launcher-webhook | src/webhook_launcher/app/urls.py | 1 | 1945 | # Copyright (C) 2013 Jolla Ltd.
# Contact: Islam Amer <islam.amer@jollamobile.com>
# All rights reserved.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from django.conf.urls import url, include
from django.contrib import admin
admin.autodiscover()
from rest_framework import routers
from webhook_launcher.app import views
router = routers.DefaultRouter()
router.register(r'webhookmappings', views.WebHookMappingViewSet)
router.register(r'buildservices', views.BuildServiceViewSet)
router.register(r'lastseenrevisions', views.LastSeenRevisionViewSet)
# The .../find view supports an alternate pk lookup
find = views.WebHookMappingViewSet.as_view({'get': 'find', 'put': 'find'})
trigger = views.WebHookMappingViewSet.as_view({'put': 'trigger'})
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^api/', include(router.urls)),
url(r'^api/webhookmappings/(?P<obsname>.*)/(?P<project>.*)/(?P<package>.*)/find', find),
url(r'^api/webhookmappings/(?P<obsname>.*)/(?P<project>.*)/(?P<package>.*)/trigger', trigger),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^login/', views.remotelogin_redirect, name='redirect'),
url(r'^landing/$', views.index, name='index'),
url(r'^$', views.index, name='index'),
]
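# Hedged examples of the custom routes above (host/project/package names are
# placeholders, and a configured Django test environment is assumed):
#
#     from django.test import Client
#     c = Client()
#     c.get('/api/webhookmappings/obs.example.com/home:user/mypackage/find')
#     c.put('/api/webhookmappings/obs.example.com/home:user/mypackage/trigger')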
| gpl-2.0 | 795,231,168,162,221,300 | 44.232558 | 98 | 0.730077 | false |
silenceli/nova | nova/tests/unit/api/openstack/compute/test_v3_auth.py | 11 | 2609 | # Copyright 2013 IBM Corp.
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
import webob.dec
from nova import context
from nova import test
from nova.tests.unit.api.openstack import fakes
class TestNoAuthMiddlewareV3(test.NoDBTestCase):
def setUp(self):
super(TestNoAuthMiddlewareV3, self).setUp()
self.stubs.Set(context, 'RequestContext', fakes.FakeRequestContext)
fakes.stub_out_rate_limiting(self.stubs)
fakes.stub_out_networking(self.stubs)
def test_authorize_user(self):
req = webob.Request.blank('/v2/fake')
req.headers['X-Auth-User'] = 'user1'
req.headers['X-Auth-Key'] = 'user1_key'
req.headers['X-Auth-Project-Id'] = 'user1_project'
result = req.get_response(fakes.wsgi_app_v21(use_no_auth=True))
self.assertEqual(result.status, '204 No Content')
self.assertEqual(result.headers['X-Server-Management-Url'],
"http://localhost/v2/fake")
def test_authorize_user_trailing_slash(self):
# make sure it works with trailing slash on the request
req = webob.Request.blank('/v2/fake/')
req.headers['X-Auth-User'] = 'user1'
req.headers['X-Auth-Key'] = 'user1_key'
req.headers['X-Auth-Project-Id'] = 'user1_project'
result = req.get_response(fakes.wsgi_app_v21(use_no_auth=True))
self.assertEqual(result.status, '204 No Content')
self.assertEqual(result.headers['X-Server-Management-Url'],
"http://localhost/v2/fake")
def test_auth_token_no_empty_headers(self):
req = webob.Request.blank('/v2/fake')
req.headers['X-Auth-User'] = 'user1'
req.headers['X-Auth-Key'] = 'user1_key'
req.headers['X-Auth-Project-Id'] = 'user1_project'
result = req.get_response(fakes.wsgi_app_v21(use_no_auth=True))
self.assertEqual(result.status, '204 No Content')
self.assertNotIn('X-CDN-Management-Url', result.headers)
self.assertNotIn('X-Storage-Url', result.headers)
| apache-2.0 | 7,899,064,966,411,528,000 | 41.080645 | 78 | 0.672288 | false |
sysadminmatmoz/odoo-clearcorp | TODO-7.0/mrp_production_sequence/__init__.py | 4 | 1061 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import mrp_production_sequence
| agpl-3.0 | -4,752,057,140,196,866,000 | 47.227273 | 78 | 0.611687 | false |
Abi1ity/uniclust2.0 | SQLAlchemy-0.9.9/setup.py | 1 | 5627 | """setup.py
Please see README for basic installation instructions.
"""
import os
import re
import sys
from distutils.command.build_ext import build_ext
from distutils.errors import (CCompilerError, DistutilsExecError,
DistutilsPlatformError)
has_feature = False
try:
from setuptools import setup, Extension
try:
# see
# https://bitbucket.org/pypa/setuptools/issue/65/deprecate-and-remove-features,
# where they may remove Feature.
from setuptools import Feature
has_feature = True
except ImportError:
pass
except ImportError:
from distutils.core import setup, Extension
py3k = False
cmdclass = {}
extra = {}
if sys.version_info < (2, 6):
raise Exception("SQLAlchemy requires Python 2.6 or higher.")
elif sys.version_info >= (3, 0):
py3k = True
import platform
cpython = platform.python_implementation() == 'CPython'
ext_modules = [
Extension('sqlalchemy.cprocessors',
sources=['lib/sqlalchemy/cextension/processors.c']),
Extension('sqlalchemy.cresultproxy',
sources=['lib/sqlalchemy/cextension/resultproxy.c']),
Extension('sqlalchemy.cutils',
sources=['lib/sqlalchemy/cextension/utils.c'])
]
ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError)
if sys.platform == 'win32':
# 2.6's distutils.msvc9compiler can raise an IOError when failing to
# find the compiler
ext_errors += (IOError,)
class BuildFailed(Exception):
def __init__(self):
self.cause = sys.exc_info()[1] # work around py 2/3 different syntax
class ve_build_ext(build_ext):
# This class allows C extension building to fail.
def run(self):
try:
build_ext.run(self)
except DistutilsPlatformError:
raise BuildFailed()
def build_extension(self, ext):
try:
build_ext.build_extension(self, ext)
except ext_errors:
raise BuildFailed()
except ValueError:
# this can happen on Windows 64 bit, see Python issue 7511
if "'path'" in str(sys.exc_info()[1]): # works with both py 2/3
raise BuildFailed()
raise
cmdclass['build_ext'] = ve_build_ext
def status_msgs(*msgs):
print('*' * 75)
for msg in msgs:
print(msg)
print('*' * 75)
def find_packages(location):
packages = []
for pkg in ['sqlalchemy']:
for _dir, subdirectories, files in (
os.walk(os.path.join(location, pkg))):
if '__init__.py' in files:
tokens = _dir.split(os.sep)[len(location.split(os.sep)):]
packages.append(".".join(tokens))
return packages
v_file = open(os.path.join(os.path.dirname(__file__),
'lib', 'sqlalchemy', '__init__.py'))
VERSION = re.compile(r".*__version__ = '(.*?)'",
re.S).match(v_file.read()).group(1)
v_file.close()
r_file = open(os.path.join(os.path.dirname(__file__), 'README.rst'))
readme = r_file.read()
r_file.close()
def run_setup(with_cext):
kwargs = extra.copy()
if with_cext:
if has_feature:
kwargs['features'] = {'cextensions': Feature(
"optional C speed-enhancements",
standard=True,
ext_modules=ext_modules
)}
else:
kwargs['ext_modules'] = ext_modules
setup(name="SQLAlchemy",
version=VERSION,
description="Database Abstraction Library",
author="Mike Bayer",
author_email="mike_mp@zzzcomputing.com",
url="http://www.sqlalchemy.org",
packages=find_packages('lib'),
package_dir={'': 'lib'},
license="MIT License",
cmdclass=cmdclass,
tests_require=['pytest >= 2.5.2', 'mock'],
test_suite="sqlalchemy.testing.distutils_run",
long_description=readme,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: Jython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Database :: Front-Ends",
"Operating System :: OS Independent",
],
**kwargs
)
if not cpython:
run_setup(False)
status_msgs(
"WARNING: C extensions are not supported on " +
"this Python platform, speedups are not enabled.",
"Plain-Python build succeeded."
)
elif os.environ.get('DISABLE_SQLALCHEMY_CEXT'):
run_setup(False)
status_msgs(
"DISABLE_SQLALCHEMY_CEXT is set; " +
"not attempting to build C extensions.",
"Plain-Python build succeeded."
)
else:
try:
run_setup(True)
except BuildFailed as exc:
status_msgs(
exc.cause,
"WARNING: The C extension could not be compiled, " +
"speedups are not enabled.",
"Failure information, if any, is above.",
"Retrying the build without the C extension now."
)
run_setup(False)
status_msgs(
"WARNING: The C extension could not be compiled, " +
"speedups are not enabled.",
"Plain-Python build succeeded."
)
| bsd-3-clause | -5,437,259,516,930,550,000 | 29.252688 | 87 | 0.582548 | false |
asm-products/pipfix | server/documents.py | 1 | 1618 | from mongoengine import *
from mongoengine.django import auth
import binascii
import os
class Vote(Document):
stuff = ReferenceField('Stuff')
pips = IntField(min_value=1, max_value=10)
user = ReferenceField('User', unique_with='stuff')
comment = StringField(max_length=256)
@property
def username(self):
return self.user.username
class User(auth.User):
email = EmailField(required=False)
twitter_id = IntField()
followed = ListField(IntField())
token = StringField(max_length=40, unique=True)
def save(self, *args, **kwargs):
if not self.id or not self.token:
self.token = binascii.hexlify(os.urandom(20)).decode()
super(User, self).save(*args, **kwargs)
class Stuff(Document):
stuff_id = StringField(max_length=120, required=True, unique=True, primary_key=True)
title = StringField(max_length=120, required=True)
year = IntField()
image = URLField()
description = StringField()
@property
def average(self):
return Vote.objects(stuff=self).average('pips')
class UserStuff(Document):
user = ReferenceField('User', unique_with="stuff")
stuff = ReferenceField('Stuff')
@property
def average(self):
users = User.objects(twitter_id__in=self.user.followed)
return Vote.objects(stuff=self.stuff, user__in=users).average('pips')
@property
def votes(self):
users = User.objects(twitter_id__in=self.user.followed)
return Vote.objects(stuff=self.stuff, user__in=users)
@property
def global_average(self):
return self.stuff.average | agpl-3.0 | -2,160,260,970,359,436,300 | 28.436364 | 88 | 0.668727 | false |
DerKastellan/challenge-tables | src/create.py | 1 | 1510 | #!/usr/bin/env python3
from parser import parseTables
from tables import ChallengeTable
from output import createHtml, createPdf
import sys
def parseArgs():
def convertPartyString(x):
# "5 3 3 2" => [5, 3, 3, 2]
return list(map(int, x.strip().split(" ")))
if len(sys.argv) < 3:
raise ValueError()
type = sys.argv[1]
if type not in { "html", "pdf" }:
raise ValueError()
path = sys.argv[2]
levels = list(map(convertPartyString, sys.argv[3:]))
return type, path, levels
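# Illustrative example (hypothetical arguments, not part of the original script):
# running `./create.py html table.html "5 3 3 2" "4 4 4"` makes parseArgs() return
# type="html", path="table.html", levels=[[5, 3, 3, 2], [4, 4, 4]].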
if __name__ == "__main__":
uri = "http://dnd.wizards.com/products/tabletop/dm-basic-rules"
print("... parsing table content from URI", uri, file=sys.stderr)
thresholds, multipliers = parseTables(uri)
print("... parsing table content from URI done", file=sys.stderr)
print("... computing challenge table", file=sys.stderr)
table = ChallengeTable(thresholds, multipliers)
type, path, levels = parseArgs()
tables = list(map(table.compute, levels))
content = list(zip(levels, tables)) # [ ( <party levels>, <challenge table> ) ]
html = createHtml(content)
if type == "html":
print("... writing challenge table as HTML to '{}'".format(path), file=sys.stderr)
with open(path, encoding="utf-8", mode="w") as f:
f.write(html)
else:
print("... writing challenge table as PDF to '{}'".format(path), file=sys.stderr)
createPdf(path, html)
print("... done", file=sys.stderr)
| mit | 4,030,577,365,514,959,000 | 29.2 | 90 | 0.617881 | false |
akshaynathr/mailman | src/mailman/app/subscriptions.py | 2 | 8324 | # Copyright (C) 2009-2012 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
"""Module stuff."""
from __future__ import absolute_import, print_function, unicode_literals
__metaclass__ = type
__all__ = [
'SubscriptionService',
'handle_ListDeletedEvent',
]
from flufl.password import generate
from operator import attrgetter
from storm.expr import And, Or
from uuid import UUID
from zope.component import getUtility
from zope.interface import implementer
from mailman.app.membership import add_member, delete_member
from mailman.config import config
from mailman.core.constants import system_preferences
from mailman.database.transaction import dbconnection
from mailman.interfaces.address import IEmailValidator
from mailman.interfaces.listmanager import (
IListManager, ListDeletedEvent, NoSuchListError)
from mailman.interfaces.member import DeliveryMode, MemberRole
from mailman.interfaces.subscriptions import (
ISubscriptionService, MissingUserError)
from mailman.interfaces.usermanager import IUserManager
from mailman.model.member import Member
def _membership_sort_key(member):
"""Sort function for get_members().
The members are sorted first by fully-qualified mailing list name,
then by subscribed email address, then by role.
"""
# member.mailing_list is already the fqdn_listname, not the IMailingList
# object.
return (member.mailing_list,
member.address.email,
int(member.role))
@implementer(ISubscriptionService)
class SubscriptionService:
"""Subscription services for the REST API."""
__name__ = 'members'
def get_members(self):
"""See `ISubscriptionService`."""
# {fqdn_listname -> {role -> [members]}}
by_list = {}
user_manager = getUtility(IUserManager)
for member in user_manager.members:
by_role = by_list.setdefault(member.mailing_list, {})
members = by_role.setdefault(member.role.name, [])
members.append(member)
# Flatten into single list sorted as per the interface.
all_members = []
address_of_member = attrgetter('address.email')
for fqdn_listname in sorted(by_list):
by_role = by_list[fqdn_listname]
all_members.extend(
sorted(by_role.get('owner', []), key=address_of_member))
all_members.extend(
sorted(by_role.get('moderator', []), key=address_of_member))
all_members.extend(
sorted(by_role.get('member', []), key=address_of_member))
return all_members
@dbconnection
def get_member(self, store, member_id):
"""See `ISubscriptionService`."""
members = store.find(
Member,
Member._member_id == member_id)
if members.count() == 0:
return None
else:
assert members.count() == 1, 'Too many matching members'
return members[0]
@dbconnection
def find_members(self, store,
subscriber=None, fqdn_listname=None, role=None):
"""See `ISubscriptionService`."""
# If `subscriber` is a user id, then we'll search for all addresses
# which are controlled by the user, otherwise we'll just search for
# the given address.
user_manager = getUtility(IUserManager)
if subscriber is None and fqdn_listname is None and role is None:
return []
# Querying for the subscriber is the most complicated part, because
# the parameter can either be an email address or a user id.
query = []
if subscriber is not None:
if isinstance(subscriber, basestring):
# subscriber is an email address.
address = user_manager.get_address(subscriber)
user = user_manager.get_user(subscriber)
# This probably could be made more efficient.
if address is None or user is None:
return []
query.append(Or(Member.address_id == address.id,
Member.user_id == user.id))
else:
# subscriber is a user id.
user = user_manager.get_user_by_id(subscriber)
address_ids = list(address.id for address in user.addresses
if address.id is not None)
if len(address_ids) == 0 or user is None:
return []
query.append(Or(Member.user_id == user.id,
Member.address_id.is_in(address_ids)))
# Calculate the rest of the query expression, which will get And'd
# with the Or clause above (if there is one).
if fqdn_listname is not None:
query.append(Member.mailing_list == fqdn_listname)
if role is not None:
query.append(Member.role == role)
results = store.find(Member, And(*query))
return sorted(results, key=_membership_sort_key)
def __iter__(self):
for member in self.get_members():
yield member
def join(self, fqdn_listname, subscriber,
display_name=None,
delivery_mode=DeliveryMode.regular,
role=MemberRole.member):
"""See `ISubscriptionService`."""
mlist = getUtility(IListManager).get(fqdn_listname)
if mlist is None:
raise NoSuchListError(fqdn_listname)
# Is the subscriber an email address or user id?
if isinstance(subscriber, basestring):
# It's an email address, so we'll want a real name. Make sure
# it's a valid email address, and let InvalidEmailAddressError
# propagate up.
getUtility(IEmailValidator).validate(subscriber)
if display_name is None:
display_name, at, domain = subscriber.partition('@')
# Because we want to keep the REST API simple, there is no
# password or language given to us. We'll use the system's
# default language for the user's default language. We'll set the
# password to a system default. This will have to get reset since
# it can't be retrieved. Note that none of these are used unless
# the address is completely new to us.
password = generate(int(config.passwords.password_length))
return add_member(mlist, subscriber, display_name, password,
delivery_mode,
system_preferences.preferred_language, role)
else:
# We have to assume it's a UUID.
assert isinstance(subscriber, UUID), 'Not a UUID'
user = getUtility(IUserManager).get_user_by_id(subscriber)
if user is None:
raise MissingUserError(subscriber)
return mlist.subscribe(user, role)
def leave(self, fqdn_listname, email):
"""See `ISubscriptionService`."""
mlist = getUtility(IListManager).get(fqdn_listname)
if mlist is None:
raise NoSuchListError(fqdn_listname)
# XXX for now, no notification or user acknowledgment.
delete_member(mlist, email, False, False)
def handle_ListDeletedEvent(event):
"""Delete a mailing list's members when the list is deleted."""
if not isinstance(event, ListDeletedEvent):
return
# Find all the members still associated with the mailing list.
members = getUtility(ISubscriptionService).find_members(
fqdn_listname=event.fqdn_listname)
for member in members:
member.unsubscribe()
| gpl-3.0 | -2,461,463,910,961,409,500 | 39.407767 | 78 | 0.632028 | false |
lukecwik/incubator-beam | sdks/python/apache_beam/io/gcp/gcsio_test.py | 1 | 25627 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for Google Cloud Storage client."""
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
import datetime
import errno
import io
import logging
import os
import random
import sys
import time
import unittest
from builtins import object
from builtins import range
from email.message import Message
# patches unittest.TestCase to be python3 compatible
import future.tests.base # pylint: disable=unused-import
import httplib2
import mock
# Protect against environments where apitools library is not available.
# pylint: disable=wrong-import-order, wrong-import-position
try:
from apache_beam.io.gcp import gcsio
from apache_beam.io.gcp.internal.clients import storage
from apitools.base.py.exceptions import HttpError
except ImportError:
HttpError = None
# pylint: enable=wrong-import-order, wrong-import-position
class FakeGcsClient(object):
# Fake storage client. Usage in gcsio.py is client.objects.Get(...) and
# client.objects.Insert(...).
def __init__(self):
self.objects = FakeGcsObjects()
# Referenced in GcsIO.copy_batch() and GcsIO.delete_batch().
self._http = object()
class FakeFile(object):
def __init__(
self, bucket, obj, contents, generation, crc32c=None, last_updated=None):
self.bucket = bucket
self.object = obj
self.contents = contents
self.generation = generation
self.crc32c = crc32c
self.last_updated = last_updated
def get_metadata(self):
last_updated_datetime = None
if self.last_updated:
last_updated_datetime = datetime.datetime.utcfromtimestamp(
self.last_updated)
return storage.Object(
bucket=self.bucket,
name=self.object,
generation=self.generation,
size=len(self.contents),
crc32c=self.crc32c,
updated=last_updated_datetime)
class FakeGcsObjects(object):
def __init__(self):
self.files = {}
# Store the last generation used for a given object name. Note that this
# has to persist even past the deletion of the object.
self.last_generation = {}
self.list_page_tokens = {}
def add_file(self, f):
self.files[(f.bucket, f.object)] = f
self.last_generation[(f.bucket, f.object)] = f.generation
def get_file(self, bucket, obj):
return self.files.get((bucket, obj), None)
def delete_file(self, bucket, obj):
del self.files[(bucket, obj)]
def get_last_generation(self, bucket, obj):
return self.last_generation.get((bucket, obj), 0)
def Get(self, get_request, download=None): # pylint: disable=invalid-name
f = self.get_file(get_request.bucket, get_request.object)
if f is None:
# Failing with an HTTP 404 if file does not exist.
raise HttpError({'status': 404}, None, None)
if download is None:
return f.get_metadata()
else:
stream = download.stream
def get_range_callback(start, end):
if not (start >= 0 and end >= start and end < len(f.contents)):
raise ValueError(
'start=%d end=%d len=%s' % (start, end, len(f.contents)))
stream.write(f.contents[start:end + 1])
download.GetRange = get_range_callback
def Insert(self, insert_request, upload=None): # pylint: disable=invalid-name
assert upload is not None
generation = self.get_last_generation(
insert_request.bucket, insert_request.name) + 1
f = FakeFile(insert_request.bucket, insert_request.name, b'', generation)
# Stream data into file.
stream = upload.stream
data_list = []
while True:
data = stream.read(1024 * 1024)
if not data:
break
data_list.append(data)
f.contents = b''.join(data_list)
self.add_file(f)
REWRITE_TOKEN = 'test_token'
def Rewrite(self, rewrite_request): # pylint: disable=invalid-name
if rewrite_request.rewriteToken == self.REWRITE_TOKEN:
dest_object = storage.Object()
return storage.RewriteResponse(
done=True,
objectSize=100,
resource=dest_object,
totalBytesRewritten=100)
src_file = self.get_file(
rewrite_request.sourceBucket, rewrite_request.sourceObject)
if not src_file:
raise HttpError(
httplib2.Response({'status': '404'}),
'404 Not Found',
'https://fake/url')
generation = self.get_last_generation(
rewrite_request.destinationBucket,
rewrite_request.destinationObject) + 1
dest_file = FakeFile(
rewrite_request.destinationBucket,
rewrite_request.destinationObject,
src_file.contents,
generation)
self.add_file(dest_file)
time.sleep(10) # time.sleep and time.time are mocked below.
return storage.RewriteResponse(
done=False,
objectSize=100,
rewriteToken=self.REWRITE_TOKEN,
totalBytesRewritten=5)
def Delete(self, delete_request): # pylint: disable=invalid-name
# Here, we emulate the behavior of the GCS service in raising a 404 error
# if this object already exists.
if self.get_file(delete_request.bucket, delete_request.object):
self.delete_file(delete_request.bucket, delete_request.object)
else:
raise HttpError(
httplib2.Response({'status': '404'}),
'404 Not Found',
'https://fake/url')
def List(self, list_request): # pylint: disable=invalid-name
bucket = list_request.bucket
prefix = list_request.prefix or ''
matching_files = []
for file_bucket, file_name in sorted(iter(self.files)):
if bucket == file_bucket and file_name.startswith(prefix):
file_object = self.files[(file_bucket, file_name)].get_metadata()
matching_files.append(file_object)
# Handle pagination.
items_per_page = 5
if not list_request.pageToken:
range_start = 0
else:
if list_request.pageToken not in self.list_page_tokens:
raise ValueError('Invalid page token.')
range_start = self.list_page_tokens[list_request.pageToken]
del self.list_page_tokens[list_request.pageToken]
result = storage.Objects(
items=matching_files[range_start:range_start + items_per_page])
if range_start + items_per_page < len(matching_files):
next_range_start = range_start + items_per_page
next_page_token = '_page_token_%s_%s_%d' % (
bucket, prefix, next_range_start)
self.list_page_tokens[next_page_token] = next_range_start
result.nextPageToken = next_page_token
return result
class FakeApiCall(object):
def __init__(self, exception, response):
self.exception = exception
self.is_error = exception is not None
# Response for Rewrite:
self.response = response
class FakeBatchApiRequest(object):
def __init__(self, **unused_kwargs):
self.operations = []
def Add(self, service, method, request): # pylint: disable=invalid-name
self.operations.append((service, method, request))
def Execute(self, unused_http, **unused_kwargs): # pylint: disable=invalid-name
api_calls = []
for service, method, request in self.operations:
exception = None
response = None
try:
response = getattr(service, method)(request)
except Exception as e: # pylint: disable=broad-except
exception = e
api_calls.append(FakeApiCall(exception, response))
return api_calls
@unittest.skipIf(HttpError is None, 'GCP dependencies are not installed')
class TestGCSPathParser(unittest.TestCase):
BAD_GCS_PATHS = [
'gs://',
'gs://bucket',
'gs:///name',
'gs:///',
'gs:/blah/bucket/name',
]
def test_gcs_path(self):
self.assertEqual(
gcsio.parse_gcs_path('gs://bucket/name'), ('bucket', 'name'))
self.assertEqual(
gcsio.parse_gcs_path('gs://bucket/name/sub'), ('bucket', 'name/sub'))
def test_bad_gcs_path(self):
for path in self.BAD_GCS_PATHS:
self.assertRaises(ValueError, gcsio.parse_gcs_path, path)
self.assertRaises(ValueError, gcsio.parse_gcs_path, 'gs://bucket/')
def test_gcs_path_object_optional(self):
self.assertEqual(
gcsio.parse_gcs_path('gs://bucket/name', object_optional=True),
('bucket', 'name'))
self.assertEqual(
gcsio.parse_gcs_path('gs://bucket/', object_optional=True),
('bucket', ''))
def test_bad_gcs_path_object_optional(self):
for path in self.BAD_GCS_PATHS:
self.assertRaises(ValueError, gcsio.parse_gcs_path, path, True)
@unittest.skipIf(HttpError is None, 'GCP dependencies are not installed')
@mock.patch.multiple(
'time', time=mock.MagicMock(side_effect=range(100)), sleep=mock.MagicMock())
class TestGCSIO(unittest.TestCase):
def _insert_random_file(
self, client, path, size, generation=1, crc32c=None, last_updated=None):
bucket, name = gcsio.parse_gcs_path(path)
f = FakeFile(
bucket,
name,
os.urandom(size),
generation,
crc32c=crc32c,
last_updated=last_updated)
client.objects.add_file(f)
return f
def setUp(self):
self.client = FakeGcsClient()
self.gcs = gcsio.GcsIO(self.client)
def test_num_retries(self):
# BEAM-7424: update num_retries accordingly if storage_client is
# regenerated.
self.assertEqual(gcsio.GcsIO().client.num_retries, 20)
def test_retry_func(self):
# BEAM-7667: update retry_func accordingly if storage_client is
# regenerated.
self.assertIsNotNone(gcsio.GcsIO().client.retry_func)
def test_exists(self):
file_name = 'gs://gcsio-test/dummy_file'
file_size = 1234
self._insert_random_file(self.client, file_name, file_size)
self.assertFalse(self.gcs.exists(file_name + 'xyz'))
self.assertTrue(self.gcs.exists(file_name))
@mock.patch.object(FakeGcsObjects, 'Get')
def test_exists_failure(self, mock_get):
# Raising an error other than 404. Raising 404 is a valid failure for
# exists() call.
mock_get.side_effect = HttpError({'status': 400}, None, None)
file_name = 'gs://gcsio-test/dummy_file'
file_size = 1234
self._insert_random_file(self.client, file_name, file_size)
with self.assertRaises(HttpError) as cm:
self.gcs.exists(file_name)
self.assertEqual(400, cm.exception.status_code)
def test_checksum(self):
file_name = 'gs://gcsio-test/dummy_file'
file_size = 1234
checksum = 'deadbeef'
self._insert_random_file(self.client, file_name, file_size, crc32c=checksum)
self.assertTrue(self.gcs.exists(file_name))
self.assertEqual(checksum, self.gcs.checksum(file_name))
def test_size(self):
file_name = 'gs://gcsio-test/dummy_file'
file_size = 1234
self._insert_random_file(self.client, file_name, file_size)
self.assertTrue(self.gcs.exists(file_name))
self.assertEqual(1234, self.gcs.size(file_name))
def test_last_updated(self):
file_name = 'gs://gcsio-test/dummy_file'
file_size = 1234
last_updated = 123456.78
self._insert_random_file(
self.client, file_name, file_size, last_updated=last_updated)
self.assertTrue(self.gcs.exists(file_name))
self.assertEqual(last_updated, self.gcs.last_updated(file_name))
def test_file_mode(self):
file_name = 'gs://gcsio-test/dummy_mode_file'
with self.gcs.open(file_name, 'wb') as f:
assert f.mode == 'wb'
with self.gcs.open(file_name, 'rb') as f:
assert f.mode == 'rb'
def test_bad_file_modes(self):
file_name = 'gs://gcsio-test/dummy_mode_file'
with self.assertRaises(ValueError):
self.gcs.open(file_name, 'w+')
with self.assertRaises(ValueError):
self.gcs.open(file_name, 'r+b')
def test_empty_batches(self):
self.assertEqual([], self.gcs.copy_batch([]))
self.assertEqual([], self.gcs.delete_batch([]))
def test_delete(self):
file_name = 'gs://gcsio-test/delete_me'
file_size = 1024
# Test deletion of non-existent file.
self.gcs.delete(file_name)
self._insert_random_file(self.client, file_name, file_size)
self.assertTrue(
gcsio.parse_gcs_path(file_name) in self.client.objects.files)
self.gcs.delete(file_name)
self.assertFalse(
gcsio.parse_gcs_path(file_name) in self.client.objects.files)
@mock.patch('apache_beam.io.gcp.gcsio.BatchApiRequest')
def test_delete_batch(self, *unused_args):
gcsio.BatchApiRequest = FakeBatchApiRequest
file_name_pattern = 'gs://gcsio-test/delete_me_%d'
file_size = 1024
num_files = 10
# Test deletion of non-existent files.
result = self.gcs.delete_batch(
[file_name_pattern % i for i in range(num_files)])
self.assertTrue(result)
for i, (file_name, exception) in enumerate(result):
self.assertEqual(file_name, file_name_pattern % i)
self.assertEqual(exception, None)
self.assertFalse(self.gcs.exists(file_name_pattern % i))
# Insert some files.
for i in range(num_files):
self._insert_random_file(self.client, file_name_pattern % i, file_size)
# Check files inserted properly.
for i in range(num_files):
self.assertTrue(self.gcs.exists(file_name_pattern % i))
# Execute batch delete.
self.gcs.delete_batch([file_name_pattern % i for i in range(num_files)])
# Check files deleted properly.
for i in range(num_files):
self.assertFalse(self.gcs.exists(file_name_pattern % i))
def test_copy(self):
src_file_name = 'gs://gcsio-test/source'
dest_file_name = 'gs://gcsio-test/dest'
file_size = 1024
self._insert_random_file(self.client, src_file_name, file_size)
self.assertTrue(
gcsio.parse_gcs_path(src_file_name) in self.client.objects.files)
self.assertFalse(
gcsio.parse_gcs_path(dest_file_name) in self.client.objects.files)
self.gcs.copy(src_file_name, dest_file_name, dest_kms_key_name='kms_key')
self.assertTrue(
gcsio.parse_gcs_path(src_file_name) in self.client.objects.files)
self.assertTrue(
gcsio.parse_gcs_path(dest_file_name) in self.client.objects.files)
# Test copy of non-existent files.
with self.assertRaisesRegex(HttpError, r'Not Found'):
self.gcs.copy(
'gs://gcsio-test/non-existent',
'gs://gcsio-test/non-existent-destination')
@mock.patch('apache_beam.io.gcp.gcsio.BatchApiRequest')
def test_copy_batch(self, *unused_args):
gcsio.BatchApiRequest = FakeBatchApiRequest
from_name_pattern = 'gs://gcsio-test/copy_me_%d'
to_name_pattern = 'gs://gcsio-test/destination_%d'
file_size = 1024
num_files = 10
result = self.gcs.copy_batch([(from_name_pattern % i, to_name_pattern % i)
for i in range(num_files)],
dest_kms_key_name='kms_key')
self.assertTrue(result)
for i, (src, dest, exception) in enumerate(result):
self.assertEqual(src, from_name_pattern % i)
self.assertEqual(dest, to_name_pattern % i)
self.assertTrue(isinstance(exception, IOError))
self.assertEqual(exception.errno, errno.ENOENT)
self.assertFalse(self.gcs.exists(from_name_pattern % i))
self.assertFalse(self.gcs.exists(to_name_pattern % i))
# Insert some files.
for i in range(num_files):
self._insert_random_file(self.client, from_name_pattern % i, file_size)
# Check files inserted properly.
for i in range(num_files):
self.assertTrue(self.gcs.exists(from_name_pattern % i))
# Execute batch copy.
self.gcs.copy_batch([(from_name_pattern % i, to_name_pattern % i)
for i in range(num_files)])
# Check files copied properly.
for i in range(num_files):
self.assertTrue(self.gcs.exists(from_name_pattern % i))
self.assertTrue(self.gcs.exists(to_name_pattern % i))
def test_copytree(self):
src_dir_name = 'gs://gcsio-test/source/'
dest_dir_name = 'gs://gcsio-test/dest/'
file_size = 1024
paths = ['a', 'b/c', 'b/d']
for path in paths:
src_file_name = src_dir_name + path
dest_file_name = dest_dir_name + path
self._insert_random_file(self.client, src_file_name, file_size)
self.assertTrue(
gcsio.parse_gcs_path(src_file_name) in self.client.objects.files)
self.assertFalse(
gcsio.parse_gcs_path(dest_file_name) in self.client.objects.files)
self.gcs.copytree(src_dir_name, dest_dir_name)
for path in paths:
src_file_name = src_dir_name + path
dest_file_name = dest_dir_name + path
self.assertTrue(
gcsio.parse_gcs_path(src_file_name) in self.client.objects.files)
self.assertTrue(
gcsio.parse_gcs_path(dest_file_name) in self.client.objects.files)
def test_rename(self):
src_file_name = 'gs://gcsio-test/source'
dest_file_name = 'gs://gcsio-test/dest'
file_size = 1024
self._insert_random_file(self.client, src_file_name, file_size)
self.assertTrue(
gcsio.parse_gcs_path(src_file_name) in self.client.objects.files)
self.assertFalse(
gcsio.parse_gcs_path(dest_file_name) in self.client.objects.files)
self.gcs.rename(src_file_name, dest_file_name)
self.assertFalse(
gcsio.parse_gcs_path(src_file_name) in self.client.objects.files)
self.assertTrue(
gcsio.parse_gcs_path(dest_file_name) in self.client.objects.files)
def test_full_file_read(self):
file_name = 'gs://gcsio-test/full_file'
file_size = 5 * 1024 * 1024 + 100
random_file = self._insert_random_file(self.client, file_name, file_size)
f = self.gcs.open(file_name)
self.assertEqual(f.mode, 'r')
f.seek(0, os.SEEK_END)
self.assertEqual(f.tell(), file_size)
self.assertEqual(f.read(), b'')
f.seek(0)
self.assertEqual(f.read(), random_file.contents)
def test_file_random_seek(self):
file_name = 'gs://gcsio-test/seek_file'
file_size = 5 * 1024 * 1024 - 100
random_file = self._insert_random_file(self.client, file_name, file_size)
f = self.gcs.open(file_name)
random.seed(0)
for _ in range(0, 10):
a = random.randint(0, file_size - 1)
b = random.randint(0, file_size - 1)
start, end = min(a, b), max(a, b)
f.seek(start)
self.assertEqual(f.tell(), start)
self.assertEqual(
f.read(end - start + 1), random_file.contents[start:end + 1])
self.assertEqual(f.tell(), end + 1)
def test_file_iterator(self):
file_name = 'gs://gcsio-test/iterating_file'
lines = []
line_count = 10
for _ in range(line_count):
line_length = random.randint(100, 500)
line = os.urandom(line_length).replace(b'\n', b' ') + b'\n'
lines.append(line)
contents = b''.join(lines)
bucket, name = gcsio.parse_gcs_path(file_name)
self.client.objects.add_file(FakeFile(bucket, name, contents, 1))
f = self.gcs.open(file_name)
read_lines = 0
for line in f:
read_lines += 1
self.assertEqual(read_lines, line_count)
def test_file_read_line(self):
file_name = 'gs://gcsio-test/read_line_file'
lines = []
# Set a small buffer size to exercise refilling the buffer.
# First line is carefully crafted so the newline falls as the last character
# of the buffer to exercise this code path.
read_buffer_size = 1024
lines.append(b'x' * 1023 + b'\n')
for _ in range(1, 1000):
line_length = random.randint(100, 500)
line = os.urandom(line_length).replace(b'\n', b' ') + b'\n'
lines.append(line)
contents = b''.join(lines)
file_size = len(contents)
bucket, name = gcsio.parse_gcs_path(file_name)
self.client.objects.add_file(FakeFile(bucket, name, contents, 1))
f = self.gcs.open(file_name, read_buffer_size=read_buffer_size)
# Test read of first two lines.
f.seek(0)
self.assertEqual(f.readline(), lines[0])
self.assertEqual(f.tell(), len(lines[0]))
self.assertEqual(f.readline(), lines[1])
# Test read at line boundary.
f.seek(file_size - len(lines[-1]) - 1)
self.assertEqual(f.readline(), b'\n')
# Test read at end of file.
f.seek(file_size)
self.assertEqual(f.readline(), b'')
# Test reads at random positions.
random.seed(0)
for _ in range(0, 10):
start = random.randint(0, file_size - 1)
line_index = 0
# Find line corresponding to start index.
chars_left = start
while True:
next_line_length = len(lines[line_index])
if chars_left - next_line_length < 0:
break
chars_left -= next_line_length
line_index += 1
f.seek(start)
self.assertEqual(f.readline(), lines[line_index][chars_left:])
def test_file_write(self):
file_name = 'gs://gcsio-test/write_file'
file_size = 5 * 1024 * 1024 + 2000
contents = os.urandom(file_size)
f = self.gcs.open(file_name, 'w')
self.assertEqual(f.mode, 'w')
f.write(contents[0:1000])
f.write(contents[1000:1024 * 1024])
f.write(contents[1024 * 1024:])
f.close()
bucket, name = gcsio.parse_gcs_path(file_name)
self.assertEqual(
self.client.objects.get_file(bucket, name).contents, contents)
def test_file_close(self):
file_name = 'gs://gcsio-test/close_file'
file_size = 5 * 1024 * 1024 + 2000
contents = os.urandom(file_size)
f = self.gcs.open(file_name, 'w')
self.assertEqual(f.mode, 'w')
f.write(contents)
f.close()
f.close() # This should not crash.
bucket, name = gcsio.parse_gcs_path(file_name)
self.assertEqual(
self.client.objects.get_file(bucket, name).contents, contents)
def test_file_flush(self):
file_name = 'gs://gcsio-test/flush_file'
file_size = 5 * 1024 * 1024 + 2000
contents = os.urandom(file_size)
bucket, name = gcsio.parse_gcs_path(file_name)
f = self.gcs.open(file_name, 'w')
self.assertEqual(f.mode, 'w')
f.write(contents[0:1000])
f.flush()
f.write(contents[1000:1024 * 1024])
f.flush()
f.flush() # Should be a NOOP.
f.write(contents[1024 * 1024:])
f.close() # This should already call the equivalent of flush() in its body.
self.assertEqual(
self.client.objects.get_file(bucket, name).contents, contents)
def test_context_manager(self):
# Test writing with a context manager.
file_name = 'gs://gcsio-test/context_manager_file'
file_size = 1024
contents = os.urandom(file_size)
with self.gcs.open(file_name, 'w') as f:
f.write(contents)
bucket, name = gcsio.parse_gcs_path(file_name)
self.assertEqual(
self.client.objects.get_file(bucket, name).contents, contents)
# Test reading with a context manager.
with self.gcs.open(file_name) as f:
self.assertEqual(f.read(), contents)
# Test that exceptions are not swallowed by the context manager.
with self.assertRaises(ZeroDivisionError):
with self.gcs.open(file_name) as f:
f.read(0 // 0)
def test_list_prefix(self):
bucket_name = 'gcsio-test'
objects = [
('cow/cat/fish', 2),
('cow/cat/blubber', 3),
('cow/dog/blubber', 4),
]
for (object_name, size) in objects:
file_name = 'gs://%s/%s' % (bucket_name, object_name)
self._insert_random_file(self.client, file_name, size)
test_cases = [
(
'gs://gcsio-test/c',
[
('cow/cat/fish', 2),
('cow/cat/blubber', 3),
('cow/dog/blubber', 4),
]),
(
'gs://gcsio-test/cow/',
[
('cow/cat/fish', 2),
('cow/cat/blubber', 3),
('cow/dog/blubber', 4),
]),
('gs://gcsio-test/cow/cat/fish', [
('cow/cat/fish', 2),
]),
]
for file_pattern, expected_object_names in test_cases:
expected_file_names = [('gs://%s/%s' % (bucket_name, object_name), size)
for (object_name, size) in expected_object_names]
self.assertEqual(
set(self.gcs.list_prefix(file_pattern).items()),
set(expected_file_names))
def test_mime_binary_encoding(self):
# This test verifies that the MIME email_generator library works properly
# and does not corrupt '\r\n' during uploads (the patch to apitools in
# Python 3 is applied in io/gcp/__init__.py).
from apitools.base.py.transfer import email_generator
if sys.version_info[0] == 3:
generator_cls = email_generator.BytesGenerator
else:
generator_cls = email_generator.Generator
output_buffer = io.BytesIO()
generator = generator_cls(output_buffer)
test_msg = 'a\nb\r\nc\n\r\n\n\nd'
message = Message()
message.set_payload(test_msg)
generator._handle_text(message)
self.assertEqual(test_msg.encode('ascii'), output_buffer.getvalue())
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
| apache-2.0 | 1,708,740,197,695,834,400 | 33.30656 | 82 | 0.650174 | false |
Alexx99/sdk | tests/sync_test.py | 15 | 15085 | """
Base class for testing syncing algorithm
(c) 2013-2014 by Mega Limited, Wellsford, New Zealand
This file is part of the MEGA SDK - Client Access Engine.
Applications using the MEGA API must present a valid application key
and comply with the rules set forth in the Terms of Service.
The MEGA SDK is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
@copyright Simplified (2-clause) BSD License.
You should have received a copy of the license along with this
program.
"""
# TODO tests:
# * "pause" sync
# * lock directory
# * large (> 4Gb) files
# * > 10000 folders to synchronize
from sync_test_base import SyncTestBase
from sync_test_base import get_random_str
from sync_test_base import generate_unicode_name
import random
import os
import logging
import time
class SyncTest(SyncTestBase):
"""
Class with MEGA SDK test methods
"""
# tests
def test_create_delete_files(self):
"""
        create files of different sizes,
        compare files in both folders,
        remove files, check that the files are removed from the second folder
"""
logging.info("Launching test_create_delete_files test")
self.assertTrue(self.app.is_alive(), "Test application is not running")
# make sure remote folders are empty
self.assertTrue(self.dirs_check_empty(), "Checking if remote folders are empty")
self.assertTrue(self.app.is_alive(), "Test application is not running")
# create files
l_files = self.files_create()
self.assertIsNotNone(l_files, "Creating files")
self.assertTrue(self.app.is_alive(), "Test application is not running")
self.app.sync()
# comparing
self.assertTrue(self.files_check(l_files), "Comparing files")
self.assertTrue(self.app.is_alive(), "Test application is not running")
# remove files
self.assertTrue(self.files_remove(l_files), "Removing files")
self.assertTrue(self.app.is_alive(), "Test application is not running")
# make sure remote folders are empty
self.assertTrue(self.dirs_check_empty(), "Checking if remote folders are empty")
self.assertTrue(self.app.is_alive(), "Test application is not running")
return True
def test_create_rename_delete_files(self):
"""
create files with different size,
compare files on both folders,
rename files
"""
logging.info("Launching test_create_rename_delete_files test")
self.assertTrue(self.app.is_alive(), "Test application is not running")
# make sure remote folders are empty
self.assertTrue(self.dirs_check_empty(), "Checking if remote folders are empty")
self.assertTrue(self.app.is_alive(), "Test application is not running")
# create files
l_files = self.files_create()
self.assertIsNotNone(l_files, "Creating files")
self.assertTrue(self.app.is_alive(), "Test application is not running")
self.app.sync()
# comparing
self.assertTrue(self.files_check(l_files), "Comparing files")
self.assertTrue(self.app.is_alive(), "Test application is not running")
# renaming
self.assertTrue(self.files_rename(l_files), "Renaming files")
self.assertTrue(self.app.is_alive(), "Test application is not running")
self.app.sync()
# comparing
self.assertTrue(self.files_check(l_files), "Comparing files")
self.assertTrue(self.app.is_alive(), "Test application is not running")
# remove files
self.assertTrue(self.files_remove(l_files), "Removing files")
self.assertTrue(self.app.is_alive(), "Test application is not running")
# make sure remote folders are empty
self.assertTrue(self.dirs_check_empty(), "Checking if remote folders are empty")
self.assertTrue(self.app.is_alive(), "Test application is not running")
return True
def test_create_delete_dirs(self):
"""
create directories with different amount of files,
compare directories on both sync folders,
remove directories, check that directories removed from the second folder
"""
logging.info("Launching test_create_delete_dirs test")
self.assertTrue(self.app.is_alive(), "Test application is not running")
# make sure remote folders are empty
self.assertTrue(self.dirs_check_empty(), "Checking if remote folders are empty")
self.assertTrue(self.app.is_alive(), "Test application is not running")
# create dirs
l_dirs = self.dirs_create()
self.assertIsNotNone(l_dirs, "Creating directories")
self.assertTrue(self.app.is_alive(), "Test application is not running")
self.app.sync()
# comparing
self.assertTrue(self.dirs_check(l_dirs), "Comparing directories")
self.assertTrue(self.app.is_alive(), "Test application is not running")
# remove files
self.assertTrue(self.dirs_remove(l_dirs), "Removing directories")
self.assertTrue(self.app.is_alive(), "Test application is not running")
# make sure remote folders are empty
self.assertTrue(self.dirs_check_empty(), "Checking if remote folders are empty")
self.assertTrue(self.app.is_alive(), "Test application is not running")
return True
def test_create_rename_delete_dirs(self):
"""
create directories with different amount of files,
compare directories on both sync folders,
rename directories
compare directories on both sync folders,
remove directories, check that directories removed from the second folder
"""
logging.info("Launching test_create_rename_delete_dirs test")
self.assertTrue(self.app.is_alive(), "Test application is not running")
# make sure remote folders are empty
self.assertTrue(self.dirs_check_empty(), "Checking if remote folders are empty")
self.assertTrue(self.app.is_alive(), "Test application is not running")
# create dirs
l_dirs = self.dirs_create()
self.assertIsNotNone(l_dirs, "Creating directories")
self.assertTrue(self.app.is_alive(), "Test application is not running")
self.app.sync()
# comparing
self.assertTrue(self.dirs_check(l_dirs), "Comparing directories")
self.assertTrue(self.app.is_alive(), "Test application is not running")
# rename dirs
self.assertTrue(self.dirs_rename(l_dirs), "Rename directories")
self.assertTrue(self.app.is_alive(), "Test application is not running")
self.app.sync()
# comparing
self.assertTrue(self.dirs_check(l_dirs), "Comparing directories")
self.assertTrue(self.app.is_alive(), "Test application is not running")
# remove files
self.assertTrue(self.dirs_remove(l_dirs), "Removing directories")
self.assertTrue(self.app.is_alive(), "Test application is not running")
# make sure remote folders are empty
self.assertTrue(self.dirs_check_empty(), "Checking if remote folders are empty")
self.assertTrue(self.app.is_alive(), "Test application is not running")
return True
def test_sync_files_write(self):
"""
        write data to a file located in both sync folders,
        check the result; expected result: both files contain the same content
"""
logging.info("Launching test_sync_files_write test")
self.assertTrue(self.app.is_alive(), "Test application is not running")
self.assertTrue(self.dirs_check_empty(), "Checking if remote folders are empty")
self.assertTrue(self.app.is_alive(), "Test application is not running")
for _ in range(0, 5):
self.assertTrue(self.app.is_alive(), "Test application is not running")
strlen = random.randint(10, 20)
fname = get_random_str(size=strlen)
fname_in = os.path.join(self.app.local_folder_in, fname)
fname_out = os.path.join(self.app.local_folder_out, fname)
logging.debug("Writing to both files: %s and %s" % (fname_in, fname_out))
with open(fname_in, 'a'):
os.utime(fname_in, None)
with open(fname_out, 'a'):
os.utime(fname_out, None)
self.app.sync()
for _ in range(10):
with open(fname_in, 'a') as f_in:
f_in.write(get_random_str(100))
with open(fname_out, 'a') as f_out:
f_out.write(get_random_str(100))
self.app.sync()
md5_in = self.md5_for_file(fname_in)
md5_out = self.md5_for_file(fname_out)
logging.debug("File %s md5: %s" % (fname_in, md5_in))
logging.debug("File %s md5: %s" % (fname_out, md5_out))
self.assertEqual(md5_in, md5_out, "Files do not match")
def test_local_operations(self):
"""
        create a local directory tree and compare it on both sync folders,
        then create a new subfolder, move an existing directory into it,
        and rename a folder multiple times before restoring its original name
"""
logging.info("Launching test_local_operations test")
self.assertTrue(self.app.is_alive(), "Test application is not running")
l_tree = self.local_tree_create("", 5)
self.assertIsNotNone(l_tree, "Failed to create directory tree!")
self.assertTrue(self.app.is_alive(), "Test application is not running")
self.app.sync()
self.assertTrue(self.local_tree_compare(l_tree), "Failed to compare directory trees!")
self.assertTrue(self.app.is_alive(), "Test application is not running")
self.assertTrue(self.local_tree_create_and_move(l_tree), "Failed to create a new sub folder and move an existing directory into it!")
self.assertTrue(self.app.is_alive(), "Test application is not running")
self.assertTrue(self.local_tree_multiple_renames(l_tree), "Failed to rename folder multiple times and then rename back to the original name!")
self.assertTrue(self.app.is_alive(), "Test application is not running")
def test_update_mtime(self):
"""
update mtime of a file in both local folders
"""
logging.info("Launching test_update_mtime test")
self.assertTrue(self.app.is_alive(), "Test application is not running")
# temporary workaround
in_file = os.path.join(self.app.local_mount_in, "mtime_test")
out_file = os.path.join(self.app.local_mount_out, "mtime_test")
for _ in range(self.app.nr_retries):
logging.debug("Touching: %s" % in_file)
now = time.time()
with open(in_file, 'a'):
os.utime(in_file, (now, now))
with open(out_file, 'a'):
os.utime(in_file, (now, now))
self.app.sync()
'''
try:
mtime = os.path.getmtime(out_file)
except OSError:
pass
try:
atime = os.path.getatime(out_file)
except OSError:
pass
logging.debug("atime: %d = %d, mtime: %d = %d" % (now, atime, now, mtime))
self.assertEqual(atime, now, "atime values are different")
self.assertEqual(mtime, now, "mtime values are different")
'''
self.assertTrue(self.app.is_alive(), "Test application is not running")
def test_create_rename_delete_unicode_files_dirs(self):
"""
create directories with different amount of files,
using Unicode encoding for files / directories names,
compare directories on both sync folders,
rename directories
compare directories on both sync folders,
remove directories, check that directories removed from the second folder
"""
logging.info("Launching test_create_rename_delete_unicode_files_dirs test")
self.assertTrue(self.app.is_alive(), "Test application is not running")
# make sure remote folders are empty
self.assertTrue(self.dirs_check_empty(), "Checking if remote folders are empty")
self.assertTrue(self.app.is_alive(), "Test application is not running")
# create files
l_files = self.files_create(generate_unicode_name)
self.assertIsNotNone(l_files, "Creating files")
self.assertTrue(self.app.is_alive(), "Test application is not running")
self.app.sync()
# comparing
self.assertTrue(self.files_check(l_files), "Comparing files")
self.assertTrue(self.app.is_alive(), "Test application is not running")
# renaming
self.assertTrue(self.files_rename(l_files, generate_unicode_name), "Renaming files")
self.assertTrue(self.app.is_alive(), "Test application is not running")
self.app.sync()
# comparing
self.assertTrue(self.files_check(l_files), "Comparing files")
self.assertTrue(self.app.is_alive(), "Test application is not running")
# remove files
self.assertTrue(self.files_remove(l_files), "Removing files")
self.assertTrue(self.app.is_alive(), "Test application is not running")
# make sure remote folders are empty
self.assertTrue(self.dirs_check_empty(), "Checking if remote folders are empty")
self.assertTrue(self.app.is_alive(), "Test application is not running")
# make sure remote folders are empty
self.assertTrue(self.dirs_check_empty(), "Checking if remote folders are empty")
self.assertTrue(self.app.is_alive(), "Test application is not running")
# create dirs
l_dirs = self.dirs_create(generate_unicode_name)
self.assertIsNotNone(l_dirs, "Creating directories")
self.assertTrue(self.app.is_alive(), "Test application is not running")
self.app.sync()
# comparing
self.assertTrue(self.dirs_check(l_dirs), "Comparing directories")
self.assertTrue(self.app.is_alive(), "Test application is not running")
# rename dirs
self.assertTrue(self.dirs_rename(l_dirs, generate_unicode_name), "Rename directories")
self.assertTrue(self.app.is_alive(), "Test application is not running")
self.app.sync()
# comparing
self.assertTrue(self.dirs_check(l_dirs), "Comparing directories")
self.assertTrue(self.app.is_alive(), "Test application is not running")
# remove files
self.assertTrue(self.dirs_remove(l_dirs), "Removing directories")
self.assertTrue(self.app.is_alive(), "Test application is not running")
# make sure remote folders are empty
self.assertTrue(self.dirs_check_empty(), "Checking if remote folders are empty")
self.assertTrue(self.app.is_alive(), "Test application is not running")
return True
| bsd-2-clause | -3,583,841,420,878,914,000 | 38.697368 | 150 | 0.641896 | false |
Rosebotics/cwc-projects | lego-ev3/sandbox/m1/mqtt/m3_ev3_led_button_communication.py | 1 | 6980 | #!/usr/bin/env python3
"""
The goal of this module is to practice doing MQTT communication. In this module you will only write code that runs on
the EV3. The code that runs on the PC (m3_pc_led_button_communication.py) is already written for you. You will need to
implement this module, run it on your EV3, then at the same time run m3_pc_led_button_com.py on your computer to do the
communication. Summary of the communication:
EV3 receiving:
The EV3 will have a delegate that has a method called "set_led" which receives two strings:
led_side_string (the first parameter) will be either "left" or "right"
led_color_string (the second parameter) will be either "green", "red", or "black"
When the EV3 receives a set_led message from the PC it will set the appropriate LED to the appropriate color.
Warning, the strings must be converted into appropriate values before using the ev3.Leds.set_color method.
EV3 sending:
The EV3 will send an mqtt message to the PC whenever the Up, Down, Left, or Right button is pressed on the EV3.
The method name sent will be "button_pressed" which will have 1 parameter (sent as a List with 1 item)
The parameter sent in the List will be the value "Up", "Down", "Left", or "Right"
PC receiving:
The PC will have a delegate that has a method called "button_pressed" which receives 1 string:
button_name (the only parameter) will be either "Up", "Down", "Left", or "Right"
That method is already done and it displays the result to the Tkinter gui window.
PC sending:
The PC will send an mqtt message to the EV3 whenever a Tkinter button is clicked.
The method name sent will be "set_led" which will have 2 parameters (sent as a List with 2 items)
The first parameter will be either "left" or "right"
The second parameter will be either "green", "red", or "black"
That method is already done and it will send when buttons are clicked on the Tkinter GUI.
Implement the TODOs below to complete this module.
Authors: David Fisher and PUT_YOUR_NAME_HERE. January 2017.
""" # TODO: 1. PUT YOUR NAME IN THE ABOVE LINE.
import mqtt_remote_method_calls as com
import ev3dev.ev3 as ev3
import time
class MyDelegate(object):
def __init__(self):
self.running = True
# TODO: 2. Prepare the one and only delegate method, set_led, to receive messages as described above.
# Here is some code that will likely be useful in this method to convert the led_side_string and led_color_string
# into a useful led_side and led_color values that can be used with the ev3.Leds.set_color method.
# led_side = None
# if led_side_string == "left":
# led_side = ev3.Leds.LEFT
# elif led_side_string == "right":
# led_side = ev3.Leds.RIGHT
#
# led_color = None
# if led_color_string == "green":
# led_color = ev3.Leds.GREEN
# elif led_color_string == "red":
# led_color = ev3.Leds.RED
# elif led_color_string == "black":
# led_color = ev3.Leds.BLACK
#
# if led_side is None or led_color is None:
# print("Invalid parameters sent to set_led. led_side_string = {} led_color_string = {}".format(
# led_side_string, led_color_string))
def set_led(self, led_side_string, led_color_string):
"""Sets the LED to the appropriate color."""
led_side = None
if led_side_string == "left":
led_side = ev3.Leds.LEFT
elif led_side_string == "right":
led_side = ev3.Leds.RIGHT
led_color = None
if led_color_string == "green":
led_color = ev3.Leds.GREEN
elif led_color_string == "red":
led_color = ev3.Leds.RED
elif led_color_string == "black":
led_color = ev3.Leds.BLACK
if led_side is None or led_color is None:
print("Invalid parameters sent to set_led. led_side_string = {} led_color_string = {}".format(
led_side_string, led_color_string))
ev3.Leds.set_color(led_side, led_color)
def main():
print("--------------------------------------------")
print(" LED Button communication")
print(" Press Back to exit when done.")
print("--------------------------------------------")
ev3.Sound.speak("LED Button communication").wait()
# TODO: 3. Create a delegate (an instance of the class above) and an MQTT client, passing in the delegate
# Once you have that done connect the mqtt_client.
# To help you out this time you simply need to uncomment the code below.
#
# my_delegate = MyDelegate()
# mqtt_client = com.MqttClient(my_delegate)
# mqtt_client.connect_to_pc()
my_delegate = MyDelegate()
mqtt_client = com.MqttClient(my_delegate)
mqtt_client.connect_to_pc()
# Buttons on EV3
btn = ev3.Button()
btn.on_up = lambda state: handle_button_press(state, mqtt_client, "Up")
btn.on_down = lambda state: handle_button_press(state, mqtt_client, "Down")
btn.on_left = lambda state: handle_button_press(state, mqtt_client, "Left")
btn.on_right = lambda state: handle_button_press(state, mqtt_client, "Right")
btn.on_backspace = lambda state: handle_shutdown(state, my_delegate)
while my_delegate.running:
btn.process()
time.sleep(0.01)
ev3.Sound.speak("Goodbye").wait()
ev3.Leds.set_color(ev3.Leds.LEFT, ev3.Leds.GREEN)
ev3.Leds.set_color(ev3.Leds.RIGHT, ev3.Leds.GREEN)
# ----------------------------------------------------------------------
# Button event callback functions
# ----------------------------------------------------------------------
def handle_button_press(button_state, mqtt_client, button_name):
"""Handle IR / button event."""
if button_state:
print("{} button was pressed".format(button_name))
# TODO 4: Send a message using MQTT that will:
# - Call the method called "button_pressed" on the delegate at the other end of the pipe.
# - Pass the parameters [button_name] as a list.
# This is meant to help you learn the mqtt_client.send_message syntax.
mqtt_client.send_message("button_pressed", [button_name])
# TODO 5: Run this program on your EV3 and run m3_pc_led_button_communication.py on your PC.
# Click the Tkinter buttons on your PC and watch the LEDs on the EV3
# Press the buttons on the EV3 (up, down, left, right) and watch the Tkinter GUI on your PC.
# When done, press the Back button on EV3 to end that program and click Quit on the Tkinter GUI.
def handle_shutdown(button_state, my_delegate):
"""Exit the program."""
if button_state:
my_delegate.running = False
# ----------------------------------------------------------------------
# Calls main to start the ball rolling.
# ----------------------------------------------------------------------
main()
| gpl-3.0 | -5,162,708,662,583,122,000 | 43.177215 | 120 | 0.625072 | false |
havard024/prego | venv/lib/python2.7/site-packages/captcha/tests/tests.py | 2 | 13903 | # -*- coding: utf-8 -*-
from captcha.conf import settings
from captcha.fields import CaptchaField, CaptchaTextInput
from captcha.models import CaptchaStore, get_safe_now
from django.conf import settings as django_settings
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.utils.translation import ugettext_lazy
import datetime
import json
import re
import six
import os
from six import u
class CaptchaCase(TestCase):
urls = 'captcha.tests.urls'
def setUp(self):
self.stores = {}
self.__current_settings_output_format = settings.CAPTCHA_OUTPUT_FORMAT
self.__current_settings_dictionary = settings.CAPTCHA_WORDS_DICTIONARY
self.__current_settings_punctuation = settings.CAPTCHA_PUNCTUATION
tested_helpers = ['captcha.helpers.math_challenge', 'captcha.helpers.random_char_challenge', 'captcha.helpers.unicode_challenge']
if os.path.exists('/usr/share/dict/words'):
settings.CAPTCHA_WORDS_DICTIONARY = '/usr/share/dict/words'
settings.CAPTCHA_PUNCTUATION = ';-,.'
tested_helpers.append('captcha.helpers.word_challenge')
tested_helpers.append('captcha.helpers.huge_words_and_punctuation_challenge')
for helper in tested_helpers:
challenge, response = settings._callable_from_string(helper)()
self.stores[helper.rsplit('.', 1)[-1].replace('_challenge', '_store')], _ = CaptchaStore.objects.get_or_create(challenge=challenge, response=response)
challenge, response = settings.get_challenge()()
self.stores['default_store'], _ = CaptchaStore.objects.get_or_create(challenge=challenge, response=response)
self.default_store = self.stores['default_store']
def tearDown(self):
settings.CAPTCHA_OUTPUT_FORMAT = self.__current_settings_output_format
settings.CAPTCHA_WORDS_DICTIONARY = self.__current_settings_dictionary
settings.CAPTCHA_PUNCTUATION = self.__current_settings_punctuation
def __extract_hash_and_response(self, r):
hash_ = re.findall(r'value="([0-9a-f]+)"', str(r.content))[0]
response = CaptchaStore.objects.get(hashkey=hash_).response
return hash_, response
def testImages(self):
for key in [store.hashkey for store in six.itervalues(self.stores)]:
response = self.client.get(reverse('captcha-image', kwargs=dict(key=key)))
self.assertEqual(response.status_code, 200)
self.assertTrue(response.has_header('content-type'))
self.assertEqual(response._headers.get('content-type'), ('Content-Type', 'image/png'))
def testAudio(self):
if not settings.CAPTCHA_FLITE_PATH:
return
for key in (self.stores.get('math_store').hashkey, self.stores.get('math_store').hashkey, self.default_store.hashkey):
response = self.client.get(reverse('captcha-audio', kwargs=dict(key=key)))
self.assertEqual(response.status_code, 200)
self.assertTrue(len(response.content) > 1024)
self.assertTrue(response.has_header('content-type'))
self.assertEqual(response._headers.get('content-type'), ('Content-Type', 'audio/x-wav'))
def testFormSubmit(self):
r = self.client.get(reverse('captcha-test'))
self.assertEqual(r.status_code, 200)
hash_, response = self.__extract_hash_and_response(r)
r = self.client.post(reverse('captcha-test'), dict(captcha_0=hash_, captcha_1=response, subject='xxx', sender='asasd@asdasd.com'))
self.assertEqual(r.status_code, 200)
self.assertTrue(str(r.content).find('Form validated') > 0)
r = self.client.post(reverse('captcha-test'), dict(captcha_0=hash_, captcha_1=response, subject='xxx', sender='asasd@asdasd.com'))
self.assertEqual(r.status_code, 200)
self.assertFalse(str(r.content).find('Form validated') > 0)
def testFormModelForm(self):
r = self.client.get(reverse('captcha-test-model-form'))
self.assertEqual(r.status_code, 200)
hash_, response = self.__extract_hash_and_response(r)
r = self.client.post(reverse('captcha-test-model-form'), dict(captcha_0=hash_, captcha_1=response, subject='xxx', sender='asasd@asdasd.com'))
self.assertEqual(r.status_code, 200)
self.assertTrue(str(r.content).find('Form validated') > 0)
r = self.client.post(reverse('captcha-test-model-form'), dict(captcha_0=hash_, captcha_1=response, subject='xxx', sender='asasd@asdasd.com'))
self.assertEqual(r.status_code, 200)
self.assertFalse(str(r.content).find('Form validated') > 0)
def testWrongSubmit(self):
for urlname in ('captcha-test', 'captcha-test-model-form'):
r = self.client.get(reverse(urlname))
self.assertEqual(r.status_code, 200)
r = self.client.post(reverse(urlname), dict(captcha_0='abc', captcha_1='wrong response', subject='xxx', sender='asasd@asdasd.com'))
self.assertFormError(r, 'form', 'captcha', ugettext_lazy('Invalid CAPTCHA'))
def testDeleteExpired(self):
self.default_store.expiration = get_safe_now() - datetime.timedelta(minutes=5)
self.default_store.save()
hash_ = self.default_store.hashkey
r = self.client.post(reverse('captcha-test'), dict(captcha_0=hash_, captcha_1=self.default_store.response, subject='xxx', sender='asasd@asdasd.com'))
self.assertEqual(r.status_code, 200)
self.assertFalse('Form validated' in str(r.content))
# expired -> deleted
try:
CaptchaStore.objects.get(hashkey=hash_)
self.fail()
except:
pass
def testCustomErrorMessage(self):
r = self.client.get(reverse('captcha-test-custom-error-message'))
self.assertEqual(r.status_code, 200)
# Wrong answer
r = self.client.post(reverse('captcha-test-custom-error-message'), dict(captcha_0='abc', captcha_1='wrong response'))
self.assertFormError(r, 'form', 'captcha', 'TEST CUSTOM ERROR MESSAGE')
# Empty answer
r = self.client.post(reverse('captcha-test-custom-error-message'), dict(captcha_0='abc', captcha_1=''))
self.assertFormError(r, 'form', 'captcha', ugettext_lazy('This field is required.'))
def testRepeatedChallenge(self):
CaptchaStore.objects.create(challenge='xxx', response='xxx')
try:
CaptchaStore.objects.create(challenge='xxx', response='xxx')
except Exception:
self.fail()
def testRepeatedChallengeFormSubmit(self):
__current_challange_function = settings.CAPTCHA_CHALLENGE_FUNCT
for urlname in ('captcha-test', 'captcha-test-model-form'):
settings.CAPTCHA_CHALLENGE_FUNCT = 'captcha.tests.trivial_challenge'
r1 = self.client.get(reverse(urlname))
r2 = self.client.get(reverse(urlname))
self.assertEqual(r1.status_code, 200)
self.assertEqual(r2.status_code, 200)
if re.findall(r'value="([0-9a-f]+)"', str(r1.content)):
hash_1 = re.findall(r'value="([0-9a-f]+)"', str(r1.content))[0]
else:
self.fail()
if re.findall(r'value="([0-9a-f]+)"', str(r2.content)):
hash_2 = re.findall(r'value="([0-9a-f]+)"', str(r2.content))[0]
else:
self.fail()
try:
store_1 = CaptchaStore.objects.get(hashkey=hash_1)
store_2 = CaptchaStore.objects.get(hashkey=hash_2)
except:
self.fail()
self.assertTrue(store_1.pk != store_2.pk)
self.assertTrue(store_1.response == store_2.response)
self.assertTrue(hash_1 != hash_2)
r1 = self.client.post(reverse(urlname), dict(captcha_0=hash_1, captcha_1=store_1.response, subject='xxx', sender='asasd@asdasd.com'))
self.assertEqual(r1.status_code, 200)
self.assertTrue(str(r1.content).find('Form validated') > 0)
try:
store_2 = CaptchaStore.objects.get(hashkey=hash_2)
except:
self.fail()
r2 = self.client.post(reverse(urlname), dict(captcha_0=hash_2, captcha_1=store_2.response, subject='xxx', sender='asasd@asdasd.com'))
self.assertEqual(r2.status_code, 200)
self.assertTrue(str(r2.content).find('Form validated') > 0)
settings.CAPTCHA_CHALLENGE_FUNCT = __current_challange_function
def testOutputFormat(self):
for urlname in ('captcha-test', 'captcha-test-model-form'):
settings.CAPTCHA_OUTPUT_FORMAT = u('%(image)s<p>Hello, captcha world</p>%(hidden_field)s%(text_field)s')
r = self.client.get(reverse(urlname))
self.assertEqual(r.status_code, 200)
self.assertTrue('<p>Hello, captcha world</p>' in str(r.content))
def testInvalidOutputFormat(self):
__current_settings_debug = django_settings.DEBUG
for urlname in ('captcha-test', 'captcha-test-model-form'):
# we turn on DEBUG because CAPTCHA_OUTPUT_FORMAT is only checked when DEBUG is on
django_settings.DEBUG = True
settings.CAPTCHA_OUTPUT_FORMAT = u('%(image)s')
try:
self.client.get(reverse(urlname))
self.fail()
except ImproperlyConfigured as e:
self.assertTrue('CAPTCHA_OUTPUT_FORMAT' in str(e))
django_settings.DEBUG = __current_settings_debug
def testPerFormFormat(self):
settings.CAPTCHA_OUTPUT_FORMAT = u('%(image)s testCustomFormatString %(hidden_field)s %(text_field)s')
r = self.client.get(reverse('captcha-test'))
self.assertTrue('testCustomFormatString' in str(r.content))
r = self.client.get(reverse('test_per_form_format'))
self.assertTrue('testPerFieldCustomFormatString' in str(r.content))
def testIssue31ProperLabel(self):
settings.CAPTCHA_OUTPUT_FORMAT = u('%(image)s %(hidden_field)s %(text_field)s')
r = self.client.get(reverse('captcha-test'))
self.assertTrue('<label for="id_captcha_1"' in str(r.content))
def testRefreshView(self):
r = self.client.get(reverse('captcha-refresh'), HTTP_X_REQUESTED_WITH='XMLHttpRequest')
try:
new_data = json.loads(six.text_type(r.content, encoding='ascii'))
self.assertTrue('image_url' in new_data)
except:
self.fail()
def testContentLength(self):
for key in [store.hashkey for store in six.itervalues(self.stores)]:
response = self.client.get(reverse('captcha-image', kwargs=dict(key=key)))
self.assertTrue(response.has_header('content-length'))
self.assertTrue(response['content-length'].isdigit())
self.assertTrue(int(response['content-length']))
def testIssue12ProperInstantiation(self):
"""
This test covers default Django field and widget behavior.
It does not assert anything; if something is wrong it will raise an error.
"""
settings.CAPTCHA_OUTPUT_FORMAT = u('%(image)s %(hidden_field)s %(text_field)s')
widget = CaptchaTextInput(attrs={'class': 'required'})
CaptchaField(widget=widget)
def testTestMode_Issue15(self):
__current_test_mode_setting = settings.CAPTCHA_TEST_MODE
settings.CAPTCHA_TEST_MODE = False
r = self.client.get(reverse('captcha-test'))
self.assertEqual(r.status_code, 200)
r = self.client.post(reverse('captcha-test'), dict(captcha_0='abc', captcha_1='wrong response', subject='xxx', sender='asasd@asdasd.com'))
self.assertFormError(r, 'form', 'captcha', ugettext_lazy('Invalid CAPTCHA'))
settings.CAPTCHA_TEST_MODE = True
# Test mode, only 'PASSED' is accepted
r = self.client.get(reverse('captcha-test'))
self.assertEqual(r.status_code, 200)
r = self.client.post(reverse('captcha-test'), dict(captcha_0='abc', captcha_1='wrong response', subject='xxx', sender='asasd@asdasd.com'))
self.assertFormError(r, 'form', 'captcha', ugettext_lazy('Invalid CAPTCHA'))
r = self.client.get(reverse('captcha-test'))
self.assertEqual(r.status_code, 200)
r = self.client.post(reverse('captcha-test'), dict(captcha_0='abc', captcha_1='passed', subject='xxx', sender='asasd@asdasd.com'))
self.assertTrue(str(r.content).find('Form validated') > 0)
settings.CAPTCHA_TEST_MODE = __current_test_mode_setting
def test_get_version(self):
import captcha
captcha.get_version(True)
def test_missing_value(self):
r = self.client.get(reverse('captcha-test-non-required'))
self.assertEqual(r.status_code, 200)
hash_, response = self.__extract_hash_and_response(r)
# Empty response is okay when required is False
r = self.client.post(reverse('captcha-test-non-required'), dict(subject='xxx', sender='asasd@asdasd.com'))
self.assertEqual(r.status_code, 200)
self.assertTrue(str(r.content).find('Form validated') > 0)
# But a valid response is okay, too
r = self.client.get(reverse('captcha-test-non-required'))
self.assertEqual(r.status_code, 200)
hash_, response = self.__extract_hash_and_response(r)
r = self.client.post(reverse('captcha-test-non-required'), dict(captcha_0=hash_, captcha_1=response, subject='xxx', sender='asasd@asdasd.com'))
self.assertEqual(r.status_code, 200)
self.assertTrue(str(r.content).find('Form validated') > 0)
def test_autocomplete_off(self):
r = self.client.get(reverse('captcha-test'))
self.assertTrue('autocomplete="off"' in six.text_type(r.content))
def trivial_challenge():
return 'trivial', 'trivial'
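# Illustrative sketch (assumed, not part of this test module): the forms behind
# the 'captcha-test' URLs above are expected to look roughly like this minimal
# form. The field name 'captcha' is what assertFormError() checks against, and
# CaptchaField renders the captcha_0 (hashkey) / captcha_1 (response) pair that
# the tests post.
from django import forms


class ExampleCaptchaForm(forms.Form):
    """Hypothetical stand-in for the forms exercised by these tests."""
    subject = forms.CharField(max_length=100)
    sender = forms.EmailField()
    captcha = CaptchaField()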
| mit | -1,954,035,191,427,403,300 | 48.127208 | 162 | 0.644825 | false |
jaredkoontz/leetcode | Python/game-of-life.py | 3 | 2517 | # Time: O(m * n)
# Space: O(1)
# According to the Wikipedia's article:
# "The Game of Life, also known simply as Life,
# is a cellular automaton devised by the British
# mathematician John Horton Conway in 1970."
#
# Given a board with m by n cells, each cell has
# an initial state live (1) or dead (0).
# Each cell interacts with its eight neighbors
# (horizontal, vertical, diagonal)
# using the following four rules
# (taken from the above Wikipedia article):
#
# - Any live cell with fewer than two live neighbors dies,
# as if caused by under-population.
# - Any live cell with two or three live neighbors lives
# on to the next generation.
# - Any live cell with more than three live neighbors dies,
# as if by over-population.
# - Any dead cell with exactly three live neighbors
# becomes a live cell, as if by reproduction.
#
# Write a function to compute the next state
# (after one update) of the board given its current state.
#
# Follow up:
# - Could you solve it in-place? Remember that the board needs
# to be updated at the same time: You cannot update some cells
# first and then use their updated values to update other cells.
# - In this question, we represent the board using a 2D array.
# In principle, the board is infinite, which would cause problems
# when the active area encroaches the border of the array.
# How would you address these problems?
#
class Solution(object):
def gameOfLife(self, board):
"""
:type board: List[List[int]]
:rtype: void Do not return anything, modify board in-place instead.
"""
m = len(board)
n = len(board[0]) if m else 0
for i in xrange(m):
for j in xrange(n):
count = 0
## Count live cells in 3x3 block.
for I in xrange(max(i-1, 0), min(i+2, m)):
for J in xrange(max(j-1, 0), min(j+2, n)):
count += board[I][J] & 1
# if (count == 4 && board[i][j]) means:
# Any live cell with three live neighbors lives.
# if (count == 3) means:
# Any live cell with two live neighbors lives.
# Any dead cell with exactly three live neighbors lives.
if (count == 4 and board[i][j]) or count == 3:
board[i][j] |= 2 # Mark as live.
for i in xrange(m):
for j in xrange(n):
board[i][j] >>= 1 # Update to the next state.
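# Illustrative usage (assumed, not part of the original solution): the board is
# updated in place using two bits per cell -- bit 0 holds the current state and
# bit 1 the next state -- which is why the final pass shifts every cell right.
if __name__ == "__main__":
    blinker = [[0, 0, 0],
               [1, 1, 1],
               [0, 0, 0]]
    Solution().gameOfLife(blinker)
    assert blinker == [[0, 1, 0],
                       [0, 1, 0],
                       [0, 1, 0]]  # the horizontal blinker becomes vertical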
| mit | 1,793,339,145,262,793,700 | 38.328125 | 76 | 0.604688 | false |
drnextgis/QGIS | python/pyplugin_installer/qgsplugininstallerfetchingdialog.py | 10 | 3439 | # -*- coding:utf-8 -*-
"""
/***************************************************************************
qgsplugininstallerfetchingdialog.py
Plugin Installer module
-------------------
Date : June 2013
Copyright : (C) 2013 by Borys Jurgiel
Email : info at borysjurgiel dot pl
This module is based on former plugin_installer plugin:
Copyright (C) 2007-2008 Matthew Perry
Copyright (C) 2008-2013 Borys Jurgiel
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from qgis.PyQt.QtWidgets import QDialog, QTreeWidgetItem
from .ui_qgsplugininstallerfetchingbase import Ui_QgsPluginInstallerFetchingDialogBase
from .installer_data import repositories
class QgsPluginInstallerFetchingDialog(QDialog, Ui_QgsPluginInstallerFetchingDialogBase):
# ----------------------------------------- #
def __init__(self, parent):
QDialog.__init__(self, parent)
self.setupUi(self)
self.progressBar.setRange(0, len(repositories.allEnabled()) * 100)
self.itemProgress = {}
self.item = {}
for key in repositories.allEnabled():
self.item[key] = QTreeWidgetItem(self.treeWidget)
self.item[key].setText(0, key)
if repositories.all()[key]["state"] > 1:
self.itemProgress[key] = 100
self.displayState(key, 0)
else:
self.itemProgress[key] = 0
self.displayState(key, 2)
self.treeWidget.resizeColumnToContents(0)
repositories.repositoryFetched.connect(self.repositoryFetched)
repositories.anythingChanged.connect(self.displayState)
# ----------------------------------------- #
def displayState(self, key, state, state2=None):
messages = [self.tr("Success"), self.tr("Resolving host name..."), self.tr("Connecting..."), self.tr("Host connected. Sending request..."), self.tr("Downloading data..."), self.tr("Idle"), self.tr("Closing connection..."), self.tr("Error")]
message = messages[state]
if state2:
message += " (%s%%)" % state2
self.item[key].setText(1, message)
if state == 4 and state2:
self.itemProgress[key] = state2
totalProgress = sum(self.itemProgress.values())
self.progressBar.setValue(totalProgress)
# ----------------------------------------- #
def repositoryFetched(self, repoName):
self.itemProgress[repoName] = 100
if repositories.all()[repoName]["state"] == 2:
self.displayState(repoName, 0)
else:
self.displayState(repoName, 7)
if not repositories.fetchingInProgress():
self.close()
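# Illustrative usage sketch (assumed, not part of this module): the plugin
# installer opens this dialog modally while repositories are being fetched,
# roughly:
#
#   fetchDlg = QgsPluginInstallerFetchingDialog(parent)
#   fetchDlg.exec_()   # repositoryFetched() closes it once every repository is done
#
# The modal exec_() call and the parent widget are assumptions for illustration.
# Each enabled repository contributes 0-100 units to the progress bar, which is
# why __init__ sets the bar range to len(repositories.allEnabled()) * 100.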
| gpl-2.0 | -5,484,864,912,772,397,000 | 44.25 | 248 | 0.500436 | false |
yaqiyang/autorest | src/generator/AutoRest.Python.Azure.Tests/Expected/AcceptanceTests/AzureResource/autorestresourceflatteningtestservice/auto_rest_resource_flattening_test_service.py | 3 | 16476 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.service_client import ServiceClient
from msrest import Serializer, Deserializer
from msrestazure import AzureConfiguration
from .version import VERSION
from msrest.pipeline import ClientRawResponse
import uuid
from . import models
class AutoRestResourceFlatteningTestServiceConfiguration(AzureConfiguration):
"""Configuration for AutoRestResourceFlatteningTestService
Note that all parameters used to create this instance are saved as instance
attributes.
:param credentials: Credentials needed for the client to connect to Azure.
:type credentials: :mod:`A msrestazure Credentials
object<msrestazure.azure_active_directory>`
:param accept_language: Gets or sets the preferred language for the
response.
:type accept_language: str
:param long_running_operation_retry_timeout: Gets or sets the retry
timeout in seconds for Long Running Operations. Default value is 30.
:type long_running_operation_retry_timeout: int
:param generate_client_request_id: When set to true a unique
x-ms-client-request-id value is generated and included in each request.
Default is true.
:type generate_client_request_id: bool
:param str base_url: Service URL
:param str filepath: Existing config
"""
def __init__(
self, credentials, accept_language='en-US', long_running_operation_retry_timeout=30, generate_client_request_id=True, base_url=None, filepath=None):
if credentials is None:
raise ValueError("Parameter 'credentials' must not be None.")
if accept_language is not None and not isinstance(accept_language, str):
raise TypeError("Optional parameter 'accept_language' must be str.")
if not base_url:
base_url = 'http://localhost'
super(AutoRestResourceFlatteningTestServiceConfiguration, self).__init__(base_url, filepath)
self.add_user_agent('autorestresourceflatteningtestservice/{}'.format(VERSION))
self.add_user_agent('Azure-SDK-For-Python')
self.credentials = credentials
self.accept_language = accept_language
self.long_running_operation_retry_timeout = long_running_operation_retry_timeout
self.generate_client_request_id = generate_client_request_id
class AutoRestResourceFlatteningTestService(object):
"""Resource Flattening for AutoRest
:ivar config: Configuration for client.
:vartype config: AutoRestResourceFlatteningTestServiceConfiguration
:param credentials: Credentials needed for the client to connect to Azure.
:type credentials: :mod:`A msrestazure Credentials
object<msrestazure.azure_active_directory>`
:param accept_language: Gets or sets the preferred language for the
response.
:type accept_language: str
:param long_running_operation_retry_timeout: Gets or sets the retry
timeout in seconds for Long Running Operations. Default value is 30.
:type long_running_operation_retry_timeout: int
:param generate_client_request_id: When set to true a unique
x-ms-client-request-id value is generated and included in each request.
Default is true.
:type generate_client_request_id: bool
:param str base_url: Service URL
:param str filepath: Existing config
"""
def __init__(
self, credentials, accept_language='en-US', long_running_operation_retry_timeout=30, generate_client_request_id=True, base_url=None, filepath=None):
self.config = AutoRestResourceFlatteningTestServiceConfiguration(credentials, accept_language, long_running_operation_retry_timeout, generate_client_request_id, base_url, filepath)
self._client = ServiceClient(self.config.credentials, self.config)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
def put_array(
self, resource_array=None, custom_headers=None, raw=False, **operation_config):
"""Put External Resource as an Array.
:param resource_array: External Resource as an Array to put
:type resource_array: list of :class:`Resource
<fixtures.acceptancetestsazureresource.models.Resource>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/azure/resource-flatten/array'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
if resource_array is not None:
body_content = self._serialize.body(resource_array, '[Resource]')
else:
body_content = None
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_array(
self, custom_headers=None, raw=False, **operation_config):
"""Get External Resource as an Array.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: list of :class:`FlattenedProduct
<fixtures.acceptancetestsazureresource.models.FlattenedProduct>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/azure/resource-flatten/array'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('[FlattenedProduct]', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def put_dictionary(
self, resource_dictionary=None, custom_headers=None, raw=False, **operation_config):
"""Put External Resource as a Dictionary.
:param resource_dictionary: External Resource as a Dictionary to put
:type resource_dictionary: dict
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/azure/resource-flatten/dictionary'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
if resource_dictionary is not None:
body_content = self._serialize.body(resource_dictionary, '{FlattenedProduct}')
else:
body_content = None
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_dictionary(
self, custom_headers=None, raw=False, **operation_config):
"""Get External Resource as a Dictionary.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/azure/resource-flatten/dictionary'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{FlattenedProduct}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def put_resource_collection(
self, resource_complex_object=None, custom_headers=None, raw=False, **operation_config):
"""Put External Resource as a ResourceCollection.
:param resource_complex_object: External Resource as a
ResourceCollection to put
:type resource_complex_object: :class:`ResourceCollection
<fixtures.acceptancetestsazureresource.models.ResourceCollection>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/azure/resource-flatten/resourcecollection'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
if resource_complex_object is not None:
body_content = self._serialize.body(resource_complex_object, 'ResourceCollection')
else:
body_content = None
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_resource_collection(
self, custom_headers=None, raw=False, **operation_config):
"""Get External Resource as a ResourceCollection.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`ResourceCollection
<fixtures.acceptancetestsazureresource.models.ResourceCollection>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/azure/resource-flatten/resourcecollection'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ResourceCollection', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
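# Illustrative usage sketch (assumed, not part of the generated client): a
# caller typically constructs the client with msrestazure credentials and the
# test server URL, then exercises the flattening operations defined above.
#
#   credentials = ...  # any msrestazure credentials object
#   client = AutoRestResourceFlatteningTestService(
#       credentials, base_url='http://localhost:3000')
#   client.put_array([])                # send an (empty) external resource array
#   products = client.get_array()       # -> list of models.FlattenedProduct
#   collection = client.get_resource_collection()
#
# The base_url and the empty payload are placeholders for illustration only.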
| mit | -2,322,840,037,443,468,300 | 41.354756 | 188 | 0.664482 | false |
jonnary/keystone | keystone/tests/unit/ksfixtures/database.py | 4 | 3676 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import os
import fixtures
from oslo_config import cfg
from oslo_db import options as db_options
from keystone.common import sql
from keystone.tests import unit as tests
CONF = cfg.CONF
def run_once(f):
"""A decorator to ensure the decorated function is only executed once.
The decorated function cannot expect any arguments.
"""
@functools.wraps(f)
def wrapper():
if not wrapper.already_ran:
f()
wrapper.already_ran = True
wrapper.already_ran = False
return wrapper
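# Illustrative usage (assumed): any zero-argument function can be guarded the
# same way _load_sqlalchemy_models below is, e.g.:
#
#   @run_once
#   def _warm_cache():
#       ...  # body executes only on the first call; later calls return immediately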
# NOTE(I159): All the options are cleared on every execution, so the method
# must be called at every fixture initialization.
def initialize_sql_session():
# Make sure the DB is located in the correct location, in this case set
# the default value, as this should be able to be overridden in some
# test cases.
db_options.set_defaults(
CONF,
connection=tests.IN_MEM_DB_CONN_STRING)
@run_once
def _load_sqlalchemy_models():
"""Find all modules containing SQLAlchemy models and import them.
This creates more consistent, deterministic test runs because tables
for all core and extension models are always created in the test
database. We ensure this by importing all modules that contain model
definitions.
The database schema during test runs is created using reflection.
Reflection is simply SQLAlchemy taking the model definitions for
all models currently imported and making tables for each of them.
The database schema created during test runs may vary between tests
as more models are imported. Importing all models at the start of
the test run avoids this problem.
"""
keystone_root = os.path.normpath(os.path.join(
os.path.dirname(__file__), '..', '..', '..'))
for root, dirs, files in os.walk(keystone_root):
# NOTE(morganfainberg): Slice the keystone_root off the root to ensure
# we do not end up with a module name like:
# Users.home.openstack.keystone.assignment.backends.sql
root = root[len(keystone_root):]
if root.endswith('backends') and 'sql.py' in files:
# The root will be prefixed with an instance of os.sep, which will
# make the root after replacement '.<root>', the 'keystone' part
# of the module path is always added to the front
module_name = ('keystone.%s.sql' %
root.replace(os.sep, '.').lstrip('.'))
__import__(module_name)
class Database(fixtures.Fixture):
"""A fixture for setting up and tearing down a database.
"""
def __init__(self):
super(Database, self).__init__()
initialize_sql_session()
_load_sqlalchemy_models()
def setUp(self):
super(Database, self).setUp()
self.engine = sql.get_engine()
self.addCleanup(sql.cleanup)
sql.ModelBase.metadata.create_all(bind=self.engine)
self.addCleanup(sql.ModelBase.metadata.drop_all, bind=self.engine)
def recreate(self):
sql.ModelBase.metadata.create_all(bind=self.engine)
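# Illustrative usage sketch (assumed, not part of this module): SQL-backed test
# cases enable the fixture via useFixture(), which creates all model tables on
# the in-memory engine and registers cleanup automatically, e.g.:
#
#   class ExampleSqlTestCase(tests.TestCase):
#       def setUp(self):
#           super(ExampleSqlTestCase, self).setUp()
#           self.useFixture(Database())
#
# The test case name is hypothetical; tests.TestCase and useFixture() come from
# the keystone unit test base classes and the fixtures library respectively.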
| apache-2.0 | 8,122,989,756,085,790,000 | 34.009524 | 78 | 0.684168 | false |