text
stringlengths
213
32.3k
import re
import os
import sys
import time
import types
import getopt
import unittest
import traceback

try:
    # Python >=2.7 and >=3.2
    from unittest.runner import _TextTestResult
except ImportError:
    from unittest import _TextTestResult

__metaclass__ = type


def stderr(text):
    """Write a single line to standard error."""
    sys.stderr.write(text)
    sys.stderr.write("\n")


class Options:
    """Configurable properties of the test runner."""

    # test location
    basedir = ''                # base directory for tests (defaults to
                                # basedir of argv[0] + 'src'), must be absolute
    follow_symlinks = True      # should symlinks to subdirectories be
                                # followed? (hardcoded, may cause loops)

    # which tests to run
    unit_tests = False          # unit tests (default if both are false)
    functional_tests = False    # functional tests

    # test filtering
    level = 1                   # run only tests at this or lower level
                                # (if None, runs all tests)
    pathname_regex = ''         # regexp for filtering filenames
    test_regex = ''             # regexp for filtering test cases

    # actions to take
    list_files = False          # --list-files
    list_tests = False          # --list-tests
    list_hooks = False          # --list-hooks
    run_tests = True            # run tests (disabled by --list-foo)

    # output verbosity
    verbosity = 0               # verbosity level (-v)
    quiet = 0                   # do not print anything on success (-q)
    warn_omitted = False        # produce warnings when a test case is
                                # not included in a test suite (-w)
    progress = False            # show running progress (-p)
    coverage = False            # produce coverage reports (--coverage)
    coverdir = 'coverage'       # where to put them (currently hardcoded)
    immediate_errors = False    # show tracebacks twice (currently hardcoded)
    screen_width = 80           # screen width (autodetected)


def compile_matcher(regex):
    """Return a function that takes one argument and returns True or False.

    Regex is a regular expression.  Empty regex matches everything.  There
    is one expression: if the regex starts with "!", the meaning of it is
    reversed.
    """
    if not regex:
        return lambda x: True
    elif regex == '!':
        return lambda x: False
    elif regex.startswith('!'):
        rx = re.compile(regex[1:])
        return lambda x: rx.search(x) is None
    else:
        rx = re.compile(regex)
        return lambda x: rx.search(x) is not None


def walk_with_symlinks(top, func, arg):
    """Like os.path.walk, but follows symlinks on POSIX systems.

    If the symlinks create a loop, this function will never finish.
    """
    try:
        names = os.listdir(top)
    except os.error:
        return
    func(arg, top, names)
    exceptions = ('.', '..')
    for name in names:
        if name not in exceptions:
            name = os.path.join(top, name)
            if os.path.isdir(name):
                walk_with_symlinks(name, func, arg)


def walk_without_symlinks(top, func, arg):
    """Portable replacement for os.path.walk (removed in Python 3).

    Visits directories top-down without following symlinks and calls
    ``func(arg, dirname, names)`` for each directory, like os.path.walk did.
    """
    for dirpath, dirnames, filenames in os.walk(top, followlinks=False):
        func(arg, dirpath, dirnames + filenames)


def get_test_files(cfg):
    """Return a list of test module filenames."""
    matcher = compile_matcher(cfg.pathname_regex)
    results = []
    test_names = []
    if cfg.unit_tests:
        test_names.append('tests')
    if cfg.functional_tests:
        test_names.append('ftests')
    baselen = len(cfg.basedir) + 1

    def visit(ignored, dir, files):
        # Inside a 'tests'/'ftests' package we pick up every test*.py
        # module; everywhere else we only look for tests.py/ftests.py.
        if os.path.basename(dir) not in test_names:
            for name in test_names:
                if name + '.py' in files:
                    path = os.path.join(dir, name + '.py')
                    if matcher(path[baselen:]):
                        results.append(path)
            return
        if '__init__.py' not in files:
            stderr("%s is not a package" % dir)
            return
        for file in files:
            if file.startswith('test') and file.endswith('.py'):
                path = os.path.join(dir, file)
                if matcher(path[baselen:]):
                    results.append(path)

    if cfg.follow_symlinks:
        walker = walk_with_symlinks
    else:
        # BUG FIX: os.path.walk does not exist on Python 3.
        walker = walk_without_symlinks
    walker(cfg.basedir, visit, None)
    results.sort()
    return results


def import_module(filename, cfg, cov=None):
    """Import and return a module."""
    filename = os.path.splitext(filename)[0]
    modname = filename[len(cfg.basedir):].replace(os.path.sep, '.')
    if modname.startswith('.'):
        modname = modname[1:]
    if cov is not None:
        cov.start()
    mod = __import__(modname)
    if cov is not None:
        cov.stop()
    # __import__ returns the top-level package; walk down to the module.
    components = modname.split('.')
    for comp in components[1:]:
        mod = getattr(mod, comp)
    return mod


def filter_testsuite(suite, matcher, level=None):
    """Return a flattened list of test cases that match the given matcher."""
    if not isinstance(suite, unittest.TestSuite):
        raise TypeError('not a TestSuite', suite)
    results = []
    for test in suite._tests:
        if level is not None and getattr(test, 'level', 0) > level:
            continue
        if isinstance(test, unittest.TestCase):
            testname = test.id()  # package.module.class.method
            if matcher(testname):
                results.append(test)
        else:
            filtered = filter_testsuite(test, matcher, level)
            results.extend(filtered)
    return results


def get_all_test_cases(module):
    """Return a list of all test case classes defined in a given module."""
    results = []
    for name in dir(module):
        if not name.startswith('Test'):
            continue
        item = getattr(module, name)
        # BUG FIX: types.ClassType does not exist on Python 3.  TestCase
        # subclasses are always new-style classes (unittest.TestCase
        # inherits from object even on Python 2), so isinstance(item, type)
        # is sufficient on both versions.
        if isinstance(item, type) and issubclass(item, unittest.TestCase):
            results.append(item)
    return results


def get_test_classes_from_testsuite(suite):
    """Return a set of test case classes used in a test suite."""
    if not isinstance(suite, unittest.TestSuite):
        raise TypeError('not a TestSuite', suite)
    results = set()
    for test in suite._tests:
        if isinstance(test, unittest.TestCase):
            results.add(test.__class__)
        else:
            classes = get_test_classes_from_testsuite(test)
            results.update(classes)
    return results


def get_test_cases(test_files, cfg, cov=None):
    """Return a list of test cases from a given list of test modules."""
    matcher = compile_matcher(cfg.test_regex)
    results = []
    for file in test_files:
        module = import_module(file, cfg, cov=cov)
        if cov is not None:
            cov.start()
        test_suite = module.test_suite()
        if cov is not None:
            cov.stop()
        if test_suite is None:
            continue
        if cfg.warn_omitted:
            all_classes = set(get_all_test_cases(module))
            classes_in_suite = get_test_classes_from_testsuite(test_suite)
            difference = all_classes - classes_in_suite
            for test_class in difference:
                # surround the warning with blank lines, otherwise it tends
                # to get lost in the noise
                stderr("\n%s: WARNING: %s not in test suite\n"
                       % (file, test_class.__name__))
        if (cfg.level is not None and
                getattr(test_suite, 'level', 0) > cfg.level):
            continue
        filtered = filter_testsuite(test_suite, matcher, cfg.level)
        results.extend(filtered)
    return results


def get_test_hooks(test_files, cfg, cov=None):
    """Return a list of test hooks from a given list of test modules."""
    results = []
    dirs = set(map(os.path.dirname, test_files))
    # Functional test directories share the hooks of their sibling
    # unit-test directories.
    for dir in list(dirs):
        if os.path.basename(dir) == 'ftests':
            dirs.add(os.path.join(os.path.dirname(dir), 'tests'))
    dirs = list(dirs)
    dirs.sort()
    for dir in dirs:
        filename = os.path.join(dir, 'checks.py')
        if os.path.exists(filename):
            # BUG FIX: was import_module(filename, cfg, tracer=tracer),
            # which raised NameError ('tracer' is undefined) and passed a
            # keyword that import_module does not accept.
            module = import_module(filename, cfg, cov=cov)
            if cov is not None:
                cov.start()
            hooks = module.test_hooks()
            if cov is not None:
                cov.stop()
            results.extend(hooks)
    return results


class CustomTestResult(_TextTestResult):
    """Customised TestResult.

    It can show a progress bar, and displays tracebacks for errors and
    failures as soon as they happen, in addition to listing them all at
    the end.
    """

    __super = _TextTestResult
    __super_init = __super.__init__
    __super_startTest = __super.startTest
    __super_stopTest = __super.stopTest
    __super_printErrors = __super.printErrors

    def __init__(self, stream, descriptions, verbosity, count, cfg, hooks):
        self.__super_init(stream, descriptions, verbosity)
        self.count = count
        self.cfg = cfg
        self.hooks = hooks
        if cfg.progress:
            self.dots = False
            self._lastWidth = 0
            self._maxWidth = cfg.screen_width - len("xxxx/xxxx (xxx.x%): ") - 1

    def startTest(self, test):
        if self.cfg.progress:
            # verbosity == 0: 'xxxx/xxxx (xxx.x%)'
            # verbosity == 1: 'xxxx/xxxx (xxx.x%): test name'
            # verbosity >= 2: 'xxxx/xxxx (xxx.x%): test name ... ok'
            n = self.testsRun + 1
            self.stream.write("\r%4d" % n)
            if self.count:
                self.stream.write("/%d (%5.1f%%)"
                                  % (self.count, n * 100.0 / self.count))
            if self.showAll:  # self.cfg.verbosity == 1
                self.stream.write(": ")
            elif self.cfg.verbosity:
                name = self.getShortDescription(test)
                width = len(name)
                if width < self._lastWidth:
                    # pad with spaces to overwrite the previous, longer name
                    name += " " * (self._lastWidth - width)
                self.stream.write(": %s" % name)
                self._lastWidth = width
            self.stream.flush()
        self.__super_startTest(test)
        for hook in self.hooks:
            hook.startTest(test)

    def stopTest(self, test):
        for hook in self.hooks:
            hook.stopTest(test)
        self.__super_stopTest(test)

    def getShortDescription(self, test):
        s = self.getDescription(test)
        if len(s) > self._maxWidth:
            # s is 'testname (package.module.class)'
            # try to shorten it to 'testname (...age.module.class)'
            # if it is still too long, shorten it to 'testnam...'
            # limit case is 'testname (...)'
            pos = s.find(" (")
            if pos + len(" (...)") > self._maxWidth:
                s = s[:self._maxWidth - 3] + "..."
            else:
                s = "%s...%s" % (s[:pos + 2], s[pos + 5 - self._maxWidth:])
        return s

    def printErrors(self):
        if self.cfg.progress and not (self.dots or self.showAll):
            self.stream.writeln()
        self.__super_printErrors()

    def formatError(self, err):
        return "".join(traceback.format_exception(*err))

    def printTraceback(self, kind, test, err):
        self.stream.writeln()
        self.stream.writeln()
        self.stream.writeln("%s: %s" % (kind, test))
        self.stream.writeln(self.formatError(err))
        self.stream.writeln()

    def addFailure(self, test, err):
        if self.cfg.immediate_errors:
            self.printTraceback("FAIL", test, err)
        self.failures.append((test, self.formatError(err)))

    def addError(self, test, err):
        if self.cfg.immediate_errors:
            self.printTraceback("ERROR", test, err)
        self.errors.append((test, self.formatError(err)))


class CustomTestRunner(unittest.TextTestRunner):
    """Customised TestRunner.

    See CustomisedTextResult for a list of extensions.
    """

    __super = unittest.TextTestRunner
    __super_init = __super.__init__
    __super_run = __super.run

    def __init__(self, cfg, hooks=None):
        self.__super_init(verbosity=cfg.verbosity)
        self.cfg = cfg
        if hooks is not None:
            self.hooks = hooks
        else:
            self.hooks = []

    def run(self, test):
        """Run the given test case or test suite."""
        self.count = test.countTestCases()
        result = self._makeResult()
        startTime = time.time()
        test(result)
        stopTime = time.time()
        timeTaken = float(stopTime - startTime)
        result.printErrors()
        run = result.testsRun
        if not self.cfg.quiet:
            self.stream.writeln(result.separator2)
            self.stream.writeln("Ran %d test%s in %.3fs" %
                                (run, run != 1 and "s" or "", timeTaken))
            self.stream.writeln()
        if not result.wasSuccessful():
            self.stream.write("FAILED (")
            failed, errored = list(map(len, (result.failures, result.errors)))
            if failed:
                self.stream.write("failures=%d" % failed)
            if errored:
                if failed:
                    self.stream.write(", ")
                self.stream.write("errors=%d" % errored)
            self.stream.writeln(")")
        elif not self.cfg.quiet:
            self.stream.writeln("OK")
        return result

    def _makeResult(self):
        return CustomTestResult(self.stream, self.descriptions,
                                self.verbosity, cfg=self.cfg,
                                count=self.count, hooks=self.hooks)


def main(argv):
    """Main program."""
    # Environment
    if sys.version_info < (2, 7):
        stderr('%s: need Python 2.7 or later' % argv[0])
        stderr('your python is %s' % sys.version)
        return 1

    # Defaults
    cfg = Options()
    cfg.basedir = os.path.join(os.path.dirname(argv[0]), 'src')
    cfg.basedir = os.path.abspath(cfg.basedir)

    # Figure out terminal size
    try:
        import curses
    except ImportError:
        pass
    else:
        try:
            curses.setupterm()
            cols = curses.tigetnum('cols')
            if cols > 0:
                cfg.screen_width = cols
        except (curses.error, TypeError):
            # tigetnum() is broken in PyPy3 and raises TypeError
            pass

    # Option processing
    opts, args = getopt.gnu_getopt(argv[1:], 'hvpqufw',
                                   ['list-files', 'list-tests', 'list-hooks',
                                    'level=', 'all-levels', 'coverage'])
    for k, v in opts:
        if k == '-h':
            print(__doc__)
            return 0
        elif k == '-v':
            cfg.verbosity += 1
            cfg.quiet = False
        elif k == '-p':
            cfg.progress = True
            cfg.quiet = False
        elif k == '-q':
            cfg.verbosity = 0
            cfg.progress = False
            cfg.quiet = True
        elif k == '-u':
            cfg.unit_tests = True
        elif k == '-f':
            cfg.functional_tests = True
        elif k == '-w':
            cfg.warn_omitted = True
        elif k == '--list-files':
            cfg.list_files = True
            cfg.run_tests = False
        elif k == '--list-tests':
            cfg.list_tests = True
            cfg.run_tests = False
        elif k == '--list-hooks':
            cfg.list_hooks = True
            cfg.run_tests = False
        elif k == '--coverage':
            cfg.coverage = True
        elif k == '--level':
            try:
                cfg.level = int(v)
            except ValueError:
                stderr('%s: invalid level: %s' % (argv[0], v))
                # BUG FIX: the format string was never interpolated and the
                # literal 'run %s -h for help' was printed.
                stderr('run %s -h for help' % argv[0])
                return 1
        elif k == '--all-levels':
            cfg.level = None
        else:
            stderr('%s: invalid option: %s' % (argv[0], k))
            stderr('run %s -h for help' % argv[0])
            return 1
    if args:
        cfg.pathname_regex = args[0]
    if len(args) > 1:
        cfg.test_regex = args[1]
    if len(args) > 2:
        stderr('%s: too many arguments: %s' % (argv[0], args[2]))
        stderr('run %s -h for help' % argv[0])
        return 1
    if not cfg.unit_tests and not cfg.functional_tests:
        cfg.unit_tests = True

    # Set up the python path
    sys.path[0] = cfg.basedir

    # Set up tracing before we start importing things
    cov = None
    if cfg.run_tests and cfg.coverage:
        from coverage import coverage
        cov = coverage(omit=['test.py'])

    # Finding and importing
    test_files = get_test_files(cfg)
    if cov is not None:
        cov.start()
    if cfg.list_tests or cfg.run_tests:
        test_cases = get_test_cases(test_files, cfg, cov=cov)
    if cfg.list_hooks or cfg.run_tests:
        test_hooks = get_test_hooks(test_files, cfg, cov=cov)

    # Configure the logging module
    import logging
    logging.basicConfig()
    logging.root.setLevel(logging.CRITICAL)

    # Running
    success = True
    if cfg.list_files:
        baselen = len(cfg.basedir) + 1
        print("\n".join([fn[baselen:] for fn in test_files]))
    if cfg.list_tests:
        print("\n".join([test.id() for test in test_cases]))
    if cfg.list_hooks:
        print("\n".join([str(hook) for hook in test_hooks]))
    if cfg.run_tests:
        runner = CustomTestRunner(cfg, test_hooks)
        suite = unittest.TestSuite()
        suite.addTests(test_cases)
        if cov is not None:
            cov.start()
        run_result = runner.run(suite)
        if cov is not None:
            cov.stop()
        success = run_result.wasSuccessful()
        del run_result

    if cov is not None:
        traced_file_types = ('.py', '.pyx', '.pxi', '.pxd')
        modules = []

        def add_file(_, path, files):
            # Skip test packages themselves when collecting files to report.
            if 'tests' in os.path.relpath(path, cfg.basedir).split(os.sep):
                return
            for filename in files:
                if filename.endswith(traced_file_types):
                    modules.append(os.path.join(path, filename))

        if cfg.follow_symlinks:
            walker = walk_with_symlinks
        else:
            # BUG FIX: os.path.walk does not exist on Python 3.
            walker = walk_without_symlinks
        walker(os.path.abspath(cfg.basedir), add_file, None)

        try:
            cov.xml_report(modules, outfile='coverage.xml')
            if cfg.coverdir:
                cov.html_report(modules, directory=cfg.coverdir)
        finally:
            # test runs can take a while, so at least try to print something
            cov.report()

    # That's all
    if success:
        return 0
    else:
        return 1


if __name__ == '__main__':
    exitcode = main(sys.argv)
    sys.exit(exitcode)
"""Binary sensor platform for the MyChevy integration."""
from homeassistant.components.binary_sensor import (
    DOMAIN as BINARY_SENSOR_DOMAIN,
    BinarySensorEntity,
)
from homeassistant.core import callback
from homeassistant.util import slugify

from . import DOMAIN as MYCHEVY_DOMAIN, UPDATE_TOPIC, EVBinarySensorConfig

# One config per exposed binary sensor: (friendly name, car attribute read
# by the entity, device class).
SENSORS = [EVBinarySensorConfig("Plugged In", "plugged_in", "plug")]


async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the MyChevy sensors."""
    if discovery_info is None:
        # Platform is only set up through discovery by the hub component.
        return

    sensors = []
    hub = hass.data[MYCHEVY_DOMAIN]
    # One entity per (sensor config, car) pair.
    for sconfig in SENSORS:
        for car in hub.cars:
            sensors.append(EVBinarySensor(hub, sconfig, car.vid))

    async_add_entities(sensors)


class EVBinarySensor(BinarySensorEntity):
    """Base EVSensor class.

    The only real difference between sensors is which units and what
    attribute from the car object they are returning. All logic can be
    built with just setting subclass attributes.
    """

    def __init__(self, connection, config, car_vid):
        """Initialize sensor with car connection."""
        self._conn = connection
        self._name = config.name
        self._attr = config.attr
        self._type = config.device_class
        self._is_on = None
        self._car_vid = car_vid
        self.entity_id = f"{BINARY_SENSOR_DOMAIN}.{MYCHEVY_DOMAIN}_{slugify(self._car.name)}_{slugify(self._name)}"

    @property
    def name(self):
        """Return the name."""
        return self._name

    @property
    def is_on(self):
        """Return if on."""
        return self._is_on

    @property
    def _car(self):
        """Return the car object looked up from the hub by its vid."""
        return self._conn.get_car(self._car_vid)

    async def async_added_to_hass(self):
        """Register callbacks."""
        # Unsubscribe from the dispatcher automatically on entity removal.
        self.async_on_remove(
            self.hass.helpers.dispatcher.async_dispatcher_connect(
                UPDATE_TOPIC, self.async_update_callback
            )
        )

    @callback
    def async_update_callback(self):
        """Update state."""
        if self._car is not None:
            # Missing attributes fall back to None (state "unknown").
            self._is_on = getattr(self._car, self._attr, None)
            self.async_write_ha_state()

    @property
    def should_poll(self):
        """Return the polling state."""
        # State is pushed via the dispatcher signal; polling not needed.
        return False
"""Validate integration services.yaml files (hassfest check)."""
import pathlib
import re
from typing import Dict

import voluptuous as vol
from voluptuous.humanize import humanize_error

from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_validation as cv
from homeassistant.util.yaml import load_yaml

from .model import Integration


def exists(value):
    """Check if value exists."""
    if value is None:
        raise vol.Invalid("Value cannot be None")
    return value


# Schema for one field of a service description in services.yaml.
FIELD_SCHEMA = vol.Schema(
    {
        vol.Required("description"): str,
        vol.Optional("example"): exists,
        vol.Optional("default"): exists,
        vol.Optional("values"): exists,
        vol.Optional("required"): bool,
    }
)

# Schema for one service entry.
SERVICE_SCHEMA = vol.Schema(
    {
        vol.Required("description"): str,
        vol.Optional("fields"): vol.Schema({str: FIELD_SCHEMA}),
    }
)

# Top-level schema: service slug -> service definition.
SERVICES_SCHEMA = vol.Schema({cv.slug: SERVICE_SCHEMA})


def grep_dir(path: pathlib.Path, glob_pattern: str, search_pattern: str) -> bool:
    """Recursively go through a dir and its children and find the regex."""
    pattern = re.compile(search_pattern)

    for fil in path.glob(glob_pattern):
        if not fil.is_file():
            continue

        if pattern.search(fil.read_text()):
            return True

    return False


def validate_services(integration: Integration):
    """Validate the services.yaml file of a single integration."""
    # Find if integration uses services by grepping for service
    # registration calls in its Python sources.
    has_services = grep_dir(
        integration.path, "**/*.py", r"hass\.services\.(register|async_register)"
    )

    if not has_services:
        return

    try:
        data = load_yaml(str(integration.path / "services.yaml"))
    except FileNotFoundError:
        integration.add_error("services", "Registers services but has no services.yaml")
        return
    except HomeAssistantError:
        integration.add_error(
            "services", "Registers services but unable to load services.yaml"
        )
        return

    try:
        SERVICES_SCHEMA(data)
    except vol.Invalid as err:
        integration.add_error(
            "services", f"Invalid services.yaml: {humanize_error(data, err)}"
        )


def validate(integrations: Dict[str, Integration], config):
    """Validate services.yaml files for all integrations."""
    # check services.yaml is cool
    for integration in integrations.values():
        # Skip integrations without a parsed manifest (presumably already
        # reported by another hassfest check — TODO confirm).
        if not integration.manifest:
            continue

        validate_services(integration)
"""ONVIF device abstraction for Home Assistant."""
import asyncio
import datetime as dt
import os
from typing import List

from httpx import RequestError
import onvif
from onvif import ONVIFCamera
from onvif.exceptions import ONVIFError
from zeep.exceptions import Fault

from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
    CONF_HOST,
    CONF_NAME,
    CONF_PASSWORD,
    CONF_PORT,
    CONF_USERNAME,
)
from homeassistant.core import HomeAssistant
import homeassistant.util.dt as dt_util

from .const import (
    ABSOLUTE_MOVE,
    CONTINUOUS_MOVE,
    GOTOPRESET_MOVE,
    LOGGER,
    PAN_FACTOR,
    RELATIVE_MOVE,
    TILT_FACTOR,
    ZOOM_FACTOR,
)
from .event import EventManager
from .models import PTZ, Capabilities, DeviceInfo, Profile, Resolution, Video


class ONVIFDevice:
    """Manages an ONVIF device."""

    def __init__(self, hass: HomeAssistant, config_entry: ConfigEntry = None):
        """Initialize the device."""
        self.hass: HomeAssistant = hass
        self.config_entry: ConfigEntry = config_entry
        self.available: bool = True

        self.device: ONVIFCamera = None
        self.events: EventManager = None

        self.info: DeviceInfo = DeviceInfo()
        self.capabilities: Capabilities = Capabilities()
        self.profiles: List[Profile] = []
        self.max_resolution: int = 0

        # Camera-vs-system clock skew measured by async_check_date_and_time.
        self._dt_diff_seconds: int = 0

    @property
    def name(self) -> str:
        """Return the name of this device."""
        return self.config_entry.data[CONF_NAME]

    @property
    def host(self) -> str:
        """Return the host of this device."""
        return self.config_entry.data[CONF_HOST]

    @property
    def port(self) -> int:
        """Return the port of this device."""
        return self.config_entry.data[CONF_PORT]

    @property
    def username(self) -> str:
        """Return the username of this device."""
        # NOTE: return annotation corrected from `int` — config entry
        # credentials are strings.
        return self.config_entry.data[CONF_USERNAME]

    @property
    def password(self) -> str:
        """Return the password of this device."""
        # NOTE: return annotation corrected from `int` — config entry
        # credentials are strings.
        return self.config_entry.data[CONF_PASSWORD]

    async def async_setup(self) -> bool:
        """Set up the device.

        Returns False when the device answered but setup is impossible
        (bad credentials, no H264 profiles); returns True otherwise —
        including the unreachable case, where `available` is set to False
        so a retry can happen later.
        """
        self.device = get_device(
            self.hass,
            host=self.config_entry.data[CONF_HOST],
            port=self.config_entry.data[CONF_PORT],
            username=self.config_entry.data[CONF_USERNAME],
            password=self.config_entry.data[CONF_PASSWORD],
        )

        # Get all device info
        try:
            await self.device.update_xaddrs()
            await self.async_check_date_and_time()

            # Create event manager
            self.events = EventManager(
                self.hass, self.device, self.config_entry.unique_id
            )

            # Fetch basic device info and capabilities
            self.info = await self.async_get_device_info()
            self.capabilities = await self.async_get_capabilities()
            self.profiles = await self.async_get_profiles()

            # No camera profiles to add
            if not self.profiles:
                return False

            if self.capabilities.ptz:
                self.device.create_ptz_service()

            # Determine max resolution from profiles
            self.max_resolution = max(
                profile.video.resolution.width
                for profile in self.profiles
                if profile.video.encoding == "H264"
            )
        except RequestError as err:
            LOGGER.warning(
                "Couldn't connect to camera '%s', but will retry later. Error: %s",
                self.name,
                err,
            )
            self.available = False
        except Fault as err:
            LOGGER.error(
                "Couldn't connect to camera '%s', please verify "
                "that the credentials are correct. Error: %s",
                self.name,
                err,
            )
            return False

        return True

    async def async_stop(self, event=None):
        """Shut it all down."""
        if self.events:
            await self.events.async_stop()
        await self.device.close()

    async def async_check_date_and_time(self) -> None:
        """Warns if device and system date not synced."""
        LOGGER.debug("Setting up the ONVIF device management service")
        device_mgmt = self.device.create_devicemgmt_service()

        LOGGER.debug("Retrieving current device date/time")
        try:
            system_date = dt_util.utcnow()
            device_time = await device_mgmt.GetSystemDateAndTime()
            if not device_time:
                LOGGER.debug(
                    """Couldn't get device '%s' date/time.
GetSystemDateAndTime() return null/empty""",
                    self.name,
                )
                return

            # Prefer the device's UTC time; fall back to its local time
            # plus reported (or default) time zone.
            if device_time.UTCDateTime:
                tzone = dt_util.UTC
                cdate = device_time.UTCDateTime
            else:
                tzone = (
                    dt_util.get_time_zone(device_time.TimeZone)
                    or dt_util.DEFAULT_TIME_ZONE
                )
                cdate = device_time.LocalDateTime

            if cdate is None:
                LOGGER.warning("Could not retrieve date/time on this camera")
            else:
                cam_date = dt.datetime(
                    cdate.Date.Year,
                    cdate.Date.Month,
                    cdate.Date.Day,
                    cdate.Time.Hour,
                    cdate.Time.Minute,
                    cdate.Time.Second,
                    0,
                    tzone,
                )

                cam_date_utc = cam_date.astimezone(dt_util.UTC)

                LOGGER.debug(
                    "Device date/time: %s | System date/time: %s",
                    cam_date_utc,
                    system_date,
                )

                dt_diff = cam_date - system_date
                self._dt_diff_seconds = dt_diff.total_seconds()

                # Only warns when the camera clock is *ahead* of the system
                # clock by more than 5 seconds (diff is signed).
                if self._dt_diff_seconds > 5:
                    LOGGER.warning(
                        "The date/time on the device (UTC) is '%s', "
                        "which is different from the system '%s', "
                        "this could lead to authentication issues",
                        cam_date_utc,
                        system_date,
                    )
        except RequestError as err:
            LOGGER.warning(
                "Couldn't get device '%s' date/time. Error: %s", self.name, err
            )

    async def async_get_device_info(self) -> DeviceInfo:
        """Obtain information about this device."""
        device_mgmt = self.device.create_devicemgmt_service()
        device_info = await device_mgmt.GetDeviceInformation()

        # Grab the last MAC address for backwards compatibility
        mac = None
        try:
            network_interfaces = await device_mgmt.GetNetworkInterfaces()
            for interface in network_interfaces:
                if interface.Enabled:
                    mac = interface.Info.HwAddress
        except Fault as fault:
            # GetNetworkInterfaces is optional; only tolerate the
            # "not implemented" fault, re-raise anything else.
            if "not implemented" not in fault.message:
                raise fault

            LOGGER.debug(
                "Couldn't get network interfaces from ONVIF device '%s'. Error: %s",
                self.name,
                fault,
            )

        return DeviceInfo(
            device_info.Manufacturer,
            device_info.Model,
            device_info.FirmwareVersion,
            device_info.SerialNumber,
            mac,
        )

    async def async_get_capabilities(self) -> Capabilities:
        """Obtain information about the available services on the device."""
        snapshot = False
        try:
            media_service = self.device.create_media_service()
            media_capabilities = await media_service.GetServiceCapabilities()
            snapshot = media_capabilities and media_capabilities.SnapshotUri
        except (ONVIFError, Fault, RequestError):
            pass

        pullpoint = False
        try:
            pullpoint = await self.events.async_start()
        except (ONVIFError, Fault):
            pass

        ptz = False
        try:
            self.device.get_definition("ptz")
            ptz = True
        except ONVIFError:
            pass

        return Capabilities(snapshot, pullpoint, ptz)

    async def async_get_profiles(self) -> List[Profile]:
        """Obtain media profiles for this device."""
        media_service = self.device.create_media_service()
        result = await media_service.GetProfiles()
        profiles = []

        if not isinstance(result, list):
            return profiles

        for key, onvif_profile in enumerate(result):
            # Only add H264 profiles
            if (
                not onvif_profile.VideoEncoderConfiguration
                or onvif_profile.VideoEncoderConfiguration.Encoding != "H264"
            ):
                continue

            profile = Profile(
                key,
                onvif_profile.token,
                onvif_profile.Name,
                Video(
                    onvif_profile.VideoEncoderConfiguration.Encoding,
                    Resolution(
                        onvif_profile.VideoEncoderConfiguration.Resolution.Width,
                        onvif_profile.VideoEncoderConfiguration.Resolution.Height,
                    ),
                ),
            )

            # Configure PTZ options
            if self.capabilities.ptz and onvif_profile.PTZConfiguration:
                # NOTE: "PantTilt" below matches the (misspelled) field name
                # in the ONVIF schema itself — do not "fix" it.
                profile.ptz = PTZ(
                    onvif_profile.PTZConfiguration.DefaultContinuousPanTiltVelocitySpace
                    is not None,
                    onvif_profile.PTZConfiguration.DefaultRelativePanTiltTranslationSpace
                    is not None,
                    onvif_profile.PTZConfiguration.DefaultAbsolutePantTiltPositionSpace
                    is not None,
                )

                try:
                    ptz_service = self.device.create_ptz_service()
                    presets = await ptz_service.GetPresets(profile.token)
                    profile.ptz.presets = [preset.token for preset in presets if preset]
                except (Fault, RequestError):
                    # It's OK if Presets aren't supported
                    profile.ptz.presets = []

            profiles.append(profile)

        return profiles

    async def async_get_stream_uri(self, profile: Profile) -> str:
        """Get the stream URI for a specified profile."""
        media_service = self.device.create_media_service()
        req = media_service.create_type("GetStreamUri")
        req.ProfileToken = profile.token
        req.StreamSetup = {
            "Stream": "RTP-Unicast",
            "Transport": {"Protocol": "RTSP"},
        }
        result = await media_service.GetStreamUri(req)
        return result.Uri

    async def async_perform_ptz(
        self,
        profile: Profile,
        distance,
        speed,
        move_mode,
        continuous_duration,
        preset,
        pan=None,
        tilt=None,
        zoom=None,
    ):
        """Perform a PTZ action on the camera."""
        if not self.capabilities.ptz:
            LOGGER.warning("PTZ actions are not supported on device '%s'", self.name)
            return

        ptz_service = self.device.create_ptz_service()

        # Translate direction names into signed axis values scaled by
        # the requested distance.
        pan_val = distance * PAN_FACTOR.get(pan, 0)
        tilt_val = distance * TILT_FACTOR.get(tilt, 0)
        zoom_val = distance * ZOOM_FACTOR.get(zoom, 0)
        speed_val = speed
        preset_val = preset
        LOGGER.debug(
            "Calling %s PTZ | Pan = %4.2f | Tilt = %4.2f | Zoom = %4.2f | Speed = %4.2f | Preset = %s",
            move_mode,
            pan_val,
            tilt_val,
            zoom_val,
            speed_val,
            preset_val,
        )
        try:
            req = ptz_service.create_type(move_mode)
            req.ProfileToken = profile.token
            if move_mode == CONTINUOUS_MOVE:
                # Guard against unsupported operation
                if not profile.ptz.continuous:
                    LOGGER.warning(
                        "ContinuousMove not supported on device '%s'", self.name
                    )
                    return

                req.Velocity = {
                    "PanTilt": {"x": pan_val, "y": tilt_val},
                    "Zoom": {"x": zoom_val},
                }

                # Move for the requested duration, then explicitly stop.
                await ptz_service.ContinuousMove(req)
                await asyncio.sleep(continuous_duration)
                req = ptz_service.create_type("Stop")
                req.ProfileToken = profile.token
                await ptz_service.Stop(
                    {"ProfileToken": req.ProfileToken, "PanTilt": True, "Zoom": False}
                )
            elif move_mode == RELATIVE_MOVE:
                # Guard against unsupported operation
                if not profile.ptz.relative:
                    LOGGER.warning(
                        "RelativeMove not supported on device '%s'", self.name
                    )
                    return

                req.Translation = {
                    "PanTilt": {"x": pan_val, "y": tilt_val},
                    "Zoom": {"x": zoom_val},
                }
                req.Speed = {
                    "PanTilt": {"x": speed_val, "y": speed_val},
                    "Zoom": {"x": speed_val},
                }
                await ptz_service.RelativeMove(req)
            elif move_mode == ABSOLUTE_MOVE:
                # Guard against unsupported operation
                if not profile.ptz.absolute:
                    LOGGER.warning(
                        "AbsoluteMove not supported on device '%s'", self.name
                    )
                    return

                req.Position = {
                    "PanTilt": {"x": pan_val, "y": tilt_val},
                    "Zoom": {"x": zoom_val},
                }
                req.Speed = {
                    "PanTilt": {"x": speed_val, "y": speed_val},
                    "Zoom": {"x": speed_val},
                }
                await ptz_service.AbsoluteMove(req)
            elif move_mode == GOTOPRESET_MOVE:
                # Guard against unsupported operation
                if preset_val not in profile.ptz.presets:
                    LOGGER.warning(
                        "PTZ preset '%s' does not exist on device '%s'. Available Presets: %s",
                        preset_val,
                        self.name,
                        ", ".join(profile.ptz.presets),
                    )
                    return

                req.PresetToken = preset_val
                req.Speed = {
                    "PanTilt": {"x": speed_val, "y": speed_val},
                    "Zoom": {"x": speed_val},
                }
                await ptz_service.GotoPreset(req)
        except ONVIFError as err:
            if "Bad Request" in err.reason:
                LOGGER.warning("Device '%s' doesn't support PTZ.", self.name)
            else:
                LOGGER.error("Error trying to perform PTZ action: %s", err)


def get_device(hass, host, port, username, password) -> ONVIFCamera:
    """Get ONVIFCamera instance."""
    return ONVIFCamera(
        host,
        port,
        username,
        password,
        f"{os.path.dirname(onvif.__file__)}/wsdl/",
        no_cache=True,
    )
"""Clean ECG and EOG artifacts from raw FIF data using mne_process_raw."""
import sys

import mne


def clean_ecg_eog(in_fif_fname, out_fif_fname=None, eog=True, ecg=True,
                  ecg_proj_fname=None, eog_proj_fname=None,
                  ecg_event_fname=None, eog_event_fname=None, in_path='.',
                  quiet=False):
    """Clean ECG from raw fif file.

    Parameters
    ----------
    in_fif_fname : str
        Raw fif File
    out_fif_fname : str | None
        Output file name.  Defaults to ``<prefix>_clean_ecg_eog_raw.fif``.
    eog_event_fname : str
        name of EOG event file required.
    eog : bool
        Reject or not EOG artifacts.
    ecg : bool
        Reject or not ECG artifacts.
    ecg_event_fname : str
        name of ECG event file required.
    ecg_proj_fname : str | None
        ECG projector file name (defaults to ``<prefix>_ecg-proj.fif``).
    eog_proj_fname : str | None
        EOG projector file name (defaults to ``<prefix>_eog-proj.fif``).
    in_path : str
        Path where all the files are.
    quiet : bool
        If True, suppress the output of the mne_process_raw subprocesses.
    """
    if not eog and not ecg:
        raise Exception("EOG and ECG cannot be both disabled")

    # Reading fif File
    raw_in = mne.io.read_raw_fif(in_fif_fname)

    # Derive default output names from the input name, stripping a
    # '_raw.fif' / '-raw.fif' suffix when present (otherwise just '.fif').
    if in_fif_fname.endswith('_raw.fif') or in_fif_fname.endswith('-raw.fif'):
        prefix = in_fif_fname[:-8]
    else:
        prefix = in_fif_fname[:-4]

    if out_fif_fname is None:
        out_fif_fname = prefix + '_clean_ecg_eog_raw.fif'
    if ecg_proj_fname is None:
        ecg_proj_fname = prefix + '_ecg-proj.fif'
    if eog_proj_fname is None:
        eog_proj_fname = prefix + '_eog-proj.fif'
    if ecg_event_fname is None:
        ecg_event_fname = prefix + '_ecg-eve.fif'
    if eog_event_fname is None:
        eog_event_fname = prefix + '_eog-eve.fif'

    print('Implementing ECG and EOG artifact rejection on data')

    # quiet -> no extra kwargs, so run_subprocess keeps the subprocess
    # output to itself; otherwise pass stdout/stderr=None so the
    # mne_process_raw output is shown on the terminal.
    kwargs = dict() if quiet else dict(stdout=None, stderr=None)

    if ecg:
        ecg_events, _, _ = mne.preprocessing.find_ecg_events(
            raw_in, reject_by_annotation=True)
        print("Writing ECG events in %s" % ecg_event_fname)
        mne.write_events(ecg_event_fname, ecg_events)
        print('Computing ECG projector')
        command = ('mne_process_raw', '--cd', in_path, '--raw', in_fif_fname,
                   '--events', ecg_event_fname, '--makeproj',
                   '--projtmin', '-0.08', '--projtmax', '0.08',
                   '--saveprojtag', '_ecg-proj', '--projnmag', '2',
                   '--projngrad', '1', '--projevent', '999', '--highpass', '5',
                   '--lowpass', '35', '--projmagrej', '4000',
                   '--projgradrej', '3000')
        mne.utils.run_subprocess(command, **kwargs)

    if eog:
        eog_events = mne.preprocessing.find_eog_events(raw_in)
        print("Writing EOG events in %s" % eog_event_fname)
        mne.write_events(eog_event_fname, eog_events)
        print('Computing EOG projector')
        command = ('mne_process_raw', '--cd', in_path, '--raw', in_fif_fname,
                   '--events', eog_event_fname, '--makeproj',
                   '--projtmin', '-0.15', '--projtmax', '0.15',
                   '--saveprojtag', '_eog-proj', '--projnmag', '2',
                   '--projngrad', '2', '--projevent', '998', '--lowpass', '35',
                   '--projmagrej', '4000', '--projgradrej', '3000')
        mne.utils.run_subprocess(command, **kwargs)

    # NOTE(review): out_fif_fname was defaulted above when None, so this
    # condition is always true here and the else branch below is
    # unreachable; kept as-is to preserve the original control flow.
    if out_fif_fname is not None:
        # Applying the ECG EOG projector
        print('Applying ECG EOG projector')
        command = ('mne_process_raw', '--cd', in_path, '--raw', in_fif_fname,
                   '--proj', in_fif_fname, '--projoff', '--save',
                   out_fif_fname, '--filteroff',
                   '--proj', ecg_proj_fname, '--proj', eog_proj_fname)
        mne.utils.run_subprocess(command, **kwargs)
        print('Done removing artifacts.')
        print("Cleaned raw data saved in: %s" % out_fif_fname)
        print('IMPORTANT : Please eye-ball the data !!')
    else:
        print('Projection not applied to raw data.')


def run():
    """Run command."""
    from mne.commands.utils import get_optparser

    parser = get_optparser(__file__)

    parser.add_option("-i", "--in", dest="raw_in",
                      help="Input raw FIF file", metavar="FILE")
    parser.add_option("-o", "--out", dest="raw_out",
                      help="Output raw FIF file", metavar="FILE",
                      default=None)
    parser.add_option("-e", "--no-eog", dest="eog", action="store_false",
                      help="Remove EOG", default=True)
    parser.add_option("-c", "--no-ecg", dest="ecg", action="store_false",
                      help="Remove ECG", default=True)
    parser.add_option("-q", "--quiet", dest="quiet", action="store_true",
                      help="Suppress mne_process_raw output", default=False)

    options, args = parser.parse_args()

    if options.raw_in is None:
        parser.print_help()
        sys.exit(1)

    raw_in = options.raw_in
    raw_out = options.raw_out
    eog = options.eog
    ecg = options.ecg
    quiet = options.quiet

    clean_ecg_eog(raw_in, raw_out, eog=eog, ecg=ecg, quiet=quiet)


mne.utils.run_command_if_main()
from datetime import timedelta
import logging

import aiodns
from aiodns.error import DNSError
import voluptuous as vol

from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity

_LOGGER = logging.getLogger(__name__)

CONF_HOSTNAME = "hostname"
CONF_IPV6 = "ipv6"
CONF_RESOLVER = "resolver"
CONF_RESOLVER_IPV6 = "resolver_ipv6"

# Resolving the default hostname against the default (OpenDNS) resolvers
# returns the querying host's public IP address.
DEFAULT_HOSTNAME = "myip.opendns.com"
DEFAULT_IPV6 = False
DEFAULT_NAME = "myip"
DEFAULT_RESOLVER = "208.67.222.222"
DEFAULT_RESOLVER_IPV6 = "2620:0:ccc::2"

# Poll the DNS record every two minutes.
SCAN_INTERVAL = timedelta(seconds=120)

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Optional(CONF_NAME): cv.string,
        vol.Optional(CONF_HOSTNAME, default=DEFAULT_HOSTNAME): cv.string,
        vol.Optional(CONF_RESOLVER, default=DEFAULT_RESOLVER): cv.string,
        vol.Optional(CONF_RESOLVER_IPV6, default=DEFAULT_RESOLVER_IPV6): cv.string,
        vol.Optional(CONF_IPV6, default=DEFAULT_IPV6): cv.boolean,
    }
)


async def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
    """Set up the DNS IP sensor from a YAML platform config."""
    hostname = config[CONF_HOSTNAME]
    name = config.get(CONF_NAME)
    # Without an explicit name, fall back to "myip" for the default hostname,
    # otherwise use the hostname itself.
    if not name:
        if hostname == DEFAULT_HOSTNAME:
            name = DEFAULT_NAME
        else:
            name = hostname
    ipv6 = config[CONF_IPV6]
    if ipv6:
        resolver = config[CONF_RESOLVER_IPV6]
    else:
        resolver = config[CONF_RESOLVER]

    # True -> request an immediate first update before adding the entity.
    async_add_devices([WanIpSensor(hass, name, hostname, resolver, ipv6)], True)


class WanIpSensor(Entity):
    """Implementation of a DNS IP sensor.

    Resolves ``hostname`` against a single configured nameserver and exposes
    the first returned address as the entity state.
    """

    def __init__(self, hass, name, hostname, resolver, ipv6):
        """Initialize the DNS IP sensor."""
        self.hass = hass
        self._name = name
        self.hostname = hostname
        # Dedicated resolver pinned to exactly one nameserver.
        self.resolver = aiodns.DNSResolver()
        self.resolver.nameservers = [resolver]
        # AAAA for IPv6 lookups, A for IPv4.
        self.querytype = "AAAA" if ipv6 else "A"
        self._state = None

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def state(self):
        """Return the current DNS IP address for hostname."""
        return self._state

    async def async_update(self):
        """Get the current DNS IP address for hostname."""
        try:
            response = await self.resolver.query(self.hostname, self.querytype)
        except DNSError as err:
            # Resolution failures are expected (network flaps); log and clear.
            _LOGGER.warning("Exception while resolving host: %s", err)
            response = None

        if response:
            # Only the first record of a possibly multi-record answer is used.
            self._state = response[0].host
        else:
            self._state = None
# NOTE: Python 2 module (StringIO module, generator.next(), nose runner).
import StringIO
import sys
import time

from nose import tools

from docker_registry.core import exceptions
import docker_registry.testing as testing
from docker_registry.testing import mock_boto  # noqa

from . import mock_s3  # noqa


class StringIOWithError(StringIO.StringIO):
    '''Throw IOError after reaching EOF.'''

    def read(self, size):
        # Simulates a stream that errors out once fully consumed, so the
        # storage driver's error path during streaming can be exercised.
        if self.pos == self.len:
            raise IOError('Reading beyond EOF')
        return StringIO.StringIO.read(self, size)


class TestDriver(testing.Driver):
    '''Extra tests for coverage completion.'''

    def __init__(self):
        # Configure the generic driver test harness for the s3 backend.
        self.scheme = 's3'
        self.path = ''
        self.config = testing.Config({})

    def tearDown(self):
        # Remove the mocked bucket so each test starts from a clean slate.
        self._storage._boto_bucket.delete()
        super(TestDriver, self).tearDown()

    @tools.raises(exceptions.FileNotFoundError)
    def test_list_bucket(self):
        # Add a couple of bucket keys
        filename1 = self.gen_random_string()
        filename2 = self.gen_random_string()
        content = self.gen_random_string()
        self._storage.put_content(filename1, content)
        # Check bucket key is stored in normalized form
        self._storage.put_content(filename2 + '/', content)
        # Check both keys are in the bucket
        assert sorted([filename1, filename2]) == sorted(
            list(self._storage.list_directory()))
        # Check listing bucket raises exception after removing keys
        self._storage.remove(filename1)
        self._storage.remove(filename2)
        s = self._storage.list_directory()
        s.next()

    def test_stream_write(self):
        # Check stream write with buffer bigger than default 5MB
        self._storage.buffer_size = 7 * 1024 * 1024
        filename = self.gen_random_string()
        # Test 8MB
        content = self.gen_random_string(8 * 1024 * 1024)
        io = StringIOWithError(content)
        assert not self._storage.exists(filename)
        try:
            self._storage.stream_write(filename, io)
        except IOError:
            pass
        assert self._storage.exists(filename)
        # Test that EOFed io string throws IOError on lib/storage/s3
        try:
            self._storage.stream_write(filename, io)
        except IOError:
            pass
        # Cleanup
        io.close()
        self._storage.remove(filename)
        # Restore the driver's default buffer size for subsequent tests.
        self._storage.buffer_size = 5 * 1024 * 1024
        assert not self._storage.exists(filename)

    def test_init_path(self):
        # s3 storage _init_path result keys are relative (no / at start)
        root_path = self._storage._root_path
        if root_path.startswith('/'):
            self._storage._root_path = root_path[1:]
            assert not self._storage._init_path().startswith('/')
            self._storage._root_path = root_path

    def test_debug_key(self):
        # Create a valid s3 key object to debug
        filename = self.gen_random_string()
        content = self.gen_random_string()
        self._storage.put_content(filename, content)

        # Get filename key path as stored
        key_path = self._storage._init_path(filename)
        key = self._storage._boto_bucket.lookup(key_path)
        self._storage._debug_key(key)

        # Capture debugged output
        saved_stdout = sys.stdout
        output = StringIO.StringIO()
        sys.stdout = output

        # As key is mocked for unittest purposes, we call make_request
        # directly
        dummy = "################\n('d', 1)\n{'v': 2}\n################\n"
        # '{}\n{}\n{}\n{}\n'.format(
        #     '#' * 16, ('d', 1), {'v': 2}, '#' * 16)
        result = self._storage._boto_bucket.connection.make_request(
            'd', 1, v=2)
        assert output.getvalue() == dummy
        assert result == 'request result'
        sys.stdout = saved_stdout

        # We don't call self._storage.remove(filename) here to ensure tearDown
        # cleanup properly and that other tests keep running as expected.

    # Validation test for docker-index#486
    def test_get_tags(self):
        store = self._storage
        store._root_path = 'my/custom/path'
        store._init_path()
        assert store._root_path == 'my/custom/path'

        tag_path = store.tag_path('test', 'test', '0.0.2')
        store.put_content(tag_path, 'randomdata')

        tags_path = store.tag_path('test', 'test')
        for fname in store.list_directory(tags_path):
            full_tag_name = fname.split('/').pop()
            if not full_tag_name == 'tag_0.0.2':
                continue
            try:
                store.get_content(fname)
            except exceptions.FileNotFoundError:
                pass
            except Exception as e:
                raise e
            else:
                # Reading the raw listing name must fail; only the normalized
                # tag_path form is expected to resolve below.
                assert False

        tag_content = store.get_content(tag_path)
        assert tag_content == 'randomdata'

    def test_consistency_latency(self):
        # Key "appears" only on the second exists() poll, emulating S3's
        # eventual consistency; the driver is expected to retry with a delay.
        self.testCount = -1
        mockKey = mock_boto.Key()

        def mockExists():
            self.testCount += 1
            return self.testCount == 1

        mockKey.exists = mockExists
        mockKey.get_contents_as_string = lambda: "Foo bar"
        self._storage.makeKey = lambda x: mockKey

        startTime = time.time()
        content = self._storage.get_content("/FOO")
        waitTime = time.time() - startTime
        assert waitTime >= 0.1, ("Waiting time was less than %sms "
                                 "(actual : %sms)"
                                 % (0.1 * 1000, waitTime * 1000))
        assert content == "Foo bar", ("expected : %s; actual: %s"
                                      % ("Foo bar", content))

    @tools.raises(exceptions.FileNotFoundError)
    def test_too_many_read_retries(self):
        # Key only "appears" on the fifth poll — beyond the driver's retry
        # budget — so get_content must give up with FileNotFoundError.
        self.testCount = -1
        mockKey = mock_boto.Key()

        def mockExists():
            self.testCount += 1
            return self.testCount == 5

        mockKey.exists = mockExists
        mockKey.get_contents_as_string = lambda: "Foo bar"
        self._storage.makeKey = lambda x: mockKey
        self._storage.get_content("/FOO")
"""Tests for perfkitbenchmarker.benchmark_sets: set expansion, user configs,
flag matrices/zips, filters, and flag precedence."""

import unittest

from absl import flags
import mock
from perfkitbenchmarker import benchmark_sets
from perfkitbenchmarker import configs
from perfkitbenchmarker import linux_benchmarks
# This import to ensure required FLAGS are defined.
from perfkitbenchmarker import pkb  # NOQA
import six
import yaml

FLAGS = flags.FLAGS
FLAGS.mark_as_parsed()

# Minimal user config: a named benchmark entry aliasing iperf.
USER_CONFIG = """
internal_iprf:
  name: iperf
  flags:
    ip_addresses: INTERNAL
"""

# 2x2 flag matrix -> cross product of machine types and zones.
MATRIX_CONFIG = """
netperf:
  flag_matrix: GCP
  flag_matrix_defs:
    GCP:
      machine_type: [n1-standard-1, n1-standard-4]
      zones: [us-central1-a, us-central1-b]
"""

EXPECTED_MATRIX_FLAGS = [
    {'machine_type': 'n1-standard-1', 'zones': 'us-central1-a'},
    {'machine_type': 'n1-standard-1', 'zones': 'us-central1-b'},
    {'machine_type': 'n1-standard-4', 'zones': 'us-central1-a'},
    {'machine_type': 'n1-standard-4', 'zones': 'us-central1-b'}
]

# flag_zip with axes of mismatched length (2 vs 3) — must raise ValueError.
ZIP_CONFIG_DIFFERENT_AXES_LENGTH = """
netperf:
  flags:
    gpu_type: k80
  flag_zip: GCP
  flag_zip_defs:
    GCP:
      machine_type: [n1-standard-4, n1-standard-8]
      gpu_count: [1, 2, 3]
"""

# flag_zip pairs axes element-wise (no cross product).
ZIP_CONFIG = """
netperf:
  flags:
    gpu_type: k80
  flag_zip: GCP
  flag_zip_defs:
    GCP:
      machine_type: [n1-standard-4, n1-standard-8]
      gpu_count: [1, 2]
"""

EXPECTED_ZIP_FLAGS = [
    {'machine_type': 'n1-standard-4', 'gpu_count': 1, 'gpu_type': 'k80'},
    {'machine_type': 'n1-standard-8', 'gpu_count': 2, 'gpu_type': 'k80'}
]

# Degenerate zip with a single axis behaves like simple enumeration.
SINGLE_ZIP_CONFIG = """
netperf:
  flags:
    gpu_type: k80
  flag_zip: GCP
  flag_zip_defs:
    GCP:
      machine_type: [n1-standard-4, n1-standard-8]
"""

EXPECTED_SINGLE_ZIP_FLAGS = [
    {'machine_type': 'n1-standard-4', 'gpu_type': 'k80'},
    {'machine_type': 'n1-standard-8', 'gpu_type': 'k80'}
]

# Zip (2 combos) crossed with a matrix axis (2 zones) -> 4 combos.
ZIP_AND_MATRIX_CONFIG = """
netperf:
  flags:
    gpu_type: k80
  flag_zip: GCP
  flag_matrix: GCP
  flag_zip_defs:
    GCP:
      machine_type: [n1-standard-4, n1-standard-8]
      gpu_count: [1, 2]
  flag_matrix_defs:
    GCP:
      zones: [us-central1-a, us-central1-b]
"""

EXPECTED_ZIP_AND_MATRIX_FLAGS = [
    {'zones': 'us-central1-a', 'gpu_type': 'k80',
     'machine_type': 'n1-standard-4', 'gpu_count': 1},
    {'zones': 'us-central1-b', 'gpu_type': 'k80',
     'machine_type': 'n1-standard-4', 'gpu_count': 1},
    {'zones': 'us-central1-b', 'gpu_type': 'k80',
     'machine_type': 'n1-standard-8', 'gpu_count': 2},
    {'zones': 'us-central1-a', 'gpu_type': 'k80',
     'machine_type': 'n1-standard-8', 'gpu_count': 2}
]

# A matrix filter expression keeps only one of the four combinations.
FILTER_CONFIG = """
netperf:
  flag_matrix: GCP
  flag_matrix_filters:
    GCP: "machine_type == 'n1-standard-1' and zones == 'us-central1-a'"
  flag_matrix_defs:
    GCP:
      machine_type: [n1-standard-1, n1-standard-4]
      zones: [us-central1-a, us-central1-b]
"""

# Precedence: matrix flags > per-benchmark flags > global flags.
FLAG_PRECEDENCE_CONFIG = """
flags:
  netperf_benchmarks: TCP_RR
  netperf_test_length: 30
  netperf_max_iter: 3
netperf:
  flags:
    netperf_benchmarks: UDP_RR
    netperf_test_length: 40
  flag_matrix: test_matrix
  flag_matrix_defs:
    test_matrix:
      netperf_benchmarks: [TCP_STREAM]
"""


class BenchmarkSetsTestCase(unittest.TestCase):

  def setUp(self):
    # create set of valid benchmark names from the benchmark directory
    self.valid_benchmark_names = set()
    for benchmark_module in linux_benchmarks.BENCHMARKS:
      self.valid_benchmark_names.add(benchmark_module.BENCHMARK_NAME)

    self.valid_benchmark_set_names = set()
    # include the benchmark_set names since these can also appear
    # as a valid name.  At runtime they get expanded.
    for benchmark_set_name in benchmark_sets.BENCHMARK_SETS:
      self.valid_benchmark_set_names.add(benchmark_set_name)

    # Mock flags to simulate setting --benchmarks.
    p = mock.patch(benchmark_sets.__name__ + '.FLAGS')
    self.mock_flags = p.start()
    self.addCleanup(p.stop)
    self.addCleanup(configs.GetConfigFlags.cache_clear)

    self.mock_flags.flag_matrix = None
    self.mock_flags.flag_zip = None
    self.mock_flags.num_benchmark_copies = 1

  def testStandardSet(self):
    self.assertIn(benchmark_sets.STANDARD_SET, benchmark_sets.BENCHMARK_SETS)
    standard_set = (benchmark_sets.BENCHMARK_SETS[
        benchmark_sets.STANDARD_SET])[benchmark_sets.BENCHMARK_LIST]
    self.assertIn('iperf', standard_set)
    self.assertIn('fio', standard_set)

  def testBenchmarkSetsHaveValidNames(self):
    # check all the benchmark sets to make sure they contain valid names
    valid_benchmark_and_set_names = (self.valid_benchmark_names |
                                     self.valid_benchmark_set_names)
    benchmark_set_items = list(benchmark_sets.BENCHMARK_SETS.items())
    for _, key_value in benchmark_set_items:
      benchmark_def_list = key_value[benchmark_sets.BENCHMARK_LIST]
      for benchmark_name in benchmark_def_list:
        self.assertIn(benchmark_name, valid_benchmark_and_set_names)

  def testBenchmarkDerivedSets(self):
    # make sure that sets which are derived from the standard_set
    # expands into a valid set of benchmarks
    with mock.patch.dict(
        benchmark_sets.BENCHMARK_SETS, {
            'test_derived_set': {
                benchmark_sets.MESSAGE: 'test derived benchmark set.',
                benchmark_sets.BENCHMARK_LIST: [benchmark_sets.STANDARD_SET]
            }
        }):
      self.mock_flags.benchmarks = ['test_derived_set']
      benchmark_tuple_list = benchmark_sets.GetBenchmarksFromFlags()
      self.assertIsNotNone(benchmark_tuple_list)
      self.assertGreater(len(benchmark_tuple_list), 0)
      for benchmark_tuple in benchmark_tuple_list:
        self.assertIn(benchmark_tuple[0].BENCHMARK_NAME,
                      self.valid_benchmark_names)

  def testBenchmarkNestedDerivedSets(self):
    # make sure that sets which are derived from the standard_set
    # expands into a valid set of benchmarks
    self.mock_flags.benchmarks = [benchmark_sets.STANDARD_SET]
    standard_module_list = benchmark_sets.GetBenchmarksFromFlags()
    with mock.patch.dict(
        benchmark_sets.BENCHMARK_SETS, {
            'test_derived_set': {
                benchmark_sets.MESSAGE: 'test derived benchmark set.',
                benchmark_sets.BENCHMARK_LIST: [benchmark_sets.STANDARD_SET]
            },
            'test_nested_derived_set': {
                benchmark_sets.MESSAGE: 'test nested derived benchmark set.',
                benchmark_sets.BENCHMARK_LIST: ['test_derived_set']
            }
        }):
      # TODO(voellm): better check would be to make sure both lists are the
      # same
      benchmark_tuple_list = benchmark_sets.GetBenchmarksFromFlags()
      self.assertIsNotNone(benchmark_tuple_list)
      self.assertIsNotNone(standard_module_list)
      self.assertEqual(len(benchmark_tuple_list), len(standard_module_list))
      for benchmark_tuple in benchmark_tuple_list:
        self.assertIn(benchmark_tuple[0].BENCHMARK_NAME,
                      self.valid_benchmark_names)

  def testBenchmarkValidCommandLine1(self):
    # make sure the standard_set expands to a valid set of benchmarks
    self.mock_flags.benchmarks = ['standard_set']
    benchmark_tuple_list = benchmark_sets.GetBenchmarksFromFlags()
    self.assertIsNotNone(benchmark_tuple_list)
    self.assertGreater(len(benchmark_tuple_list), 0)
    for benchmark_tuple in benchmark_tuple_list:
      self.assertIn(benchmark_tuple[0].BENCHMARK_NAME,
                    self.valid_benchmark_names)

  @staticmethod
  def _ContainsModule(module_name, module_list):
    # True when a (module, config) tuple for module_name is in module_list.
    for module_tuple in module_list:
      if module_tuple[0].BENCHMARK_NAME == module_name:
        return True
    return False

  def testBenchmarkValidCommandLine2(self):
    # make sure the standard_set plus a listed benchmark expands
    # to a valid set of benchmarks
    self.mock_flags.benchmarks = ['standard_set', 'bonnieplusplus']
    benchmark_tuple_list = benchmark_sets.GetBenchmarksFromFlags()
    self.assertIsNotNone(benchmark_tuple_list)
    self.assertGreater(len(benchmark_tuple_list), 0)
    for benchmark_tuple in benchmark_tuple_list:
      self.assertIn(benchmark_tuple[0].BENCHMARK_NAME,
                    self.valid_benchmark_names)
    # make sure bonnieplusplus is a listed benchmark
    self.assertTrue(self._ContainsModule('bonnieplusplus',
                                         benchmark_tuple_list))

  def testBenchmarkValidCommandLine3(self):
    # make sure the command with two benchmarks is processed correctly
    self.mock_flags.benchmarks = ['iperf', 'fio']
    benchmark_tuple_list = benchmark_sets.GetBenchmarksFromFlags()
    self.assertIsNotNone(benchmark_tuple_list)
    self.assertEqual(len(benchmark_tuple_list), 2)
    for benchmark_tuple in benchmark_tuple_list:
      self.assertIn(benchmark_tuple[0].BENCHMARK_NAME,
                    self.valid_benchmark_names)
    # make sure listed benchmarks are present
    self.assertTrue(self._ContainsModule('iperf', benchmark_tuple_list))
    self.assertTrue(self._ContainsModule('fio', benchmark_tuple_list))

  def testBenchmarkInvalidCommandLine1(self):
    # make sure invalid benchmark names and sets cause a failure
    self.mock_flags.benchmarks = ['standard_set_invalid_name']
    self.assertRaises(ValueError, benchmark_sets.GetBenchmarksFromFlags)

  def testBenchmarkInvalidCommandLine2(self):
    # make sure invalid benchmark names and sets cause a failure
    self.mock_flags.benchmarks = ['standard_set', 'iperf_invalid_name']
    self.assertRaises(ValueError, benchmark_sets.GetBenchmarksFromFlags)

  def testConfigNames(self):
    # A user-config alias name resolves to its underlying benchmark module.
    self.mock_flags.benchmarks = ['internal_iprf', 'netperf']
    with mock.patch(
        'perfkitbenchmarker.configs.GetUserConfig',
        return_value=yaml.safe_load(USER_CONFIG)):
      benchmark_tuple_list = benchmark_sets.GetBenchmarksFromFlags()
    self.assertTrue(self._ContainsModule('iperf', benchmark_tuple_list))
    self.assertTrue(self._ContainsModule('netperf', benchmark_tuple_list))

  def testMatrices(self):
    self.mock_flags.benchmarks = ['netperf']
    with mock.patch(
        'perfkitbenchmarker.configs.GetUserConfig',
        return_value=yaml.safe_load(MATRIX_CONFIG)):
      benchmark_tuple_list = benchmark_sets.GetBenchmarksFromFlags()
    self.assertEqual(len(benchmark_tuple_list), 4)
    flag_list = [benchmark_tuple[1]['flags']
                 for benchmark_tuple in benchmark_tuple_list]
    six.assertCountEqual(self, flag_list, EXPECTED_MATRIX_FLAGS)

  def testZipWithDifferentAxesLengths(self):
    self.mock_flags.benchmarks = ['netperf']
    with mock.patch(
        'perfkitbenchmarker.configs.GetUserConfig',
        return_value=yaml.safe_load(ZIP_CONFIG_DIFFERENT_AXES_LENGTH)):
      self.assertRaises(ValueError, benchmark_sets.GetBenchmarksFromFlags)

  def testZip(self):
    self.mock_flags.benchmarks = ['netperf']
    with mock.patch(
        'perfkitbenchmarker.configs.GetUserConfig',
        return_value=yaml.safe_load(ZIP_CONFIG)):
      benchmark_tuple_list = benchmark_sets.GetBenchmarksFromFlags()
    self.assertEqual(len(benchmark_tuple_list), 2)
    flag_list = [benchmark_tuple[1]['flags']
                 for benchmark_tuple in benchmark_tuple_list]
    six.assertCountEqual(self, flag_list, EXPECTED_ZIP_FLAGS)

  def testZipSingleAxis(self):
    self.mock_flags.benchmarks = ['netperf']
    with mock.patch(
        'perfkitbenchmarker.configs.GetUserConfig',
        return_value=yaml.safe_load(SINGLE_ZIP_CONFIG)):
      benchmark_tuple_list = benchmark_sets.GetBenchmarksFromFlags()
    self.assertEqual(len(benchmark_tuple_list), 2)
    flag_list = [benchmark_tuple[1]['flags']
                 for benchmark_tuple in benchmark_tuple_list]
    six.assertCountEqual(self, flag_list, EXPECTED_SINGLE_ZIP_FLAGS)

  def testZipAndMatrix(self):
    self.mock_flags.benchmarks = ['netperf']
    with mock.patch(
        'perfkitbenchmarker.configs.GetUserConfig',
        return_value=yaml.safe_load(ZIP_AND_MATRIX_CONFIG)):
      benchmark_tuple_list = benchmark_sets.GetBenchmarksFromFlags()
    self.assertEqual(len(benchmark_tuple_list), 4)
    flag_list = [benchmark_tuple[1]['flags']
                 for benchmark_tuple in benchmark_tuple_list]
    six.assertCountEqual(self, flag_list, EXPECTED_ZIP_AND_MATRIX_FLAGS)

  def testFilters(self):
    self.mock_flags.benchmarks = ['netperf']
    with mock.patch(
        'perfkitbenchmarker.configs.GetUserConfig',
        return_value=yaml.safe_load(FILTER_CONFIG)):
      benchmark_tuple_list = benchmark_sets.GetBenchmarksFromFlags()
    self.assertEqual(len(benchmark_tuple_list), 1)
    self.assertEqual(benchmark_tuple_list[0][1]['flags'],
                     {'zones': 'us-central1-a',
                      'machine_type': 'n1-standard-1'})

  def testFlagPrecedence(self):
    self.mock_flags.benchmarks = ['netperf']
    with mock.patch(
        'perfkitbenchmarker.configs.GetUserConfig',
        return_value=yaml.safe_load(FLAG_PRECEDENCE_CONFIG)):
      benchmark_tuple_list = benchmark_sets.GetBenchmarksFromFlags()
    self.assertEqual(len(benchmark_tuple_list), 1)
    self.assertEqual(benchmark_tuple_list[0][1]['flags'],
                     {'netperf_benchmarks': 'TCP_STREAM',
                      'netperf_test_length': 40,
                      'netperf_max_iter': 3})

  def testFlagMatrixNotFound(self):
    self.mock_flags.benchmarks = ['netperf']
    self.mock_flags.flag_matrix = 'bad_flag_matrix_name'
    with mock.patch(
        'perfkitbenchmarker.configs.GetUserConfig',
        return_value=yaml.safe_load(USER_CONFIG)):
      with self.assertRaises(benchmark_sets.FlagMatrixNotFoundException):
        benchmark_sets.GetBenchmarksFromFlags()

  def testFlagZipNotFound(self):
    self.mock_flags.benchmarks = ['netperf']
    self.mock_flags.flag_zip = 'bad_flag_zip_name'
    with mock.patch(
        'perfkitbenchmarker.configs.GetUserConfig',
        return_value=yaml.safe_load(USER_CONFIG)):
      with self.assertRaises(benchmark_sets.FlagZipNotFoundException):
        benchmark_sets.GetBenchmarksFromFlags()


if __name__ == '__main__':
  unittest.main()
"""PerfKitBenchmarker package module for installing PyTorch on a VM."""

from absl import flags
from perfkitbenchmarker.linux_packages import nvidia_driver

FLAGS = flags.FLAGS

flags.DEFINE_string('torch_version', '1.7.1', 'The torch version.')
flags.DEFINE_string('torchvision_version', '0.8.2', 'The torchvision version.')
flags.DEFINE_string('torchaudio_version', '0.7.2', 'The torchaudio version.')
flags.DEFINE_string('torch_env', 'PATH=/opt/conda/bin:$PATH',
                    'The torch install environment.')

# Index of prebuilt wheels; the +cpu / +cuXY suffixed builds live here.
_PYTORCH_WHL = 'https://download.pytorch.org/whl/torch_stable.html'


def Install(vm):
  """Installs PyTorch on the VM.

  Picks the CPU wheel by default, or the matching CUDA wheel when an
  NVIDIA GPU is present on the VM.
  """
  vm.InstallPackages('python3-pip')
  toolkit = 'cpu'
  if nvidia_driver.CheckNvidiaGpuExists(vm):
    # Translates --cuda_toolkit_version=10.2 to "cu102" for the toolkit to
    # install
    toolkit = f'cu{"".join(FLAGS.cuda_toolkit_version.split("."))}'

  vm.RemoteCommand(
      f'{FLAGS.torch_env} python3 -m pip install '
      f'torch=={FLAGS.torch_version}+{toolkit} '
      f'torchvision=={FLAGS.torchvision_version}+{toolkit} '
      f'torchaudio=={FLAGS.torchaudio_version} '
      f'-f {_PYTORCH_WHL}')


def Uninstall(vm):
  """Uninstalls PyTorch on the VM."""
  # NOTE(review): `pip uninstall` without -y prompts for confirmation, which
  # may hang a non-interactive run — confirm whether -y should be added.
  vm.RemoteCommand(f'{FLAGS.torch_env} pip uninstall '
                   'torch torchvision torchaudio')
"""Testinfra checks for molecule-provisioned hosts: /etc/molecule layout."""

import os

import testinfra.utils.ansible_runner

testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
    os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')


# EC2 provides unique random hostnames.
def test_hostname(host):
    # Intentionally empty: the hostname is instance-specific on EC2, so there
    # is no stable value to assert against.
    pass


def test_etc_molecule_directory(host):
    """/etc/molecule must be a root-owned directory with mode 0755."""
    f = host.file('/etc/molecule')

    assert f.is_directory
    assert f.user == 'root'
    assert f.group == 'root'
    assert f.mode == 0o755


def test_etc_molecule_ansible_hostname_file(host):
    """A root-owned 0644 marker file named after the short hostname exists."""
    filename = '/etc/molecule/{}'.format(host.check_output('hostname -s'))
    f = host.file(filename)

    assert f.is_file
    assert f.user == 'root'
    assert f.group == 'root'
    assert f.mode == 0o644
"""QtWebEngine-specific download handling: wraps QWebEngineDownloadItem."""

import re
import os.path
import functools

from PyQt5.QtCore import pyqtSlot, Qt, QUrl, QObject
from PyQt5.QtWebEngineWidgets import QWebEngineDownloadItem

from qutebrowser.browser import downloads, pdfjs
from qutebrowser.utils import debug, usertypes, message, log, objreg


class DownloadItem(downloads.AbstractDownloadItem):

    """A wrapper over a QWebEngineDownloadItem.

    Attributes:
        _qt_item: The wrapped item.
    """

    def __init__(self, qt_item: QWebEngineDownloadItem,
                 manager: downloads.AbstractDownloadManager,
                 parent: QObject = None) -> None:
        super().__init__(manager=manager, parent=manager)
        self._qt_item = qt_item
        qt_item.downloadProgress.connect(  # type: ignore[attr-defined]
            self.stats.on_download_progress)
        qt_item.stateChanged.connect(  # type: ignore[attr-defined]
            self._on_state_changed)

        # Ensure wrapped qt_item is deleted manually when the wrapper object
        # is deleted. See https://github.com/qutebrowser/qutebrowser/issues/3373
        self.destroyed.connect(self._qt_item.deleteLater)

    def _is_page_download(self):
        """Check if this item is a page (i.e. mhtml) download."""
        return (self._qt_item.savePageFormat() !=
                QWebEngineDownloadItem.UnknownSaveFormat)

    @pyqtSlot(QWebEngineDownloadItem.DownloadState)
    def _on_state_changed(self, state):
        # Mirror the Qt download state into this wrapper's successful/done
        # flags and emit the matching qutebrowser signals.
        state_name = debug.qenum_key(QWebEngineDownloadItem, state)
        log.downloads.debug("State for {!r} changed to {}".format(
            self, state_name))

        if state == QWebEngineDownloadItem.DownloadRequested:
            pass
        elif state == QWebEngineDownloadItem.DownloadInProgress:
            pass
        elif state == QWebEngineDownloadItem.DownloadCompleted:
            log.downloads.debug("Download {} finished".format(self.basename))
            if self._is_page_download():
                # Same logging as QtWebKit mhtml downloads.
                log.downloads.debug("File successfully written.")
            self.successful = True
            self.done = True
            self.finished.emit()
            self.stats.finish()
        elif state == QWebEngineDownloadItem.DownloadCancelled:
            self.successful = False
            self.done = True
            self.cancelled.emit()
            self.stats.finish()
        elif state == QWebEngineDownloadItem.DownloadInterrupted:
            self.successful = False
            reason = self._qt_item.interruptReasonString()
            self._die(reason)
        else:
            raise ValueError("_on_state_changed was called with unknown state "
                             "{}".format(state_name))

    def _do_die(self):
        # Stop progress updates; cancel only if Qt has not already aborted.
        progress_signal = self._qt_item.downloadProgress
        progress_signal.disconnect()  # type: ignore[attr-defined]
        if self._qt_item.state() != QWebEngineDownloadItem.DownloadInterrupted:
            self._qt_item.cancel()

    def _do_cancel(self):
        state = self._qt_item.state()
        state_name = debug.qenum_key(QWebEngineDownloadItem, state)
        # Cancelling a finished/cancelled download is a logic error upstream.
        assert state not in [QWebEngineDownloadItem.DownloadCompleted,
                             QWebEngineDownloadItem.DownloadCancelled], state_name
        self._qt_item.cancel()

    def retry(self):
        """Resume an interrupted download; refuse in any other state."""
        state = self._qt_item.state()
        if state != QWebEngineDownloadItem.DownloadInterrupted:
            log.downloads.warning(
                "Refusing to retry download in state {}".format(
                    debug.qenum_key(QWebEngineDownloadItem, state)))
            return

        self._qt_item.resume()

    def _get_open_filename(self):
        return self._filename

    def url(self) -> QUrl:
        return self._qt_item.url()

    def _set_fileobj(self, fileobj, *, autoclose=True):
        # QtWebEngine downloads can only go to a path, not a file object.
        raise downloads.UnsupportedOperationError

    def _set_tempfile(self, fileobj):
        fileobj.close()
        self._set_filename(fileobj.name, force_overwrite=True,
                           remember_directory=False)

    def _ensure_can_set_filename(self, filename):
        state = self._qt_item.state()
        if state != QWebEngineDownloadItem.DownloadRequested:
            state_name = debug.qenum_key(QWebEngineDownloadItem, state)
            raise ValueError("Trying to set filename {} on {!r} which is "
                             "state {} (not in requested state)!".format(
                                 filename, self, state_name))

    def _ask_confirm_question(self, title, msg, *, custom_yes_action=None):
        yes_action = custom_yes_action or self._after_set_filename
        no_action = functools.partial(self.cancel, remove_data=False)
        question = usertypes.Question()
        question.title = title
        question.text = msg
        question.url = 'file://{}'.format(self._filename)
        question.mode = usertypes.PromptMode.yesno
        question.answered_yes.connect(yes_action)
        question.answered_no.connect(no_action)
        question.cancelled.connect(no_action)
        self.cancelled.connect(question.abort)
        self.error.connect(question.abort)
        message.global_bridge.ask(question, blocking=True)

    def _ask_create_parent_question(self, title, msg,
                                    force_overwrite, remember_directory):
        assert self._filename is not None
        no_action = functools.partial(self.cancel, remove_data=False)
        question = usertypes.Question()
        question.title = title
        question.text = msg
        question.url = 'file://{}'.format(os.path.dirname(self._filename))
        question.mode = usertypes.PromptMode.yesno
        question.answered_yes.connect(lambda:
                                      self._after_create_parent_question(
                                          force_overwrite,
                                          remember_directory))
        question.answered_no.connect(no_action)
        question.cancelled.connect(no_action)
        self.cancelled.connect(question.abort)
        self.error.connect(question.abort)
        message.global_bridge.ask(question, blocking=True)

    def _after_set_filename(self):
        assert self._filename is not None

        dirname, basename = os.path.split(self._filename)
        try:
            # Qt 5.14
            self._qt_item.setDownloadDirectory(dirname)
            self._qt_item.setDownloadFileName(basename)
        except AttributeError:
            # Older Qt without the directory/filename setters.
            self._qt_item.setPath(self._filename)

        self._qt_item.accept()

    def _get_conflicting_download(self):
        """Return another potential active download with the same name.

        webenginedownloads.DownloadItem needs to look for downloads both in
        its manager and in qtnetwork-download-manager as both are used
        simultaneously.

        This method can be safely removed once #2328 is fixed.
        """
        conflicting_download = super()._get_conflicting_download()
        if conflicting_download:
            return conflicting_download

        qtnetwork_download_manager = objreg.get(
            'qtnetwork-download-manager')
        for download in qtnetwork_download_manager.downloads:
            if self._conflicts_with(download):
                return download
        return None


def _get_suggested_filename(path):
    """Convert a path we got from chromium to a suggested filename.

    Chromium thinks we want to download stuff to ~/Download, so even if we
    don't, we get downloads with a suffix like (1) for files existing there.

    We simply strip the suffix off via regex.

    See https://bugreports.qt.io/browse/QTBUG-56978
    """
    filename = os.path.basename(path)
    suffix_re = re.compile(r"""
      \ ?  # Optional space between filename and suffix
      (
        # Numerical suffix
        \([0-9]+\)
        |
        # ISO-8601 suffix
        # https://cs.chromium.org/chromium/src/base/time/time_to_iso8601.cc
        \ -\ \d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d{3}Z
      )
      (?=\.|$)  # Begin of extension, or filename without extension
    """, re.VERBOSE)

    return suffix_re.sub('', filename)


class DownloadManager(downloads.AbstractDownloadManager):

    """Manager for currently running downloads.

    Attributes:
        _mhtml_target: DownloadTarget for the next MHTML download.
    """

    def __init__(self, parent=None):
        super().__init__(parent)
        self._mhtml_target = None

    def install(self, profile):
        """Set up the download manager on a QWebEngineProfile."""
        # DirectConnection so handle_download runs synchronously and can
        # block on the filename prompt before Qt proceeds.
        profile.downloadRequested.connect(self.handle_download,
                                          Qt.DirectConnection)

    @pyqtSlot(QWebEngineDownloadItem)
    def handle_download(self, qt_item):
        """Start a download coming from a QWebEngineProfile."""
        suggested_filename = _get_suggested_filename(qt_item.path())
        use_pdfjs = pdfjs.should_use_pdfjs(qt_item.mimeType(), qt_item.url())

        download = DownloadItem(qt_item, manager=self)
        self._init_item(download, auto_remove=use_pdfjs,
                        suggested_filename=suggested_filename)

        if self._mhtml_target is not None:
            # A pending "save page as mhtml" request claims this download.
            download.set_target(self._mhtml_target)
            self._mhtml_target = None
            return
        if use_pdfjs:
            download.set_target(downloads.PDFJSDownloadTarget())
            return

        filename = downloads.immediate_download_path()
        if filename is not None:
            # User doesn't want to be asked, so just use the download_dir
            target = downloads.FileDownloadTarget(filename)
            download.set_target(target)
            return

        # Ask the user for a filename - needs to be blocking!
        question = downloads.get_filename_question(
            suggested_filename=suggested_filename, url=qt_item.url(),
            parent=self)
        self._init_filename_question(question, download)
        message.global_bridge.ask(question, blocking=True)
        # The filename is set via the question.answered signal, connected in
        # _init_filename_question.

    def get_mhtml(self, tab, target):
        """Download the given tab as mhtml to the given target."""
        assert tab.backend == usertypes.Backend.QtWebEngine
        assert self._mhtml_target is None, self._mhtml_target
        self._mhtml_target = target
        tab.action.save_page()
"""Tests that a categoryless term-doc matrix matches a categorized build."""

from unittest import TestCase

import numpy as np
import pandas as pd

from scattertext import whitespace_nlp
from scattertext.TermDocMatrixFromPandas import \
    TermDocMatrixWithoutCategoriesFromPandas, TermDocMatrixFromPandas
from scattertext.TermDocMatrixWithoutCategories import \
    TermDocMatrixWithoutCategories
from scattertext.test.test_corpusFromPandas import get_docs_categories


class CorpusFromPandasWithoutCategories():
    # NOTE(review): empty placeholder, unused in this module — candidate for
    # removal once confirmed nothing imports it.
    pass


def get_term_doc_matrix_without_categories():
    """Build a categoryless term-doc matrix from the shared test documents."""
    categories, documents = get_docs_categories()
    df = pd.DataFrame({'text': documents})
    tdm = TermDocMatrixWithoutCategoriesFromPandas(
        df, 'text', nlp=whitespace_nlp).build()
    return tdm


class TestCorpusFromPandasWithoutCategories(TestCase):
    def test_term_category_matrix_from_pandas_without_categories(self):
        # The categoryless build must agree with a regular categorized build
        # on terms, document count, and the raw term-document counts.
        tdm = get_term_doc_matrix_without_categories()
        categories, documents = get_docs_categories()
        reg_tdm = TermDocMatrixFromPandas(
            pd.DataFrame({'text': documents, 'categories': categories}),
            text_col='text',
            category_col='categories',
            nlp=whitespace_nlp).build()
        self.assertIsInstance(tdm, TermDocMatrixWithoutCategories)
        self.assertEqual(tdm.get_terms(), reg_tdm.get_terms())
        self.assertEqual(tdm.get_num_docs(), reg_tdm.get_num_docs())
        np.testing.assert_equal(tdm.get_term_doc_mat().data,
                                reg_tdm.get_term_doc_mat().data)
from ...utils import verbose
from ..utils import _data_path, _data_path_doc


@verbose
def data_path(path=None, force_update=False, update_path=True, download=True,
              verbose=None):  # noqa: D103
    # Thin wrapper: delegate to the shared dataset fetcher with the dataset
    # name fixed to 'misc'.  The docstring is attached below from the shared
    # template, hence the D103 suppression here.
    return _data_path(path=path, force_update=force_update,
                      update_path=update_path, name='misc',
                      download=download)


data_path.__doc__ = _data_path_doc.format(name='misc',
                                          conf='MNE_DATASETS_MISC_PATH')
import threading
import imp
import os

from stash.system import shthreads


def get_stash():
    """Return the currently active StaSh instance, or None if not found.

    Looks first for a module-global ``_stash``, then walks the running
    threads for a StaSh shell thread and follows its parent chain up to
    the top-level thread, whose parent holds the instance.  Useful for
    modules that need access to the surrounding shell.
    """
    if "_stash" in globals():
        return globals()["_stash"]
    for thr in threading.enumerate():
        if isinstance(thr, shthreads.ShBaseThread):
            ct = thr
            # Climb to the top-level shell thread; its parent owns the stash.
            while not ct.is_top_level():
                ct = ct.parent
            return ct.parent.stash
    return None


def load_from_dir(dirpath, varname):
    """Return all variables named *varname* in the ``.py`` files of *dirpath*.

    Each matching module is imported and, if it defines an attribute called
    *varname*, that attribute's value is collected.  A missing or non-existent
    directory yields an empty list.
    """
    if not os.path.isdir(dirpath):
        return []
    ret = []
    for fn in os.listdir(dirpath):
        fp = os.path.join(dirpath, fn)
        # Only regular .py source files are importable here.  The previous
        # code tried every file and crashed with a ValueError on filenames
        # without a "." (fn.index(".")), and fed non-Python files to
        # imp.load_source.
        if not os.path.isfile(fp) or not fn.endswith(".py"):
            continue
        modname = os.path.splitext(fn)[0]
        # NOTE(review): imp is deprecated (removed in Python 3.12); consider
        # importlib.util.spec_from_file_location when the runtime allows.
        with open(fp, "r") as fin:
            mod = imp.load_source(modname, fp, fin)
        if hasattr(mod, varname):
            ret.append(getattr(mod, varname))
    return ret
import numpy as np
import warnings

import chainer
from chainer.backends import cuda

from chainercv.transforms import center_crop
from chainercv.transforms import resize
from chainercv.transforms import scale
from chainercv.transforms import ten_crop


class FeaturePredictor(chainer.Chain):

    """Wrapper that adds a prediction method to a feature extraction link.

    The :meth:`predict` takes three steps to make a prediction.

    1. Preprocess input images
    2. Forward the preprocessed images to the network
    3. Average features in the case when more than one crops are extracted.

    Example:

        >>> from chainercv.links import VGG16
        >>> from chainercv.links import FeaturePredictor
        >>> base_model = VGG16()
        >>> model = FeaturePredictor(base_model, 224, 256)
        >>> prob = model.predict([img])
        # Predicting multiple features
        >>> model.extractor.pick = ['conv5_3', 'fc7']
        >>> conv5_3, fc7 = model.predict([img])

    When :obj:`self.crop == 'center'`, :meth:`predict` extracts features
    from the center crop of the input images.
    When :obj:`self.crop == '10'`, :meth:`predict` extracts features from
    patches that are ten-cropped from the input images; the output is the
    average of the features computed from the crops.

    Args:
        extractor: A feature extraction link. This is a callable chain
            that takes a batch of images and returns a variable or a
            tuple of variables.
        crop_size (int or tuple): The height and the width of an image
            after cropping in preprocessing. If this is an integer, the
            image is cropped to :math:`(crop\_size, crop\_size)`.
        scale_size (int or tuple): If :obj:`scale_size` is :obj:`None`,
            neither scaling nor resizing is conducted during preprocessing
            (the default). If this is an integer, an image is resized so
            that the length of the shorter edge is equal to
            :obj:`scale_size`. If this is a tuple :obj:`(height, width)`,
            the image is resized to :math:`(height, width)`.
        crop ({'center', '10'}): Determines the style of cropping.
        mean (numpy.ndarray): A mean value. If this is :obj:`None`,
            :obj:`extractor.mean` is used as the mean value.

    """

    def __init__(self, extractor, crop_size, scale_size=None,
                 crop='center', mean=None):
        super(FeaturePredictor, self).__init__()
        self.scale_size = scale_size
        if isinstance(crop_size, int):
            crop_size = (crop_size, crop_size)
        self.crop_size = crop_size
        # Fail fast on an unsupported crop style.  Previously a bad value
        # only surfaced at predict() time as a NameError inside _prepare().
        if crop not in ('center', '10'):
            raise ValueError(
                "crop must be either 'center' or '10', got {}".format(crop))
        self.crop = crop
        with self.init_scope():
            self.extractor = extractor

        if mean is None:
            self.mean = self.extractor.mean
        else:
            self.mean = mean

    def _prepare(self, img):
        """Prepare an image for feeding it to a model.

        This is a standard preprocessing scheme used by feature extraction
        models: optional scale/resize according to :obj:`scale_size`, crop
        to :obj:`crop_size`, then subtract :obj:`mean`.

        Args:
            img (~numpy.ndarray): An image in CHW format.
                The range of its value is :math:`[0, 255]`.

        Returns:
            ~numpy.ndarray:
            A preprocessed image. This is a 4D array whose batch size is
            the number of crops (1 for 'center', 10 for '10').

        """
        if self.scale_size is not None:
            if isinstance(self.scale_size, int):
                img = scale(img, size=self.scale_size)
            else:
                img = resize(img, size=self.scale_size)
        else:
            # Copy so that the in-place mean subtraction below cannot
            # clobber the caller's array.
            img = img.copy()

        if self.crop == '10':
            imgs = ten_crop(img, self.crop_size)
        elif self.crop == 'center':
            imgs = center_crop(img, self.crop_size)[np.newaxis]
        else:
            # Unreachable: crop is validated in __init__; kept defensively.
            raise ValueError('invalid crop style: {}'.format(self.crop))

        imgs -= self.mean[np.newaxis]

        return imgs

    def _average_crops(self, y, n_crop):
        """Average features over the crop axis."""
        if y.ndim == 4:
            warnings.warn(
                'Four dimensional features are averaged. '
                'If these are batch of 2D spatial features, '
                'their spatial information would be lost.')

        xp = chainer.backends.cuda.get_array_module(y)
        y = y.reshape((-1, n_crop) + y.shape[1:])
        y = xp.mean(y, axis=1)
        return y

    def predict(self, imgs):
        """Predict features from images.

        Given :math:`N` input images, this method outputs a batched array
        with batchsize :math:`N`.

        Args:
            imgs (iterable of numpy.ndarray): Array-images.
                All images are in CHW format
                and the range of their value is :math:`[0, 255]`.

        Returns:
            numpy.ndarray or tuple of numpy.ndarray:
            A batch of features or a tuple of them.

        """
        # [(C, H_0, W_0), ..., (C, H_{B-1}, W_{B-1})] -> (B, N, C, H, W)
        imgs = self.xp.asarray([self._prepare(img) for img in imgs])
        n_crop = imgs.shape[-4]
        shape = (-1, imgs.shape[-3]) + self.crop_size
        # (B, N, C, H, W) -> (B * N, C, H, W)
        imgs = imgs.reshape(shape)

        with chainer.using_config('train', False), \
                chainer.function.no_backprop_mode():
            imgs = chainer.Variable(imgs)
            features = self.extractor(imgs)

        if isinstance(features, tuple):
            output = []
            for feature in features:
                feature = feature.array
                if n_crop > 1:
                    feature = self._average_crops(feature, n_crop)
                output.append(cuda.to_cpu(feature))
            output = tuple(output)
        else:
            output = cuda.to_cpu(features.array)
            if n_crop > 1:
                output = self._average_crops(output, n_crop)
        return output
import argparse
import imp

import yaml
from yaml.scanner import ScannerError

TYPE = 'type'
LIST = 'list'
DESCRIPTION = 'description'
REQUIRED = 'required'
DEFAULT = 'default'
ALLOWED = 'allowed'
VALUES_DSC = 'values_description'
ONE_OF = 'one of'
SCHEMA = 'schema'
EXAMPLES = 'examples'
ANYOF = 'anyof'
NO_DSC = '(no description)'
VALIDATOR = 'validator'
NoneType = type(None)


class TextBlock(object):
    """A chunk of text with cached line, width and height metrics.

    Used by the renderer to lay out multi-column RST tables.
    """

    def __init__(self, text, tab_replacement=' ', ending=''):
        """
        :type text: str
        """
        self.text = str(text).replace('\t', tab_replacement)
        self.lines = self.text.splitlines()
        # `+ [0]` keeps max() from raising on empty text.
        self.width = max([len(line) for line in self.lines] + [0])
        self.padded_width = self.width + 2
        self.height = len(self.lines)

    def get_line(self, item, raise_index_error=False, default=''):
        """Return line ``item``; past-the-end yields ``default`` unless
        ``raise_index_error`` is set."""
        try:
            return self.lines[item]
        except IndexError:
            if raise_index_error:
                raise
            else:
                return default

    def get_line_justified(self, item, fillchar=' ', raise_index_error=False, default=''):
        """Return line ``item`` left-justified to the block's width."""
        return self.get_line(item, raise_index_error, default).ljust(self.width, fillchar)

    def __str__(self):
        return self.text


def to_text_block(method):
    """Decorator: coerce the single argument to a TextBlock before calling."""
    def decorated(content):
        if not isinstance(content, TextBlock):
            return method(TextBlock(content))
        else:
            # BUGFIX: previously returned method(TextBlock) — passing the
            # *class* instead of the already-wrapped instance.
            return method(content)
    return decorated


class RSTRenderer(object):
    """Render schema fragments as reStructuredText strings."""

    def with_escape(method):
        # Class-body-local decorator: escape RST-significant characters in
        # the argument before calling.  Deleted from the namespace below.
        def escaped(content):
            return method(RSTRenderer.escape(content))
        return escaped

    @staticmethod
    def any_of_table(blocks):
        """Render a one-row RST grid table with one column per block.

        :type blocks: list of TextBlock
        """
        HEADER = 'any of'
        cnt = len(blocks)
        # no need table for single content
        if cnt < 2:
            return blocks[0] if blocks else ''

        # width = widths of contents + separators
        width = max((len(HEADER), sum([c.padded_width for c in blocks]))) + (cnt + 1)
        height = max([c.height for c in blocks])
        # rows separators
        top_bar = '+{}+'.format('-' * (width - 2))
        header_bar = '+{}+'.format('+'.join(['=' * c.padded_width for c in blocks]))
        bottom_bar = '+{}+'.format('+'.join(['-' * c.padded_width for c in blocks]))

        header = '|{}|'.format(HEADER.center(width - 2))
        body = '\n'.join(
            ['| {} |'.format(' | '.join([c.get_line_justified(i) for c in blocks]))
             for i in range(height)])

        return '\n'.join([top_bar, header, header_bar, body, bottom_bar])

    @staticmethod
    def preserve_indents(block):
        """Render lines as RST line blocks so indentation survives.

        :type block: TextBlock
        """
        return '\n'.join(['| {}'.format(line) for line in block.lines])

    @staticmethod
    def bold(content):
        """
        :type content: str
        :return: str
        """
        return '\n'.join(['**{}**'.format(line) for line in content.splitlines()])

    @staticmethod
    def title(content, new_line_replacement=' ', tab_replacement=' '):
        """
        Underlines content with '='. New lines and tabs will be replaced
        :param str content:
        :param str new_line_replacement:
        :param str tab_replacement:
        :return: str
        """
        prepared_content = content.strip().replace('\n', new_line_replacement).replace('\t', tab_replacement)
        return '{}\n{}'.format(prepared_content, '=' * len(prepared_content))

    @staticmethod
    def subtitle(content, new_line_replacement=' ', tab_replacement=' '):
        """Underlines content with '-' (second-level heading)."""
        prepared_content = content.strip().replace('\n', new_line_replacement).replace('\t', tab_replacement)
        return '{}\n{}'.format(prepared_content, '-' * len(prepared_content))

    @staticmethod
    @with_escape
    @to_text_block
    def italic(block):
        """
        :type block: TextBlock
        """
        return '\n'.join(['*{}*'.format(line) for line in block.lines])

    @staticmethod
    @to_text_block
    def mono(block):
        """
        :type block: TextBlock
        """
        return '\n'.join(['``{}``'.format(line) for line in block.lines])

    @classmethod
    def bullet_list(cls, blocks):
        """
        :type blocks: list of TextBlock
        :rtype: TextBlock
        """
        return TextBlock('\n'.join([cls._list_item(block) for block in blocks]))

    @staticmethod
    def _list_item(block):
        """
        :type block: TextBlock
        """
        return '- ' + '\n  '.join(block.lines)

    @staticmethod
    def def_list(items, sort=True, newlines=True):
        """Render ``items`` as an RST definition list."""
        def format_value(value):
            if isinstance(value, (int, bool, NoneType)):
                return format_value(str(value))
            if isinstance(value, str):
                return '\n '.join(value.splitlines())
            elif isinstance(value, TextBlock):
                return '\n '.join(value.lines)
            elif isinstance(value, dict):
                return '\n '.join(RSTRenderer.def_list(value, sort, newlines).splitlines())
            elif isinstance(value, list):
                return '\n '.join(RSTRenderer.bullet_list([TextBlock(item) for item in value]).lines)
            else:
                raise ValueError('Unsupported value type: {}\n{}'.format(type(value), value))

        sort = sorted if sort else lambda x: x
        template = '{}\n {}' if newlines else ':{}: {}'
        return '\n' + '\n'.join([template.format(k.replace('\n', ' '), format_value(v).strip())
                                 for k, v in sort(items.items())]) if items else ''

    @staticmethod
    def field_list(items, sort=True, newlines=True):
        """
        :param bool newlines: add newlines between names and values
        :param bool sort: sort items alphabetically by key
        :type items: dict
        :rtype: TextBlock
        """
        def format_value(value):
            if isinstance(value, (int, bool, NoneType)):
                return format_value(str(value))
            if isinstance(value, str):
                return '\n '.join(value.splitlines())
            elif isinstance(value, TextBlock):
                return '\n '.join(value.lines)
            elif isinstance(value, dict):
                return '\n '.join(RSTRenderer.field_list(value, sort, newlines).splitlines())
            elif isinstance(value, list):
                return '\n '.join(RSTRenderer.bullet_list([TextBlock(item) for item in value]).lines)
            else:
                raise ValueError('Unsupported value type: {}\n{}'.format(type(value), value))

        sort = sorted if sort else lambda x: x
        template = ':{}:\n {}' if newlines else ':{}: {}'
        return '\n' + '\n'.join([template.format(k.replace('\n', ' '), format_value(v).strip())
                                 for k, v in sort(items.items())]) if items else ''

    @staticmethod
    def dict_list_structure(items, sort_dict=True):
        """Recursively render nested dict/list/scalar data as RST."""
        if isinstance(items, str):
            return TextBlock(items)
        elif isinstance(items, int):
            return TextBlock(str(items))
        elif isinstance(items, list):
            return RSTRenderer.bullet_list([RSTRenderer.dict_list_structure(item) for item in items])
        elif isinstance(items, dict):
            return RSTRenderer.field_list({k: RSTRenderer.dict_list_structure(v)
                                           for k, v in items.items()}, sort_dict)

    @staticmethod
    def escape(content):
        """
        :type content: str
        """
        return content.replace('-', r'\-')

    # Remove the helper decorator from the class namespace; it is only
    # needed while the class body executes.
    del with_escape


def render_body(renderer, option_kwargs, exclude_keys, special_keys=None):
    """Render the "everything else" part of an option's documentation.

    :type option_kwargs: dict
    :type exclude_keys: list
    :type special_keys: dict
    """
    common_formatters = {
        EXAMPLES: lambda examples: renderer.def_list(
            {renderer.mono(example): annotation for example, annotation in examples.items()})
    }

    def default_fmt(x):
        return x

    special_keys = special_keys or {}
    special_part = '\n'.join([special_handler(renderer, option_kwargs[special_key])
                              for special_key, special_handler in special_keys.items()
                              if special_key in option_kwargs])
    uncommon_keys = set(exclude_keys) | set(special_keys.keys())
    common_part = renderer.field_list({
        k: common_formatters.get(k, default_fmt)(v)
        for k, v in option_kwargs.items()
        if k not in uncommon_keys
    })

    return '\n'.join([_ for _ in [common_part, special_part] if _])


def render_values_description(renderer, option_kwargs):
    """Render the per-value descriptions of an option as a field list."""
    values_description_dict = {
        value: option_kwargs[VALUES_DSC].get(value, '')
        for value in option_kwargs[ALLOWED]
    } \
        if ALLOWED in option_kwargs \
        else \
        option_kwargs[VALUES_DSC]
    values_description = renderer.field_list(
        {renderer.mono(value): dsc for value, dsc in values_description_dict.items()},
        newlines=False
    )
    return renderer.field_list({ONE_OF: values_description})


def allowed(renderer, values):
    """Special handler for the 'allowed' key: render the allowed values."""
    return renderer.field_list(
        {ONE_OF: '[{}]'.format(', '.join([renderer.mono(value) for value in values]))},
        newlines=False)


class OptionFormatter(object):
    """Chooses and applies the right rendering strategy for one option."""

    def __init__(self, option_schema):
        """
        :type option_schema: dict
        """
        self.option_name, self.option_kwargs = next(iter(option_schema.items()))
        self.formatter = self.__guess_formatter()

    def format_dsc(self, renderer):
        """Render the short description line, including default/required."""
        dsc = self.option_kwargs.get(DESCRIPTION, NO_DSC).strip('. ')
        if DEFAULT in self.option_kwargs:
            default_value = self.option_kwargs.get(DEFAULT)
            if default_value == '':
                default_value = '""'
            return ' '.join([renderer.italic('- {}. Default:'.format(dsc)),
                             renderer.mono(default_value)])
        elif REQUIRED in self.option_kwargs:
            return (renderer.italic('- {}.'.format(dsc))
                    + ' '
                    + renderer.bold('Required.'))
        else:
            return renderer.italic('- {}.'.format(dsc))

    def scalar_formatter(self, renderer, header=True):
        hdr = renderer.subtitle(renderer.mono(self.option_name) + ' '
                                + '({})'.format(self.option_kwargs.get(TYPE))) \
            if header else ''
        dsc = self.format_dsc(renderer)
        body = render_body(renderer, self.option_kwargs,
                           [VALIDATOR, TYPE, DESCRIPTION, DEFAULT, REQUIRED],
                           {'allowed': allowed})
        return '\n'.join([_ for _ in [hdr, dsc, body] if _])

    def scalar_with_values_description(self, renderer, header=True):
        hdr = renderer.subtitle(renderer.mono(self.option_name) + ' '
                                + '({})'.format(self.option_kwargs.get(TYPE))) \
            if header else ''
        dsc = self.format_dsc(renderer)
        body = render_body(renderer, self.option_kwargs,
                           [VALIDATOR, TYPE, DESCRIPTION, DEFAULT, REQUIRED, ALLOWED, VALUES_DSC])
        values_description_block = render_values_description(renderer, self.option_kwargs)
        return '\n'.join([_ for _ in [hdr, dsc, body, values_description_block] if _])

    def dict_formatter(self, renderer, header=True):
        hdr = renderer.subtitle(renderer.mono(self.option_name) + ' '
                                + '({})'.format(self.option_kwargs.get(TYPE))) \
            if header else ''
        dsc = self.format_dsc(renderer)
        dict_schema = self.option_kwargs[SCHEMA]
        # Recursively format every sub-option, headerless, inside a field list.
        schema_block = renderer.field_list({
            '{} ({})'.format(renderer.mono(key), dict_schema[key].get(TYPE, 'anyof')):
                get_formatter({key: value})(renderer, header=False)
            for key, value in dict_schema.items()})
        body = render_body(renderer, self.option_kwargs,
                           [VALIDATOR, TYPE, DESCRIPTION, DEFAULT, REQUIRED, SCHEMA])
        return '\n'.join([_ for _ in [hdr, dsc, schema_block, body] if _])

    def anyof_formatter(self, renderer, header=True):
        types = [case[TYPE] for case in self.option_kwargs[ANYOF] if TYPE in case]
        hdr = renderer.subtitle(renderer.mono(self.option_name) + ' '
                                + '({})'.format(' or '.join(types))) \
            if header else ''
        dsc = self.format_dsc(renderer)
        values_description_block = render_values_description(renderer, self.option_kwargs) \
            if VALUES_DSC in self.option_kwargs else ''
        body = render_body(renderer, self.option_kwargs,
                           [VALIDATOR, TYPE, DESCRIPTION, DEFAULT, REQUIRED, ANYOF, VALUES_DSC])
        return '\n'.join([_ for _ in [hdr, dsc, values_description_block, body] if _])

    def list_formatter(self, renderer, header=True):
        schema = self.option_kwargs[SCHEMA]
        hdr = renderer.subtitle(renderer.mono(self.option_name) + ' '
                                + '({} of {})'.format(self.option_kwargs.get(TYPE, LIST),
                                                      schema.get(TYPE, '')))
        dsc = self.format_dsc(renderer)
        body = render_body(renderer, self.option_kwargs,
                           [VALIDATOR, TYPE, DEFAULT, REQUIRED, DESCRIPTION, SCHEMA])
        if set(schema.keys()) - {TYPE}:
            # The element schema carries more than a bare type: document it.
            schema_block = renderer.field_list({
                '[list_element] ({})'.format(schema.get(TYPE, '')):
                    get_formatter({'list_element': schema})(renderer, header=False)
            })
            return '\n'.join([_ for _ in [hdr, dsc, schema_block, body] if _])
        else:
            return '\n'.join([_ for _ in [hdr, dsc, body] if _])

    def __guess_formatter(self):
        # Order matters: anyof beats schema, schema beats values description.
        if ANYOF in self.option_kwargs:
            return self.anyof_formatter
        elif SCHEMA in self.option_kwargs:
            return self.list_formatter if self.option_kwargs.get(TYPE) == LIST else self.dict_formatter
        elif VALUES_DSC in self.option_kwargs:
            return self.scalar_with_values_description
        else:
            return self.scalar_formatter


def get_formatter(option_schema):
    """Return the bound formatter method for a one-entry option schema.

    :type option_schema: dict
    """
    return OptionFormatter(option_schema).formatter


def format_option(option_schema, renderer):
    return get_formatter(option_schema)(renderer)


def format_schema(schema, renderer, title=None):
    """Render a whole schema, options sorted alphabetically.

    :param dict schema: Cerberus config schema
    :type renderer: RSTRenderer
    """
    body = '\n\n'.join(
        sorted([format_option({option_name: option_schema}, renderer)
                for option_name, option_schema in schema.items()]))
    if title:
        title = renderer.title(title)
        return title + '\n\n' + body
    else:
        return body


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('schema', help='Path to schema file')
    parser.add_argument('-o', '--output_filename', default='output.rst',
                        help='Name for the output rst document')
    parser.add_argument('--title', default=None, help='Document title')
    parser.add_argument('-a', '--append', action='store_true',
                        help='Don\'t overwrite output file')
    args = parser.parse_args()

    schema_path = args.schema
    output_filename = args.output_filename
    title = args.title
    append = args.append

    try:
        # Schema may be a YAML file, or a Python module exposing OPTIONS.
        with open(schema_path) as f:
            schema = yaml.load(f, Loader=yaml.FullLoader)
    except ScannerError:
        schema_module = imp.load_source('schema', schema_path)
        schema = schema_module.OPTIONS

    document = format_schema(schema, RSTRenderer(), title)

    if append:
        with open(output_filename, 'a') as f:
            f.write('\n\n')
            f.write(document)
    else:
        with open(output_filename, 'w') as f:
            f.write(document)


if __name__ == '__main__':
    main()
"""Tests for the ozw fan platform."""
from homeassistant.components.ozw.fan import SPEED_TO_VALUE

from .common import setup_ozw


async def test_fan(hass, fan_data, fan_msg, sent_messages, caplog):
    """Test fan."""
    receive_message = await setup_ozw(hass, fixture=fan_data)

    # Test loaded
    state = hass.states.get("fan.in_wall_smart_fan_control_level")
    assert state is not None
    assert state.state == "on"

    # Test turning off
    await hass.services.async_call(
        "fan",
        "turn_off",
        {"entity_id": "fan.in_wall_smart_fan_control_level"},
        blocking=True,
    )
    assert len(sent_messages) == 1
    msg = sent_messages[-1]
    assert msg["topic"] == "OpenZWave/1/command/setvalue/"
    assert msg["payload"] == {"Value": 0, "ValueIDKey": 172589073}

    # Feedback on state
    fan_msg.decode()
    fan_msg.payload["Value"] = 0
    fan_msg.encode()
    receive_message(fan_msg)
    await hass.async_block_till_done()
    state = hass.states.get("fan.in_wall_smart_fan_control_level")
    assert state is not None
    assert state.state == "off"

    # Test turning on at an explicit speed
    new_speed = "medium"
    await hass.services.async_call(
        "fan",
        "turn_on",
        {"entity_id": "fan.in_wall_smart_fan_control_level", "speed": new_speed},
        blocking=True,
    )
    assert len(sent_messages) == 2
    msg = sent_messages[-1]
    assert msg["topic"] == "OpenZWave/1/command/setvalue/"
    assert msg["payload"] == {
        "Value": SPEED_TO_VALUE[new_speed],
        "ValueIDKey": 172589073,
    }

    # Feedback on state
    fan_msg.decode()
    fan_msg.payload["Value"] = SPEED_TO_VALUE[new_speed]
    fan_msg.encode()
    receive_message(fan_msg)
    await hass.async_block_till_done()
    state = hass.states.get("fan.in_wall_smart_fan_control_level")
    assert state is not None
    assert state.state == "on"
    assert state.attributes["speed"] == new_speed

    # Test turn on without speed
    # Value 255 presumably asks the device to resume its previous level —
    # TODO(review): confirm against the Z-Wave SwitchMultilevel spec.
    await hass.services.async_call(
        "fan",
        "turn_on",
        {"entity_id": "fan.in_wall_smart_fan_control_level"},
        blocking=True,
    )
    assert len(sent_messages) == 3
    msg = sent_messages[-1]
    assert msg["topic"] == "OpenZWave/1/command/setvalue/"
    assert msg["payload"] == {
        "Value": 255,
        "ValueIDKey": 172589073,
    }

    # Feedback on state (device reports the previous speed, "medium")
    fan_msg.decode()
    fan_msg.payload["Value"] = SPEED_TO_VALUE[new_speed]
    fan_msg.encode()
    receive_message(fan_msg)
    await hass.async_block_till_done()
    state = hass.states.get("fan.in_wall_smart_fan_control_level")
    assert state is not None
    assert state.state == "on"
    assert state.attributes["speed"] == new_speed

    # Test set speed to off
    new_speed = "off"
    await hass.services.async_call(
        "fan",
        "set_speed",
        {"entity_id": "fan.in_wall_smart_fan_control_level", "speed": new_speed},
        blocking=True,
    )
    assert len(sent_messages) == 4
    msg = sent_messages[-1]
    assert msg["topic"] == "OpenZWave/1/command/setvalue/"
    assert msg["payload"] == {
        "Value": SPEED_TO_VALUE[new_speed],
        "ValueIDKey": 172589073,
    }

    # Feedback on state
    fan_msg.decode()
    fan_msg.payload["Value"] = SPEED_TO_VALUE[new_speed]
    fan_msg.encode()
    receive_message(fan_msg)
    await hass.async_block_till_done()
    state = hass.states.get("fan.in_wall_smart_fan_control_level")
    assert state is not None
    assert state.state == "off"

    # Test invalid speed: no message is sent, an error is logged
    new_speed = "invalid"
    await hass.services.async_call(
        "fan",
        "set_speed",
        {"entity_id": "fan.in_wall_smart_fan_control_level", "speed": new_speed},
        blocking=True,
    )
    assert len(sent_messages) == 4
    assert "Invalid speed received: invalid" in caplog.text
from __future__ import print_function from pyVim.connect import SmartConnect, Disconnect from pyVmomi import vim, vmodl import argparse import atexit import getpass import sys import ssl def GetArgs(): """ Supports the command-line arguments listed below. """ parser = argparse.ArgumentParser(description='Process args for powering on a Virtual Machine') parser.add_argument('-s', '--host', required=True, action='store', help='Remote host to connect to') parser.add_argument('-o', '--port', type=int, default=443, action='store', help='Port to connect on') parser.add_argument('-u', '--user', required=True, action='store', help='User name to use when connecting to host') parser.add_argument('-p', '--password', required=False, action='store', help='Password to use when connecting to host') parser.add_argument('-v', '--vmname', required=True, action='append', help='Names of the Virtual Machines to power on') args = parser.parse_args() return args def WaitForTasks(tasks, si): """ Given the service instance si and tasks, it returns after all the tasks are complete """ pc = si.content.propertyCollector taskList = [str(task) for task in tasks] # Create filter objSpecs = [vmodl.query.PropertyCollector.ObjectSpec(obj=task) for task in tasks] propSpec = vmodl.query.PropertyCollector.PropertySpec(type=vim.Task, pathSet=[], all=True) filterSpec = vmodl.query.PropertyCollector.FilterSpec() filterSpec.objectSet = objSpecs filterSpec.propSet = [propSpec] filter = pc.CreateFilter(filterSpec, True) try: version, state = None, None # Loop looking for updates till the state moves to a completed state. 
while len(taskList): update = pc.WaitForUpdates(version) for filterSet in update.filterSet: for objSet in filterSet.objectSet: task = objSet.obj for change in objSet.changeSet: if change.name == 'info': state = change.val.state elif change.name == 'info.state': state = change.val else: continue if not str(task) in taskList: continue if state == vim.TaskInfo.State.success: # Remove task from taskList taskList.remove(str(task)) elif state == vim.TaskInfo.State.error: raise task.info.error # Move to next version version = update.version finally: if filter: filter.Destroy() # Start program def main(): """ Simple command-line program for powering on virtual machines on a system. """ args = GetArgs() if args.password: password = args.password else: password = getpass.getpass(prompt='Enter password for host %s and user %s: ' % (args.host,args.user)) try: vmnames = args.vmname if not len(vmnames): print("No virtual machine specified for poweron") sys.exit() context = None if hasattr(ssl, '_create_unverified_context'): context = ssl._create_unverified_context() si = SmartConnect(host=args.host, user=args.user, pwd=password, port=int(args.port), sslContext=context) if not si: print("Cannot connect to specified host using specified username and password") sys.exit() atexit.register(Disconnect, si) # Retreive the list of Virtual Machines from the inventory objects # under the rootFolder content = si.content objView = content.viewManager.CreateContainerView(content.rootFolder, [vim.VirtualMachine], True) vmList = objView.view objView.Destroy() # Find the vm and power it on tasks = [vm.PowerOn() for vm in vmList if vm.name in vmnames] # Wait for power on to complete WaitForTasks(tasks, si) print("Virtual Machine(s) have been powered on successfully") except vmodl.MethodFault as e: print("Caught vmodl fault : " + e.msg) except Exception as e: print("Caught Exception : " + str(e)) # Start program if __name__ == "__main__": main()
from homeassistant.const import STATE_OFF, STATE_ON

from .util import async_init_integration


async def test_create_binary_sensors(hass):
    """Test creation of binary sensors."""

    await async_init_integration(hass)

    state = hass.states.get("binary_sensor.master_suite_blower_active")
    assert state.state == STATE_ON
    expected_attributes = {
        "attribution": "Data provided by mynexia.com",
        "friendly_name": "Master Suite Blower Active",
    }
    # Only test for a subset of attributes in case
    # HA changes the implementation and a new one appears
    assert all(
        state.attributes[key] == expected_attributes[key] for key in expected_attributes
    )

    state = hass.states.get("binary_sensor.downstairs_east_wing_blower_active")
    assert state.state == STATE_OFF
    expected_attributes = {
        "attribution": "Data provided by mynexia.com",
        "friendly_name": "Downstairs East Wing Blower Active",
    }
    # Only test for a subset of attributes in case
    # HA changes the implementation and a new one appears
    assert all(
        state.attributes[key] == expected_attributes[key] for key in expected_attributes
    )
import numpy as np
import pandas as pd


class PhraseSelector(object):
    def __init__(self,
                 minimum_pmi=16):
        '''
        Filter n-grams using PMI.

        Parameters
        ----------
        minimum_pmi : float
            Minimum pointwise mutual information (base 2) an n-gram must
            reach to be kept; n-grams scoring below it are removed by
            ``compact``. Default 16.
        '''
        self.minimum_pmi = minimum_pmi

    def compact(self, term_doc_matrix, non_text=False):
        '''
        Remove n-gram terms whose PMI falls below ``minimum_pmi``.

        Parameters
        -------
        term_doc_matrix : TermDocMatrix
        non_text : bool
            If True, operate on metadata terms instead of text terms.

        Returns
        -------
        New term doc matrix
        '''
        count_df = self._get_statistics_dataframe(term_doc_matrix, non_text)
        return term_doc_matrix.remove_terms(
            count_df[count_df['pmi'] < self.minimum_pmi].index,
            non_text=non_text
        )

    def _get_statistics_dataframe(self, term_doc_matrix, non_text):
        # Total corpus frequency per term (text or metadata terms).
        tdf = (term_doc_matrix.get_metadata_freq_df().sum(axis=1)
               if non_text
               else term_doc_matrix.get_term_freq_df().sum(axis=1))
        # Split each term into its words, reversed: column 0 is the last
        # word, column 1 the preceding word (NaN for unigrams).
        # NOTE(review): this appears to assume terms are unigrams or
        # bigrams only — confirm for longer n-grams.
        gram_df = pd.Series(tdf.index).apply(lambda x: pd.Series(list(reversed(x.split()))))
        gram_df['c'] = tdf.values
        gram_df['term'] = tdf.index
        gram_df = gram_df.set_index('term')
        # Unigrams have no second column; the rest are treated as n-grams.
        unigram_df = gram_df[gram_df[1].isnull()][['c']]
        ngram_df = gram_df.dropna()
        # Attach each n-gram's component unigram counts (c0, c1).
        count_df = pd.merge(pd.merge(ngram_df, unigram_df,
                                     left_on=0, right_index=True, suffixes=('', '0')),
                            unigram_df, left_on=1, right_index=True, suffixes=('', '1'))
        # PMI in base 2: log2( P(ngram) / (P(w0) * P(w1)) ).
        p0 = count_df['c0'] / unigram_df['c'].sum()
        p1 = count_df['c1'] / unigram_df['c'].sum()
        p = count_df['c'] / ngram_df['c'].sum()
        count_df['pmi'] = np.log(p / (p0 * p1)) / np.log(2)
        return count_df
import os
import os.path as op

from numpy.testing import assert_array_equal

from mne.utils import requires_mayavi, run_tests_if_main, traits_test


@requires_mayavi
@traits_test
def test_mri_model(subjects_dir_tmp):
    """Test MRIHeadWithFiducialsModel Traits Model."""
    from mne.gui._fiducials_gui import MRIHeadWithFiducialsModel
    tgt_fname = op.join(subjects_dir_tmp, 'test-fiducials.fif')

    # Remove the two files that will make the fiducials okay via MNI estimation
    os.remove(op.join(subjects_dir_tmp, 'sample', 'bem',
                      'sample-fiducials.fif'))
    os.remove(op.join(subjects_dir_tmp, 'sample', 'mri', 'transforms',
                      'talairach.xfm'))

    model = MRIHeadWithFiducialsModel(subjects_dir=subjects_dir_tmp)
    model.subject = 'sample'
    assert model.default_fid_fname[-20:] == "sample-fiducials.fif"
    # No fiducials set yet: nothing to reset or save.
    assert not model.can_reset
    assert not model.can_save
    model.lpa = [[-1, 0, 0]]
    model.nasion = [[0, 1, 0]]
    model.rpa = [[1, 0, 0]]
    assert not model.can_reset
    assert model.can_save

    bem_fname = op.basename(model.bem_high_res.file)
    assert not model.can_reset
    assert bem_fname == 'sample-head-dense.fif'

    model.save(tgt_fname)
    assert model.fid_file == tgt_fname

    # resetting the file should not affect the model's fiducials
    model.fid_file = ''
    assert_array_equal(model.lpa, [[-1, 0, 0]])
    assert_array_equal(model.nasion, [[0, 1, 0]])
    assert_array_equal(model.rpa, [[1, 0, 0]])

    # reset model
    model.lpa = [[0, 0, 0]]
    model.nasion = [[0, 0, 0]]
    model.rpa = [[0, 0, 0]]
    assert_array_equal(model.lpa, [[0, 0, 0]])
    assert_array_equal(model.nasion, [[0, 0, 0]])
    assert_array_equal(model.rpa, [[0, 0, 0]])

    # loading the file should assign the model's fiducials
    model.fid_file = tgt_fname
    assert_array_equal(model.lpa, [[-1, 0, 0]])
    assert_array_equal(model.nasion, [[0, 1, 0]])
    assert_array_equal(model.rpa, [[1, 0, 0]])

    # after changing from file model should be able to reset
    model.nasion = [[1, 1, 1]]
    assert model.can_reset
    model.reset = True
    assert_array_equal(model.nasion, [[0, 1, 0]])


run_tests_if_main()
"""Connect to a MySensors gateway via pymysensors API."""
import logging

import voluptuous as vol

from homeassistant.components.mqtt import valid_publish_topic, valid_subscribe_topic
from homeassistant.const import CONF_OPTIMISTIC
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv

from .const import (
    ATTR_DEVICES,
    CONF_BAUD_RATE,
    CONF_DEVICE,
    CONF_GATEWAYS,
    CONF_NODES,
    CONF_PERSISTENCE,
    CONF_PERSISTENCE_FILE,
    CONF_RETAIN,
    CONF_TCP_PORT,
    CONF_TOPIC_IN_PREFIX,
    CONF_TOPIC_OUT_PREFIX,
    CONF_VERSION,
    DOMAIN,
    MYSENSORS_GATEWAYS,
)
from .device import get_mysensors_devices
from .gateway import finish_setup, get_mysensors_gateway, setup_gateways

_LOGGER = logging.getLogger(__name__)

CONF_DEBUG = "debug"
CONF_NODE_NAME = "name"

DEFAULT_BAUD_RATE = 115200
DEFAULT_TCP_PORT = 5003
DEFAULT_VERSION = "1.4"


def has_all_unique_files(value):
    """Validate that all persistence files are unique and set if any is set."""
    persistence_files = [gateway.get(CONF_PERSISTENCE_FILE) for gateway in value]
    # Either every gateway names a persistence file, or none does.
    if None in persistence_files and any(
        name is not None for name in persistence_files
    ):
        raise vol.Invalid(
            "persistence file name of all devices must be set if any is set"
        )
    if not all(name is None for name in persistence_files):
        schema = vol.Schema(vol.Unique())
        schema(persistence_files)
    return value


def is_persistence_file(value):
    """Validate that persistence file path ends in either .pickle or .json."""
    if value.endswith((".json", ".pickle")):
        return value
    raise vol.Invalid(f"{value} does not end in either `.json` or `.pickle`")


def deprecated(key):
    """Mark key as deprecated in configuration."""

    def validator(config):
        """Check if key is in config, log warning and remove key."""
        if key not in config:
            return config
        _LOGGER.warning(
            "%s option for %s is deprecated. Please remove %s from your "
            "configuration file",
            key,
            DOMAIN,
            key,
        )
        config.pop(key)
        return config

    return validator


NODE_SCHEMA = vol.Schema({cv.positive_int: {vol.Required(CONF_NODE_NAME): cv.string}})

GATEWAY_SCHEMA = {
    vol.Required(CONF_DEVICE): cv.string,
    vol.Optional(CONF_PERSISTENCE_FILE): vol.All(cv.string, is_persistence_file),
    vol.Optional(CONF_BAUD_RATE, default=DEFAULT_BAUD_RATE): cv.positive_int,
    vol.Optional(CONF_TCP_PORT, default=DEFAULT_TCP_PORT): cv.port,
    vol.Optional(CONF_TOPIC_IN_PREFIX): valid_subscribe_topic,
    vol.Optional(CONF_TOPIC_OUT_PREFIX): valid_publish_topic,
    vol.Optional(CONF_NODES, default={}): NODE_SCHEMA,
}

CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            vol.All(
                deprecated(CONF_DEBUG),
                {
                    vol.Required(CONF_GATEWAYS): vol.All(
                        cv.ensure_list, has_all_unique_files, [GATEWAY_SCHEMA]
                    ),
                    vol.Optional(CONF_OPTIMISTIC, default=False): cv.boolean,
                    vol.Optional(CONF_PERSISTENCE, default=True): cv.boolean,
                    vol.Optional(CONF_RETAIN, default=True): cv.boolean,
                    vol.Optional(CONF_VERSION, default=DEFAULT_VERSION): cv.string,
                },
            )
        )
    },
    extra=vol.ALLOW_EXTRA,
)


async def async_setup(hass, config):
    """Set up the MySensors component."""
    gateways = await setup_gateways(hass, config)

    if not gateways:
        _LOGGER.error("No devices could be setup as gateways, check your configuration")
        return False

    hass.data[MYSENSORS_GATEWAYS] = gateways

    hass.async_create_task(finish_setup(hass, config, gateways))

    return True


def _get_mysensors_name(gateway, node_id, child_id):
    """Return a name for a node child."""
    # Default to "<sketch name> <node>"; a configured node name overrides it.
    node_name = f"{gateway.sensors[node_id].sketch_name} {node_id}"
    node_name = next(
        (
            node[CONF_NODE_NAME]
            for conf_id, node in gateway.nodes_config.items()
            if node.get(CONF_NODE_NAME) is not None and conf_id == node_id
        ),
        node_name,
    )
    return f"{node_name} {child_id}"


@callback
def setup_mysensors_platform(
    hass,
    domain,
    discovery_info,
    device_class,
    device_args=None,
    async_add_entities=None,
):
    """Set up a MySensors platform."""
    # Only act if called via MySensors by discovery event.
    # Otherwise gateway is not set up.
    if not discovery_info:
        return None
    if device_args is None:
        device_args = ()
    new_devices = []
    new_dev_ids = discovery_info[ATTR_DEVICES]
    for dev_id in new_dev_ids:
        devices = get_mysensors_devices(hass, domain)
        if dev_id in devices:
            # Device already exists; skip it.
            continue
        gateway_id, node_id, child_id, value_type = dev_id
        gateway = get_mysensors_gateway(hass, gateway_id)
        if not gateway:
            continue
        device_class_copy = device_class
        if isinstance(device_class, dict):
            # device_class may map sensor presentation types to entity
            # classes; pick the class for this child's type.
            child = gateway.sensors[node_id].children[child_id]
            s_type = gateway.const.Presentation(child.type).name
            device_class_copy = device_class[s_type]
        name = _get_mysensors_name(gateway, node_id, child_id)

        args_copy = (*device_args, gateway, node_id, child_id, name, value_type)
        devices[dev_id] = device_class_copy(*args_copy)
        new_devices.append(devices[dev_id])
    if new_devices:
        _LOGGER.info("Adding new devices: %s", new_devices)
        if async_add_entities is not None:
            async_add_entities(new_devices, True)
    return new_devices
from collections import Counter

from scattertext.features.FeatsFromSpacyDoc import FeatsFromSpacyDoc


class PyatePhrases(FeatsFromSpacyDoc):
    """Feature extractor that mines key terms/phrases from documents using pyate.

    Scores produced by the pyate extractor become term counts in the
    returned Counter, so they plug into scattertext's feature pipeline.
    """

    def __init__(self, extractor=None, **args):
        """Create the extractor.

        :param extractor: a pyate extraction function taking a string and
            returning a pandas Series with a ``to_dict`` method; defaults
            to ``pyate.combo_basic``.
        :param args: forwarded to ``FeatsFromSpacyDoc.__init__``.
        """
        import pyate  # deferred import: pyate is an optional dependency
        if extractor is None:
            extractor = pyate.combo_basic
        self._extractor = extractor
        FeatsFromSpacyDoc.__init__(self, **args)

    def get_feats(self, doc):
        """Return a Counter mapping each extracted phrase to its pyate score."""
        phrase_scores = self._extractor(str(doc)).to_dict()
        return Counter(phrase_scores)
from collections import deque
from functools import partial
from io import BytesIO
from time import time

from kombu.asynchronous.hub import READ, WRITE, get_event_loop
from kombu.exceptions import HttpError
from kombu.utils.encoding import bytes_to_str

from .base import BaseClient

try:
    import pycurl  # noqa
except ImportError:  # pragma: no cover
    # Keep module importable without pycurl; CurlClient() raises instead.
    pycurl = Curl = METH_TO_CURL = None  # noqa
else:
    from pycurl import Curl  # noqa

    # HTTP verb -> dedicated libcurl option; other verbs use CUSTOMREQUEST.
    METH_TO_CURL = {  # noqa
        'GET': pycurl.HTTPGET,
        'POST': pycurl.POST,
        'PUT': pycurl.UPLOAD,
        'HEAD': pycurl.NOBODY,
    }

__all__ = ('CurlClient',)

DEFAULT_USER_AGENT = 'Mozilla/5.0 (compatible; pycurl)'
EXTRA_METHODS = frozenset(['DELETE', 'OPTIONS', 'PATCH'])


class CurlClient(BaseClient):
    """Curl HTTP Client.

    Drives a pycurl ``CurlMulti`` handle from the kombu async hub: up to
    ``max_clients`` easy handles are recycled through a free list, and
    pending requests queue until a handle is available.
    """

    Curl = Curl

    def __init__(self, hub=None, max_clients=10):
        if pycurl is None:
            raise ImportError('The curl client requires the pycurl library.')
        hub = hub or get_event_loop()
        super().__init__(hub)
        self.max_clients = max_clients

        self._multi = pycurl.CurlMulti()
        self._multi.setopt(pycurl.M_TIMERFUNCTION, self._set_timeout)
        self._multi.setopt(pycurl.M_SOCKETFUNCTION, self._handle_socket)
        # Pool of reusable easy handles; _free_list tracks the idle ones.
        self._curls = [self.Curl() for i in range(max_clients)]
        self._free_list = self._curls[:]
        self._pending = deque()
        # fd -> READ/WRITE event mask, mirrored into the hub by _push_to_hub.
        self._fds = {}

        self._socket_action = self._multi.socket_action
        # Periodic fallback poll, since _set_timeout is currently a no-op.
        self._timeout_check_tref = self.hub.call_repeatedly(
            1.0, self._timeout_check,
        )

        # pycurl 7.29.0 workaround
        dummy_curl_handle = pycurl.Curl()
        self._multi.add_handle(dummy_curl_handle)
        self._multi.remove_handle(dummy_curl_handle)

    def close(self):
        """Cancel the poll timer and release all curl handles."""
        self._timeout_check_tref.cancel()
        for _curl in self._curls:
            _curl.close()
        self._multi.close()

    def add_request(self, request):
        """Queue *request* and kick the transfer machinery; returns request."""
        self._pending.append(request)
        self._process_queue()
        self._set_timeout(0)
        return request

    # the next two methods are used for linux/epoll workaround:
    # we temporarily remove all curl fds from hub, so curl cannot
    # close a fd which is still inside epoll

    def _pop_from_hub(self):
        # Detach every tracked fd from the hub before letting curl run.
        for fd in self._fds:
            self.hub.remove(fd)

    def _push_to_hub(self):
        # Re-register the surviving fds with their current event interests.
        for fd, events in self._fds.items():
            if events & READ:
                self.hub.add_reader(fd, self.on_readable, fd)
            if events & WRITE:
                self.hub.add_writer(fd, self.on_writable, fd)

    def _handle_socket(self, event, fd, multi, data, _pycurl=pycurl):
        # CurlMulti socket callback: keep self._fds in sync with the events
        # curl wants to be notified about for each socket.
        if event == _pycurl.POLL_REMOVE:
            if fd in self._fds:
                self._fds.pop(fd, None)
        else:
            if event == _pycurl.POLL_IN:
                self._fds[fd] = READ
            elif event == _pycurl.POLL_OUT:
                self._fds[fd] = WRITE
            elif event == _pycurl.POLL_INOUT:
                self._fds[fd] = READ | WRITE

    def _set_timeout(self, msecs):
        # CurlMulti timer callback; timeouts are handled by the 1s
        # _timeout_check poll instead.
        pass  # TODO

    def _timeout_check(self, _pycurl=pycurl):
        """Periodic fallback: drive all transfers regardless of fd events."""
        self._pop_from_hub()
        try:
            while 1:
                try:
                    ret, _ = self._multi.socket_all()
                except pycurl.error as exc:
                    ret = exc.args[0]
                # curl asks to be called again until no more work is ready.
                if ret != _pycurl.E_CALL_MULTI_PERFORM:
                    break
        finally:
            self._push_to_hub()
        self._process_pending_requests()

    def on_readable(self, fd, _pycurl=pycurl):
        return self._on_event(fd, _pycurl.CSELECT_IN)

    def on_writable(self, fd, _pycurl=pycurl):
        return self._on_event(fd, _pycurl.CSELECT_OUT)

    def _on_event(self, fd, event, _pycurl=pycurl):
        """Notify curl that *fd* is ready for *event*, then reap results."""
        self._pop_from_hub()
        try:
            while 1:
                try:
                    ret, _ = self._socket_action(fd, event)
                except pycurl.error as exc:
                    ret = exc.args[0]
                if ret != _pycurl.E_CALL_MULTI_PERFORM:
                    break
        finally:
            self._push_to_hub()
        self._process_pending_requests()

    def _process_pending_requests(self):
        """Drain finished transfers from CurlMulti and dispatch responses."""
        while 1:
            q, succeeded, failed = self._multi.info_read()
            for curl in succeeded:
                self._process(curl)
            for curl, errno, reason in failed:
                self._process(curl, errno, reason)
            if q == 0:
                break
        self._process_queue()

    def _process_queue(self):
        """Start as many pending requests as there are free easy handles."""
        while 1:
            started = 0
            while self._free_list and self._pending:
                started += 1
                curl = self._free_list.pop()
                request = self._pending.popleft()
                headers = self.Headers()
                buf = BytesIO()
                # Per-transfer state rides on the easy handle itself.
                curl.info = {
                    'headers': headers,
                    'buffer': buf,
                    'request': request,
                    'curl_start_time': time(),
                }
                self._setup_request(curl, request, buf, headers)
                self._multi.add_handle(curl)
            if not started:
                break

    def _process(self, curl, errno=None, reason=None, _pycurl=pycurl):
        """Finalize one transfer: recycle the handle, build the Response."""
        info, curl.info = curl.info, None
        self._multi.remove_handle(curl)
        self._free_list.append(curl)
        buffer = info['buffer']
        if errno:
            # 599 mirrors tornado's convention for client-side failures.
            code = 599
            error = HttpError(code, reason)
            error.errno = errno
            effective_url = None
            buffer.close()
            buffer = None
        else:
            error = None
            code = curl.getinfo(_pycurl.HTTP_CODE)
            effective_url = curl.getinfo(_pycurl.EFFECTIVE_URL)
            buffer.seek(0)
        # try:
        request = info['request']
        request.on_ready(self.Response(
            request=request, code=code, headers=info['headers'],
            buffer=buffer, effective_url=effective_url, error=error,
        ))

    def _setup_request(self, curl, request, buffer, headers, _pycurl=pycurl):
        """Translate a Request object into libcurl options on *curl*."""
        setopt = curl.setopt
        setopt(_pycurl.URL, bytes_to_str(request.url))

        # see tornado curl client
        request.headers.setdefault('Expect', '')
        request.headers.setdefault('Pragma', '')

        setopt(
            _pycurl.HTTPHEADER,
            ['{}: {}'.format(*h) for h in request.headers.items()],
        )

        setopt(
            _pycurl.HEADERFUNCTION,
            partial(request.on_header or self.on_header, request.headers),
        )
        setopt(
            _pycurl.WRITEFUNCTION, request.on_stream or buffer.write,
        )
        setopt(
            _pycurl.FOLLOWLOCATION, request.follow_redirects,
        )
        setopt(
            _pycurl.USERAGENT,
            bytes_to_str(request.user_agent or DEFAULT_USER_AGENT),
        )
        if request.network_interface:
            setopt(_pycurl.INTERFACE, request.network_interface)
        setopt(
            _pycurl.ENCODING, 'gzip,deflate' if request.use_gzip else 'none',
        )

        if request.proxy_host:
            if not request.proxy_port:
                raise ValueError('Request with proxy_host but no proxy_port')
            setopt(_pycurl.PROXY, request.proxy_host)
            setopt(_pycurl.PROXYPORT, request.proxy_port)
            if request.proxy_username:
                setopt(_pycurl.PROXYUSERPWD, '{}:{}'.format(
                    request.proxy_username, request.proxy_password or ''))
        else:
            # Explicitly clear proxy state left over from a previous use
            # of this recycled handle.
            setopt(_pycurl.PROXY, '')
            curl.unsetopt(_pycurl.PROXYUSERPWD)

        setopt(_pycurl.SSL_VERIFYPEER, 1 if request.validate_cert else 0)
        setopt(_pycurl.SSL_VERIFYHOST, 2 if request.validate_cert else 0)
        if request.ca_certs is not None:
            setopt(_pycurl.CAINFO, request.ca_certs)

        setopt(_pycurl.IPRESOLVE, pycurl.IPRESOLVE_WHATEVER)

        # Reset every verb option before enabling the right one, again
        # because handles are recycled between requests.
        for meth in METH_TO_CURL.values():
            setopt(meth, False)
        try:
            meth = METH_TO_CURL[request.method]
        except KeyError:
            curl.setopt(_pycurl.CUSTOMREQUEST, request.method)
        else:
            curl.unsetopt(_pycurl.CUSTOMREQUEST)
            setopt(meth, True)

        if request.method in ('POST', 'PUT'):
            body = request.body.encode('utf-8') if request.body else bytes()
            reqbuffer = BytesIO(body)
            setopt(_pycurl.READFUNCTION, reqbuffer.read)
            if request.method == 'POST':

                def ioctl(cmd):
                    # Rewind the body so curl can retry (e.g. on redirects).
                    if cmd == _pycurl.IOCMD_RESTARTREAD:
                        reqbuffer.seek(0)
                setopt(_pycurl.IOCTLFUNCTION, ioctl)
                setopt(_pycurl.POSTFIELDSIZE, len(body))
            else:
                setopt(_pycurl.INFILESIZE, len(body))
        elif request.method == 'GET':
            assert not request.body

        if request.auth_username is not None:
            auth_mode = {
                'basic': _pycurl.HTTPAUTH_BASIC,
                'digest': _pycurl.HTTPAUTH_DIGEST
            }[request.auth_mode or 'basic']
            setopt(_pycurl.HTTPAUTH, auth_mode)
            userpwd = '{}:{}'.format(
                request.auth_username, request.auth_password or '',
            )
            setopt(_pycurl.USERPWD, userpwd)
        else:
            curl.unsetopt(_pycurl.USERPWD)

        if request.client_cert is not None:
            setopt(_pycurl.SSLCERT, request.client_cert)
        if request.client_key is not None:
            setopt(_pycurl.SSLKEY, request.client_key)

        if request.on_prepare is not None:
            request.on_prepare(curl)
from unittest import TestCase

import pandas as pd

from scattertext.CorpusFromParsedDocuments import CorpusFromParsedDocuments
from scattertext.WhitespaceNLP import whitespace_nlp
from scattertext.representations.Word2VecFromParsedCorpus import GensimPhraseAdder
from scattertext.test.test_corpusFromPandas import get_docs_categories


class TestGensimPhraseAdder(TestCase):
    """Fixture and (currently stubbed) test for GensimPhraseAdder."""

    # NOTE(review): setUp is declared as a @classmethod, so the fixture is
    # stored on the class rather than on each instance.  unittest still
    # invokes it before every test method; confirm setUpClass was not the
    # intent (that would build the corpus only once).
    @classmethod
    def setUp(cls):
        # Build a small parsed corpus from the shared test documents.
        cls.categories, cls.documents = get_docs_categories()
        cls.parsed_docs = []
        for doc in cls.documents:
            cls.parsed_docs.append(whitespace_nlp(doc))
        cls.df = pd.DataFrame({'category': cls.categories,
                               'author': ['a', 'a', 'c', 'c', 'c', 'c', 'd', 'd', 'e', 'e'],
                               'parsed': cls.parsed_docs,
                               'document_lengths': [len(doc) for doc in cls.documents]})
        cls.corpus = CorpusFromParsedDocuments(cls.df, 'category', 'parsed').build()

    def test_add_phrase(self):
        # Placeholder: only checks construction; the real assertion on
        # add_phrases remains to be written (see commented lines below).
        adder = GensimPhraseAdder()
        # to do
        # res = adder.add_phrases(self.corpus)
        # self.fail()
import logging

# Downscale ratios libturbojpeg supports natively, ordered largest-first so
# the scan below picks the closest factor that still satisfies the request.
SUPPORTED_SCALING_FACTORS = [(7, 8), (3, 4), (5, 8), (1, 2), (3, 8), (1, 4), (1, 8)]

_LOGGER = logging.getLogger(__name__)


def scale_jpeg_camera_image(cam_image, width, height):
    """Scale a camera image as close as possible to one of the supported scaling factors."""
    decoder = TurboJPEGSingleton.instance()
    if not decoder:
        # turbojpeg unavailable: hand back the original bytes untouched.
        return cam_image.content

    source_width, source_height, _, _ = decoder.decode_header(cam_image.content)

    if source_width <= width or source_height <= height:
        # Already at or below the requested size; never upscale.
        return cam_image.content

    target_ratio = width / source_width
    chosen_factor = next(
        (
            factor
            for factor in SUPPORTED_SCALING_FACTORS
            if target_ratio >= factor[0] / factor[1]
        ),
        SUPPORTED_SCALING_FACTORS[-1],
    )

    return decoder.scale_with_quality(
        cam_image.content,
        scaling_factor=chosen_factor,
        quality=75,
    )


class TurboJPEGSingleton:
    """Load TurboJPEG only once.

    Ensures we do not log load failures each snapshot
    since camera image fetches happen every few seconds.
    """

    # None = not attempted yet, False = load failed, else the TurboJPEG object.
    __instance = None

    @staticmethod
    def instance():
        """Return the shared TurboJPEG instance (or False if unavailable)."""
        if TurboJPEGSingleton.__instance is None:
            TurboJPEGSingleton()
        return TurboJPEGSingleton.__instance

    def __init__(self):
        """Attempt the one-time TurboJPEG construction."""
        try:
            # TurboJPEG checks for libturbojpeg
            # when its created, but it imports
            # numpy which may or may not work so
            # we have to guard the import here.
            from turbojpeg import TurboJPEG  # pylint: disable=import-outside-toplevel

            TurboJPEGSingleton.__instance = TurboJPEG()
        except Exception:  # pylint: disable=broad-except
            _LOGGER.exception(
                "libturbojpeg is not installed, cameras may impact HomeKit performance"
            )
            TurboJPEGSingleton.__instance = False
from gitless import core

from . import helpers, pprint


def parser(subparsers, _):
  """Adds the tag parser to the given subparsers object."""
  desc = 'list, create, or delete tags'
  tag_parser = subparsers.add_parser(
      'tag', help=desc, description=desc.capitalize(), aliases=['tg'])

  list_group = tag_parser.add_argument_group('list tags')
  list_group.add_argument(
      '-r', '--remote',
      help='list remote tags in addition to local tags',
      action='store_true')

  create_group = tag_parser.add_argument_group('create tags')
  create_group.add_argument(
      '-c', '--create', nargs='+', help='create tag(s)', dest='create_t',
      metavar='tag')
  create_group.add_argument(
      '-ci', '--commit',
      help='the commit to tag (only relevant if a new '
      'tag is created; defaults to the HEAD commit)', dest='ci')

  delete_group = tag_parser.add_argument_group('delete tags')
  delete_group.add_argument(
      '-d', '--delete', nargs='+', help='delete tag(s)', dest='delete_t',
      metavar='tag')

  tag_parser.set_defaults(func=main)


def main(args, repo):
  """Entry point for `gl tag`; dispatches to list/create/delete."""
  is_list = bool(args.remote)
  is_create = bool(args.create_t or args.ci)
  is_delete = bool(args.delete_t)

  # The three modes are mutually exclusive.
  if is_list + is_create + is_delete > 1:
    pprint.err('Invalid flag combination')
    pprint.err_exp('Can only do one of list, create, or delete tags at a time')
    return False

  ret = True
  if args.create_t:
    ret = _do_create(args.create_t, args.ci or 'HEAD', repo)
  elif args.delete_t:
    ret = _do_delete(args.delete_t, repo)
  else:
    # Default action (no flags): list local tags, plus remote ones with -r.
    _do_list(repo, args.remote)

  return ret


def _do_list(repo, list_remote):
  """Print all local tags (and remote tags when list_remote is True)."""
  pprint.msg('List of tags:')
  pprint.exp('do gl tag -c t to create tag t')
  pprint.exp('do gl tag -d t to delete tag t')
  pprint.blank()

  no_tags = True
  for t in (repo.lookup_tag(n) for n in sorted(repo.listall_tags())):
    pprint.item('{0} ➜ tags {1}'.format(t, pprint.commit_str(t.commit)))
    no_tags = False
  if list_remote:
    for r in sorted(repo.remotes, key=lambda r: r.name):
      for t in (r.lookup_tag(n) for n in sorted(r.listall_tags())):
        pprint.item('{0} ➜ tags {1}'.format(t, pprint.commit_str(t.commit)))
        no_tags = False
  if no_tags:
    pprint.item('There are no tags to list')


def _do_create(create_t, dp, repo):
  """Create each tag in create_t pointing at commit-ish dp.

  A name containing '/' whose prefix matches a configured remote is
  created on that remote (after user confirmation).  Returns True iff
  every creation succeeded.
  """
  errors_found = False

  try:
    target = repo.revparse_single(dp)
  except KeyError:
    raise ValueError('Invalid commit {0}'.format(dp))

  for t_name in create_t:
    r = repo
    remote_str = ''
    if '/' in t_name:  # might want to create a remote tag
      maybe_remote, maybe_remote_tag = t_name.split('/', 1)
      if maybe_remote in repo.remotes:
        r = repo.remotes[maybe_remote]
        t_name = maybe_remote_tag
        conf_msg = 'Tag {0} will be created in remote repository {1}'.format(
            t_name, maybe_remote)
        if not pprint.conf_dialog(conf_msg):
          pprint.msg(
              'Aborted: creation of tag {0} in remote repository {1}'.format(
                  t_name, maybe_remote))
          continue
        remote_str = ' in remote repository {0}'.format(maybe_remote)
    try:
      r.create_tag(t_name, target)
      pprint.ok('Created new tag {0}{1}'.format(t_name, remote_str))
    except ValueError as e:
      pprint.err(e)
      errors_found = True
  return not errors_found


def _do_delete(delete_t, repo):
  """Delete each tag in delete_t after confirmation.

  Returns True iff every deletion succeeded (aborted deletions do not
  count as errors).
  """
  errors_found = False
  for t_name in delete_t:
    try:
      t = helpers.get_tag(t_name, repo)
      tag_str = 'Tag {0} will be removed'.format(t.tag_name)
      remote_str = ''
      if isinstance(t, core.RemoteTag):
        remote_str = 'from remote repository {0}'.format(t.remote_name)
      if not pprint.conf_dialog('{0} {1}'.format(tag_str, remote_str)):
        pprint.msg('Aborted: removal of tag {0}'.format(t))
        continue
      t.delete()
      pprint.ok('Tag {0} removed successfully'.format(t))
    except ValueError as e:
      pprint.err(e)
      errors_found = True
  return not errors_found
"""Deprecated re-exports of check helpers; use redbot.core.utils.mod instead."""
import warnings
from typing import Awaitable, TYPE_CHECKING, Dict

import discord

from .commands import (
    bot_has_permissions,
    bot_in_a_guild,
    has_permissions,
    is_owner,
    guildowner,
    guildowner_or_permissions,
    admin,
    admin_or_permissions,
    mod,
    mod_or_permissions,
)
from .utils.mod import (
    is_mod_or_superior as _is_mod_or_superior,
    is_admin_or_superior as _is_admin_or_superior,
    check_permissions as _check_permissions,
)

if TYPE_CHECKING:
    from .bot import Red
    from .commands import Context

__all__ = [
    "bot_has_permissions",
    "bot_in_a_guild",
    "has_permissions",
    "is_owner",
    "guildowner",
    "guildowner_or_permissions",
    "admin",
    "admin_or_permissions",
    "mod",
    "mod_or_permissions",
    "is_mod_or_superior",
    "is_admin_or_superior",
    "check_permissions",
]


def is_mod_or_superior(ctx: "Context") -> Awaitable[bool]:
    """Deprecated shim; returns the coroutine from utils.mod.is_mod_or_superior.

    The coroutine is returned unawaited so the caller awaits it, exactly as
    before; stacklevel=2 attributes the warning to that caller.
    """
    warnings.warn(
        "`redbot.core.checks.is_mod_or_superior` is deprecated and will be removed in a future "
        "release, please use `redbot.core.utils.mod.is_mod_or_superior` instead.",
        category=DeprecationWarning,
        stacklevel=2,
    )
    return _is_mod_or_superior(ctx.bot, ctx.author)


def is_admin_or_superior(ctx: "Context") -> Awaitable[bool]:
    """Deprecated shim; returns the coroutine from utils.mod.is_admin_or_superior."""
    warnings.warn(
        "`redbot.core.checks.is_admin_or_superior` is deprecated and will be removed in a future "
        "release, please use `redbot.core.utils.mod.is_admin_or_superior` instead.",
        category=DeprecationWarning,
        stacklevel=2,
    )
    return _is_admin_or_superior(ctx.bot, ctx.author)


def check_permissions(ctx: "Context", perms: Dict[str, bool]) -> Awaitable[bool]:
    """Deprecated shim; returns the coroutine from utils.mod.check_permissions."""
    warnings.warn(
        "`redbot.core.checks.check_permissions` is deprecated and will be removed in a future "
        "release, please use `redbot.core.utils.mod.check_permissions`.",
        DeprecationWarning,
        stacklevel=2,
    )
    return _check_permissions(ctx, perms)
"""Tests for paasta_tools.autoscaling.load_boost using a mocked ZooKeeper."""
from contextlib import contextmanager
from datetime import datetime

import mock
from freezegun import freeze_time

from paasta_tools.autoscaling import load_boost

# Frozen "now" shared by all time-sensitive tests below.
TEST_CURRENT_TIME = datetime(2020, 2, 14)


@contextmanager
def patch_zk_client(mock_values=None):
    """Yield a mocked ZK client whose get() serves from mock_values.

    Keys absent from mock_values raise NoNodeError, mimicking a missing
    znode.  System paasta config loading is also patched out.
    """
    with mock.patch(
        "paasta_tools.utils.KazooClient", autospec=True
    ) as mock_client, mock.patch(
        "paasta_tools.utils.load_system_paasta_config", autospec=True
    ):

        def mock_get(key):
            if not mock_values or key not in mock_values:
                raise load_boost.NoNodeError
            # Kazoo returns (value, ZnodeStat); the stat is unused here.
            return (mock_values[key], None)

        mock_client.return_value = mock.Mock(get=mock_get)
        yield mock_client()


def test_get_zk_cluster_boost_path():
    fake_region = "westeros-1"
    fake_pool = "default"
    expected_result = "/paasta_cluster_autoscaler/westeros-1/default/boost"
    assert (
        load_boost.get_zk_cluster_boost_path(fake_region, fake_pool)
        == expected_result
    )


def test_get_boost_values():
    fake_region = "westeros-1"
    fake_pool = "default"
    base_path = load_boost.get_zk_cluster_boost_path(fake_region, fake_pool)

    fake_end_time = 12345.0
    fake_boost_factor = 1.5
    fake_expected_load = 80

    # ZK stores values as UTF-8 encoded strings of the numbers.
    with patch_zk_client(
        {
            base_path + "/end_time": str(fake_end_time).encode("utf-8"),
            base_path + "/factor": str(fake_boost_factor).encode("utf-8"),
            base_path + "/expected_load": str(fake_expected_load).encode("utf-8"),
        }
    ) as mock_zk_client:
        assert load_boost.get_boost_values(
            zk_boost_path=f"/paasta_cluster_autoscaler/{fake_region}/{fake_pool}/boost",
            zk=mock_zk_client,
        ) == load_boost.BoostValues(
            end_time=fake_end_time,
            boost_factor=fake_boost_factor,
            expected_load=fake_expected_load,
        )


def test_get_boost_values_when_no_values_exist():
    fake_region = "westeros-1"
    fake_pool = "default"
    # No znodes at all -> neutral defaults (no boost).
    with patch_zk_client() as mock_zk_client:
        assert load_boost.get_boost_values(
            zk_boost_path=f"/paasta_cluster_autoscaler/{fake_region}/{fake_pool}/boost",
            zk=mock_zk_client,
        ) == load_boost.BoostValues(end_time=0, boost_factor=1.0, expected_load=0)


@freeze_time(TEST_CURRENT_TIME)
def test_set_boost_factor_with_defaults():
    fake_region = "westeros-1"
    fake_pool = "default"
    base_path = load_boost.get_zk_cluster_boost_path(fake_region, fake_pool)

    with patch_zk_client() as mock_zk_client:
        load_boost.set_boost_factor(base_path)

    # Default duration is expressed in minutes, end_time in epoch seconds.
    expected_end_time = (
        float(TEST_CURRENT_TIME.timestamp()) + 60 * load_boost.DEFAULT_BOOST_DURATION
    )

    assert mock_zk_client.set.call_args_list == [
        mock.call(base_path + "/end_time", str(expected_end_time).encode("utf-8")),
        mock.call(
            base_path + "/factor", str(load_boost.DEFAULT_BOOST_FACTOR).encode("utf-8")
        ),
        mock.call(base_path + "/expected_load", "0".encode("utf-8")),
    ]


def test_set_boost_factor():
    # TODO: exercise set_boost_factor with explicit (non-default) arguments.
    pass


@freeze_time(TEST_CURRENT_TIME)
def test_set_boost_factor_with_active_boost():
    fake_region = "westeros-1"
    fake_pool = "default"
    base_path = load_boost.get_zk_cluster_boost_path(fake_region, fake_pool)

    fake_end_time = float(TEST_CURRENT_TIME.timestamp()) + 10
    fake_boost_factor = 1.5
    fake_expected_load = 80

    # patch zk client so that it returns an end time that
    # indicates an active boost
    with patch_zk_client(
        {
            base_path + "/end_time": str(fake_end_time).encode("utf-8"),
            base_path + "/factor": str(fake_boost_factor).encode("utf-8"),
            base_path + "/expected_load": str(fake_expected_load).encode("utf-8"),
        }
    ):
        # by default, set boost should not go through if there's an active boost
        assert not load_boost.set_boost_factor(zk_boost_path=base_path)


@freeze_time(TEST_CURRENT_TIME)
def test_set_boost_factor_with_active_boost_override():
    fake_region = "westeros-1"
    fake_pool = "default"
    base_path = load_boost.get_zk_cluster_boost_path(fake_region, fake_pool)

    fake_end_time = float(TEST_CURRENT_TIME.timestamp()) + 10
    fake_boost_factor = 1.5
    fake_expected_load = 80

    mock_boost_values = {
        base_path + "/end_time": str(fake_end_time).encode("utf-8"),
        base_path + "/factor": str(fake_boost_factor).encode("utf-8"),
        base_path + "/expected_load": str(fake_expected_load).encode("utf-8"),
    }

    # patch zk client so that it returns an end time that
    # indicates an active boost
    with patch_zk_client(mock_boost_values) as mock_zk_client:

        # we need the zk.set to actually override the initial mocked values
        def mock_set(key, value):
            mock_boost_values[key] = value

        mock_zk_client.set = mock_set

        # set boost will go through with an active boost if override is toggled on
        assert load_boost.set_boost_factor(
            zk_boost_path=f"/paasta_cluster_autoscaler/{fake_region}/{fake_pool}/boost",
            override=True,
        )


@freeze_time(TEST_CURRENT_TIME)
def test_clear_boost():
    fake_region = "westeros-1"
    fake_pool = "default"
    base_path = load_boost.get_zk_cluster_boost_path(fake_region, fake_pool)

    with patch_zk_client() as mock_zk_client:
        load_boost.clear_boost(base_path, region=fake_region, pool=fake_pool)

    # Clearing writes end_time=now and neutral factor/load values.
    expected_end_time = float(TEST_CURRENT_TIME.timestamp())

    assert mock_zk_client.set.call_args_list == [
        mock.call(base_path + "/end_time", str(expected_end_time).encode("utf-8")),
        mock.call(base_path + "/factor", "1".encode("utf-8")),
        mock.call(base_path + "/expected_load", "0".encode("utf-8")),
    ]


@mock.patch("paasta_tools.autoscaling.load_boost.clusterman_metrics", autospec=True)
@mock.patch(
    "paasta_tools.autoscaling.load_boost.load_system_paasta_config", autospec=True
)
@freeze_time(TEST_CURRENT_TIME)
def test_send_clusterman_metrics(
    mock_load_system_paasta_config, mock_clusterman_metrics
):
    fake_region = "westeros-1"
    fake_pool = "default"
    base_path = load_boost.get_zk_cluster_boost_path(fake_region, fake_pool)

    mock_load_system_paasta_config.return_value.get_cluster.return_value = (
        "westeros-prod"
    )
    mock_clusterman_metrics.generate_key_with_dimensions = (
        lambda key, dims: f"{key}|{dims}"
    )
    mock_writer = (
        mock_clusterman_metrics.ClustermanMetricsBotoClient().get_writer().__enter__()
    )

    with patch_zk_client():
        load_boost.set_boost_factor(
            zk_boost_path=base_path,
            region=fake_region,
            pool=fake_pool,
            factor=1.3,
            duration_minutes=10,
        )

    expected_metrics_dimensions = {"cluster": "westeros-prod", "pool": "default"}
    expected_metrics_key = f"boost_factor|{expected_metrics_dimensions}"

    # One datapoint at boost start (factor) and one at boost end (back to 1.0).
    assert mock_writer.send.call_args_list == [
        mock.call((expected_metrics_key, TEST_CURRENT_TIME.timestamp(), 1.3)),
        mock.call((expected_metrics_key, TEST_CURRENT_TIME.timestamp() + 10 * 60, 1.0)),
    ]
"""Helper test target used to observe absltest's randomized test ordering."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import sys

from absl.testing import absltest

# This stanza exercises setting $TEST_RANDOMIZE_ORDERING_SEED *after* importing
# the absltest library.
if os.environ.get('LATE_SET_TEST_RANDOMIZE_ORDERING_SEED', ''):
  os.environ['TEST_RANDOMIZE_ORDERING_SEED'] = os.environ[
      'LATE_SET_TEST_RANDOMIZE_ORDERING_SEED']


class ClassA(absltest.TestCase):
  """Three trivial tests; their stderr output reveals the execution order."""

  def test_a(self):
    sys.stderr.write('\nclass A test A\n')

  def test_b(self):
    sys.stderr.write('\nclass A test B\n')

  def test_c(self):
    sys.stderr.write('\nclass A test C\n')


if __name__ == '__main__':
  absltest.main()
"""Flexx layout stress test: deeply nested box layouts that broke on Chrome."""
from flexx import app, ui


class Red(ui.Widget):
    # Solid red filler widget so layout stretching is visible.
    CSS = '.flx-Red { background: #ff0000;}'


class Deep1(ui.Widget):
    # This was broken on Chrome earlier
    def init(self):
        with ui.VBox():
            ui.Label(text='Widget in a vbox in a widget in a vbox')
            with ui.VBox(flex=1):
                with ui.Widget(flex=1):
                    with ui.VBox():
                        ui.Label(text='---')
                        Red(flex=1)


class Deep2(ui.Widget):
    def init(self):
        with ui.VBox():
            ui.Label(text='Widget in a vbox in a vbox in a vbox')
            with ui.VBox(flex=1):
                with ui.VBox(flex=1):
                    ui.Label(text='---')
                    Red(flex=1)


class Deep3(ui.Widget):
    def init(self):
        with ui.VBox():
            ui.Label(text='Widget in a vbox in a hbox in a vbox')
            with ui.HBox(flex=1):
                ui.Label(text='|||')
                with ui.VBox(flex=1):
                    ui.Label(text='---')
                    Red(flex=1)


class Deep4(ui.Widget):
    def init(self):
        with ui.HBox():
            ui.Label(text='Widget in a hbox in a widget in a hbox')
            with ui.HBox(flex=1):
                with ui.Widget(flex=1):
                    with ui.HBox():
                        ui.Label(text='|||')
                        Red(flex=1)


class Deep5(ui.Widget):
    def init(self):
        with ui.HBox():
            ui.Label(text='Widget in a hbox in a hbox in a hbox')
            with ui.HBox(flex=1):
                with ui.HBox(flex=1):
                    ui.Label(text='|||')
                    Red(flex=1)


class Deep6(ui.Widget):
    def init(self):
        with ui.HBox():
            ui.Label(text='Widget in a hbox in a vbox in a hbox')
            with ui.VBox(flex=1):
                ui.Label(text='---')
                with ui.HBox(flex=1):
                    ui.Label(text='|||')
                    Red(flex=1)


class Deep(ui.Widget):
    # Composite: shows all six nesting variants side by side in a 2x3 grid.
    def init(self):
        with ui.HFix():
            with ui.HFix():
                Deep1()
                Deep2()
                Deep3()
            with ui.VFix():
                Deep4()
                Deep5()
                Deep6()


if __name__ == '__main__':
    app.launch(Deep, 'app')
    app.run()
from ...common.interfaces import AbstractPlugin


class Plugin(AbstractPlugin):
    """Rewrite the final process exit code based on a configured pass-list.

    Exit codes listed in the "pass" option are remapped to 0 (success);
    any other code is replaced with "fail_code".
    """

    SECTION = 'rcassert'

    def __init__(self, core, cfg, name):
        AbstractPlugin.__init__(self, core, cfg, name)
        self.ok_codes = []   # exit codes to treat as success
        self.fail_code = 10  # code to emit when retcode is not in the pass-list

    @staticmethod
    def get_key():
        return __file__

    def get_available_options(self):
        return ["pass", "fail_code"]

    def configure(self):
        """Parse the whitespace-separated "pass" list and the "fail_code" option.

        Raises ValueError if either option contains a non-integer token.
        """
        # str.split() with no separator tolerates repeated blanks and tabs,
        # where the previous split(' ') produced tokens like '0\t2' that
        # crashed int().  Empty input yields an empty list.
        codes = self.get_option("pass", '').split()
        for code in codes:
            if code:
                self.ok_codes.append(int(code))
        self.fail_code = int(self.get_option("fail_code", self.fail_code))

    def post_process(self, retcode):
        """Return the remapped exit code.

        With an empty pass-list the original retcode passes through
        untouched; otherwise matching codes become 0 and everything else
        becomes self.fail_code.
        """
        if not self.ok_codes:
            return retcode

        for code in self.ok_codes:
            self.log.debug("Comparing %s with %s codes", code, retcode)
            if code == int(retcode):
                self.log.info(
                    "Exit code %s was changed to 0 by RCAssert plugin", code)
                return 0

        self.log.info(
            "Changing exit code to %s because RCAssert pass list was unsatisfied",
            self.fail_code)
        return self.fail_code
import pandas as pd
import pytz

from qstrader.system.rebalance.rebalance import Rebalance


class EndOfMonthRebalance(Rebalance):
    """
    Generates a list of rebalance timestamps for pre- or post-market,
    for the final calendar day of the month between the starting and
    ending dates provided.

    All timestamps produced are set to UTC.

    Parameters
    ----------
    start_dt : `pd.Timestamp`
        The starting datetime of the rebalance range.
    end_dt : `pd.Timestamp`
        The ending datetime of the rebalance range.
    pre_market : `Boolean`, optional
        Whether to carry out the rebalance at market open/close on
        the final day of the month. Defaults to False, i.e at
        market close.
    """

    def __init__(self, start_dt, end_dt, pre_market=False):
        self.start_dt = start_dt
        self.end_dt = end_dt
        self.market_time = self._set_market_time(pre_market)
        self.rebalances = self._generate_rebalances()

    def _set_market_time(self, pre_market):
        """
        Pick the UTC wall-clock time of the rebalance: US market open
        for a pre-market rebalance, otherwise US market close.

        Parameters
        ----------
        pre_market : `Boolean`
            Whether the rebalance is carried out at market open/close.

        Returns
        -------
        `str`
            The time string used for Pandas timestamp construction.
        """
        if pre_market:
            return "14:30:00"
        return "21:00:00"

    def _generate_rebalances(self):
        """
        Build the UTC rebalance timestamps, one per business-month-end
        within the configured date range.

        Returns
        -------
        `List[pd.Timestamp]`
            The list of rebalance timestamps.
        """
        # NOTE(review): the 'BM' frequency alias is deprecated in newer
        # pandas in favour of 'BME' — confirm the pinned pandas version
        # before changing it.
        month_end_dates = pd.date_range(
            start=self.start_dt,
            end=self.end_dt,
            freq='BM'
        )
        return [
            pd.Timestamp("%s %s" % (day, self.market_time), tz=pytz.utc)
            for day in month_end_dates
        ]
"""PerfKitBenchmarker package definition for installing TensorFlow on a VM."""
import posixpath
from absl import flags
from perfkitbenchmarker.linux_packages import cuda_toolkit
from perfkitbenchmarker.linux_packages import nvidia_driver

FLAGS = flags.FLAGS
flags.DEFINE_string('tf_cpu_pip_package',
                    'https://anaconda.org/intel/tensorflow/1.12.0/download/'
                    'tensorflow-1.12.0-cp27-cp27mu-linux_x86_64.whl',
                    'TensorFlow CPU pip package to install. By default, PKB '
                    'will install an Intel-optimized CPU build when using '
                    'CPUs.')
flags.DEFINE_string('tf_gpu_pip_package', 'tensorflow-gpu==1.12.0',
                    'TensorFlow GPU pip package to install. By default, PKB '
                    'will install tensorflow-gpu==1.12 when using GPUs.')
flags.DEFINE_string(
    't2t_pip_package', 'tensor2tensor==1.7',
    'Tensor2Tensor pip package to install. By default, PKB '
    'will install tensor2tensor==1.7 .')
flags.DEFINE_string('tf_cnn_benchmarks_branch',
                    'cnn_tf_v1.12_compatible',
                    'TensorFlow CNN branchmarks branch that is compatible with '
                    'A TensorFlow version.')

# NCCL repo package for Ubuntu 16.04 (used by the 'nccl' package install).
NCCL_URL = 'https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1604/x86_64/nvidia-machine-learning-repo-ubuntu1604_1.0.0-1_amd64.deb'
NCCL_PACKAGE = 'nvidia-machine-learning-repo-ubuntu1604_1.0.0-1_amd64.deb'


def GetEnvironmentVars(vm):
  """Return a string containing TensorFlow-related environment variables.

  On GPU VMs this adds CUDA_HOME plus PATH/LD_LIBRARY_PATH entries pointing
  at the CUDA toolkit (lib vs lib64 chosen by the VM's word size); an AWS
  region variable is appended when --aws_s3_region is set.

  Args:
    vm: vm to get environment variables

  Returns:
    string of environment variables
  """
  env_vars = []
  if nvidia_driver.CheckNvidiaGpuExists(vm):
    output, _ = vm.RemoteCommand('getconf LONG_BIT', should_log=True)
    long_bit = output.strip()
    lib_name = 'lib' if long_bit == '32' else 'lib64'
    env_vars.extend([
        'PATH=%s${PATH:+:${PATH}}' %
        posixpath.join(cuda_toolkit.CUDA_HOME, 'bin'),
        'CUDA_HOME=%s' % cuda_toolkit.CUDA_HOME,
        'LD_LIBRARY_PATH=%s${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}' %
        posixpath.join(cuda_toolkit.CUDA_HOME, lib_name)])
  if FLAGS.aws_s3_region:
    env_vars.append('AWS_REGION={}'.format(FLAGS.aws_s3_region))
  return ' '.join(env_vars)


def GetTensorFlowVersion(vm):
  """Returns the version of tensorflow installed on the vm.

  Args:
    vm: the target vm on which to check the tensorflow version

  Returns:
    installed python tensorflow version as a string
  """
  # Run the version probe with the CUDA environment applied so the GPU
  # build imports cleanly.
  stdout, _ = vm.RemoteCommand(
      ('echo -e "import tensorflow\nprint(tensorflow.__version__)" | {0} python'
       .format(GetEnvironmentVars(vm)))
  )
  return stdout.strip()


def Install(vm):
  """Installs TensorFlow on the VM.

  Picks the GPU or CPU pip package based on whether an NVIDIA GPU is
  present, installs CUDA/NCCL/cuDNN for GPU VMs, installs tensor2tensor,
  and clones the tf_cnn_benchmarks repo at the configured branch.
  """
  has_gpu = nvidia_driver.CheckNvidiaGpuExists(vm)
  tf_pip_package = (FLAGS.tf_gpu_pip_package if has_gpu
                    else FLAGS.tf_cpu_pip_package)

  if has_gpu:
    vm.Install('cuda_toolkit')
    vm.Install('nccl')
    vm.Install('cudnn')

  vm.Install('pip')
  vm.RemoteCommand('sudo pip install requests')
  vm.RemoteCommand('sudo pip install --upgrade absl-py')
  vm.RemoteCommand('sudo pip install --upgrade %s' % tf_pip_package,
                   should_log=True)
  vm.RemoteCommand(
      'sudo pip install --upgrade %s' % FLAGS.t2t_pip_package, should_log=True)
  vm.InstallPackages('git')
  # Clone only once; re-running Install on the same VM is a no-op here.
  _, _, retcode = vm.RemoteHostCommandWithReturnCode(
      'test -d benchmarks', ignore_failure=True, suppress_warning=True)
  if retcode != 0:
    vm.RemoteCommand(
        'git clone https://github.com/tensorflow/benchmarks.git',
        should_log=True)
  vm.RemoteCommand(
      'cd benchmarks && git checkout {}'.format(FLAGS.tf_cnn_benchmarks_branch)
  )
  if FLAGS.cloud == 'AWS' and FLAGS.tf_data_dir and (
      not FLAGS.tf_use_local_data):
    vm.Install('aws_credentials')


def Uninstall(vm):
  """Uninstalls TensorFlow on the VM."""
  vm.RemoteCommand('sudo pip uninstall tensorflow',
                   should_log=True)
"""Flexx demo: recursively nested split layouts with colored panes."""
from flexx import flx


class Split(flx.Widget):
    # Alternating horizontal/vertical splitters, each level adding one
    # colored pane, to exercise nested HSplit/VSplit rendering.
    def init(self):
        with flx.HSplit():
            flx.Widget(style='background:#f00')
            with flx.VSplit():
                flx.Widget(style='background:#0f0')
                with flx.HSplit():
                    flx.Widget(style='background:#ff0')
                    with flx.VSplit():
                        flx.Widget(style='background:#f0f')
                        with flx.HSplit():
                            flx.Widget(style='background:#0ff')
                            flx.Widget(style='background:#00f')


if __name__ == '__main__':
    m = flx.launch(Split)
    flx.run()
"""Sensor platform exposing Xbox Live profile and presence via xapi.us."""
from datetime import timedelta
import logging

import voluptuous as vol
from xboxapi import Client

from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_API_KEY, CONF_SCAN_INTERVAL
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_time_interval

_LOGGER = logging.getLogger(__name__)

CONF_XUID = "xuid"

ICON = "mdi:microsoft-xbox"

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_API_KEY): cv.string,
        # One or more Xbox user IDs; a single string is coerced to a list.
        vol.Required(CONF_XUID): vol.All(cv.ensure_list, [cv.string]),
    }
)


def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Xbox platform."""
    api = Client(api_key=config[CONF_API_KEY])
    entities = []

    # request profile info to check api connection
    response = api.api_get("profile")
    if not response.ok:
        _LOGGER.error(
            "Can't setup X API connection. Check your account or "
            "api key on xapi.us. Code: %s Description: %s ",
            response.status_code,
            response.reason,
        )
        return

    users = config[CONF_XUID]

    # Stagger polling: one minute per configured user, unless overridden,
    # to stay within the xapi.us rate limit.
    interval = timedelta(minutes=1 * len(users))
    interval = config.get(CONF_SCAN_INTERVAL, interval)

    for xuid in users:
        gamercard = get_user_gamercard(api, xuid)
        if gamercard is None:
            continue
        entities.append(XboxSensor(api, xuid, gamercard, interval))

    if entities:
        add_entities(entities, True)


def get_user_gamercard(api, xuid):
    """Get profile info.

    Returns the gamercard dict for the given xuid, or None on failure.
    """
    gamercard = api.gamer(gamertag="", xuid=xuid).get("gamercard")
    _LOGGER.debug("User gamercard: %s", gamercard)

    # A failed lookup still returns a dict, but with an error "code" set.
    if gamercard.get("success", True) and gamercard.get("code") is None:
        return gamercard
    _LOGGER.error(
        "Can't get user profile %s. Error Code: %s Description: %s",
        xuid,
        gamercard.get("code", "unknown"),
        gamercard.get("description", "unknown"),
    )
    return None


class XboxSensor(Entity):
    """A class for the Xbox account."""

    def __init__(self, api, xuid, gamercard, interval):
        """Initialize the sensor."""
        self._state = None
        self._presence = []
        self._xuid = xuid
        self._api = api
        self._gamertag = gamercard["gamertag"]
        self._gamerscore = gamercard["gamerscore"]
        self._interval = interval
        self._picture = gamercard["gamerpicSmallSslImagePath"]
        self._tier = gamercard["tier"]

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._gamertag

    @property
    def should_poll(self):
        """Return False as this entity has custom polling."""
        return False

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        attributes = {"gamerscore": self._gamerscore, "tier": self._tier}

        # One attribute per title per device, e.g. "XboxOne Full: <game>".
        for device in self._presence:
            for title in device["titles"]:
                attributes[f'{device["type"]} {title["placement"]}'] = title["name"]

        return attributes

    @property
    def entity_picture(self):
        """Avatar of the account."""
        return self._picture

    @property
    def icon(self):
        """Return the icon to use in the frontend."""
        return ICON

    async def async_added_to_hass(self):
        """Start custom polling."""

        @callback
        def async_update(event_time=None):
            """Update the entity."""
            self.async_schedule_update_ha_state(True)

        # Poll at the per-platform interval computed in setup_platform.
        async_track_time_interval(self.hass, async_update, self._interval)

    def update(self):
        """Update state data from Xbox API."""
        presence = self._api.gamer(gamertag="", xuid=self._xuid).get("presence")
        _LOGGER.debug("User presence: %s", presence)
        self._state = presence["state"]
        self._presence = presence.get("devices", [])
"""Tests for the shopping_list intents, deprecated REST API and websocket API."""
from homeassistant.components.websocket_api.const import TYPE_RESULT
from homeassistant.const import HTTP_NOT_FOUND
from homeassistant.helpers import intent


async def test_add_item(hass, sl_setup):
    """Test adding an item intent."""

    response = await intent.async_handle(
        hass, "test", "HassShoppingListAddItem", {"item": {"value": "beer"}}
    )

    assert response.speech["plain"]["speech"] == "I've added beer to your shopping list"


async def test_recent_items_intent(hass, sl_setup):
    """Test recent items."""

    await intent.async_handle(
        hass, "test", "HassShoppingListAddItem", {"item": {"value": "beer"}}
    )
    await intent.async_handle(
        hass, "test", "HassShoppingListAddItem", {"item": {"value": "wine"}}
    )
    await intent.async_handle(
        hass, "test", "HassShoppingListAddItem", {"item": {"value": "soda"}}
    )

    response = await intent.async_handle(hass, "test", "HassShoppingListLastItems")

    # Most recently added items are listed first.
    assert (
        response.speech["plain"]["speech"]
        == "These are the top 3 items on your shopping list: soda, wine, beer"
    )


async def test_deprecated_api_get_all(hass, hass_client, sl_setup):
    """Test the API."""

    await intent.async_handle(
        hass, "test", "HassShoppingListAddItem", {"item": {"value": "beer"}}
    )
    await intent.async_handle(
        hass, "test", "HassShoppingListAddItem", {"item": {"value": "wine"}}
    )

    client = await hass_client()
    resp = await client.get("/api/shopping_list")

    assert resp.status == 200
    data = await resp.json()
    assert len(data) == 2
    assert data[0]["name"] == "beer"
    assert not data[0]["complete"]
    assert data[1]["name"] == "wine"
    assert not data[1]["complete"]


async def test_ws_get_items(hass, hass_ws_client, sl_setup):
    """Test get shopping_list items websocket command."""

    await intent.async_handle(
        hass, "test", "HassShoppingListAddItem", {"item": {"value": "beer"}}
    )
    await intent.async_handle(
        hass, "test", "HassShoppingListAddItem", {"item": {"value": "wine"}}
    )

    client = await hass_ws_client(hass)

    await client.send_json({"id": 5, "type": "shopping_list/items"})
    msg = await client.receive_json()
    assert msg["success"] is True

    # Response envelope echoes the request id and reports a result type.
    assert msg["id"] == 5
    assert msg["type"] == TYPE_RESULT
    assert msg["success"]
    data = msg["result"]
    assert len(data) == 2
    assert data[0]["name"] == "beer"
    assert not data[0]["complete"]
    assert data[1]["name"] == "wine"
    assert not data[1]["complete"]


async def test_deprecated_api_update(hass, hass_client, sl_setup):
    """Test the API."""

    await intent.async_handle(
        hass, "test", "HassShoppingListAddItem", {"item": {"value": "beer"}}
    )
    await intent.async_handle(
        hass, "test", "HassShoppingListAddItem", {"item": {"value": "wine"}}
    )

    beer_id = hass.data["shopping_list"].items[0]["id"]
    wine_id = hass.data["shopping_list"].items[1]["id"]

    client = await hass_client()
    resp = await client.post(
        f"/api/shopping_list/item/{beer_id}", json={"name": "soda"}
    )

    assert resp.status == 200
    data = await resp.json()
    assert data == {"id": beer_id, "name": "soda", "complete": False}

    resp = await client.post(
        f"/api/shopping_list/item/{wine_id}", json={"complete": True}
    )

    assert resp.status == 200
    data = await resp.json()
    assert data == {"id": wine_id, "name": "wine", "complete": True}

    # The in-memory list reflects both updates.
    beer, wine = hass.data["shopping_list"].items
    assert beer == {"id": beer_id, "name": "soda", "complete": False}
    assert wine == {"id": wine_id, "name": "wine", "complete": True}


async def test_ws_update_item(hass, hass_ws_client, sl_setup):
    """Test update shopping_list item websocket command."""
    await intent.async_handle(
        hass, "test", "HassShoppingListAddItem", {"item": {"value": "beer"}}
    )
    await intent.async_handle(
        hass, "test", "HassShoppingListAddItem", {"item": {"value": "wine"}}
    )

    beer_id = hass.data["shopping_list"].items[0]["id"]
    wine_id = hass.data["shopping_list"].items[1]["id"]

    client = await hass_ws_client(hass)

    # Rename the first item.
    await client.send_json(
        {
            "id": 5,
            "type": "shopping_list/items/update",
            "item_id": beer_id,
            "name": "soda",
        }
    )
    msg = await client.receive_json()
    assert msg["success"] is True
    data = msg["result"]
    assert data == {"id": beer_id, "name": "soda", "complete": False}

    # Mark the second item as completed.
    await client.send_json(
        {
            "id": 6,
            "type": "shopping_list/items/update",
            "item_id": wine_id,
            "complete": True,
        }
    )
    msg = await client.receive_json()
    assert msg["success"] is True
    data = msg["result"]
    assert data == {"id": wine_id, "name": "wine", "complete": True}

    beer, wine = hass.data["shopping_list"].items
    assert beer == {"id": beer_id, "name": "soda", "complete": False}
    assert wine == {"id": wine_id, "name": "wine", "complete": True}


async def test_api_update_fails(hass, hass_client, sl_setup):
    """Test the API."""

    await intent.async_handle(
        hass, "test", "HassShoppingListAddItem", {"item": {"value": "beer"}}
    )

    client = await hass_client()

    # Unknown item id -> 404.
    resp = await client.post("/api/shopping_list/non_existing", json={"name": "soda"})

    assert resp.status == HTTP_NOT_FOUND

    beer_id = hass.data["shopping_list"].items[0]["id"]

    # Non-string name fails schema validation -> 400.
    resp = await client.post(f"/api/shopping_list/item/{beer_id}", json={"name": 123})

    assert resp.status == 400


async def test_ws_update_item_fail(hass, hass_ws_client, sl_setup):
    """Test failure of update shopping_list item websocket command."""
    await intent.async_handle(
        hass, "test", "HassShoppingListAddItem", {"item": {"value": "beer"}}
    )
    client = await hass_ws_client(hass)

    # Unknown item id -> item_not_found error.
    await client.send_json(
        {
            "id": 5,
            "type": "shopping_list/items/update",
            "item_id": "non_existing",
            "name": "soda",
        }
    )
    msg = await client.receive_json()
    assert msg["success"] is False
    data = msg["error"]
    assert data == {"code": "item_not_found", "message": "Item not found"}

    # Missing item_id / invalid name fails message validation.
    await client.send_json({"id": 6, "type": "shopping_list/items/update", "name": 123})
    msg = await client.receive_json()
    assert msg["success"] is False


async def test_deprecated_api_clear_completed(hass, hass_client, sl_setup):
    """Test the API."""

    await intent.async_handle(
        hass, "test", "HassShoppingListAddItem", {"item": {"value": "beer"}}
    )
    await intent.async_handle(
        hass, "test", "HassShoppingListAddItem", {"item": {"value": "wine"}}
    )

    beer_id = hass.data["shopping_list"].items[0]["id"]
    wine_id = hass.data["shopping_list"].items[1]["id"]

    client = await hass_client()

    # Mark beer as completed
    resp = await client.post(
        f"/api/shopping_list/item/{beer_id}", json={"complete": True}
    )
    assert resp.status == 200

    resp = await client.post("/api/shopping_list/clear_completed")
    assert resp.status == 200

    # Only the uncompleted item remains.
    items = hass.data["shopping_list"].items
    assert len(items) == 1
    assert items[0] == {"id": wine_id, "name": "wine", "complete": False}


async def test_ws_clear_items(hass, hass_ws_client, sl_setup):
    """Test clearing shopping_list items websocket command."""
    await intent.async_handle(
        hass, "test", "HassShoppingListAddItem", {"item": {"value": "beer"}}
    )
    await intent.async_handle(
        hass, "test", "HassShoppingListAddItem", {"item": {"value": "wine"}}
    )
    beer_id = hass.data["shopping_list"].items[0]["id"]
    wine_id = hass.data["shopping_list"].items[1]["id"]
    client = await hass_ws_client(hass)

    # Complete one item, then clear completed items.
    await client.send_json(
        {
            "id": 5,
            "type": "shopping_list/items/update",
            "item_id": beer_id,
            "complete": True,
        }
    )
    msg = await client.receive_json()
    assert msg["success"] is True
    await client.send_json({"id": 6, "type": "shopping_list/items/clear"})
    msg = await client.receive_json()
    assert msg["success"] is True
    items = hass.data["shopping_list"].items
    assert len(items) == 1
    assert items[0] == {"id": wine_id, "name": "wine", "complete": False}


async def test_deprecated_api_create(hass, hass_client, sl_setup):
    """Test the API."""

    client = await hass_client()
    resp = await client.post("/api/shopping_list/item", json={"name": "soda"})

    assert resp.status == 200
    data = await resp.json()
    assert data["name"] == "soda"
    assert data["complete"] is False

    items = hass.data["shopping_list"].items
    assert len(items) == 1
    assert items[0]["name"] == "soda"
    assert items[0]["complete"] is False


async def test_deprecated_api_create_fail(hass, hass_client, sl_setup):
    """Test the API."""

    client = await hass_client()

    # Non-string name fails schema validation; nothing is added.
    resp = await client.post("/api/shopping_list/item", json={"name": 1234})

    assert resp.status == 400
    assert len(hass.data["shopping_list"].items) == 0


async def test_ws_add_item(hass, hass_ws_client, sl_setup):
    """Test adding shopping_list item websocket command."""
    client = await hass_ws_client(hass)
    await client.send_json({"id": 5, "type": "shopping_list/items/add", "name": "soda"})
    msg = await client.receive_json()
    assert msg["success"] is True
    data = msg["result"]
    assert data["name"] == "soda"
    assert data["complete"] is False
    items = hass.data["shopping_list"].items
    assert len(items) == 1
    assert items[0]["name"] == "soda"
    assert items[0]["complete"] is False


async def test_ws_add_item_fail(hass, hass_ws_client, sl_setup):
    """Test adding shopping_list item failure websocket command."""
    client = await hass_ws_client(hass)
    await client.send_json({"id": 5, "type": "shopping_list/items/add", "name": 123})
    msg = await client.receive_json()
    assert msg["success"] is False
    assert len(hass.data["shopping_list"].items) == 0
"""Tests for pylatex.quantities rendering of `quantities` values."""
import quantities as pq

from pylatex.quantities import _dimensionality_to_siunitx, Quantity


def test_quantity():
    """A quantity with units renders as an \\SI command."""
    v = 1 * pq.m/pq.s

    # Default float formatting.
    q1 = Quantity(v)
    assert q1.dumps() == r'\SI{1.0}{\meter\per\second}'

    # A custom format callback controls the printed magnitude.
    q2 = Quantity(v, format_cb=lambda x: str(int(x)))
    assert q2.dumps() == r'\SI{1}{\meter\per\second}'

    # siunitx options are emitted in square brackets.
    q3 = Quantity(v, options={'zero-decimal-to-integer': 'true'})
    ref = r'\SI[zero-decimal-to-integer=true]{1.0}{\meter\per\second}'
    assert q3.dumps() == ref


def test_quantity_float():
    """A dimensionless float renders as a bare \\num command."""
    q1 = Quantity(42.0)
    assert q1.dumps() == r'\num{42.0}'


def test_quantity_uncertain():
    """An UncertainQuantity renders as value +- uncertainty."""
    t = pq.UncertainQuantity(7., pq.second, 1.)
    q1 = Quantity(t)
    assert q1.dumps() == r'\SI{7.0 +- 1.0}{\second}'


def test_dimensionality_to_siunitx():
    """Compound dimensionality maps onto siunitx unit macros."""
    assert _dimensionality_to_siunitx((pq.volt/pq.kelvin).dimensionality) == \
        r'\volt\per\Kelvin'


if __name__ == '__main__':
    test_quantity()
    # BUG FIX: test_quantity_float was defined but never invoked when this
    # module was run as a script; call it so script runs cover all tests.
    test_quantity_float()
    test_quantity_uncertain()
    test_dimensionality_to_siunitx()
import diamond.collector
import os


class NfsdCollector(diamond.collector.Collector):
    """Collects NFS server statistics from /proc/net/rpc/nfsd."""

    PROC = '/proc/net/rpc/nfsd'

    # Maps the first token of each /proc/net/rpc/nfsd line to the metric
    # names of the remaining whitespace-separated fields, in order.  This
    # replaces a ~100-line if/elif chain of positional assignments.
    STAT_FIELDS = {
        'rc': ('reply_cache.hits', 'reply_cache.misses',
               'reply_cache.nocache'),
        'fh': ('filehandle.stale', 'filehandle.total-lookups',
               'filehandle.anonlookups', 'filehandle.dir-not-in-cache',
               'filehandle.nodir-not-in-cache'),
        'io': ('input_output.bytes-read', 'input_output.bytes-written'),
        'th': ('threads.threads', 'threads.fullcnt', 'threads.10-20-pct',
               'threads.20-30-pct', 'threads.30-40-pct', 'threads.40-50-pct',
               'threads.50-60-pct', 'threads.60-70-pct', 'threads.70-80-pct',
               'threads.80-90-pct', 'threads.90-100-pct', 'threads.100-pct'),
        'ra': ('read-ahead.cache-size', 'read-ahead.10-pct',
               'read-ahead.20-pct', 'read-ahead.30-pct', 'read-ahead.40-pct',
               'read-ahead.50-pct', 'read-ahead.60-pct', 'read-ahead.70-pct',
               'read-ahead.80-pct', 'read-ahead.90-pct', 'read-ahead.100-pct',
               'read-ahead.not-found'),
        'net': ('net.cnt', 'net.udpcnt', 'net.tcpcnt', 'net.tcpconn'),
        'rpc': ('rpc.cnt', 'rpc.badfmt', 'rpc.badauth', 'rpc.badclnt'),
        'proc2': ('v2.unknown', 'v2.null', 'v2.getattr', 'v2.setattr',
                  'v2.root', 'v2.lookup', 'v2.readlink', 'v2.read',
                  'v2.wrcache', 'v2.write', 'v2.create', 'v2.remove',
                  'v2.rename', 'v2.link', 'v2.symlink', 'v2.mkdir',
                  'v2.rmdir', 'v2.readdir', 'v2.fsstat'),
        'proc3': ('v3.unknown', 'v3.null', 'v3.getattr', 'v3.setattr',
                  'v3.lookup', 'v3.access', 'v3.readlink', 'v3.read',
                  'v3.write', 'v3.create', 'v3.mkdir', 'v3.symlink',
                  'v3.mknod', 'v3.remove', 'v3.rmdir', 'v3.rename',
                  'v3.link', 'v3.readdir', 'v3.readdirplus', 'v3.fsstat',
                  'v3.fsinfo', 'v3.pathconf', 'v3.commit'),
        'proc4': ('v4.unknown', 'v4.null', 'v4.compound'),
        'proc4ops': ('v4.ops.unknown', 'v4.ops.op0-unused',
                     'v4.ops.op1-unused', 'v4.ops.op2-future',
                     'v4.ops.access', 'v4.ops.close', 'v4.ops.commit',
                     'v4.ops.create', 'v4.ops.delegpurge',
                     'v4.ops.delegreturn', 'v4.ops.getattr', 'v4.ops.getfh',
                     'v4.ops.link', 'v4.ops.lock', 'v4.ops.lockt',
                     'v4.ops.locku', 'v4.ops.lookup', 'v4.ops.lookup_root',
                     'v4.ops.nverify', 'v4.ops.open', 'v4.ops.openattr',
                     'v4.ops.open_conf', 'v4.ops.open_dgrd', 'v4.ops.putfh',
                     'v4.ops.putpubfh', 'v4.ops.putrootfh', 'v4.ops.read',
                     'v4.ops.readdir', 'v4.ops.readlink', 'v4.ops.remove',
                     'v4.ops.rename', 'v4.ops.renew', 'v4.ops.restorefh',
                     'v4.ops.savefh', 'v4.ops.secinfo', 'v4.ops.setattr',
                     'v4.ops.setcltid', 'v4.ops.setcltidconf',
                     'v4.ops.verify', 'v4.ops.write', 'v4.ops.rellockowner'),
    }

    def get_default_config_help(self):
        """Return help text for the collector's config options."""
        config_help = super(NfsdCollector, self).get_default_config_help()
        config_help.update({
        })
        return config_help

    def get_default_config(self):
        """
        Returns the default collector settings
        """
        config = super(NfsdCollector, self).get_default_config()
        config.update({
            'path': 'nfsd'
        })
        return config

    def collect(self):
        """
        Collect stats

        Returns True if stats were collected, False if the proc file is
        unreadable.
        """
        if not os.access(self.PROC, os.R_OK):
            return False

        results = {}
        # FIX: use a context manager so the file handle is always closed,
        # even if parsing raises; the old code leaked on exceptions and
        # shadowed the builtin `file`.
        with open(self.PROC) as proc_file:
            for proc_line in proc_file:
                tokens = proc_line.split()
                if not tokens:
                    continue
                fields = self.STAT_FIELDS.get(tokens[0])
                if fields is None:
                    # Unrecognized stat line; skip it.
                    continue
                # zip() pairs metric names with values positionally and
                # tolerates short lines (old positional indexing raised
                # IndexError on truncated lines).
                results.update(zip(fields, tokens[1:]))

        for stat, value in results.items():
            metric_name = '.' + stat
            # FIX: int() replaces the Python-2-only long(); counter values
            # are arbitrary-precision ints on both Python versions.
            metric_value = int(float(value))
            metric_value = self.derivative(metric_name, metric_value)
            self.publish(metric_name, metric_value, precision=3)
        return True
YAML Metadata Warning: The task_categories "code-generation" is not in the official list: text-classification, token-classification, table-question-answering, question-answering, zero-shot-classification, translation, summarization, feature-extraction, text-generation, text2text-generation, fill-mask, sentence-similarity, text-to-speech, text-to-audio, automatic-speech-recognition, audio-to-audio, audio-classification, voice-activity-detection, depth-estimation, image-classification, object-detection, image-segmentation, text-to-image, image-to-text, image-to-image, image-to-video, unconditional-image-generation, video-classification, reinforcement-learning, robotics, tabular-classification, tabular-regression, tabular-to-text, table-to-text, multiple-choice, text-retrieval, time-series-forecasting, text-to-video, image-text-to-text, visual-question-answering, document-question-answering, zero-shot-image-classification, graph-ml, mask-generation, zero-shot-object-detection, text-to-3d, image-to-3d, image-feature-extraction, other
YAML Metadata Warning: The task_categories "conditional-text-generation" is not in the official list: text-classification, token-classification, table-question-answering, question-answering, zero-shot-classification, translation, summarization, feature-extraction, text-generation, text2text-generation, fill-mask, sentence-similarity, text-to-speech, text-to-audio, automatic-speech-recognition, audio-to-audio, audio-classification, voice-activity-detection, depth-estimation, image-classification, object-detection, image-segmentation, text-to-image, image-to-text, image-to-image, image-to-video, unconditional-image-generation, video-classification, reinforcement-learning, robotics, tabular-classification, tabular-regression, tabular-to-text, table-to-text, multiple-choice, text-retrieval, time-series-forecasting, text-to-video, image-text-to-text, visual-question-answering, document-question-answering, zero-shot-image-classification, graph-ml, mask-generation, zero-shot-object-detection, text-to-3d, image-to-3d, image-feature-extraction, other
YAML Metadata Warning: The task_ids "code-generation" is not in the official list: acceptability-classification, entity-linking-classification, fact-checking, intent-classification, language-identification, multi-class-classification, multi-label-classification, multi-input-text-classification, natural-language-inference, semantic-similarity-classification, sentiment-classification, topic-classification, semantic-similarity-scoring, sentiment-scoring, sentiment-analysis, hate-speech-detection, text-scoring, named-entity-recognition, part-of-speech, parsing, lemmatization, word-sense-disambiguation, coreference-resolution, extractive-qa, open-domain-qa, closed-domain-qa, news-articles-summarization, news-articles-headline-generation, dialogue-modeling, dialogue-generation, conversational, language-modeling, text-simplification, explanation-generation, abstractive-qa, open-domain-abstractive-qa, closed-domain-qa, open-book-qa, closed-book-qa, slot-filling, masked-language-modeling, keyword-spotting, speaker-identification, audio-intent-classification, audio-emotion-recognition, audio-language-identification, multi-label-image-classification, multi-class-image-classification, face-detection, vehicle-detection, instance-segmentation, semantic-segmentation, panoptic-segmentation, image-captioning, image-inpainting, image-colorization, super-resolution, grasping, task-planning, tabular-multi-class-classification, tabular-multi-label-classification, tabular-single-column-regression, rdf-to-text, multiple-choice-qa, multiple-choice-coreference-resolution, document-retrieval, utterance-retrieval, entity-linking-retrieval, fact-checking-retrieval, univariate-time-series-forecasting, multivariate-time-series-forecasting, visual-question-answering, document-question-answering

Dataset Card for notional-python

Dataset Summary

The Notional-python dataset contains Python code files from 100 well-known repositories gathered from the Google BigQuery GitHub dataset. The dataset was created to test the code-generation ability of programming-language models. Follow our repo to evaluate models on the notional-python dataset.

Languages

Python

Dataset Creation

Curation Rationale

Notional-python was built to provide a dataset for testing the ability of machine-learning models to generate Python code.

Source Data

Initial Data Collection and Normalization

The data was obtained by filtering code from the Google BigQuery GitHub dataset. In order to improve the quality of the dataset, only Python code files that meet the conditions below were added to the dataset:

  • Code with more than 60% of executable lines
  • Code with logic, not config files or comment-only files
  • Code with more than 30% of attribute declaration lines (e.g., some files contain only class names and their class attributes, usually used for project configuration; such files were not selected)
  • Code without TODO and FIXME.

Who are the source language producers?

The producers are users of GitHub.

Downloads last month
70
Edit dataset card