index: int64 (0 to 731k)
package: string (2 to 98 characters)
name: string (1 to 76 characters)
docstring: string (0 to 281k characters)
code: string (4 to 1.07M characters)
signature: string (2 to 42.8k characters)
4,991
cerberus.validator
_resolve_rules_set
null
def _resolve_rules_set(self, rules_set):
    if isinstance(rules_set, Mapping):
        return rules_set
    elif isinstance(rules_set, _str_type):
        return self.rules_set_registry.get(rules_set)
    return None
(self, rules_set)
4,992
cerberus.validator
_resolve_schema
null
def _resolve_schema(self, schema):
    if isinstance(schema, Mapping):
        return schema
    elif isinstance(schema, _str_type):
        return self.schema_registry.get(schema)
    return None
(self, schema)
4,993
cerberus.validator
_validate_allof
{'type': 'list', 'logical': 'allof'}
def _validate_allof(self, definitions, field, value):
    """{'type': 'list', 'logical': 'allof'}"""
    valids, _errors = self.__validate_logical('allof', definitions, field, value)
    if valids < len(definitions):
        self._error(field, errors.ALLOF, _errors, valids, len(definitions))
(self, definitions, field, value)
4,994
cerberus.validator
dummy
{'oneof': [{'type': 'boolean'}, {'type': ['dict', 'string'], 'check_with': 'bulk_schema'}]}
def dummy_for_rule_validation(rule_constraints):
    def dummy(self, constraint, field, value):
        raise RuntimeError(
            'Dummy method called. Its purpose is to hold just '
            'validation constraints for a rule in its '
            'docstring.'
        )

    f = dummy
    f.__doc__ = rule_constraints
    return f
(self, constraint, field, value)
4,995
cerberus.validator
_validate_allowed
{'type': 'container'}
def _validate_allowed(self, allowed_values, field, value):
    """{'type': 'container'}"""
    if isinstance(value, Iterable) and not isinstance(value, _str_type):
        unallowed = tuple(x for x in value if x not in allowed_values)
        if unallowed:
            self._error(field, errors.UNALLOWED_VALUES, unallowed)
    else:
        if value not in allowed_values:
            self._error(field, errors.UNALLOWED_VALUE, value)
(self, allowed_values, field, value)
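A minimal usage sketch for the 'allowed' rule, assuming a cerberus 1.3-style Validator; the schema and values below are illustrative:

    from cerberus import Validator

    v = Validator({'color': {'type': 'string', 'allowed': ['red', 'green', 'blue']}})
    v.validate({'color': 'green'})   # True
    v.validate({'color': 'purple'})  # False; v.errors reports the unallowed value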
4,996
cerberus.validator
_validate_anyof
{'type': 'list', 'logical': 'anyof'}
def _validate_anyof(self, definitions, field, value):
    """{'type': 'list', 'logical': 'anyof'}"""
    valids, _errors = self.__validate_logical('anyof', definitions, field, value)
    if valids < 1:
        self._error(field, errors.ANYOF, _errors, valids, len(definitions))
(self, definitions, field, value)
4,997
cerberus.validator
_validate_check_with
{'oneof': [ {'type': 'callable'}, {'type': 'list', 'schema': {'oneof': [{'type': 'callable'}, {'type': 'string'}]}}, {'type': 'string'} ]}
def _validate_check_with(self, checks, field, value):
    """
    {'oneof': [
        {'type': 'callable'},
        {'type': 'list',
         'schema': {'oneof': [{'type': 'callable'}, {'type': 'string'}]}},
        {'type': 'string'}
    ]}
    """
    if isinstance(checks, _str_type):
        try:
            value_checker = self.__get_rule_handler('check_with', checks)
        # TODO remove on next major release
        except RuntimeError:
            value_checker = self.__get_rule_handler('validator', checks)
            warn(
                "The 'validator' rule was renamed to 'check_with'. Please update "
                "your schema and method names accordingly.",
                DeprecationWarning,
            )
        value_checker(field, value)
    elif isinstance(checks, Iterable):
        for v in checks:
            self._validate_check_with(v, field, value)
    else:
        checks(field, value, self._error)
(self, checks, field, value)
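How 'check_with' looks from the schema side, sketched with a hypothetical callable check; the callable receives field, value, and the error callback, matching the checks(field, value, self._error) branch above:

    from cerberus import Validator

    def check_odd(field, value, error):
        # hypothetical custom check
        if value % 2 == 0:
            error(field, "value must be odd")

    v = Validator({'amount': {'check_with': check_odd}})
    v.validate({'amount': 3})  # True
    v.validate({'amount': 4})  # False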
4,998
cerberus.validator
_validate_contains
{'empty': False }
def _validate_contains(self, expected_values, field, value):
    """{'empty': False }"""
    if not isinstance(value, Iterable):
        return

    if not isinstance(expected_values, Iterable) or isinstance(
        expected_values, _str_type
    ):
        expected_values = set((expected_values,))
    else:
        expected_values = set(expected_values)

    missing_values = expected_values - set(value)
    if missing_values:
        self._error(field, errors.MISSING_MEMBERS, missing_values)
(self, expected_values, field, value)
4,999
cerberus.validator
_validate_dependencies
{'type': ('dict', 'hashable', 'list'), 'check_with': 'dependencies'}
def _validate_dependencies(self, dependencies, field, value):
    """{'type': ('dict', 'hashable', 'list'), 'check_with': 'dependencies'}"""
    if isinstance(dependencies, _str_type) or not isinstance(
        dependencies, (Iterable, Mapping)
    ):
        dependencies = (dependencies,)

    if isinstance(dependencies, Sequence):
        self.__validate_dependencies_sequence(dependencies, field)
    elif isinstance(dependencies, Mapping):
        self.__validate_dependencies_mapping(dependencies, field)

    if (
        self.document_error_tree.fetch_node_from(
            self.schema_path + (field, 'dependencies')
        )
        is not None
    ):
        return True
(self, dependencies, field, value)
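An illustrative sketch of the simple (sequence) form of 'dependencies', where one field may only appear if another is present; the field names are hypothetical:

    from cerberus import Validator

    v = Validator({
        'shipping': {'type': 'string', 'required': False},
        'billing': {'type': 'string', 'dependencies': 'shipping'},
    })
    v.validate({'billing': 'card'})                     # False: 'shipping' is missing
    v.validate({'billing': 'card', 'shipping': 'ups'})  # True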
5,000
cerberus.validator
_validate_empty
{'type': 'boolean'}
def _validate_empty(self, empty, field, value):
    """{'type': 'boolean'}"""
    if isinstance(value, Sized) and len(value) == 0:
        self._drop_remaining_rules(
            'allowed',
            'forbidden',
            'items',
            'minlength',
            'maxlength',
            'regex',
            'check_with',
        )
        if not empty:
            self._error(field, errors.EMPTY_NOT_ALLOWED)
(self, empty, field, value)
5,001
cerberus.validator
_validate_excludes
{'type': ('hashable', 'list'), 'schema': {'type': 'hashable'}}
def _validate_excludes(self, excluded_fields, field, value):
    """{'type': ('hashable', 'list'), 'schema': {'type': 'hashable'}}"""
    if isinstance(excluded_fields, Hashable):
        excluded_fields = [excluded_fields]

    # Mark the currently evaluated field as not required for now if it actually is.
    # One of the so marked will be needed to pass when required fields are checked.
    if self.schema[field].get('required', self.require_all):
        self._unrequired_by_excludes.add(field)
    for excluded_field in excluded_fields:
        if excluded_field in self.schema and self.schema[field].get(
            'required', self.require_all
        ):
            self._unrequired_by_excludes.add(excluded_field)

    if any(excluded_field in self.document for excluded_field in excluded_fields):
        exclusion_str = ', '.join(
            "'{0}'".format(field) for field in excluded_fields
        )
        self._error(field, errors.EXCLUDES_FIELD, exclusion_str)
(self, excluded_fields, field, value)
5,002
cerberus.validator
_validate_forbidden
{'type': 'list'}
def _validate_forbidden(self, forbidden_values, field, value):
    """{'type': 'list'}"""
    if isinstance(value, Sequence) and not isinstance(value, _str_type):
        forbidden = set(value) & set(forbidden_values)
        if forbidden:
            self._error(field, errors.FORBIDDEN_VALUES, list(forbidden))
    else:
        if value in forbidden_values:
            self._error(field, errors.FORBIDDEN_VALUE, value)
(self, forbidden_values, field, value)
5,003
cerberus.validator
_validate_items
{'type': 'list', 'check_with': 'items'}
def _validate_items(self, items, field, values):
    """{'type': 'list', 'check_with': 'items'}"""
    if len(items) != len(values):
        self._error(field, errors.ITEMS_LENGTH, len(items), len(values))
    else:
        schema = dict(
            (i, definition) for i, definition in enumerate(items)
        )  # noqa: E501
        validator = self._get_child_validator(
            document_crumb=field,
            schema_crumb=(field, 'items'),  # noqa: E501
            schema=schema,
        )
        if not validator(
            dict((i, value) for i, value in enumerate(values)),
            update=self.update,
            normalize=False,
        ):
            self._error(field, errors.BAD_ITEMS, validator._errors)
(self, items, field, values)
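'items' validates positionally and, as the length check above shows, requires an exact length match; a sketch with an illustrative two-element schema:

    from cerberus import Validator

    v = Validator({'point': {
        'type': 'list',
        'items': [{'type': 'integer'}, {'type': 'integer'}],
    }})
    v.validate({'point': [3, 4]})    # True
    v.validate({'point': [3, 'a']})  # False: second item has the wrong type
    v.validate({'point': [3]})       # False: length must match exactly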
5,004
cerberus.validator
_validate_keysrules
{'type': ['dict', 'string'], 'check_with': 'bulk_schema', 'forbidden': ['rename', 'rename_handler']}
def _validate_keysrules(self, schema, field, value):
    """
    {'type': ['dict', 'string'], 'check_with': 'bulk_schema',
     'forbidden': ['rename', 'rename_handler']}
    """
    if isinstance(value, Mapping):
        validator = self._get_child_validator(
            document_crumb=field,
            schema_crumb=(field, 'keysrules'),
            schema=dict(((k, schema) for k in value.keys())),
        )
        if not validator(dict(((k, k) for k in value.keys())), normalize=False):
            self._drop_nodes_from_errorpaths(validator._errors, [], [2, 4])
            self._error(field, errors.KEYSRULES, validator._errors)
(self, schema, field, value)
5,005
cerberus.validator
_validate_max
{'nullable': False }
def _validate_max(self, max_value, field, value):
    """{'nullable': False }"""
    try:
        if value > max_value:
            self._error(field, errors.MAX_VALUE)
    except TypeError:
        pass
(self, max_value, field, value)
5,006
cerberus.validator
_validate_maxlength
{'type': 'integer'}
def _validate_maxlength(self, max_length, field, value):
    """{'type': 'integer'}"""
    if isinstance(value, Iterable) and len(value) > max_length:
        self._error(field, errors.MAX_LENGTH, len(value))
(self, max_length, field, value)
5,007
cerberus.validator
dummy
def dummy_for_rule_validation(rule_constraints):
    def dummy(self, constraint, field, value):
        raise RuntimeError(
            'Dummy method called. Its purpose is to hold just '
            'validation constraints for a rule in its '
            'docstring.'
        )

    f = dummy
    f.__doc__ = rule_constraints
    return f
(self, constraint, field, value)
5,008
cerberus.validator
_validate_min
{'nullable': False }
def _validate_min(self, min_value, field, value):
    """{'nullable': False }"""
    try:
        if value < min_value:
            self._error(field, errors.MIN_VALUE)
    except TypeError:
        pass
(self, min_value, field, value)
5,009
cerberus.validator
_validate_minlength
{'type': 'integer'}
def _validate_minlength(self, min_length, field, value):
    """{'type': 'integer'}"""
    if isinstance(value, Iterable) and len(value) < min_length:
        self._error(field, errors.MIN_LENGTH, len(value))
(self, min_length, field, value)
5,010
cerberus.validator
_validate_noneof
{'type': 'list', 'logical': 'noneof'}
def _validate_noneof(self, definitions, field, value):
    """{'type': 'list', 'logical': 'noneof'}"""
    valids, _errors = self.__validate_logical('noneof', definitions, field, value)
    if valids > 0:
        self._error(field, errors.NONEOF, _errors, valids, len(definitions))
(self, definitions, field, value)
5,011
cerberus.validator
_validate_nullable
{'type': 'boolean'}
def _validate_nullable(self, nullable, field, value):
    """{'type': 'boolean'}"""
    if value is None:
        if not nullable:
            self._error(field, errors.NOT_NULLABLE)
        self._drop_remaining_rules(
            "allof",
            'allowed',
            "anyof",
            'empty',
            'forbidden',
            'items',
            'keysrules',
            'min',
            'max',
            'minlength',
            'maxlength',
            "noneof",
            "oneof",
            'regex',
            'schema',
            'type',
            'valuesrules',
        )
(self, nullable, field, value)
5,012
cerberus.validator
_validate_oneof
{'type': 'list', 'logical': 'oneof'}
def _validate_oneof(self, definitions, field, value):
    """{'type': 'list', 'logical': 'oneof'}"""
    valids, _errors = self.__validate_logical('oneof', definitions, field, value)
    if valids != 1:
        self._error(field, errors.ONEOF, _errors, valids, len(definitions))
(self, definitions, field, value)
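Since 'oneof' errors unless exactly one definition matches, overlapping definitions fail even when several would individually pass; a sketch with illustrative bounds:

    from cerberus import Validator

    v = Validator({'prop': {
        'type': 'number',
        'oneof': [{'min': 0, 'max': 10}, {'min': 100, 'max': 110}],
    }})
    v.validate({'prop': 5})    # True: exactly one definition matches
    v.validate({'prop': 50})   # False: no definition matches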
5,013
cerberus.validator
_validate_readonly
{'type': 'boolean'}
def _validate_readonly(self, readonly, field, value):
    """{'type': 'boolean'}"""
    if readonly:
        if not self._is_normalized:
            self._error(field, errors.READONLY_FIELD)
        # If the document was normalized (and therefore already been
        # checked for readonly fields), we still have to return True
        # if an error was filed.
        has_error = (
            errors.READONLY_FIELD
            in self.document_error_tree.fetch_errors_from(
                self.document_path + (field,)
            )
        )
        if self._is_normalized and has_error:
            self._drop_remaining_rules()
(self, readonly, field, value)
5,014
cerberus.validator
_validate_regex
{'type': 'string'}
def _validate_regex(self, pattern, field, value):
    """{'type': 'string'}"""
    if not isinstance(value, _str_type):
        return
    if not pattern.endswith('$'):
        pattern += '$'
    re_obj = re.compile(pattern)
    if not re_obj.match(value):
        self._error(field, errors.REGEX_MISMATCH)
(self, pattern, field, value)
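Note that the implementation appends '$' when missing and uses re.match, so a pattern effectively has to match the whole value; an illustrative sketch:

    from cerberus import Validator

    v = Validator({'code': {'type': 'string', 'regex': '[A-Z]{3}-[0-9]+'}})
    v.validate({'code': 'ABC-123'})   # True
    v.validate({'code': 'ABC-123x'})  # False: the implicit '$' forces a full match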
5,015
cerberus.validator
dummy
{'type': 'boolean'}
def dummy_for_rule_validation(rule_constraints):
    def dummy(self, constraint, field, value):
        raise RuntimeError(
            'Dummy method called. Its purpose is to hold just '
            'validation constraints for a rule in its '
            'docstring.'
        )

    f = dummy
    f.__doc__ = rule_constraints
    return f
(self, constraint, field, value)
5,017
cerberus.validator
_validate_schema
{'type': ['dict', 'string'], 'anyof': [{'check_with': 'schema'}, {'check_with': 'bulk_schema'}]}
def _validate_schema(self, schema, field, value):
    """
    {'type': ['dict', 'string'],
     'anyof': [{'check_with': 'schema'}, {'check_with': 'bulk_schema'}]}
    """
    if schema is None:
        return

    if isinstance(value, Sequence) and not isinstance(value, _str_type):
        self.__validate_schema_sequence(field, schema, value)
    elif isinstance(value, Mapping):
        self.__validate_schema_mapping(field, schema, value)
(self, schema, field, value)
5,018
cerberus.validator
_validate_type
{'type': ['string', 'list'], 'check_with': 'type'}
def _validate_type(self, data_type, field, value):
    """{'type': ['string', 'list'], 'check_with': 'type'}"""
    if not data_type:
        return

    types = (data_type,) if isinstance(data_type, _str_type) else data_type

    for _type in types:
        # TODO remove this block on next major release
        # this implementation still supports custom type validation methods
        type_definition = self.types_mapping.get(_type)
        if type_definition is not None:
            matched = isinstance(
                value, type_definition.included_types
            ) and not isinstance(value, type_definition.excluded_types)
        else:
            type_handler = self.__get_rule_handler('validate_type', _type)
            matched = type_handler(value)
        if matched:
            return

    # TODO uncomment this block on next major release
    # when _validate_type_* methods were deprecated:
    # type_definition = self.types_mapping[_type]
    # if isinstance(value, type_definition.included_types) \
    #         and not isinstance(value, type_definition.excluded_types):  # noqa 501
    #     return

    self._error(field, errors.BAD_TYPE)
    self._drop_remaining_rules()
(self, data_type, field, value)
5,019
cerberus.validator
_validate_valuesrules
{'type': ['dict', 'string'], 'check_with': 'bulk_schema', 'forbidden': ['rename', 'rename_handler']}
def _validate_valuesrules(self, schema, field, value):
    """
    {'type': ['dict', 'string'], 'check_with': 'bulk_schema',
     'forbidden': ['rename', 'rename_handler']}
    """
    schema_crumb = (field, 'valuesrules')
    if isinstance(value, Mapping):
        validator = self._get_child_validator(
            document_crumb=field,
            schema_crumb=schema_crumb,
            schema=dict((k, schema) for k in value),
        )
        validator(value, update=self.update, normalize=False)
        if validator._errors:
            self._drop_nodes_from_errorpaths(validator._errors, [], [2])
            self._error(field, errors.VALUESRULES, validator._errors)
(self, schema, field, value)
5,020
cerberus.validator
normalized
Returns the document normalized according to the specified rules of a schema. :param document: The document to normalize. :type document: any :term:`mapping` :param schema: The validation schema. Defaults to :obj:`None`. If not provided here, the schema must have been provided at class instantiation. :type schema: any :term:`mapping` :param always_return_document: Return the document, even if an error occurred. Defaults to: ``False``. :type always_return_document: :class:`bool` :return: A normalized copy of the provided mapping or :obj:`None` if an error occurred during normalization.
def normalized(self, document, schema=None, always_return_document=False):
    """
    Returns the document normalized according to the specified rules of a schema.

    :param document: The document to normalize.
    :type document: any :term:`mapping`
    :param schema: The validation schema. Defaults to :obj:`None`. If not
        provided here, the schema must have been provided at class instantiation.
    :type schema: any :term:`mapping`
    :param always_return_document: Return the document, even if an error
        occurred. Defaults to: ``False``.
    :type always_return_document: :class:`bool`
    :return: A normalized copy of the provided mapping or :obj:`None` if an
        error occurred during normalization.
    """
    self.__init_processing(document, schema)
    self.__normalize_mapping(self.document, self.schema)
    self.error_handler.end(self)
    if self._errors and not always_return_document:
        return None
    else:
        return self.document
(self, document, schema=None, always_return_document=False)
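A sketch of normalized() with illustrative coerce/default rules; normalization applies them and returns the copy, or None on error:

    from cerberus import Validator

    v = Validator({
        'amount': {'type': 'integer', 'coerce': int},
        'kind': {'type': 'string', 'default': 'purchase'},
    })
    v.normalized({'amount': '1'})  # {'amount': 1, 'kind': 'purchase'}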
5,022
cerberus.validator
validated
Wrapper around :meth:`~cerberus.Validator.validate` that returns the normalized and validated document or :obj:`None` if validation failed.
def validated(self, *args, **kwargs):
    """
    Wrapper around :meth:`~cerberus.Validator.validate` that returns the
    normalized and validated document or :obj:`None` if validation failed.
    """
    always_return_document = kwargs.pop('always_return_document', False)
    self.validate(*args, **kwargs)
    if self._errors and not always_return_document:
        return None
    else:
        return self.document
(self, *args, **kwargs)
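validated() bundles the validate-then-fetch-document pattern; a short illustrative sketch:

    from cerberus import Validator

    v = Validator({'amount': {'type': 'integer', 'coerce': int}})
    document = v.validated({'amount': '1'})  # {'amount': 1}
    if document is None:
        print(v.errors)  # reached only when validation failed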
5,029
jsii._runtime
JSIIAbstractClass
null
class JSIIAbstractClass(abc.ABCMeta, JSIIMeta):
    pass
(name, bases, namespace, **kwargs)
5,030
jsii._runtime
__call__
null
def __call__(cls: Type[M], *args: Any, **kwargs) -> M:
    # There is no way to constrain the metaclass of a `Type[M]` hint today, so we have to
    # perform a `cast` trick here in order for MyPy to accept this code as valid... The implicit
    # arguments to `super()` otherwise are `super(__class__, cls)`, which results in an error.
    inst = super(JSIIMeta, cast(JSIIMeta, cls)).__call__(*args, **kwargs)

    # Register this instance with our reference map.
    _reference_map.register_reference(inst)

    return inst
(cls: Type[~M], *args: Any, **kwargs) -> ~M
5,031
abc
__instancecheck__
Override for isinstance(instance, cls).
def __instancecheck__(cls, instance):
    """Override for isinstance(instance, cls)."""
    return _abc_instancecheck(cls, instance)
(cls, instance)
5,032
abc
__new__
null
def __new__(mcls, name, bases, namespace, **kwargs):
    cls = super().__new__(mcls, name, bases, namespace, **kwargs)
    _abc_init(cls)
    return cls
(mcls, name, bases, namespace, **kwargs)
5,033
jsii.python
__setattr__
null
def __setattr__(self, key: str, value: Any) -> None:
    obj = getattr(self, key, None)
    if isinstance(obj, _ClassProperty):
        return obj.__set__(self, value)
    return super().__setattr__(key, value)
(self, key: str, value: Any) -> NoneType
5,034
abc
__subclasscheck__
Override for issubclass(subclass, cls).
def __subclasscheck__(cls, subclass):
    """Override for issubclass(subclass, cls)."""
    return _abc_subclasscheck(cls, subclass)
(cls, subclass)
5,035
abc
_abc_caches_clear
Clear the caches (for debugging or testing).
def _abc_caches_clear(cls):
    """Clear the caches (for debugging or testing)."""
    _reset_caches(cls)
(cls)
5,036
abc
_abc_registry_clear
Clear the registry (for debugging or testing).
def _abc_registry_clear(cls):
    """Clear the registry (for debugging or testing)."""
    _reset_registry(cls)
(cls)
5,037
abc
_dump_registry
Debug helper to print the ABC registry.
def _dump_registry(cls, file=None):
    """Debug helper to print the ABC registry."""
    print(f"Class: {cls.__module__}.{cls.__qualname__}", file=file)
    print(f"Inv. counter: {get_cache_token()}", file=file)
    (_abc_registry, _abc_cache, _abc_negative_cache,
     _abc_negative_cache_version) = _get_dump(cls)
    print(f"_abc_registry: {_abc_registry!r}", file=file)
    print(f"_abc_cache: {_abc_cache!r}", file=file)
    print(f"_abc_negative_cache: {_abc_negative_cache!r}", file=file)
    print(f"_abc_negative_cache_version: {_abc_negative_cache_version!r}",
          file=file)
(cls, file=None)
5,038
abc
register
Register a virtual subclass of an ABC. Returns the subclass, to allow usage as a class decorator.
def register(cls, subclass):
    """Register a virtual subclass of an ABC.

    Returns the subclass, to allow usage as a class decorator.
    """
    return _abc_register(cls, subclass)
(cls, subclass)
5,039
jsii._runtime
JSIIAssembly
null
class JSIIAssembly:
    name: str
    version: str
    module: str
    filename: str

    @classmethod
    def load(cls, *args, _kernel=kernel, **kwargs) -> "JSIIAssembly":
        # Our object here really just acts as a record for our JSIIAssembly, it doesn't
        # offer any functionality itself, besides this class method that will trigger
        # the loading of the given assembly in the JSII Kernel.
        assembly = cls(*args, **kwargs)

        # Actually load the assembly into the kernel, we're using the
        # importlib.resources API here instead of manually constructing the path, in
        # the hopes that this will make JSII modules able to be used with zipimport
        # instead of only on the FS.
        with importlib_resources.as_file(
            importlib_resources.files(f"{assembly.module}._jsii").joinpath(
                assembly.filename
            )
        ) as assembly_path:
            _kernel.load(assembly.name, assembly.version, os.fspath(assembly_path))

        # Give our record of the assembly back to the caller.
        return assembly

    @classmethod
    def invokeBinScript(
        cls,
        pkgname: str,
        script: str,
        args: Optional[Sequence[str]] = None,
        _kernel=kernel,
    ) -> int:
        if args is None:
            args = []

        response = _kernel.getBinScriptCommand(pkgname, script, args)

        result = subprocess.run(
            " ".join([response.command, *response.args]),
            encoding="utf-8",
            shell=True,
            env=response.env,
        )
        return result.returncode
(name: str, version: str, module: str, filename: str) -> None
5,040
attr._make
_frozen_delattrs
Attached to frozen classes as __delattr__.
def _frozen_delattrs(self, name):
    """
    Attached to frozen classes as __delattr__.
    """
    raise FrozenInstanceError()
(self, name)
5,041
jsii._runtime
__eq__
Method generated by attrs for class JSIIAssembly.
import abc
import os
import sys
import subprocess

import attr

from typing import (
    Any,
    Callable,
    cast,
    List,
    Mapping,
    Optional,
    Sequence,
    Type,
    TypeVar,
)

from . import _reference_map
from ._compat import importlib_resources
from ._kernel import Kernel
from .python import _ClassPropertyMeta

# Yea, a global here is kind of gross, however, there's not really a better way of
# handling this. Fundamentally this is a global value, since we can only reasonably
# have a single kernel active at any one time in a real program.
kernel = Kernel()


@attr.s(auto_attribs=True, frozen=True, slots=True)
class JSIIAssembly:
    name: str
    version: str
    module: str
    filename: str

    @classmethod
    def load(cls, *args, _kernel=kernel, **kwargs) -> "JSIIAssembly":
        # Our object here really just acts as a record for our JSIIAssembly, it doesn't
        # offer any functionality itself, besides this class method that will trigger
        # the loading of the given assembly in the JSII Kernel.
        assembly = cls(*args, **kwargs)

        # Actually load the assembly into the kernel, we're using the
        # importlib.resources API here instead of manually constructing the path, in
        # the hopes that this will make JSII modules able to be used with zipimport
        # instead of only on the FS.
        with importlib_resources.as_file(
            importlib_resources.files(f"{assembly.module}._jsii").joinpath(
                assembly.filename
            )
        ) as assembly_path:
            _kernel.load(assembly.name, assembly.version, os.fspath(assembly_path))

        # Give our record of the assembly back to the caller.
        return assembly

    @classmethod
    def invokeBinScript(
        cls,
        pkgname: str,
        script: str,
        args: Optional[Sequence[str]] = None,
        _kernel=kernel,
    ) -> int:
        if args is None:
            args = []

        response = _kernel.getBinScriptCommand(pkgname, script, args)

        result = subprocess.run(
            " ".join([response.command, *response.args]),
            encoding="utf-8",
            shell=True,
            env=response.env,
        )
        return result.returncode
(self, other)
5,042
jsii._runtime
__ge__
Method generated by attrs for class JSIIAssembly.
null
(self, other)
5,043
attr._make
slots_getstate
Automatically created by attrs.
def _make_getstate_setstate(self):
    """
    Create custom __setstate__ and __getstate__ methods.
    """
    # __weakref__ is not writable.
    state_attr_names = tuple(
        an for an in self._attr_names if an != "__weakref__"
    )

    def slots_getstate(self):
        """
        Automatically created by attrs.
        """
        return {name: getattr(self, name) for name in state_attr_names}

    hash_caching_enabled = self._cache_hash

    def slots_setstate(self, state):
        """
        Automatically created by attrs.
        """
        __bound_setattr = _obj_setattr.__get__(self)
        if isinstance(state, tuple):
            # Backward compatibility with attrs instances pickled with
            # attrs versions before v22.2.0 which stored tuples.
            for name, value in zip(state_attr_names, state):
                __bound_setattr(name, value)
        else:
            for name in state_attr_names:
                if name in state:
                    __bound_setattr(name, state[name])

        # The hash code cache is not included when the object is
        # serialized, but it still needs to be initialized to None to
        # indicate that the first call to __hash__ should be a cache
        # miss.
        if hash_caching_enabled:
            __bound_setattr(_hash_cache_field, None)

    return slots_getstate, slots_setstate
(self)
5,051
attr._make
_frozen_setattrs
Attached to frozen classes as __setattr__.
def _frozen_setattrs(self, name, value):
    """
    Attached to frozen classes as __setattr__.
    """
    if isinstance(self, BaseException) and name in (
        "__cause__",
        "__context__",
        "__traceback__",
    ):
        BaseException.__setattr__(self, name, value)
        return

    raise FrozenInstanceError()
(self, name, value)
5,053
jsii._runtime
JSIIMeta
null
class JSIIMeta(_ClassPropertyMeta, type):
    def __new__(
        cls: Type["JSIIMeta"],
        name: str,
        bases: tuple,
        attrs: dict,
        *,
        jsii_type: Optional[str] = None,
    ) -> "JSIIMeta":
        # We want to ensure that subclasses of a JSII class do not require setting the
        # jsii_type keyword argument. They should be able to subclass it as normal.
        # Since their parent class will have the __jsii_type__ variable defined, they
        # will as well anyways.
        if jsii_type is not None:
            attrs["__jsii_type__"] = jsii_type
        # The declared type should NOT be inherited by subclasses. This way we can identify whether
        # an MRO entry corresponds to a possible overrides contributor or not.
        attrs["__jsii_declared_type__"] = jsii_type

        obj = super().__new__(cls, name, bases, attrs)

        # Now that we've created the class, we'll need to register it with our reference
        # mapper. We only do this for types that are actually jsii types, and not any
        # subclasses of them.
        if jsii_type is not None:
            _reference_map.register_type(obj)

        return cast("JSIIMeta", obj)

    def __call__(cls: Type[M], *args: Any, **kwargs) -> M:
        # There is no way to constrain the metaclass of a `Type[M]` hint today, so we have to
        # perform a `cast` trick here in order for MyPy to accept this code as valid... The implicit
        # arguments to `super()` otherwise are `super(__class__, cls)`, which results in an error.
        inst = super(JSIIMeta, cast(JSIIMeta, cls)).__call__(*args, **kwargs)

        # Register this instance with our reference map.
        _reference_map.register_reference(inst)

        return inst
(name: str, bases: tuple, attrs: dict, *, jsii_type: Optional[str] = None) -> 'JSIIMeta'
5,055
jsii._runtime
__new__
null
def __new__(
    cls: Type["JSIIMeta"],
    name: str,
    bases: tuple,
    attrs: dict,
    *,
    jsii_type: Optional[str] = None,
) -> "JSIIMeta":
    # We want to ensure that subclasses of a JSII class do not require setting the
    # jsii_type keyword argument. They should be able to subclass it as normal.
    # Since their parent class will have the __jsii_type__ variable defined, they
    # will as well anyways.
    if jsii_type is not None:
        attrs["__jsii_type__"] = jsii_type
    # The declared type should NOT be inherited by subclasses. This way we can identify whether
    # an MRO entry corresponds to a possible overrides contributor or not.
    attrs["__jsii_declared_type__"] = jsii_type

    obj = super().__new__(cls, name, bases, attrs)

    # Now that we've created the class, we'll need to register it with our reference
    # mapper. We only do this for types that are actually jsii types, and not any
    # subclasses of them.
    if jsii_type is not None:
        _reference_map.register_type(obj)

    return cast("JSIIMeta", obj)
(cls: Type[jsii._runtime.JSIIMeta], name: str, bases: tuple, attrs: dict, *, jsii_type: Optional[str] = None) -> jsii._runtime.JSIIMeta
5,064
jsii._runtime
data_type
null
def data_type(
    *,
    jsii_type: str,
    jsii_struct_bases: List[Type[Any]],
    name_mapping: Mapping[str, str],
) -> Callable[[T], T]:
    def deco(cls):
        cls.__jsii_type__ = jsii_type
        cls.__jsii_struct_bases__ = jsii_struct_bases
        cls.__jsii_name_mapping__ = name_mapping
        _reference_map.register_data_type(cls)
        return cls

    return deco
(*, jsii_type: str, jsii_struct_bases: List[Type[Any]], name_mapping: Mapping[str, str]) -> Callable[[~T], ~T]
5,065
jsii._runtime
enum
null
def enum(*, jsii_type: str) -> Callable[[T], T]:
    def deco(cls):
        cls.__jsii_type__ = jsii_type
        _reference_map.register_enum(cls)
        return cls

    return deco
(*, jsii_type: str) -> Callable[[~T], ~T]
5,067
jsii._runtime
implements
null
def implements(*interfaces: Type[Any]) -> Callable[[T], T]:
    def deco(cls):
        cls.__jsii_type__ = getattr(cls, "__jsii_type__", None)
        cls.__jsii_ifaces__ = getattr(cls, "__jsii_ifaces__", []) + list(interfaces)
        return cls

    return deco
(*interfaces: Type[Any]) -> Callable[[~T], ~T]
5,068
jsii._runtime
interface
null
def interface(*, jsii_type: str) -> Callable[[T], T]:
    def deco(iface):
        iface.__jsii_type__ = jsii_type
        _reference_map.register_interface(iface)
        return iface

    return deco
(*, jsii_type: str) -> Callable[[~T], ~T]
5,069
jsii._runtime
member
null
def member(*, jsii_name: str) -> Callable[[F], F]:
    def deco(fn):
        fn.__jsii_name__ = jsii_name
        return fn

    return deco
(*, jsii_name: str) -> Callable[[~F], ~F]
5,070
jsii._runtime
proxy_for
null
def proxy_for(abstract_class: Type[Any]) -> Type[Any]:
    if not hasattr(abstract_class, "__jsii_proxy_class__"):
        raise TypeError(f"{abstract_class} is not a JSII Abstract class.")
    return cast(Any, abstract_class).__jsii_proxy_class__()
(abstract_class: Type[Any]) -> Type[Any]
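For orientation, a sketch of how these decorators merely annotate classes. In practice they are applied by code the jsii compiler generates, never written by hand; the jsii_type strings and names below are hypothetical:

    import jsii

    @jsii.interface(jsii_type="example.IGreeter")  # hypothetical FQN
    class IGreeter:
        pass

    @jsii.implements(IGreeter)
    class Greeter:
        @jsii.member(jsii_name="sayHello")  # maps the Python name to the jsii name
        def say_hello(self) -> str:
            return "hello"

    Greeter.__jsii_ifaces__          # [IGreeter]
    Greeter.say_hello.__jsii_name__  # 'sayHello'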
5,073
collections.abc
ItemsView
null
from collections.abc import ItemsView
(mapping)
5,092
collections.abc
_hash
Compute the hash value of a set. Note that we don't define __hash__: not all sets are hashable. But if you define a hashable set type, its __hash__ should call this function. This must be compatible __eq__. All sets ought to compare equal if they contain the same elements, regardless of how they are implemented, and regardless of the order of the elements; so there's not much freedom for __eq__ or __hash__. We match the algorithm used by the built-in frozenset type.
null
(self)
5,093
collections.abc
isdisjoint
Return True if two sets have a null intersection.
null
(self, other)
5,094
collections.abc
KeysView
null
from collections.abc import KeysView
(mapping)
5,115
serpent
Serializer
Serialize an object tree to a byte stream. It is not thread-safe: make sure you're not making changes to the object tree that is being serialized, and don't use the same serializer across different threads.
class Serializer(object):
    """
    Serialize an object tree to a byte stream.
    It is not thread-safe: make sure you're not making changes to the object
    tree that is being serialized, and don't use the same serializer across
    different threads.
    """
    dispatch = {}

    def __init__(self, indent=False, module_in_classname=False, bytes_repr=False):
        """
        Initialize the serializer.
        indent=indent the output over multiple lines (default=false)
        module_in_classname = include module prefix for class names or only use the class name itself
        bytes_repr = should the bytes literal value representation be used instead of base-64 encoding for bytes types?
        """
        self.indent = indent
        self.module_in_classname = module_in_classname
        self.serialized_obj_ids = set()
        self.special_classes_registry_copy = None
        self.maximum_level = min(sys.getrecursionlimit() // 5, 1000)
        self.bytes_repr = bytes_repr

    def serialize(self, obj):
        """Serialize the object tree to bytes."""
        self.special_classes_registry_copy = _special_classes_registry.copy()  # make it thread safe
        header = "# serpent utf-8 python3.2\n"
        out = [header]
        try:
            gc.disable()
            self.serialized_obj_ids = set()
            self._serialize(obj, out, 0)
        finally:
            gc.enable()
        self.special_classes_registry_copy = None
        del self.serialized_obj_ids
        return "".join(out).encode("utf-8")

    _shortcut_dispatch_types = {float, complex, tuple, list, dict, set, frozenset}

    def _serialize(self, obj, out, level):
        if level > self.maximum_level:
            raise ValueError(
                "Object graph nesting too deep. Increase serializer.maximum_level if you think you need more, "
                " but this may cause a RecursionError instead if Python's recursion limit doesn't allow it.")
        t = type(obj)
        if t in _bytes_types:
            out.append(_translate_byte_type(t, obj, self.bytes_repr))
            return
        if t in _translate_types:
            obj = _translate_types[t](obj)
            t = type(obj)
        if t in _repr_types:
            out.append(repr(obj))  # just a simple repr() is enough for these objects
            return
        if t in self._shortcut_dispatch_types:
            # we shortcut these builtins directly to the dispatch function to avoid type lookup overhead below
            return self.dispatch[t](self, obj, out, level)
        # check special registered types:
        special_classes = self.special_classes_registry_copy
        for clazz in special_classes:
            if isinstance(obj, clazz):
                special_classes[clazz](obj, self, out, level)
                return
        # serialize dispatch
        try:
            func = self.dispatch[t]
        except KeyError:
            # walk the MRO until we find a base class we recognise
            for type_ in t.__mro__:
                if type_ in self.dispatch:
                    func = self.dispatch[type_]
                    break
            else:
                # fall back to the default class serializer
                func = Serializer.ser_default_class
        func(self, obj, out, level)

    def ser_builtins_float(self, float_obj, out, level):
        if math.isnan(float_obj):
            # there's no literal expression for a float NaN...
            out.append("{'__class__':'float','value':'nan'}")
        elif math.isinf(float_obj):
            # output a literal expression that overflows the float and results in +/-INF
            if float_obj > 0:
                out.append("1e30000")
            else:
                out.append("-1e30000")
        else:
            out.append(repr(float_obj))
    dispatch[float] = ser_builtins_float

    def ser_builtins_complex(self, complex_obj, out, level):
        out.append("(")
        self.ser_builtins_float(complex_obj.real, out, level)
        if complex_obj.imag >= 0:
            out.append("+")
        self.ser_builtins_float(complex_obj.imag, out, level)
        out.append("j)")
    dispatch[complex] = ser_builtins_complex

    def ser_builtins_tuple(self, tuple_obj, out, level):
        append = out.append
        serialize = self._serialize
        if self.indent and tuple_obj:
            indent_chars = "  " * level
            indent_chars_inside = indent_chars + "  "
            append("(\n")
            for elt in tuple_obj:
                append(indent_chars_inside)
                serialize(elt, out, level + 1)
                append(",\n")
            out[-1] = out[-1].rstrip()  # remove the last \n
            if len(tuple_obj) > 1:
                del out[-1]  # undo the last ,
            append("\n" + indent_chars + ")")
        else:
            append("(")
            for elt in tuple_obj:
                serialize(elt, out, level + 1)
                append(",")
            if len(tuple_obj) > 1:
                del out[-1]  # undo the last ,
            append(")")
    dispatch[tuple] = ser_builtins_tuple

    def ser_builtins_list(self, list_obj, out, level):
        if id(list_obj) in self.serialized_obj_ids:
            raise ValueError("Circular reference detected (list)")
        self.serialized_obj_ids.add(id(list_obj))
        append = out.append
        serialize = self._serialize
        if self.indent and list_obj:
            indent_chars = "  " * level
            indent_chars_inside = indent_chars + "  "
            append("[\n")
            for elt in list_obj:
                append(indent_chars_inside)
                serialize(elt, out, level + 1)
                append(",\n")
            del out[-1]  # remove the last ,\n
            append("\n" + indent_chars + "]")
        else:
            append("[")
            for elt in list_obj:
                serialize(elt, out, level + 1)
                append(",")
            if list_obj:
                del out[-1]  # remove the last ,
            append("]")
        self.serialized_obj_ids.discard(id(list_obj))
    dispatch[list] = ser_builtins_list

    def _check_hashable_type(self, t):
        if t not in (bool, bytes, str, tuple) and not issubclass(t, numbers.Number):
            if issubclass(t, enum.Enum):
                return
            raise TypeError("one of the keys in a dict or set is not of a primitive hashable type: " +
                            str(t) + ". Use simple types as keys or use a list or tuple as container.")

    def ser_builtins_dict(self, dict_obj, out, level):
        if id(dict_obj) in self.serialized_obj_ids:
            raise ValueError("Circular reference detected (dict)")
        self.serialized_obj_ids.add(id(dict_obj))
        append = out.append
        serialize = self._serialize
        if self.indent and dict_obj:
            indent_chars = "  " * level
            indent_chars_inside = indent_chars + "  "
            append("{\n")
            dict_items = dict_obj.items()
            try:
                sorted_items = sorted(dict_items)
            except TypeError:  # can occur when elements can't be ordered (Python 3.x)
                sorted_items = dict_items
            for key, value in sorted_items:
                append(indent_chars_inside)
                self._check_hashable_type(type(key))
                serialize(key, out, level + 1)
                append(": ")
                serialize(value, out, level + 1)
                append(",\n")
            del out[-1]  # remove last ,\n
            append("\n" + indent_chars + "}")
        else:
            append("{")
            for key, value in dict_obj.items():
                self._check_hashable_type(type(key))
                serialize(key, out, level + 1)
                append(":")
                serialize(value, out, level + 1)
                append(",")
            if dict_obj:
                del out[-1]  # remove the last ,
            append("}")
        self.serialized_obj_ids.discard(id(dict_obj))
    dispatch[dict] = ser_builtins_dict

    def ser_builtins_set(self, set_obj, out, level):
        append = out.append
        serialize = self._serialize
        if self.indent and set_obj:
            indent_chars = "  " * level
            indent_chars_inside = indent_chars + "  "
            append("{\n")
            try:
                sorted_elts = sorted(set_obj)
            except TypeError:  # can occur when elements can't be ordered (Python 3.x)
                sorted_elts = set_obj
            for elt in sorted_elts:
                append(indent_chars_inside)
                self._check_hashable_type(type(elt))
                serialize(elt, out, level + 1)
                append(",\n")
            del out[-1]  # remove the last ,\n
            append("\n" + indent_chars + "}")
        elif set_obj:
            append("{")
            for elt in set_obj:
                self._check_hashable_type(type(elt))
                serialize(elt, out, level + 1)
                append(",")
            del out[-1]  # remove the last ,
            append("}")
        else:
            # empty set literal doesn't exist unfortunately, replace with empty tuple
            self.ser_builtins_tuple((), out, level)
    dispatch[set] = ser_builtins_set

    def ser_builtins_frozenset(self, set_obj, out, level):
        self.ser_builtins_set(set_obj, out, level)
    dispatch[frozenset] = ser_builtins_set

    def ser_decimal_Decimal(self, decimal_obj, out, level):
        # decimal is serialized as a string to avoid losing precision
        out.append(repr(str(decimal_obj)))
    dispatch[decimal.Decimal] = ser_decimal_Decimal

    def ser_datetime_datetime(self, datetime_obj, out, level):
        out.append(repr(datetime_obj.isoformat()))
    dispatch[datetime.datetime] = ser_datetime_datetime

    def ser_datetime_date(self, date_obj, out, level):
        out.append(repr(date_obj.isoformat()))
    dispatch[datetime.date] = ser_datetime_date

    def ser_datetime_timedelta(self, timedelta_obj, out, level):
        secs = timedelta_obj.total_seconds()
        out.append(repr(secs))
    dispatch[datetime.timedelta] = ser_datetime_timedelta

    def ser_datetime_time(self, time_obj, out, level):
        out.append(repr(str(time_obj)))
    dispatch[datetime.time] = ser_datetime_time

    def ser_uuid_UUID(self, uuid_obj, out, level):
        out.append(repr(str(uuid_obj)))
    dispatch[uuid.UUID] = ser_uuid_UUID

    def ser_exception_class(self, exc_obj, out, level):
        value = {
            "__class__": self.get_class_name(exc_obj),
            "__exception__": True,
            "args": exc_obj.args,
            "attributes": vars(exc_obj)  # add any custom attributes
        }
        self._serialize(value, out, level)
    dispatch[BaseException] = ser_exception_class

    def ser_array_array(self, array_obj, out, level):
        if array_obj.typecode == 'u':
            self._serialize(array_obj.tounicode(), out, level)
        else:
            self._serialize(array_obj.tolist(), out, level)
    dispatch[array.array] = ser_array_array

    def ser_default_class(self, obj, out, level):
        if id(obj) in self.serialized_obj_ids:
            raise ValueError("Circular reference detected (class)")
        self.serialized_obj_ids.add(id(obj))
        try:
            # note: python 3.11+ object itself now has __getstate__
            has_own_getstate = (
                hasattr(type(obj), '__getstate__')
                and type(obj).__getstate__ is not getattr(object, '__getstate__', None)
            )
            if has_own_getstate:
                value = obj.__getstate__()
                if isinstance(value, dict):
                    self.ser_builtins_dict(value, out, level)
                    return
            else:
                try:
                    value = dict(vars(obj))  # make sure we can serialize anything that resembles a dict
                    value["__class__"] = self.get_class_name(obj)
                except TypeError:
                    if hasattr(obj, "__slots__"):
                        # use the __slots__ instead of the vars dict
                        value = {}
                        for slot in obj.__slots__:
                            value[slot] = getattr(obj, slot)
                        value["__class__"] = self.get_class_name(obj)
                    else:
                        raise TypeError("don't know how to serialize class " +
                                        str(obj.__class__) + ". Give it vars() or an appropriate __getstate__")
            self._serialize(value, out, level)
        finally:
            self.serialized_obj_ids.discard(id(obj))

    def get_class_name(self, obj):
        if self.module_in_classname:
            return "%s.%s" % (obj.__class__.__module__, obj.__class__.__name__)
        else:
            return obj.__class__.__name__
(indent=False, module_in_classname=False, bytes_repr=False)
5,116
serpent
__init__
Initialize the serializer. indent=indent the output over multiple lines (default=false) module_in_classname = include module prefix for class names or only use the class name itself bytes_repr = should the bytes literal value representation be used instead of base-64 encoding for bytes types?
def __init__(self, indent=False, module_in_classname=False, bytes_repr=False):
    """
    Initialize the serializer.
    indent=indent the output over multiple lines (default=false)
    module_in_classname = include module prefix for class names or only use the class name itself
    bytes_repr = should the bytes literal value representation be used instead of base-64 encoding for bytes types?
    """
    self.indent = indent
    self.module_in_classname = module_in_classname
    self.serialized_obj_ids = set()
    self.special_classes_registry_copy = None
    self.maximum_level = min(sys.getrecursionlimit() // 5, 1000)
    self.bytes_repr = bytes_repr
(self, indent=False, module_in_classname=False, bytes_repr=False)
5,117
serpent
_check_hashable_type
null
def _check_hashable_type(self, t):
    if t not in (bool, bytes, str, tuple) and not issubclass(t, numbers.Number):
        if issubclass(t, enum.Enum):
            return
        raise TypeError("one of the keys in a dict or set is not of a primitive hashable type: " +
                        str(t) + ". Use simple types as keys or use a list or tuple as container.")
(self, t)
5,118
serpent
_serialize
null
def _serialize(self, obj, out, level):
    if level > self.maximum_level:
        raise ValueError(
            "Object graph nesting too deep. Increase serializer.maximum_level if you think you need more, "
            " but this may cause a RecursionError instead if Python's recursion limit doesn't allow it.")
    t = type(obj)
    if t in _bytes_types:
        out.append(_translate_byte_type(t, obj, self.bytes_repr))
        return
    if t in _translate_types:
        obj = _translate_types[t](obj)
        t = type(obj)
    if t in _repr_types:
        out.append(repr(obj))  # just a simple repr() is enough for these objects
        return
    if t in self._shortcut_dispatch_types:
        # we shortcut these builtins directly to the dispatch function to avoid type lookup overhead below
        return self.dispatch[t](self, obj, out, level)
    # check special registered types:
    special_classes = self.special_classes_registry_copy
    for clazz in special_classes:
        if isinstance(obj, clazz):
            special_classes[clazz](obj, self, out, level)
            return
    # serialize dispatch
    try:
        func = self.dispatch[t]
    except KeyError:
        # walk the MRO until we find a base class we recognise
        for type_ in t.__mro__:
            if type_ in self.dispatch:
                func = self.dispatch[type_]
                break
        else:
            # fall back to the default class serializer
            func = Serializer.ser_default_class
    func(self, obj, out, level)
(self, obj, out, level)
5,119
serpent
get_class_name
null
def get_class_name(self, obj):
    if self.module_in_classname:
        return "%s.%s" % (obj.__class__.__module__, obj.__class__.__name__)
    else:
        return obj.__class__.__name__
(self, obj)
5,120
serpent
ser_array_array
null
def ser_array_array(self, array_obj, out, level):
    if array_obj.typecode == 'u':
        self._serialize(array_obj.tounicode(), out, level)
    else:
        self._serialize(array_obj.tolist(), out, level)
(self, array_obj, out, level)
5,121
serpent
ser_builtins_complex
null
def ser_builtins_complex(self, complex_obj, out, level):
    out.append("(")
    self.ser_builtins_float(complex_obj.real, out, level)
    if complex_obj.imag >= 0:
        out.append("+")
    self.ser_builtins_float(complex_obj.imag, out, level)
    out.append("j)")
(self, complex_obj, out, level)
5,122
serpent
ser_builtins_dict
null
def ser_builtins_dict(self, dict_obj, out, level):
    if id(dict_obj) in self.serialized_obj_ids:
        raise ValueError("Circular reference detected (dict)")
    self.serialized_obj_ids.add(id(dict_obj))
    append = out.append
    serialize = self._serialize
    if self.indent and dict_obj:
        indent_chars = "  " * level
        indent_chars_inside = indent_chars + "  "
        append("{\n")
        dict_items = dict_obj.items()
        try:
            sorted_items = sorted(dict_items)
        except TypeError:  # can occur when elements can't be ordered (Python 3.x)
            sorted_items = dict_items
        for key, value in sorted_items:
            append(indent_chars_inside)
            self._check_hashable_type(type(key))
            serialize(key, out, level + 1)
            append(": ")
            serialize(value, out, level + 1)
            append(",\n")
        del out[-1]  # remove last ,\n
        append("\n" + indent_chars + "}")
    else:
        append("{")
        for key, value in dict_obj.items():
            self._check_hashable_type(type(key))
            serialize(key, out, level + 1)
            append(":")
            serialize(value, out, level + 1)
            append(",")
        if dict_obj:
            del out[-1]  # remove the last ,
        append("}")
    self.serialized_obj_ids.discard(id(dict_obj))
(self, dict_obj, out, level)
5,123
serpent
ser_builtins_float
null
def ser_builtins_float(self, float_obj, out, level):
    if math.isnan(float_obj):
        # there's no literal expression for a float NaN...
        out.append("{'__class__':'float','value':'nan'}")
    elif math.isinf(float_obj):
        # output a literal expression that overflows the float and results in +/-INF
        if float_obj > 0:
            out.append("1e30000")
        else:
            out.append("-1e30000")
    else:
        out.append(repr(float_obj))
(self, float_obj, out, level)
5,124
serpent
ser_builtins_frozenset
null
def ser_builtins_frozenset(self, set_obj, out, level):
    self.ser_builtins_set(set_obj, out, level)
(self, set_obj, out, level)
5,125
serpent
ser_builtins_list
null
def ser_builtins_list(self, list_obj, out, level):
    if id(list_obj) in self.serialized_obj_ids:
        raise ValueError("Circular reference detected (list)")
    self.serialized_obj_ids.add(id(list_obj))
    append = out.append
    serialize = self._serialize
    if self.indent and list_obj:
        indent_chars = "  " * level
        indent_chars_inside = indent_chars + "  "
        append("[\n")
        for elt in list_obj:
            append(indent_chars_inside)
            serialize(elt, out, level + 1)
            append(",\n")
        del out[-1]  # remove the last ,\n
        append("\n" + indent_chars + "]")
    else:
        append("[")
        for elt in list_obj:
            serialize(elt, out, level + 1)
            append(",")
        if list_obj:
            del out[-1]  # remove the last ,
        append("]")
    self.serialized_obj_ids.discard(id(list_obj))
(self, list_obj, out, level)
5,126
serpent
ser_builtins_set
null
def ser_builtins_set(self, set_obj, out, level):
    append = out.append
    serialize = self._serialize
    if self.indent and set_obj:
        indent_chars = "  " * level
        indent_chars_inside = indent_chars + "  "
        append("{\n")
        try:
            sorted_elts = sorted(set_obj)
        except TypeError:  # can occur when elements can't be ordered (Python 3.x)
            sorted_elts = set_obj
        for elt in sorted_elts:
            append(indent_chars_inside)
            self._check_hashable_type(type(elt))
            serialize(elt, out, level + 1)
            append(",\n")
        del out[-1]  # remove the last ,\n
        append("\n" + indent_chars + "}")
    elif set_obj:
        append("{")
        for elt in set_obj:
            self._check_hashable_type(type(elt))
            serialize(elt, out, level + 1)
            append(",")
        del out[-1]  # remove the last ,
        append("}")
    else:
        # empty set literal doesn't exist unfortunately, replace with empty tuple
        self.ser_builtins_tuple((), out, level)
(self, set_obj, out, level)
5,127
serpent
ser_builtins_tuple
null
def ser_builtins_tuple(self, tuple_obj, out, level):
    append = out.append
    serialize = self._serialize
    if self.indent and tuple_obj:
        indent_chars = "  " * level
        indent_chars_inside = indent_chars + "  "
        append("(\n")
        for elt in tuple_obj:
            append(indent_chars_inside)
            serialize(elt, out, level + 1)
            append(",\n")
        out[-1] = out[-1].rstrip()  # remove the last \n
        if len(tuple_obj) > 1:
            del out[-1]  # undo the last ,
        append("\n" + indent_chars + ")")
    else:
        append("(")
        for elt in tuple_obj:
            serialize(elt, out, level + 1)
            append(",")
        if len(tuple_obj) > 1:
            del out[-1]  # undo the last ,
        append(")")
(self, tuple_obj, out, level)
5,128
serpent
ser_datetime_date
null
def ser_datetime_date(self, date_obj, out, level):
    out.append(repr(date_obj.isoformat()))
(self, date_obj, out, level)
5,129
serpent
ser_datetime_datetime
null
def ser_datetime_datetime(self, datetime_obj, out, level):
    out.append(repr(datetime_obj.isoformat()))
(self, datetime_obj, out, level)
5,130
serpent
ser_datetime_time
null
def ser_datetime_time(self, time_obj, out, level):
    out.append(repr(str(time_obj)))
(self, time_obj, out, level)
5,131
serpent
ser_datetime_timedelta
null
def ser_datetime_timedelta(self, timedelta_obj, out, level):
    secs = timedelta_obj.total_seconds()
    out.append(repr(secs))
(self, timedelta_obj, out, level)
5,132
serpent
ser_decimal_Decimal
null
def ser_decimal_Decimal(self, decimal_obj, out, level):
    # decimal is serialized as a string to avoid losing precision
    out.append(repr(str(decimal_obj)))
(self, decimal_obj, out, level)
5,133
serpent
ser_default_class
null
def ser_default_class(self, obj, out, level):
    if id(obj) in self.serialized_obj_ids:
        raise ValueError("Circular reference detected (class)")
    self.serialized_obj_ids.add(id(obj))
    try:
        # note: python 3.11+ object itself now has __getstate__
        has_own_getstate = (
            hasattr(type(obj), '__getstate__')
            and type(obj).__getstate__ is not getattr(object, '__getstate__', None)
        )
        if has_own_getstate:
            value = obj.__getstate__()
            if isinstance(value, dict):
                self.ser_builtins_dict(value, out, level)
                return
        else:
            try:
                value = dict(vars(obj))  # make sure we can serialize anything that resembles a dict
                value["__class__"] = self.get_class_name(obj)
            except TypeError:
                if hasattr(obj, "__slots__"):
                    # use the __slots__ instead of the vars dict
                    value = {}
                    for slot in obj.__slots__:
                        value[slot] = getattr(obj, slot)
                    value["__class__"] = self.get_class_name(obj)
                else:
                    raise TypeError("don't know how to serialize class " +
                                    str(obj.__class__) + ". Give it vars() or an appropriate __getstate__")
        self._serialize(value, out, level)
    finally:
        self.serialized_obj_ids.discard(id(obj))
(self, obj, out, level)
5,134
serpent
ser_exception_class
null
def ser_exception_class(self, exc_obj, out, level):
    value = {
        "__class__": self.get_class_name(exc_obj),
        "__exception__": True,
        "args": exc_obj.args,
        "attributes": vars(exc_obj)  # add any custom attributes
    }
    self._serialize(value, out, level)
(self, exc_obj, out, level)
5,135
serpent
ser_uuid_UUID
null
def ser_uuid_UUID(self, uuid_obj, out, level):
    out.append(repr(str(uuid_obj)))
(self, uuid_obj, out, level)
5,136
serpent
serialize
Serialize the object tree to bytes.
def serialize(self, obj):
    """Serialize the object tree to bytes."""
    self.special_classes_registry_copy = _special_classes_registry.copy()  # make it thread safe
    header = "# serpent utf-8 python3.2\n"
    out = [header]
    try:
        gc.disable()
        self.serialized_obj_ids = set()
        self._serialize(obj, out, 0)
    finally:
        gc.enable()
    self.special_classes_registry_copy = None
    del self.serialized_obj_ids
    return "".join(out).encode("utf-8")
(self, obj)
5,137
collections.abc
ValuesView
null
from collections.abc import ValuesView
(mapping)
5,143
serpent
_reset_special_classes_registry
null
def _reset_special_classes_registry():
    _special_classes_registry.clear()
    _special_classes_registry[KeysView] = _ser_DictView
    _special_classes_registry[ValuesView] = _ser_DictView
    _special_classes_registry[ItemsView] = _ser_DictView
    _special_classes_registry[collections.OrderedDict] = _ser_OrderedDict

    def _ser_Enum(obj, serializer, outputstream, indentlevel):
        serializer._serialize(obj.value, outputstream, indentlevel)

    _special_classes_registry[enum.Enum] = _ser_Enum
()
5,144
serpent
_ser_DictView
null
def _ser_DictView(obj, serializer, outputstream, indentlevel):
    serializer.ser_builtins_list(obj, outputstream, indentlevel)
(obj, serializer, outputstream, indentlevel)
5,145
serpent
_ser_OrderedDict
null
def _ser_OrderedDict(obj, serializer, outputstream, indentlevel):
    obj = {
        "__class__": "collections.OrderedDict" if serializer.module_in_classname else "OrderedDict",
        "items": list(obj.items())
    }
    serializer._serialize(obj, outputstream, indentlevel)
(obj, serializer, outputstream, indentlevel)
5,146
serpent
_translate_byte_type
null
def _translate_byte_type(t, data, bytes_repr):
    if bytes_repr:
        if t == bytes:
            return repr(data)
        elif t == bytearray:
            return repr(bytes(data))
        elif t == memoryview:
            return repr(bytes(data))
        else:
            raise TypeError("invalid bytes type")
    else:
        b64 = base64.b64encode(data)
        return repr({
            "data": b64 if type(b64) is str else b64.decode("ascii"),
            "encoding": "base64"
        })
(t, data, bytes_repr)
5,154
serpent
dump
Serialize object tree to a file. indent = indent the output over multiple lines (default=false) module_in_classname = include module prefix for class names or only use the class name itself bytes_repr = should the bytes literal value representation be used instead of base-64 encoding for bytes types?
def dump(obj, file, indent=False, module_in_classname=False, bytes_repr=False):
    """
    Serialize object tree to a file.
    indent = indent the output over multiple lines (default=false)
    module_in_classname = include module prefix for class names or only use the class name itself
    bytes_repr = should the bytes literal value representation be used instead of base-64 encoding for bytes types?
    """
    file.write(dumps(obj, indent=indent, module_in_classname=module_in_classname, bytes_repr=bytes_repr))
(obj, file, indent=False, module_in_classname=False, bytes_repr=False)
5,155
serpent
dumps
Serialize object tree to bytes. indent = indent the output over multiple lines (default=false) module_in_classname = include module prefix for class names or only use the class name itself bytes_repr = should the bytes literal value representation be used instead of base-64 encoding for bytes types?
def dumps(obj, indent=False, module_in_classname=False, bytes_repr=False):
    """
    Serialize object tree to bytes.
    indent = indent the output over multiple lines (default=false)
    module_in_classname = include module prefix for class names or only use the class name itself
    bytes_repr = should the bytes literal value representation be used instead of base-64 encoding for bytes types?
    """
    return Serializer(indent, module_in_classname, bytes_repr).serialize(obj)
(obj, indent=False, module_in_classname=False, bytes_repr=False)
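A round-trip sketch: dumps() produces UTF-8 encoded literal text as bytes, and loads() parses it back with ast.literal_eval; the data below is illustrative:

    import serpent

    data = {"name": "Harry", "scores": (8, 9, 10)}
    ser = serpent.dumps(data, indent=True)  # bytes
    clone = serpent.loads(ser)
    assert clone["name"] == "Harry"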
5,158
serpent
load
Deserialize bytes from a file back to object tree. Uses ast.literal_eval (safe).
def load(file):
    """Deserialize bytes from a file back to object tree. Uses ast.literal_eval (safe)."""
    data = file.read()
    return loads(data)
(file)
5,159
serpent
loads
Deserialize bytes back to object tree. Uses ast.literal_eval (safe).
def loads(serialized_bytes):
    """Deserialize bytes back to object tree. Uses ast.literal_eval (safe)."""
    serialized = codecs.decode(serialized_bytes, "utf-8")
    if '\x00' in serialized:
        raise ValueError(
            "The serpent data contains 0-bytes so it cannot be parsed by ast.literal_eval. Has it been corrupted?")
    try:
        gc.disable()
        return ast.literal_eval(serialized)
    finally:
        gc.enable()
(serialized_bytes)
5,162
serpent
register_class
Register a special serializer function for objects of the given class. The function will be called with (object, serpent_serializer, outputstream, indentlevel) arguments. The function must write the serialized data to outputstream. It doesn't return a value.
def register_class(clazz, serializer):
    """
    Register a special serializer function for objects of the given class.
    The function will be called with (object, serpent_serializer, outputstream, indentlevel)
    arguments. The function must write the serialized data to outputstream.
    It doesn't return a value.
    """
    _special_classes_registry[clazz] = serializer
(clazz, serializer)
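An illustrative registration for a hypothetical Point class; per the docstring, the serializer function writes to the output stream, here by delegating to the serializer's internal _serialize as the other special-case serializers above do:

    import serpent

    class Point:
        def __init__(self, x, y):
            self.x, self.y = x, y

    def ser_point(obj, serializer, outputstream, indentlevel):
        # emit a plain dict representation via the serializer itself
        serializer._serialize({"__class__": "Point", "x": obj.x, "y": obj.y},
                              outputstream, indentlevel)

    serpent.register_class(Point, ser_point)
    serpent.dumps(Point(1, 2))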
5,164
serpent
tobytes
Utility function to convert obj back to actual bytes if it is a serpent-encoded bytes dictionary (a dict with base-64 encoded 'data' in it and 'encoding'='base64'). If obj is already bytes or a byte-like type, return obj unmodified. Will raise TypeError if obj is none of the above. All this is not required if you called serpent with 'bytes_repr' set to True, since Serpent 1.40 that can be used to directly encode bytes into the bytes literal value representation. That will be less efficient than the default base-64 encoding though, but it's a bit more convenient.
def tobytes(obj):
    """
    Utility function to convert obj back to actual bytes if it is a
    serpent-encoded bytes dictionary (a dict with base-64 encoded 'data'
    in it and 'encoding'='base64'). If obj is already bytes or a byte-like
    type, return obj unmodified. Will raise TypeError if obj is none of the
    above. All this is not required if you called serpent with 'bytes_repr'
    set to True, since Serpent 1.40 that can be used to directly encode bytes
    into the bytes literal value representation. That will be less efficient
    than the default base-64 encoding though, but it's a bit more convenient.
    """
    if isinstance(obj, _bytes_types):
        return obj
    if isinstance(obj, dict) and "data" in obj and obj.get("encoding") == "base64":
        try:
            return base64.b64decode(obj["data"])
        except TypeError:
            return base64.b64decode(obj["data"].encode("ascii"))  # needed for certain older versions of pypy
    raise TypeError("argument is neither bytes nor serpent base64 encoded bytes dict")
(obj)
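What tobytes() undoes, sketched end to end: bytes are serialized as a base-64 dict by default, and tobytes() recovers the original value:

    import serpent

    ser = serpent.dumps(b"hello")
    obj = serpent.loads(ser)  # {'data': 'aGVsbG8=', 'encoding': 'base64'}
    assert serpent.tobytes(obj) == b"hello"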
5,165
serpent
unregister_class
Unregister the specialcase serializer for the given class.
def unregister_class(clazz):
    """Unregister the specialcase serializer for the given class."""
    if clazz in _special_classes_registry:
        del _special_classes_registry[clazz]
(clazz)
5,167
releases
BulletListVisitor
null
class BulletListVisitor(nodes.NodeVisitor):
    def __init__(self, document, app, docnames, is_singlepage):
        nodes.NodeVisitor.__init__(self, document)
        self.found_changelog = False
        self.app = app
        # document names to seek out (eg "changelog")
        self.docnames = docnames
        self.is_singlepage = is_singlepage

    def visit_bullet_list(self, node):
        # Short circuit if already mutated a changelog bullet list or if the
        # one being visited doesn't appear to apply.
        if self.found_changelog:
            return
        # Also short circuit if we're in singlepage mode and the node's parent
        # doesn't seem to be named after an expected changelog docname. In this
        # mode, this is the earliest we can actually tell whether a given
        # bullet list is or is not "the changelog".
        if (
            self.is_singlepage
            and node.parent.attributes.get("docname", None) not in self.docnames
        ):
            return
        # At this point, we can safely assume the node we're visiting is the
        # right one to mutate.
        self.found_changelog = True
        # Walk + parse into release mapping
        releases, _ = construct_releases(node.children, self.app)
        # Construct new set of nodes to replace the old, and we're done
        node.replace_self(construct_nodes(releases))

    def unknown_visit(self, node):
        pass
(document, app, docnames, is_singlepage)
5,168
releases
__init__
null
def __init__(self, document, app, docnames, is_singlepage): nodes.NodeVisitor.__init__(self, document) self.found_changelog = False self.app = app # document names to seek out (eg "changelog") self.docnames = docnames self.is_singlepage = is_singlepage
(self, document, app, docnames, is_singlepage)
5,169
docutils.nodes
dispatch_departure
Call self."``depart_`` + node class name" with `node` as parameter. If the ``depart_...`` method does not exist, call self.unknown_departure.
def dispatch_departure(self, node): """ Call self."``depart_`` + node class name" with `node` as parameter. If the ``depart_...`` method does not exist, call self.unknown_departure. """ node_name = node.__class__.__name__ method = getattr(self, 'depart_' + node_name, self.unknown_departure) self.document.reporter.debug( 'docutils.nodes.NodeVisitor.dispatch_departure calling %s for %s' % (method.__name__, node_name)) return method(node)
(self, node)
5,170
docutils.nodes
dispatch_visit
Call self."``visit_`` + node class name" with `node` as parameter. If the ``visit_...`` method does not exist, call self.unknown_visit.
def dispatch_visit(self, node): """ Call self."``visit_`` + node class name" with `node` as parameter. If the ``visit_...`` method does not exist, call self.unknown_visit. """ node_name = node.__class__.__name__ method = getattr(self, 'visit_' + node_name, self.unknown_visit) self.document.reporter.debug( 'docutils.nodes.NodeVisitor.dispatch_visit calling %s for %s' % (method.__name__, node_name)) return method(node)
(self, node)
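dispatch_visit and dispatch_departure together implement the visitor pattern: document.walkabout calls the former on the way into each node and the latter on the way out, resolving the handler name from the node's class. A minimal sketch using SparseNodeVisitor, whose unknown-node hooks are no-ops, so only the handler of interest needs defining:

    from docutils import nodes
    from docutils.core import publish_doctree

    class ParagraphCounter(nodes.SparseNodeVisitor):
        def __init__(self, document):
            super().__init__(document)
            self.count = 0

        def visit_paragraph(self, node):
            # dispatch_visit resolves 'visit_' + 'paragraph' to this method
            self.count += 1

    doctree = publish_doctree("First paragraph.\n\nSecond paragraph.")
    visitor = ParagraphCounter(doctree)
    doctree.walkabout(visitor)  # drives dispatch_visit/dispatch_departure per node
    print(visitor.count)        # 2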
5,171
docutils.nodes
unknown_departure
Called before exiting unknown `Node` types. Raise exception unless overridden.
def unknown_departure(self, node): """ Called before exiting unknown `Node` types. Raise exception unless overridden. """ if (self.document.settings.strict_visitor or node.__class__.__name__ not in self.optional): raise NotImplementedError( '%s departing unknown node type: %s' % (self.__class__, node.__class__.__name__))
(self, node)
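When subclassing NodeVisitor directly, every node type encountered during traversal must either get a visit_/depart_ pair or appear in the class-level ``optional`` tuple; otherwise the unknown_visit/unknown_departure fallbacks raise NotImplementedError. A sketch that whitelists everything except titles (nodes.node_class_names is docutils' list of known node type names):

    from docutils import nodes
    from docutils.core import publish_doctree

    class TitlesOnly(nodes.NodeVisitor):
        # Everything we don't handle is declared optional, so the fallback
        # shown above skips those node types instead of raising.
        optional = tuple(n for n in nodes.node_class_names if n != "title")

        def visit_title(self, node):
            print(node.astext())

        def depart_title(self, node):
            pass

    doctree = publish_doctree("Heading\n=======\n\nBody text.")
    doctree.walkabout(TitlesOnly(doctree))  # prints: Heading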
5,172
releases
unknown_visit
null
def unknown_visit(self, node): pass
(self, node)
5,173
releases
visit_bullet_list
null
def visit_bullet_list(self, node): # Short circuit if already mutated a changelog bullet list or if the # one being visited doesn't appear to apply. if self.found_changelog: return # Also short circuit if we're in singlepage mode and the node's parent # doesn't seem to be named after an expected changelog docname. In this # mode, this is the earliest we can actually tell whether a given # bullet list is or is not "the changelog". if ( self.is_singlepage and node.parent.attributes.get("docname", None) not in self.docnames ): return # At this point, we can safely assume the node we're visiting is the # right one to mutate. self.found_changelog = True # Walk + parse into release mapping releases, _ = construct_releases(node.children, self.app) # Construct new set of nodes to replace the old, and we're done node.replace_self(construct_nodes(releases))
(self, node)
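For context, a hedged sketch of how a visitor like this is typically driven from Sphinx. The event wiring below is an assumption for illustration rather than the releases extension's actual setup code; the import path follows the package column, and the "changelog" docname is just the conventional default.

    from releases import BulletListVisitor  # import path assumed from package column

    def on_doctree_resolved(app, doctree, docname):
        # Hypothetical handler: walk the resolved doctree so that
        # visit_bullet_list above can locate and replace the changelog list.
        visitor = BulletListVisitor(doctree, app, {"changelog"}, is_singlepage=False)
        doctree.walkabout(visitor)

    def setup(app):
        app.connect("doctree-resolved", on_doctree_resolved)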
5,174
releases.models
Issue
null
class Issue(nodes.Element): # Technically, we just need number, but heck, you never know... _cmp_keys = ("type", "number", "backported", "major") @property def type(self): return self["type_"] @property def is_featurelike(self): if self.type == "bug": return self.major else: return not self.backported @property def is_buglike(self): return not self.is_featurelike @property def backported(self): return self.get("backported", False) @property def major(self): return self.get("major", False) @property def number(self): return self.get("number", None) @property def spec(self): return self.get("spec", None) def __eq__(self, other): for attr in self._cmp_keys: if getattr(self, attr, None) != getattr(other, attr, None): return False return True def __hash__(self): return reduce(xor, [hash(getattr(self, x)) for x in self._cmp_keys]) def minor_releases(self, manager): """ Return all minor release line labels found in ``manager``. """ # TODO: yea deffo need a real object for 'manager', heh. E.g. we do a # very similar test for "do you have any actual releases yet?" # elsewhere. (This may be fodder for changing how we roll up # pre-major-release features though...?) return [ key for key, value in manager.items() if any(x for x in value if not x.startswith("unreleased")) ] def default_spec(self, manager): """ Given the current release-lines structure, return a default Spec. Specifics: * For feature-like issues, only the highest major release is used, so given a ``manager`` with top level keys of ``[1, 2]``, this would return ``Spec(">=2")``. * When ``releases_always_forwardport_features`` is ``True``, that behavior is nullified, and this function always returns the empty ``Spec`` (which matches any and all versions/lines). * For bugfix-like issues, we only consider major release families which have actual releases already. * Thus the core difference here is that features are 'consumed' by upcoming major releases, and bugfixes are not. * When the ``unstable_prehistory`` setting is ``True``, the default spec starts at the oldest non-zero release line. (Otherwise, issues posted after prehistory ends would try being added to the 0.x part of the tree, which makes no sense in unstable-prehistory mode.) """ # TODO: I feel like this + the surrounding bits in add_to_manager() # could be consolidated & simplified... specstr = "" # Make sure truly-default spec skips 0.x if prehistory was unstable. stable_families = manager.stable_families if manager.config.releases_unstable_prehistory and stable_families: specstr = ">={}".format(min(stable_families)) if self.is_featurelike: # TODO: if app->config-><releases_always_forwardport_features or # w/e if True: specstr = ">={}".format(max(manager.keys())) else: # Can only meaningfully limit to minor release buckets if they # actually exist yet. buckets = self.minor_releases(manager) if buckets: specstr = ">={}".format(max(buckets)) return Spec(specstr) if specstr else Spec() def add_to_manager(self, manager): """ Given a 'manager' structure, add self to one or more of its 'buckets'. """ # Derive version spec allowing us to filter against major/minor buckets spec = self.spec or self.default_spec(manager) # Only look in appropriate major version/family; if self is an issue # declared as living in e.g. >=2, this means we don't even bother # looking in the 1.x family. families = [Version(str(x)) for x in manager] versions = list(spec.filter(families)) for version in versions: family = version.major # Within each family, we further limit which bugfix lines match up # to what self cares about (ignoring 'unreleased' until later) candidates = [ Version(x) for x in manager[family] if not x.startswith("unreleased") ] # Select matching release lines (& stringify) buckets = [] bugfix_buckets = [str(x) for x in spec.filter(candidates)] # Add back in unreleased_* as appropriate # TODO: probably leverage Issue subclasses for this eventually? if self.is_buglike: buckets.extend(bugfix_buckets) # Don't put into JUST unreleased_bugfix; it implies that this # major release/family hasn't actually seen any releases yet # and only exists for features to go into. if bugfix_buckets: buckets.append("unreleased_bugfix") # Obtain list of minor releases to check for "haven't had ANY # releases yet" corner case, in which case ALL issues get thrown in # unreleased_feature for the first release to consume. # NOTE: assumes first release is a minor or major one, # but...really? why would your first release be a bugfix one?? no_releases = not self.minor_releases(manager) if self.is_featurelike or self.backported or no_releases: buckets.append("unreleased_feature") # Now that we know which buckets are appropriate, add ourself to # all of them. TODO: or just...do it above...instead... for bucket in buckets: manager[family][bucket].append(self) def __repr__(self): flag = "" if self.backported: flag = "backported" elif self.major: flag = "major" elif self.spec: flag = self.spec if flag: flag = " ({})".format(flag) return "<{issue.type} #{issue.number}{flag}>".format( issue=self, flag=flag )
(rawsource='', *children, **attributes)
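A sketch of the value semantics Issue defines above. Construction by keyword relies on the docutils Element constructor recorded in the signature (keyword arguments land in the node's attribute dict, which the properties above read back), and the import path is taken from the package column:

    from releases.models import Issue

    bug = Issue(type_="bug", number="42")
    feature = Issue(type_="feature", number="43")
    major_bug = Issue(type_="bug", number="44", major=True)

    assert bug.is_buglike and not bug.is_featurelike
    assert feature.is_featurelike        # non-backported features are featurelike
    assert major_bug.is_featurelike      # 'major' bugs are treated like features
    assert bug == Issue(type_="bug", number="42")  # equality over _cmp_keys only
    assert hash(bug) == hash(Issue(type_="bug", number="42"))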