index | package | name | docstring | code | signature |
---|---|---|---|---|---|
728,411 | tables.file | _create_transaction | null | def _create_transaction(self, troot, tid):
return TransactionG(
troot, _trans_name % tid,
"Transaction number %d" % tid, new=True)
| (self, troot, tid) |
728,412 | tables.file | _create_transaction_group | null | def _create_transaction_group(self):
tgroup = TransactionGroupG(
self.root, _trans_group_name,
"Transaction information container", new=True)
# The format of the transaction container.
tgroup._v_attrs._g__setattr('FORMATVERSION', _trans_version)
return tgroup
| (self) |
728,413 | tables.file | _doundo | Undo/Redo actions up to final action in the specified direction. | def _doundo(self, finalaction, direction):
    """Undo/Redo actions up to final action in the specified direction."""
if direction < 0:
actionlog = \
self._actionlog[finalaction + 1:self._curaction + 1][::-1]
else:
actionlog = self._actionlog[self._curaction:finalaction]
# Uncomment this for debugging
# print("curaction, finalaction, direction", \
# self._curaction, finalaction, direction)
for i in range(len(actionlog)):
if actionlog['opcode'][i] != _op_to_code["MARK"]:
# undo/redo the action
if direction > 0:
# Uncomment this for debugging
# print("redo-->", \
# _code_to_op[actionlog['opcode'][i]],\
# actionlog['arg1'][i],\
# actionlog['arg2'][i])
undoredo.redo(self,
# _code_to_op[actionlog['opcode'][i]],
# The next is a workaround for python < 2.5
_code_to_op[int(actionlog['opcode'][i])],
actionlog['arg1'][i].decode('utf8'),
actionlog['arg2'][i].decode('utf8'))
else:
# Uncomment this for debugging
# print("undo-->", \
# _code_to_op[actionlog['opcode'][i]],\
# actionlog['arg1'][i].decode('utf8'),\
# actionlog['arg2'][i].decode('utf8'))
undoredo.undo(self,
# _code_to_op[actionlog['opcode'][i]],
# The next is a workaround for python < 2.5
_code_to_op[int(actionlog['opcode'][i])],
actionlog['arg1'][i].decode('utf8'),
actionlog['arg2'][i].decode('utf8'))
else:
if direction > 0:
self._curmark = int(actionlog['arg1'][i])
else:
self._curmark = int(actionlog['arg1'][i]) - 1
# Protection against negative marks
if self._curmark < 0:
self._curmark = 0
self._curaction += direction
| (self, finalaction, direction) |
728,414 | tables.file | _get_final_action | Get the final action to go to for the given mark.
It does not touch self's private attributes.
 | def _get_final_action(self, markid):
"""Get the final action to go to for the given mark.
It does not touch self's private attributes.
"""
if markid > self._nmarks - 1:
# The required mark is beyond the end of the action log
# The final action is the last row
return self._actionlog.nrows
elif markid <= 0:
# The required mark is the first one
# return the first row
return 0
return self._seqmarkers[markid]
| (self, markid) |
728,415 | tables.file | _get_mark_id | Get an integer markid from a mark sequence number or name. | def _get_mark_id(self, mark):
"""Get an integer markid from a mark sequence number or name."""
if isinstance(mark, int):
markid = mark
elif isinstance(mark, str):
if mark not in self._markers:
lmarkers = sorted(self._markers)
raise UndoRedoError("The mark that you have specified has not "
"been found in the internal marker list: "
"%r" % lmarkers)
markid = self._markers[mark]
else:
raise TypeError("Parameter mark can only be an integer or a "
"string, and you passed a type <%s>" % type(mark))
# print("markid, self._nmarks:", markid, self._nmarks)
return markid
| (self, mark) |
728,416 | tables.file | _get_node | null | def _get_node(self, nodepath):
# The root node is always at hand.
if nodepath == '/':
return self.root
node = self._node_manager.get_node(nodepath)
assert node is not None, "unable to instantiate node ``%s``" % nodepath
return node
| (self, nodepath) |
728,417 | tables.file | _get_or_create_path | Get the given `path` or create it if `create` is true.
If `create` is true, `path` *must* be a string path and not a
node, otherwise a `TypeError` will be raised.
| def _get_or_create_path(self, path, create):
"""Get the given `path` or create it if `create` is true.
If `create` is true, `path` *must* be a string path and not a
node, otherwise a `TypeError` will be raised.
"""
if create:
return self._create_path(path)
else:
return self.get_node(path)
| (self, path, create) |
728,418 | tables.file | _iswritable | Is this file writable? | def _iswritable(self):
"""Is this file writable?"""
return self.mode in ('w', 'a', 'r+')
| (self) |
728,419 | tables.file | _log | Log an action.
The `action` must be an all-uppercase string identifying it.
Arguments must also be strings.
This method should be called once the action has been completed.
This method can only be called when the Undo/Redo mechanism has
been enabled. Otherwise, an `UndoRedoError` is raised.
| def _log(self, action, *args):
"""Log an action.
The `action` must be an all-uppercase string identifying it.
Arguments must also be strings.
This method should be called once the action has been completed.
This method can only be called when the Undo/Redo mechanism has
been enabled. Otherwise, an `UndoRedoError` is raised.
"""
assert self.is_undo_enabled()
maxundo = self.params['MAX_UNDO_PATH_LENGTH']
# Check whether we are at the end of the action log or not
if self._curaction != self._actionlog.nrows - 1:
# We are not, so delete the trailing actions
self._actionlog.remove_rows(self._curaction + 1,
self._actionlog.nrows)
# Reset the current marker group
mnode = self.get_node(_markPath % (self._curtransaction,
self._curmark))
mnode._g_reset()
# Delete the marker groups with backup objects
for mark in range(self._curmark + 1, self._nmarks):
mnode = self.get_node(_markPath % (self._curtransaction, mark))
mnode._g_remove(recursive=1)
# Update the new number of marks
self._nmarks = self._curmark + 1
self._seqmarkers = self._seqmarkers[:self._nmarks]
if action not in _op_to_code: # INTERNAL
raise UndoRedoError("Action ``%s`` not in ``_op_to_code`` "
"dictionary: %r" % (action, _op_to_code))
arg1 = ""
arg2 = ""
if len(args) <= 1:
arg1 = args[0]
elif len(args) <= 2:
arg1 = args[0]
arg2 = args[1]
else: # INTERNAL
raise UndoRedoError("Too many parameters for action log: "
"%r").with_traceback(args)
if (len(arg1) > maxundo
or len(arg2) > maxundo): # INTERNAL
raise UndoRedoError("Parameter arg1 or arg2 is too long: "
"(%r, %r)" % (arg1, arg2))
# print("Logging-->", (action, arg1, arg2))
self._actionlog.append([(_op_to_code[action],
arg1.encode('utf-8'),
arg2.encode('utf-8'))])
self._curaction += 1
| (self, action, *args) |
728,420 | tables.file | _shadow_name | Compute and return a shadow name.
Computes the current shadow name according to the current
transaction, mark and action. It returns a tuple with the
shadow parent node and the name of the shadow in it.
| def _shadow_name(self):
"""Compute and return a shadow name.
Computes the current shadow name according to the current
transaction, mark and action. It returns a tuple with the
shadow parent node and the name of the shadow in it.
"""
parent = self.get_node(
_shadow_parent % (self._curtransaction, self._curmark))
name = _shadow_name % (self._curaction,)
return (parent, name)
| (self) |
728,421 | tables.file | _update_node_locations | Update location information of nodes under `oldpath`.
This only affects *already loaded* nodes.
| def _update_node_locations(self, oldpath, newpath):
"""Update location information of nodes under `oldpath`.
This only affects *already loaded* nodes.
"""
oldprefix = oldpath + '/' # root node can not be renamed, anyway
oldprefix_len = len(oldprefix)
# Update alive and dead descendants.
for cache in [self._node_manager.cache, self._node_manager.registry]:
for nodepath in list(cache):
if nodepath.startswith(oldprefix) and nodepath != oldprefix:
nodesuffix = nodepath[oldprefix_len:]
newnodepath = join_path(newpath, nodesuffix)
newnodeppath = split_path(newnodepath)[0]
descendent_node = self._get_node(nodepath)
descendent_node._g_update_location(newnodeppath)
| (self, oldpath, newpath) |
728,422 | tables.file | close | Flush all the alive leaves in object tree and close the file. | def close(self):
"""Flush all the alive leaves in object tree and close the file."""
# If the file is already closed, return immediately
if not self.isopen:
return
# If this file has been opened more than once, decrease the
# counter and return
if self._open_count > 1:
self._open_count -= 1
return
filename = self.filename
if self._undoEnabled and self._iswritable():
# Save the current mark and current action
self._actionlog.attrs._g__setattr("CURMARK", self._curmark)
self._actionlog.attrs._g__setattr("CURACTION", self._curaction)
# Close all loaded nodes.
self.root._f_close()
self._node_manager.shutdown()
# Post-conditions
assert len(self._node_manager.cache) == 0, \
("cached nodes remain after closing: %s"
% list(self._node_manager.cache))
# No other nodes should have been revived.
assert len(self._node_manager.registry) == 0, \
("alive nodes remain after closing: %s"
% list(self._node_manager.registry))
# Close the file
self._close_file()
# After the objects are disconnected, destroy the
# object dictionary using brute force ;-)
# This should help the garbage collector
self.__dict__.clear()
# Set the flag to indicate that the file is closed
self.isopen = 0
# Restore the filename attribute that is used by _FileRegistry
self.filename = filename
# Delete the entry from the registry of opened files
_open_files.remove(self)
| (self) |
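To see how `open_file` and `close` pair up in practice, here is a minimal usage sketch; the path `demo.h5` is illustrative, and only public PyTables calls (`tables.open_file`, `File.create_array`, `File.close`) are used.

```python
import tables

# Open (or create) a file; close() flushes alive leaves and releases
# the HDF5 handle. Calling close() on an already closed file is a no-op.
h5file = tables.open_file("demo.h5", mode="w", title="Demo file")
try:
    h5file.create_array("/", "x", obj=[1, 2, 3])
finally:
    h5file.close()
```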
728,423 | tables.file | copy_children | Copy the children of a group into another group.
Parameters
----------
srcgroup : str
The group to copy from.
dstgroup : str
The destination group.
overwrite : bool, optional
If True, the destination group will be overwritten if it already
exists. Defaults to False.
recursive : bool, optional
If True, all descendant nodes of srcgroup are recursively copied.
Defaults to False.
createparents : bool, optional
If True, any necessary parents of dstgroup will be created.
Defaults to False.
kwargs : dict
Additional keyword arguments can be used to customize the copying
process. See the documentation of :meth:`Group._f_copy_children`
for a description of those arguments.
| def copy_children(self, srcgroup, dstgroup,
overwrite=False, recursive=False,
createparents=False, **kwargs):
"""Copy the children of a group into another group.
Parameters
----------
srcgroup : str
The group to copy from.
dstgroup : str
The destination group.
overwrite : bool, optional
If True, the destination group will be overwritten if it already
exists. Defaults to False.
recursive : bool, optional
If True, all descendant nodes of srcgroup are recursively copied.
Defaults to False.
createparents : bool, optional
If True, any necessary parents of dstgroup will be created.
Defaults to False.
kwargs : dict
Additional keyword arguments can be used to customize the copying
process. See the documentation of :meth:`Group._f_copy_children`
for a description of those arguments.
"""
srcgroup = self.get_node(srcgroup) # Does the source node exist?
self._check_group(srcgroup) # Is it a group?
srcgroup._f_copy_children(
dstgroup, overwrite, recursive, createparents, **kwargs)
| (self, srcgroup, dstgroup, overwrite=False, recursive=False, createparents=False, **kwargs) |
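A short sketch of `copy_children` usage; the group paths `/src` and `/dst` are illustrative and assume an open, writable `h5file`.

```python
# Copy every child of /src into /dst, descending into subgroups and
# replacing any children that already exist at the destination.
h5file.copy_children("/src", "/dst", recursive=True, overwrite=True)
```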
728,424 | tables.file | copy_file | Copy the contents of this file to dstfilename.
Parameters
----------
dstfilename : str
A path string indicating the name of the destination file. If
it already exists, the copy will fail with an IOError, unless
the overwrite argument is true.
overwrite : bool, optional
If true, the destination file will be overwritten if it already
exists. In this case, the destination file must be closed, or
errors will occur. Defaults to False.
kwargs
Additional keyword arguments discussed below.
Notes
-----
Additional keyword arguments may be passed to customize the
copying process. For instance, title and filters may be changed,
user attributes may be or may not be copied, data may be
sub-sampled, stats may be collected, etc. Arguments unknown to
nodes are simply ignored. Check the documentation for copying
operations of nodes to see which options they support.
In addition, it recognizes the names of parameters present in
:file:`tables/parameters.py` as additional keyword arguments.
See :ref:`parameter_files` for a detailed info on the supported
parameters.
Copying a file usually has the beneficial side effect of
creating a more compact and cleaner version of the original
file.
| def copy_file(self, dstfilename, overwrite=False, **kwargs):
"""Copy the contents of this file to dstfilename.
Parameters
----------
dstfilename : str
A path string indicating the name of the destination file. If
it already exists, the copy will fail with an IOError, unless
the overwrite argument is true.
overwrite : bool, optional
If true, the destination file will be overwritten if it already
exists. In this case, the destination file must be closed, or
errors will occur. Defaults to False.
kwargs
Additional keyword arguments discussed below.
Notes
-----
Additional keyword arguments may be passed to customize the
copying process. For instance, title and filters may be changed,
user attributes may be or may not be copied, data may be
sub-sampled, stats may be collected, etc. Arguments unknown to
nodes are simply ignored. Check the documentation for copying
operations of nodes to see which options they support.
In addition, it recognizes the names of parameters present in
:file:`tables/parameters.py` as additional keyword arguments.
See :ref:`parameter_files` for a detailed info on the supported
parameters.
Copying a file usually has the beneficial side effect of
creating a more compact and cleaner version of the original
file.
"""
self._check_open()
# Check that we are not copying the file onto itself
if Path(self.filename).resolve() == Path(dstfilename).resolve():
raise OSError("You cannot copy a file over itself")
# Compute default arguments.
# These are *not* passed on.
filters = kwargs.pop('filters', None)
if filters is None:
# By checking the HDF5 attribute, we avoid setting filters
# in the destination file if not explicitly set in the
# source file. Just by assigning ``self.filters`` we would
# not be able to tell.
filters = getattr(self.root._v_attrs, 'FILTERS', None)
copyuserattrs = kwargs.get('copyuserattrs', True)
title = kwargs.pop('title', self.title)
if Path(dstfilename).is_file() and not overwrite:
raise OSError(
f"file ``{dstfilename}`` already exists; you may want to "
f"use the ``overwrite`` argument"
)
# Create destination file, overwriting it.
dstfileh = open_file(
dstfilename, mode="w", title=title, filters=filters, **kwargs)
try:
# Maybe copy the user attributes of the root group.
if copyuserattrs:
self.root._v_attrs._f_copy(dstfileh.root)
# Copy the rest of the hierarchy.
self.root._f_copy_children(dstfileh.root, recursive=True, **kwargs)
finally:
dstfileh.close()
| (self, dstfilename, overwrite=False, **kwargs) |
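As the docstring notes, copying to a fresh file usually compacts the HDF5 layout; a minimal sketch (the destination name is illustrative):

```python
# Writes a fresh, usually more compact copy; raises OSError if the
# destination exists and overwrite is False.
h5file.copy_file("compacted.h5", overwrite=True)
```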
728,425 | tables.file | copy_node | Copy the node specified by where and name to newparent/newname.
Parameters
----------
where : str
These arguments work as in
:meth:`File.get_node`, referencing the node to be acted
upon.
newparent : str or Group
The destination group that the node will be copied
into (a path name or a Group
instance). If not specified or None, the
current parent group is chosen as the new parent.
newname : str
The name to be assigned to the new copy in its
destination (a string). If it is not specified or
None, the current name is chosen as the
new name.
name : str
These arguments work as in
:meth:`File.get_node`, referencing the node to be acted
upon.
overwrite : bool, optional
If True, the destination group will be overwritten if it already
exists. Defaults to False.
recursive : bool, optional
If True, all descendant nodes of the source node are recursively
copied. Defaults to False.
createparents : bool, optional
If True, any necessary parents of the destination path will be
created. Defaults to False.
kwargs
Additional keyword arguments can be used to customize the copying
process. See the documentation of :meth:`Group._f_copy`
for a description of those arguments.
Returns
-------
node : Node
The newly created copy of the source node (i.e. the destination
node). See :meth:`.Node._f_copy` for further details on the
semantics of copying nodes.
| def copy_node(self, where, newparent=None, newname=None, name=None,
overwrite=False, recursive=False, createparents=False,
**kwargs):
"""Copy the node specified by where and name to newparent/newname.
Parameters
----------
where : str
These arguments work as in
:meth:`File.get_node`, referencing the node to be acted
upon.
newparent : str or Group
The destination group that the node will be copied
into (a path name or a Group
instance). If not specified or None, the
current parent group is chosen as the new parent.
newname : str
The name to be assigned to the new copy in its
destination (a string). If it is not specified or
None, the current name is chosen as the
new name.
name : str
These arguments work as in
:meth:`File.get_node`, referencing the node to be acted
upon.
overwrite : bool, optional
If True, the destination group will be overwritten if it already
exists. Defaults to False.
recursive : bool, optional
If True, all descendant nodes of the source node are recursively
copied. Defaults to False.
createparents : bool, optional
If True, any necessary parents of the destination path will be
created. Defaults to False.
kwargs
Additional keyword arguments can be used to customize the copying
process. See the documentation of :meth:`Group._f_copy`
for a description of those arguments.
Returns
-------
node : Node
The newly created copy of the source node (i.e. the destination
node). See :meth:`.Node._f_copy` for further details on the
semantics of copying nodes.
"""
obj = self.get_node(where, name=name)
if obj._v_depth == 0 and newparent and not newname:
npobj = self.get_node(newparent)
if obj._v_file is not npobj._v_file:
# Special case for copying file1:/ --> file2:/path
self.root._f_copy_children(npobj, overwrite=overwrite,
recursive=recursive, **kwargs)
return npobj
else:
raise OSError(
"You cannot copy a root group over the same file")
return obj._f_copy(newparent, newname,
overwrite, recursive, createparents, **kwargs)
| (self, where, newparent=None, newname=None, name=None, overwrite=False, recursive=False, createparents=False, **kwargs) |
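A minimal sketch of `copy_node`; the paths are illustrative and assume an open `h5file` that contains a node `/a/b` and a group `/c`.

```python
# Copy /a/b (and, with recursive=True, everything below it) under /c,
# keeping the original name "b".
new_node = h5file.copy_node("/a/b", newparent="/c", recursive=True)
```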
728,426 | tables.file | copy_node_attrs | Copy PyTables attributes from one node to another.
Parameters
----------
where, name
These arguments work as in :meth:`File.get_node`, referencing the
node to be acted upon.
dstnode
The destination node where the attributes will be copied to. It can
be a path string or a Node instance (see :ref:`NodeClassDescr`).
| def copy_node_attrs(self, where, dstnode, name=None):
"""Copy PyTables attributes from one node to another.
Parameters
----------
where, name
These arguments work as in :meth:`File.get_node`, referencing the
node to be acted upon.
dstnode
The destination node where the attributes will be copied to. It can
be a path string or a Node instance (see :ref:`NodeClassDescr`).
"""
srcobject = self.get_node(where, name=name)
dstobject = self.get_node(dstnode)
srcobject._v_attrs._f_copy(dstobject)
| (self, where, dstnode, name=None) |
728,427 | tables.file | create_array | Create a new array.
Parameters
----------
where : str or Group
The parent group from which the new array will hang. It can be a
path string (for example '/level1/leaf5'), or a Group instance
(see :ref:`GroupClassDescr`).
name : str
The name of the new array
obj : python object
The array or scalar to be saved. Accepted types are NumPy
arrays and scalars, as well as native Python sequences and
scalars, provided that values are regular (i.e. they are
not like ``[[1,2],2]``) and homogeneous (i.e. all the
elements are of the same type).
Also, objects that have some of their dimensions equal to 0
are not supported (use an EArray node (see
:ref:`EArrayClassDescr`) if you want to store an array with
one of its dimensions equal to 0).
.. versionchanged:: 3.0
The *Object* parameter has been renamed into *obj*.
title : str
A description for this node (it sets the TITLE HDF5 attribute on
disk).
byteorder : str
The byteorder of the data *on disk*, specified as 'little' or
'big'. If this is not specified, the byteorder is that of the
given object.
createparents : bool, optional
Whether to create the needed groups for the parent path to exist
(not done by default).
atom : Atom
An Atom (see :ref:`AtomClassDescr`) instance representing
the *type* and *shape* of the atomic objects to be saved.
.. versionadded:: 3.0
shape : tuple of ints
The shape of the stored array.
.. versionadded:: 3.0
track_times
Whether time data associated with the leaf are recorded (object
access time, raw data modification time, metadata change time,
object birth time); default True. Semantics of these times
depend on their implementation in the HDF5 library: refer to
documentation of the H5O_info_t data structure. As of HDF5
1.8.15, only ctime (metadata change time) is implemented.
.. versionadded:: 3.4.3
See Also
--------
Array : for more information on arrays
create_table : for more information on the rest of parameters
| def create_array(self, where, name, obj=None, title="",
byteorder=None, createparents=False,
atom=None, shape=None, track_times=True):
"""Create a new array.
Parameters
----------
where : str or Group
The parent group from which the new array will hang. It can be a
path string (for example '/level1/leaf5'), or a Group instance
(see :ref:`GroupClassDescr`).
name : str
The name of the new array
obj : python object
The array or scalar to be saved. Accepted types are NumPy
arrays and scalars, as well as native Python sequences and
scalars, provided that values are regular (i.e. they are
not like ``[[1,2],2]``) and homogeneous (i.e. all the
elements are of the same type).
Also, objects that have some of their dimensions equal to 0
are not supported (use an EArray node (see
:ref:`EArrayClassDescr`) if you want to store an array with
one of its dimensions equal to 0).
.. versionchanged:: 3.0
The *Object* parameter has been renamed into *obj*.
title : str
A description for this node (it sets the TITLE HDF5 attribute on
disk).
byteorder : str
The byteorder of the data *on disk*, specified as 'little' or
'big'. If this is not specified, the byteorder is that of the
given object.
createparents : bool, optional
Whether to create the needed groups for the parent path to exist
(not done by default).
atom : Atom
An Atom (see :ref:`AtomClassDescr`) instance representing
the *type* and *shape* of the atomic objects to be saved.
.. versionadded:: 3.0
shape : tuple of ints
The shape of the stored array.
.. versionadded:: 3.0
track_times
Whether time data associated with the leaf are recorded (object
access time, raw data modification time, metadata change time,
object birth time); default True. Semantics of these times
depend on their implementation in the HDF5 library: refer to
documentation of the H5O_info_t data structure. As of HDF5
1.8.15, only ctime (metadata change time) is implemented.
.. versionadded:: 3.4.3
See Also
--------
Array : for more information on arrays
create_table : for more information on the rest of parameters
"""
if obj is None:
if atom is None or shape is None:
raise TypeError('if the obj parameter is not specified '
'(or None) then both the atom and shape '
'parameters must be provided.')
else:
# Making strides=(0,...) below is a trick to create the
# array fast and without memory consumption
dflt = np.zeros((), dtype=atom.dtype)
obj = np.ndarray(shape, dtype=atom.dtype, buffer=dflt,
strides=(0,)*len(shape))
else:
flavor = flavor_of(obj)
# use a temporary object because converting obj at this stage
# breaks some tests. This solution performs a double,
# potentially expensive, conversion of the obj parameter.
_obj = array_as_internal(obj, flavor)
if shape is not None and shape != _obj.shape:
raise TypeError('the shape parameter does not match obj.shape')
if atom is not None and atom.dtype != _obj.dtype:
raise TypeError('the atom parameter is not consistent with '
'the data type of the obj parameter')
parentnode = self._get_or_create_path(where, createparents)
return Array(parentnode, name,
obj=obj, title=title, byteorder=byteorder,
track_times=track_times)
| (self, where, name, obj=None, title='', byteorder=None, createparents=False, atom=None, shape=None, track_times=True) |
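The two creation paths described above (from `obj`, or from `atom` plus `shape`) look like this in practice; a sketch assuming an open writable `h5file` and illustrative node names.

```python
import numpy as np
import tables

# From existing data: dtype and shape are taken from obj.
h5file.create_array("/", "from_obj", obj=np.arange(10), title="From data")

# From atom + shape: the array is filled with the atom's default value.
h5file.create_array("/", "from_atom", atom=tables.Float64Atom(),
                    shape=(3, 4), title="From atom and shape")
```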
728,428 | tables.file | create_carray | Create a new chunked array.
Parameters
----------
where : str or Group
The parent group from which the new array will hang. It can
be a path string (for example '/level1/leaf5'), or a Group
instance (see :ref:`GroupClassDescr`).
name : str
The name of the new array
atom : Atom
An Atom (see :ref:`AtomClassDescr`) instance representing
the *type* and *shape* of the atomic objects to be saved.
.. versionchanged:: 3.0
The *atom* parameter can be None (default) if *obj* is
provided.
shape : tuple
The shape of the new array.
.. versionchanged:: 3.0
The *shape* parameter can be None (default) if *obj* is
provided.
title : str, optional
A description for this node (it sets the TITLE HDF5 attribute
on disk).
filters : Filters, optional
An instance of the Filters class (see :ref:`FiltersClassDescr`)
that provides information about the desired I/O filters to
be applied during the life of this object.
chunkshape : tuple or number or None, optional
The shape of the data chunk to be read or written in a
single HDF5 I/O operation. Filters are applied to those
chunks of data. The dimensionality of chunkshape must be
the same as that of shape. If None, a sensible value is
calculated (which is recommended).
byteorder : str, optional
The byteorder of the data *on disk*, specified as 'little'
or 'big'. If this is not specified, the byteorder is that
of the given object.
createparents : bool, optional
Whether to create the needed groups for the parent path to
exist (not done by default).
obj : python object
The array or scalar to be saved. Accepted types are NumPy
arrays and scalars, as well as native Python sequences and
scalars, provided that values are regular (i.e. they are
not like ``[[1,2],2]``) and homogeneous (i.e. all the
elements are of the same type).
Also, objects that have some of their dimensions equal to 0
are not supported. Please use an EArray node (see
:ref:`EArrayClassDescr`) if you want to store an array with
one of its dimensions equal to 0.
The *obj* parameter is optional and can be provided as an
alternative to the *atom* and *shape* parameters.
If both *obj* and *atom* and/or *shape* are provided they must
be consistent with each other.
.. versionadded:: 3.0
track_times
Whether time data associated with the leaf are recorded (object
access time, raw data modification time, metadata change time,
object birth time); default True. Semantics of these times
depend on their implementation in the HDF5 library: refer to
documentation of the H5O_info_t data structure. As of HDF5
1.8.15, only ctime (metadata change time) is implemented.
.. versionadded:: 3.4.3
See Also
--------
CArray : for more information on chunked arrays
| def create_carray(self, where, name, atom=None, shape=None, title="",
filters=None, chunkshape=None,
byteorder=None, createparents=False, obj=None,
track_times=True):
"""Create a new chunked array.
Parameters
----------
where : str or Group
The parent group from which the new array will hang. It can
be a path string (for example '/level1/leaf5'), or a Group
instance (see :ref:`GroupClassDescr`).
name : str
The name of the new array
atom : Atom
An Atom (see :ref:`AtomClassDescr`) instance representing
the *type* and *shape* of the atomic objects to be saved.
.. versionchanged:: 3.0
The *atom* parameter can be None (default) if *obj* is
provided.
shape : tuple
The shape of the new array.
.. versionchanged:: 3.0
The *shape* parameter can be None (default) if *obj* is
provided.
title : str, optional
A description for this node (it sets the TITLE HDF5 attribute
on disk).
filters : Filters, optional
An instance of the Filters class (see :ref:`FiltersClassDescr`)
that provides information about the desired I/O filters to
be applied during the life of this object.
chunkshape : tuple or number or None, optional
The shape of the data chunk to be read or written in a
single HDF5 I/O operation. Filters are applied to those
chunks of data. The dimensionality of chunkshape must be
the same as that of shape. If None, a sensible value is
calculated (which is recommended).
byteorder : str, optional
The byteorder of the data *on disk*, specified as 'little'
or 'big'. If this is not specified, the byteorder is that
of the given object.
createparents : bool, optional
Whether to create the needed groups for the parent path to
exist (not done by default).
obj : python object
The array or scalar to be saved. Accepted types are NumPy
arrays and scalars, as well as native Python sequences and
scalars, provided that values are regular (i.e. they are
not like ``[[1,2],2]``) and homogeneous (i.e. all the
elements are of the same type).
Also, objects that have some of their dimensions equal to 0
are not supported. Please use an EArray node (see
:ref:`EArrayClassDescr`) if you want to store an array with
one of its dimensions equal to 0.
The *obj* parameter is optional and can be provided as an
alternative to the *atom* and *shape* parameters.
If both *obj* and *atom* and/or *shape* are provided they must
be consistent with each other.
.. versionadded:: 3.0
track_times
Whether time data associated with the leaf are recorded (object
access time, raw data modification time, metadata change time,
object birth time); default True. Semantics of these times
depend on their implementation in the HDF5 library: refer to
documentation of the H5O_info_t data structure. As of HDF5
1.8.15, only ctime (metadata change time) is implemented.
.. versionadded:: 3.4.3
See Also
--------
CArray : for more information on chunked arrays
"""
if obj is not None:
flavor = flavor_of(obj)
obj = array_as_internal(obj, flavor)
if shape is not None and shape != obj.shape:
raise TypeError('the shape parameter does not match obj.shape')
else:
shape = obj.shape
if atom is not None and atom.dtype != obj.dtype:
raise TypeError("the 'atom' parameter is not consistent with "
"the data type of the 'obj' parameter")
elif atom is None:
atom = Atom.from_dtype(obj.dtype)
else:
if atom is None and shape is None:
raise TypeError(
"the 'atom' and 'shape' parameters or the 'obj' parameter "
"must be provided")
parentnode = self._get_or_create_path(where, createparents)
_checkfilters(filters)
ptobj = CArray(parentnode, name,
atom=atom, shape=shape, title=title, filters=filters,
chunkshape=chunkshape, byteorder=byteorder,
track_times=track_times)
if obj is not None:
ptobj[...] = obj
return ptobj
| (self, where, name, atom=None, shape=None, title='', filters=None, chunkshape=None, byteorder=None, createparents=False, obj=None, track_times=True) |
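A sketch of a compressed chunked array; `Filters` and `Float64Atom` are standard PyTables classes, and the node name is illustrative.

```python
import tables

filters = tables.Filters(complevel=5, complib="zlib")
carr = h5file.create_carray("/", "carr", atom=tables.Float64Atom(),
                            shape=(200, 300), filters=filters)
carr[10, :5] = 1.0  # data is written chunk-wise through normal slicing
```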
728,429 | tables.file | create_earray | Create a new enlargeable array.
Parameters
----------
where : str or Group
The parent group from which the new array will hang. It can be a
path string (for example '/level1/leaf5'), or a Group instance
(see :ref:`GroupClassDescr`).
name : str
The name of the new array
atom : Atom
An Atom (see :ref:`AtomClassDescr`) instance representing the
*type* and *shape* of the atomic objects to be saved.
.. versionchanged:: 3.0
The *atom* parameter can be None (default) if *obj* is
provided.
shape : tuple
The shape of the new array. One (and only one) of the shape
dimensions *must* be 0. The dimension being 0 means that the
resulting EArray object can be extended along it. Multiple
enlargeable dimensions are not supported right now.
.. versionchanged:: 3.0
The *shape* parameter can be None (default) if *obj* is
provided.
title : str, optional
A description for this node (it sets the TITLE HDF5 attribute on
disk).
expectedrows : int, optional
A user estimate about the number of row elements that will be added
to the growable dimension in the EArray node. If not provided, the
default value is EXPECTED_ROWS_EARRAY (see tables/parameters.py).
If you plan to create either a much smaller or a much bigger array
try providing a guess; this will optimize the HDF5 B-Tree creation
and management process time and the amount of memory used.
chunkshape : tuple, numeric, or None, optional
The shape of the data chunk to be read or written in a single HDF5
I/O operation. Filters are applied to those chunks of data. The
dimensionality of chunkshape must be the same as that of shape
(beware: no dimension should be 0 this time!). If None, a sensible
value is calculated based on the expectedrows parameter (which is
recommended).
byteorder : str, optional
The byteorder of the data *on disk*, specified as 'little' or
'big'. If this is not specified, the byteorder is that of the
platform.
createparents : bool, optional
Whether to create the needed groups for the parent path to exist
(not done by default).
obj : python object
The array or scalar to be saved. Accepted types are NumPy
arrays and scalars, as well as native Python sequences and
scalars, provided that values are regular (i.e. they are
not like ``[[1,2],2]``) and homogeneous (i.e. all the
elements are of the same type).
The *obj* parameter is optional and can be provided as an
alternative to the *atom* and *shape* parameters.
If both *obj* and *atom* and/or *shape* are provided they must
be consistent with each other.
.. versionadded:: 3.0
track_times
Whether time data associated with the leaf are recorded (object
access time, raw data modification time, metadata change time,
object birth time); default True. Semantics of these times
depend on their implementation in the HDF5 library: refer to
documentation of the H5O_info_t data structure. As of HDF5
1.8.15, only ctime (metadata change time) is implemented.
.. versionadded:: 3.4.3
See Also
--------
EArray : for more information on enlargeable arrays
| def create_earray(self, where, name, atom=None, shape=None, title="",
filters=None, expectedrows=1000,
chunkshape=None, byteorder=None,
createparents=False, obj=None, track_times=True):
"""Create a new enlargeable array.
Parameters
----------
where : str or Group
The parent group from which the new array will hang. It can be a
path string (for example '/level1/leaf5'), or a Group instance
(see :ref:`GroupClassDescr`).
name : str
The name of the new array
atom : Atom
An Atom (see :ref:`AtomClassDescr`) instance representing the
*type* and *shape* of the atomic objects to be saved.
.. versionchanged:: 3.0
The *atom* parameter can be None (default) if *obj* is
provided.
shape : tuple
The shape of the new array. One (and only one) of the shape
dimensions *must* be 0. The dimension being 0 means that the
resulting EArray object can be extended along it. Multiple
enlargeable dimensions are not supported right now.
.. versionchanged:: 3.0
The *shape* parameter can be None (default) if *obj* is
provided.
title : str, optional
A description for this node (it sets the TITLE HDF5 attribute on
disk).
expectedrows : int, optional
A user estimate about the number of row elements that will be added
to the growable dimension in the EArray node. If not provided, the
default value is EXPECTED_ROWS_EARRAY (see tables/parameters.py).
If you plan to create either a much smaller or a much bigger array
try providing a guess; this will optimize the HDF5 B-Tree creation
and management process time and the amount of memory used.
chunkshape : tuple, numeric, or None, optional
The shape of the data chunk to be read or written in a single HDF5
I/O operation. Filters are applied to those chunks of data. The
dimensionality of chunkshape must be the same as that of shape
(beware: no dimension should be 0 this time!). If None, a sensible
value is calculated based on the expectedrows parameter (which is
recommended).
byteorder : str, optional
The byteorder of the data *on disk*, specified as 'little' or
'big'. If this is not specified, the byteorder is that of the
platform.
createparents : bool, optional
Whether to create the needed groups for the parent path to exist
(not done by default).
obj : python object
The array or scalar to be saved. Accepted types are NumPy
arrays and scalars, as well as native Python sequences and
scalars, provided that values are regular (i.e. they are
not like ``[[1,2],2]``) and homogeneous (i.e. all the
elements are of the same type).
The *obj* parameter is optional and can be provided as an
alternative to the *atom* and *shape* parameters.
If both *obj* and *atom* and/or *shape* are provided they must
be consistent with each other.
.. versionadded:: 3.0
track_times
Whether time data associated with the leaf are recorded (object
access time, raw data modification time, metadata change time,
object birth time); default True. Semantics of these times
depend on their implementation in the HDF5 library: refer to
documentation of the H5O_info_t data structure. As of HDF5
1.8.15, only ctime (metadata change time) is implemented.
.. versionadded:: 3.4.3
See Also
--------
EArray : for more information on enlargeable arrays
"""
if obj is not None:
flavor = flavor_of(obj)
obj = array_as_internal(obj, flavor)
earray_shape = (0,) + obj.shape[1:]
if shape is not None and shape != earray_shape:
raise TypeError('the shape parameter is not compatible '
'with obj.shape.')
else:
shape = earray_shape
if atom is not None and atom.dtype != obj.dtype:
raise TypeError('the atom parameter is not consistent with '
'the data type of the obj parameter')
elif atom is None:
atom = Atom.from_dtype(obj.dtype)
parentnode = self._get_or_create_path(where, createparents)
_checkfilters(filters)
ptobj = EArray(parentnode, name,
atom=atom, shape=shape, title=title,
filters=filters, expectedrows=expectedrows,
chunkshape=chunkshape, byteorder=byteorder,
track_times=track_times)
if obj is not None:
ptobj.append(obj)
return ptobj
| (self, where, name, atom=None, shape=None, title='', filters=None, expectedrows=1000, chunkshape=None, byteorder=None, createparents=False, obj=None, track_times=True) |
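A sketch of the enlargeable case; the 0 in `shape` marks the dimension that `append` grows, and the names are illustrative.

```python
import numpy as np
import tables

earr = h5file.create_earray("/", "earr", atom=tables.Int32Atom(),
                            shape=(0, 3), expectedrows=10_000)
earr.append(np.zeros((100, 3), dtype=np.int32))  # now earr.shape == (100, 3)
```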
728,430 | tables.file | create_external_link | Create an external link.
Create an external link to a *target* node with the given *name*
in *where* location. *target* can be a node object in another
file or a path string in the form 'file:/path/to/node'. If
*createparents* is true, the intermediate groups required for
reaching *where* are created (the default is not doing so).
The returned node is an :class:`ExternalLink` instance.
| def create_external_link(self, where, name, target, createparents=False):
"""Create an external link.
Create an external link to a *target* node with the given *name*
in *where* location. *target* can be a node object in another
file or a path string in the form 'file:/path/to/node'. If
*createparents* is true, the intermediate groups required for
reaching *where* are created (the default is not doing so).
The returned node is an :class:`ExternalLink` instance.
"""
if not isinstance(target, str):
if hasattr(target, '_v_pathname'): # quacks like a Node
target = target._v_file.filename + ':' + target._v_pathname
else:
raise ValueError(
"`target` has to be a string or a node object")
elif target.find(':/') == -1:
raise ValueError(
"`target` must expressed as 'file:/path/to/node'")
parentnode = self._get_or_create_path(where, createparents)
elink = ExternalLink(parentnode, name, target)
# Refresh children names in link's parent node
parentnode._g_add_children_names()
return elink
| (self, where, name, target, createparents=False) |
728,431 | tables.file | create_group | Create a new group.
Parameters
----------
where : str or Group
The parent group from which the new group will hang. It can be a
path string (for example '/level1/leaf5'), or a Group instance
(see :ref:`GroupClassDescr`).
name : str
The name of the new group.
title : str, optional
A description for this node (it sets the TITLE HDF5 attribute on
disk).
filters : Filters
An instance of the Filters class (see :ref:`FiltersClassDescr`)
that provides information about the desired I/O filters applicable
to the leaves that hang directly from this new group (unless other
filter properties are specified for these leaves). Besides, if you
do not specify filter properties for its child groups, they will
inherit these ones.
createparents : bool
Whether to create the needed groups for the parent
path to exist (not done by default).
See Also
--------
Group : for more information on groups
| def create_group(self, where, name, title="", filters=None,
createparents=False):
"""Create a new group.
Parameters
----------
where : str or Group
The parent group from which the new group will hang. It can be a
path string (for example '/level1/leaf5'), or a Group instance
(see :ref:`GroupClassDescr`).
name : str
The name of the new group.
title : str, optional
A description for this node (it sets the TITLE HDF5 attribute on
disk).
filters : Filters
An instance of the Filters class (see :ref:`FiltersClassDescr`)
that provides information about the desired I/O filters applicable
to the leaves that hang directly from this new group (unless other
filter properties are specified for these leaves). Besides, if you
do not specify filter properties for its child groups, they will
inherit these ones.
createparents : bool
Whether to create the needed groups for the parent
path to exist (not done by default).
See Also
--------
Group : for more information on groups
"""
parentnode = self._get_or_create_path(where, createparents)
_checkfilters(filters)
return Group(parentnode, name,
title=title, new=True, filters=filters)
| (self, where, name, title='', filters=None, createparents=False) |
728,432 | tables.file | create_hard_link | Create a hard link.
Create a hard link to a `target` node with the given `name` in
`where` location. `target` can be a node object or a path
string. If `createparents` is true, the intermediate groups
required for reaching `where` are created (the default is not
doing so).
The returned node is a regular `Group` or `Leaf` instance.
| def create_hard_link(self, where, name, target, createparents=False):
"""Create a hard link.
Create a hard link to a `target` node with the given `name` in
`where` location. `target` can be a node object or a path
string. If `createparents` is true, the intermediate groups
required for reaching `where` are created (the default is not
doing so).
The returned node is a regular `Group` or `Leaf` instance.
"""
targetnode = self.get_node(target)
parentnode = self._get_or_create_path(where, createparents)
linkextension._g_create_hard_link(parentnode, name, targetnode)
# Refresh children names in link's parent node
parentnode._g_add_children_names()
# Return the target node
return self.get_node(parentnode, name)
| (self, where, name, target, createparents=False) |
728,433 | tables.file | create_soft_link | Create a soft link (aka symbolic link) to a `target` node.
Create a soft link (aka symbolic link) to a `target` node with
the given `name` in `where` location. `target` can be a node
object or a path string. If `createparents` is true, the
intermediate groups required for reaching `where` are created
(the default is not doing so).
The returned node is a SoftLink instance. See the SoftLink
class (in :ref:`SoftLinkClassDescr`) for more information on
soft links.
| def create_soft_link(self, where, name, target, createparents=False):
"""Create a soft link (aka symbolic link) to a `target` node.
Create a soft link (aka symbolic link) to a `target` node with
the given `name` in `where` location. `target` can be a node
object or a path string. If `createparents` is true, the
intermediate groups required for reaching `where` are created
(the default is not doing so).
The returned node is a SoftLink instance. See the SoftLink
class (in :ref:`SoftLinkClassDescr`) for more information on
soft links.
"""
if not isinstance(target, str):
if hasattr(target, '_v_pathname'): # quacks like a Node
target = target._v_pathname
else:
raise ValueError(
"`target` has to be a string or a node object")
parentnode = self._get_or_create_path(where, createparents)
slink = SoftLink(parentnode, name, target)
# Refresh children names in link's parent node
parentnode._g_add_children_names()
return slink
| (self, where, name, target, createparents=False) |
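The three link flavors side by side, as a sketch; the targets are illustrative and assume `/a/b` exists (and, for the external link, a separate `other.h5` file).

```python
h5file.create_soft_link("/", "alias", "/a/b")             # path-based link
h5file.create_external_link("/", "ext", "other.h5:/a/b")  # cross-file link
h5file.create_hard_link("/", "hard", "/a/b")              # extra name for node
```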
728,434 | tables.file | create_table | Create a new table with the given name in where location.
Parameters
----------
where : str or Group
The parent group from which the new table will hang. It can be a
path string (for example '/level1/leaf5'), or a Group instance
(see :ref:`GroupClassDescr`).
name : str
The name of the new table.
description : Description
This is an object that describes the table, i.e. how
many columns it has, their names, types, shapes, etc. It
can be any of the following:
* *A user-defined class*: This should inherit from the
IsDescription class (see :ref:`IsDescriptionClassDescr`)
where table fields are specified.
* *A dictionary*: For example, when you do not know
beforehand which structure your table will have.
* *A Description instance*: You can use the description
attribute of another table to create a new one with the
same structure.
* *A NumPy dtype*: A completely general structured NumPy
dtype.
* *A NumPy (structured) array instance*: The dtype of
this structured array will be used as the description.
Also, in case the array has actual data, it will be
injected into the newly created table.
.. versionchanged:: 3.0
The *description* parameter can be None (default) if *obj* is
provided. In that case the structure of the table is deduced
from *obj*.
title : str
A description for this node (it sets the TITLE HDF5 attribute
on disk).
filters : Filters
An instance of the Filters class (see :ref:`FiltersClassDescr`)
that provides information about the desired I/O filters to be
applied during the life of this object.
expectedrows : int
A user estimate of the number of records that will be in the table.
If not provided, the default value is EXPECTED_ROWS_TABLE (see
:file:`tables/parameters.py`). If you plan to create a bigger
table try providing a guess; this will optimize the HDF5 B-Tree
creation and management process time and memory used.
chunkshape
The shape of the data chunk to be read or written in a
single HDF5 I/O operation. Filters are applied to those
chunks of data. The rank of the chunkshape for tables must
be 1. If None, a sensible value is calculated based on the
expectedrows parameter (which is recommended).
byteorder : str
The byteorder of data *on disk*, specified as 'little' or 'big'.
If this is not specified, the byteorder is that of the platform,
unless you passed an array as the description, in which case
its byteorder will be used.
createparents : bool
Whether to create the needed groups for the parent path to exist
(not done by default).
obj : python object
The recarray to be saved. Accepted types are NumPy record
arrays.
The *obj* parameter is optional and can be provided as an
alternative to the *description* parameter.
If both *obj* and *description* are provided they must
be consistent with each other.
.. versionadded:: 3.0
track_times
Whether time data associated with the leaf are recorded (object
access time, raw data modification time, metadata change time,
object birth time); default True. Semantics of these times
depend on their implementation in the HDF5 library: refer to
documentation of the H5O_info_t data structure. As of HDF5
1.8.15, only ctime (metadata change time) is implemented.
.. versionadded:: 3.4.3
See Also
--------
Table : for more information on tables
| def create_table(self, where, name, description=None, title="",
filters=None, expectedrows=10_000,
chunkshape=None, byteorder=None,
createparents=False, obj=None, track_times=True):
"""Create a new table with the given name in where location.
Parameters
----------
where : str or Group
The parent group from which the new table will hang. It can be a
path string (for example '/level1/leaf5'), or a Group instance
(see :ref:`GroupClassDescr`).
name : str
The name of the new table.
description : Description
This is an object that describes the table, i.e. how
many columns it has, their names, types, shapes, etc. It
can be any of the following:
* *A user-defined class*: This should inherit from the
IsDescription class (see :ref:`IsDescriptionClassDescr`)
where table fields are specified.
* *A dictionary*: For example, when you do not know
beforehand which structure your table will have.
* *A Description instance*: You can use the description
attribute of another table to create a new one with the
same structure.
* *A NumPy dtype*: A completely general structured NumPy
dtype.
* *A NumPy (structured) array instance*: The dtype of
this structured array will be used as the description.
Also, in case the array has actual data, it will be
injected into the newly created table.
.. versionchanged:: 3.0
The *description* parameter can be None (default) if *obj* is
provided. In that case the structure of the table is deduced
from *obj*.
title : str
A description for this node (it sets the TITLE HDF5 attribute
on disk).
filters : Filters
An instance of the Filters class (see :ref:`FiltersClassDescr`)
that provides information about the desired I/O filters to be
applied during the life of this object.
expectedrows : int
A user estimate of the number of records that will be in the table.
If not provided, the default value is EXPECTED_ROWS_TABLE (see
:file:`tables/parameters.py`). If you plan to create a bigger
table try providing a guess; this will optimize the HDF5 B-Tree
creation and management process time and memory used.
chunkshape
The shape of the data chunk to be read or written in a
single HDF5 I/O operation. Filters are applied to those
chunks of data. The rank of the chunkshape for tables must
be 1. If None, a sensible value is calculated based on the
expectedrows parameter (which is recommended).
byteorder : str
The byteorder of data *on disk*, specified as 'little' or 'big'.
If this is not specified, the byteorder is that of the platform,
unless you passed an array as the description, in which case
its byteorder will be used.
createparents : bool
Whether to create the needed groups for the parent path to exist
(not done by default).
obj : python object
The recarray to be saved. Accepted types are NumPy record
arrays.
The *obj* parameter is optional and can be provided as an
alternative to the *description* parameter.
If both *obj* and *description* are provided they must
be consistent with each other.
.. versionadded:: 3.0
track_times
Whether time data associated with the leaf are recorded (object
access time, raw data modification time, metadata change time,
object birth time); default True. Semantics of these times
depend on their implementation in the HDF5 library: refer to
documentation of the H5O_info_t data structure. As of HDF5
1.8.15, only ctime (metadata change time) is implemented.
.. versionadded:: 3.4.3
See Also
--------
Table : for more information on tables
"""
if obj is not None:
if not isinstance(obj, np.ndarray):
raise TypeError('invalid obj parameter %r' % obj)
descr, _ = descr_from_dtype(obj.dtype, ptparams=self.params)
if (description is not None and
dtype_from_descr(description,
ptparams=self.params) != obj.dtype):
raise TypeError('the description parameter is not consistent '
'with the data type of the obj parameter')
elif description is None:
description = descr
parentnode = self._get_or_create_path(where, createparents)
if description is None:
raise ValueError("invalid table description: None")
_checkfilters(filters)
ptobj = Table(parentnode, name,
description=description, title=title,
filters=filters, expectedrows=expectedrows,
chunkshape=chunkshape, byteorder=byteorder,
track_times=track_times)
if obj is not None:
ptobj.append(obj)
return ptobj
| (self, where, name, description=None, title='', filters=None, expectedrows=10000, chunkshape=None, byteorder=None, createparents=False, obj=None, track_times=True) |
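A sketch of the `IsDescription` route described above; the `Particle` class and its columns are illustrative, while the column types are standard PyTables.

```python
import tables

class Particle(tables.IsDescription):
    name = tables.StringCol(16)    # 16-byte string column
    energy = tables.Float64Col()   # double-precision column

table = h5file.create_table("/", "particles", Particle, title="Particle data")
row = table.row
row["name"], row["energy"] = b"p1", 3.14
row.append()
table.flush()  # push buffered rows to disk
```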
728,435 | tables.file | create_vlarray | Create a new variable-length array.
Parameters
----------
where : str or Group
The parent group from which the new array will hang. It can
be a path string (for example '/level1/leaf5'), or a Group
instance (see :ref:`GroupClassDescr`).
name : str
The name of the new array
atom : Atom
An Atom (see :ref:`AtomClassDescr`) instance representing
the *type* and *shape* of the atomic objects to be saved.
.. versionchanged:: 3.0
The *atom* parameter can be None (default) if *obj* is
provided.
title : str, optional
A description for this node (it sets the TITLE HDF5 attribute
on disk).
filters : Filters
An instance of the Filters class (see :ref:`FiltersClassDescr`)
that provides information about the desired I/O filters to
be applied during the life of this object.
expectedrows : int, optional
A user estimate about the number of row elements that will
be added to the growable dimension in the `VLArray` node.
If not provided, the default value is ``EXPECTED_ROWS_VLARRAY``
(see ``tables/parameters.py``). If you plan to create either
a much smaller or a much bigger `VLArray` try providing a guess;
this will optimize the HDF5 B-Tree creation and management
process time and the amount of memory used.
.. versionadded:: 3.0
chunkshape : int or tuple of int, optional
The shape of the data chunk to be read or written in a
single HDF5 I/O operation. Filters are applied to those
chunks of data. The dimensionality of chunkshape must be 1.
If None, a sensible value is calculated (which is recommended).
byteorder : str, optional
The byteorder of the data *on disk*, specified as 'little' or
'big'. If this is not specified, the byteorder is that of the
platform.
createparents : bool, optional
Whether to create the needed groups for the parent path to
exist (not done by default).
obj : python object
The array or scalar to be saved. Accepted types are NumPy
arrays and scalars, as well as native Python sequences and
scalars, provided that values are regular (i.e. they are
not like ``[[1,2],2]``) and homogeneous (i.e. all the
elements are of the same type).
The *obj* parameter is optional and can be provided as an
alternative to the *atom* parameter.
If both *obj* and *atom* are provided they must
be consistent with each other.
.. versionadded:: 3.0
track_times
Whether time data associated with the leaf are recorded (object
access time, raw data modification time, metadata change time,
object birth time); default True. Semantics of these times
depend on their implementation in the HDF5 library: refer to
documentation of the H5O_info_t data structure. As of HDF5
1.8.15, only ctime (metadata change time) is implemented.
.. versionadded:: 3.4.3
See Also
--------
VLArray : for more information on variable-length arrays
.. versionchanged:: 3.0
The *expectedsizeinMB* parameter has been replaced by
*expectedrows*.
| def create_vlarray(self, where, name, atom=None, title="",
filters=None, expectedrows=None,
chunkshape=None, byteorder=None,
createparents=False, obj=None,
track_times=True):
"""Create a new variable-length array.
Parameters
----------
where : str or Group
The parent group from which the new array will hang. It can
be a path string (for example '/level1/leaf5'), or a Group
instance (see :ref:`GroupClassDescr`).
name : str
The name of the new array
atom : Atom
An Atom (see :ref:`AtomClassDescr`) instance representing
the *type* and *shape* of the atomic objects to be saved.
.. versionchanged:: 3.0
The *atom* parameter can be None (default) if *obj* is
provided.
title : str, optional
A description for this node (it sets the TITLE HDF5 attribute
on disk).
filters : Filters
An instance of the Filters class (see :ref:`FiltersClassDescr`)
that provides information about the desired I/O filters to
be applied during the life of this object.
expectedrows : int, optional
A user estimate about the number of row elements that will
be added to the growable dimension in the `VLArray` node.
If not provided, the default value is ``EXPECTED_ROWS_VLARRAY``
(see ``tables/parameters.py``). If you plan to create either
a much smaller or a much bigger `VLArray` try providing a guess;
this will optimize the HDF5 B-Tree creation and management
process time and the amount of memory used.
.. versionadded:: 3.0
chunkshape : int or tuple of int, optional
The shape of the data chunk to be read or written in a
single HDF5 I/O operation. Filters are applied to those
chunks of data. The dimensionality of chunkshape must be 1.
If None, a sensible value is calculated (which is recommended).
byteorder : str, optional
The byteorder of the data *on disk*, specified as 'little' or
'big'. If this is not specified, the byteorder is that of the
platform.
createparents : bool, optional
Whether to create the needed groups for the parent path to
exist (not done by default).
obj : python object
The array or scalar to be saved. Accepted types are NumPy
arrays and scalars, as well as native Python sequences and
scalars, provided that values are regular (i.e. they are
not like ``[[1,2],2]``) and homogeneous (i.e. all the
elements are of the same type).
The *obj* parameter is optional and it can be provided in
alternative to the *atom* parameter.
If both *obj* and *atom* are provided, they must
be consistent with each other.
.. versionadded:: 3.0
track_times
Whether time data associated with the leaf are recorded (object
access time, raw data modification time, metadata change time,
object birth time); default True. Semantics of these times
depend on their implementation in the HDF5 library: refer to
documentation of the H5O_info_t data structure. As of HDF5
1.8.15, only ctime (metadata change time) is implemented.
.. versionadded:: 3.4.3
See Also
--------
VLArray : for more information on variable-length arrays
.. versionchanged:: 3.0
The *expectedsizeinMB* parameter has been replaced by
*expectedrows*.
"""
if obj is not None:
flavor = flavor_of(obj)
obj = array_as_internal(obj, flavor)
if atom is not None and atom.dtype != obj.dtype:
raise TypeError('the atom parameter is not consistent with '
'the data type of the obj parameter')
if atom is None:
atom = Atom.from_dtype(obj.dtype)
elif atom is None:
raise ValueError('atom parameter cannot be None')
parentnode = self._get_or_create_path(where, createparents)
_checkfilters(filters)
ptobj = VLArray(parentnode, name,
atom=atom, title=title, filters=filters,
expectedrows=expectedrows,
chunkshape=chunkshape, byteorder=byteorder,
track_times=track_times)
if obj is not None:
ptobj.append(obj)
return ptobj
| (self, where, name, atom=None, title='', filters=None, expectedrows=None, chunkshape=None, byteorder=None, createparents=False, obj=None, track_times=True) |
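A minimal usage sketch for ``create_vlarray`` (file and node names hypothetical): each call to ``append`` stores one row, rows may have different lengths, and when *obj* is given instead of *atom* the atom is inferred and *obj* becomes the first row::

    import numpy as np
    import tables as tb

    h5file = tb.open_file('vltest.h5', mode='w')
    # Explicit atom: every element of every row is an int32.
    vlarr = h5file.create_vlarray(h5file.root, 'ragged',
                                  atom=tb.Int32Atom(), title='Ragged rows')
    vlarr.append([1, 2, 3])       # row 0 has three elements
    vlarr.append([4, 5])          # row 1 has two elements
    # Atom inferred from obj; the parent group is created on the fly.
    h5file.create_vlarray('/misc', 'fromobj', obj=np.arange(4),
                          createparents=True)
    h5file.close()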
728,436 | tables.file | del_node_attr | Delete a PyTables attribute from the given node.
Parameters
----------
where, name
These arguments work as in :meth:`File.get_node`, referencing the
node to be acted upon.
attrname
The name of the attribute to delete. If the named
attribute does not exist, an AttributeError is raised.
| def del_node_attr(self, where, attrname, name=None):
"""Delete a PyTables attribute from the given node.
Parameters
----------
where, name
These arguments work as in :meth:`File.get_node`, referencing the
node to be acted upon.
attrname
The name of the attribute to delete. If the named
attribute does not exist, an AttributeError is raised.
"""
obj = self.get_node(where, name=name)
obj._f_delattr(attrname)
| (self, where, attrname, name=None) |
728,437 | tables.file | disable_undo | Disable the Undo/Redo mechanism.
Disabling the Undo/Redo mechanism leaves the database in the
current state and forgets past and future database states. This
makes :meth:`File.mark`, :meth:`File.undo`, :meth:`File.redo` and other
methods fail with an UndoRedoError.
Calling this method when the Undo/Redo mechanism is already
disabled raises an UndoRedoError.
| def disable_undo(self):
"""Disable the Undo/Redo mechanism.
Disabling the Undo/Redo mechanism leaves the database in the
current state and forgets past and future database states. This
makes :meth:`File.mark`, :meth:`File.undo`, :meth:`File.redo` and other
methods fail with an UndoRedoError.
Calling this method when the Undo/Redo mechanism is already
disabled raises an UndoRedoError.
"""
self._check_open()
if not self.is_undo_enabled():
raise UndoRedoError("Undo/Redo feature is already disabled!")
# The file is going to be changed.
self._check_writable()
del self._markers
del self._seqmarkers
del self._curmark
del self._curaction
del self._curtransaction
del self._nmarks
del self._actionlog
# Recursively delete the transaction group
tnode = self.get_node(_trans_group_path)
tnode._g_remove(recursive=1)
# The Undo/Redo mechanism has been disabled.
self._undoEnabled = False
| (self) |
728,438 | tables.file | enable_undo | Enable the Undo/Redo mechanism.
This operation prepares the database for undoing and redoing
modifications in the node hierarchy. This
allows :meth:`File.mark`, :meth:`File.undo`, :meth:`File.redo` and
other methods to be called.
The filters argument, when specified,
must be an instance of class Filters (see :ref:`FiltersClassDescr`) and
is meant for setting the compression values for the action log. The
default is having compression enabled, as the gains in terms of
space can be considerable. You may want to disable compression if
you want maximum speed for Undo/Redo operations.
Calling this method when the Undo/Redo mechanism is already
enabled raises an UndoRedoError.
| def enable_undo(self, filters=Filters(complevel=1)):
"""Enable the Undo/Redo mechanism.
This operation prepares the database for undoing and redoing
modifications in the node hierarchy. This
allows :meth:`File.mark`, :meth:`File.undo`, :meth:`File.redo` and
other methods to be called.
The filters argument, when specified,
must be an instance of class Filters (see :ref:`FiltersClassDescr`) and
is meant for setting the compression values for the action log. The
default is having compression enabled, as the gains in terms of
space can be considerable. You may want to disable compression if
you want maximum speed for Undo/Redo operations.
Calling this method when the Undo/Redo mechanism is already
enabled raises an UndoRedoError.
"""
maxundo = self.params['MAX_UNDO_PATH_LENGTH']
class ActionLog(NotLoggedMixin, Table):
pass
class ActionLogDesc(IsDescription):
opcode = UInt8Col(pos=0)
arg1 = StringCol(maxundo, pos=1, dflt=b"")
arg2 = StringCol(maxundo, pos=2, dflt=b"")
self._check_open()
# Enabling several times is not allowed to avoid the user having
# the illusion that a new implicit mark has been created
# when calling enable_undo for the second time.
if self.is_undo_enabled():
raise UndoRedoError("Undo/Redo feature is already enabled!")
self._markers = {}
self._seqmarkers = []
self._nmarks = 0
self._curtransaction = 0
self._curmark = -1 # No marks yet
# Get the Group for keeping user actions
try:
tgroup = self.get_node(_trans_group_path)
except NodeError:
# The file is going to be changed.
self._check_writable()
# A transaction log group does not exist. Create it
tgroup = self._create_transaction_group()
# Create a transaction.
self._trans = self._create_transaction(
tgroup, self._curtransaction)
# Create an action log
self._actionlog = ActionLog(
tgroup, _action_log_name, ActionLogDesc, "Action log",
filters=filters)
# Create an implicit mark
self._actionlog.append([(_op_to_code["MARK"], str(0), '')])
self._nmarks += 1
self._seqmarkers.append(0) # current action is 0
# Create a group for mark 0
self._create_mark(self._trans, 0)
# Initialize the marker pointer
self._curmark = int(self._nmarks - 1)
# Initialize the action pointer
self._curaction = self._actionlog.nrows - 1
else:
# The group seems to exist already
# Get the default transaction
self._trans = tgroup._f_get_child(
_trans_name % self._curtransaction)
# Open the action log and go to the end of it
self._actionlog = tgroup.actionlog
for row in self._actionlog:
if row["opcode"] == _op_to_code["MARK"]:
name = row["arg2"].decode('utf-8')
self._markers[name] = self._nmarks
self._seqmarkers.append(row.nrow)
self._nmarks += 1
# Get the current mark and current action
self._curmark = int(self._actionlog.attrs.CURMARK)
self._curaction = self._actionlog.attrs.CURACTION
# The Undo/Redo mechanism has been enabled.
self._undoEnabled = True
| (self, filters=Filters(complevel=1, complib='zlib', shuffle=True, bitshuffle=False, fletcher32=False, least_significant_digit=None)) |
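A sketch of the Undo/Redo life cycle that ``enable_undo`` prepares (file name hypothetical); node creation is a logged action, so it can be rolled back and replayed::

    import tables as tb

    h5file = tb.open_file('undo.h5', mode='w')
    h5file.enable_undo()                  # creates the implicit mark 0
    h5file.create_group('/', 'scratch')   # a logged action
    h5file.undo()                         # back to the implicit mark
    print('/scratch' in h5file)           # -> False
    h5file.redo()                         # replay the logged action
    print('/scratch' in h5file)           # -> True
    h5file.disable_undo()                 # drop the action log for good
    h5file.close()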
728,439 | tables.file | flush | Flush all the alive leaves in the object tree. | def flush(self):
"""Flush all the alive leaves in the object tree."""
self._check_open()
# Flush the cache to disk
self._node_manager.flush_nodes()
self._flush_file(0) # 0 means local scope, 1 global (virtual) scope
| (self) |
728,440 | tables.file | get_current_mark | Get the identifier of the current mark.
Returns the identifier of the current mark. This can be used
to know the state of a database after an application crash, or to
get the identifier of the initial implicit mark after a call
to :meth:`File.enable_undo`.
This method can only be called when the Undo/Redo mechanism
has been enabled. Otherwise, an UndoRedoError
is raised.
| def get_current_mark(self):
"""Get the identifier of the current mark.
Returns the identifier of the current mark. This can be used
to know the state of a database after an application crash, or to
get the identifier of the initial implicit mark after a call
to :meth:`File.enable_undo`.
This method can only be called when the Undo/Redo mechanism
has been enabled. Otherwise, an UndoRedoError
is raised.
"""
self._check_open()
self._check_undo_enabled()
return self._curmark
| (self) |
728,441 | tables.file | get_node | Get the node under where with the given name.
Parameters
----------
where : str or Node
This can be a path string leading to a node or a Node instance (see
:ref:`NodeClassDescr`). If no name is specified, that node is
returned.
.. note::
If where is a Node instance from a different file than the one
on which this function is called, the returned node will also
be from that other file.
name : str, optional
If a name is specified, this must be a string with the name of
a node under where. In this case the where argument can only
lead to a Group (see :ref:`GroupClassDescr`) instance (else a
TypeError is raised). The node called name under the group
where is returned.
classname : str, optional
If the classname argument is specified, it must be the name of
a class derived from Node (e.g. Table). If the node is found but it
is not an instance of that class, a NoSuchNodeError is also raised.
Notes
-----
If the node to be returned does not exist, a NoSuchNodeError is
raised. Please note that hidden nodes are also considered.
| def get_node(self, where, name=None, classname=None):
"""Get the node under where with the given name.
Parameters
----------
where : str or Node
This can be a path string leading to a node or a Node instance (see
:ref:`NodeClassDescr`). If no name is specified, that node is
returned.
.. note::
If where is a Node instance from a different file than the one
on which this function is called, the returned node will also
be from that other file.
name : str, optional
If a name is specified, this must be a string with the name of
a node under where. In this case the where argument can only
lead to a Group (see :ref:`GroupClassDescr`) instance (else a
TypeError is raised). The node called name under the group
where is returned.
classname : str, optional
If the classname argument is specified, it must be the name of
a class derived from Node (e.g. Table). If the node is found but it
is not an instance of that class, a NoSuchNodeError is also raised.
Notes
-----
If the node to be returned does not exist, a NoSuchNodeError is
raised. Please note that hidden nodes are also considered.
"""
self._check_open()
if isinstance(where, Node):
where._g_check_open()
basepath = where._v_pathname
nodepath = join_path(basepath, name or '') or '/'
node = where._v_file._get_node(nodepath)
elif isinstance(where, (str, np.str_)):
if not where.startswith('/'):
raise NameError("``where`` must start with a slash ('/')")
basepath = where
nodepath = join_path(basepath, name or '') or '/'
node = self._get_node(nodepath)
else:
raise TypeError(
f"``where`` must be a string or a node: {where!r}")
# Finally, check whether the desired node is an instance
# of the expected class.
if classname:
class_ = get_class_by_name(classname)
if not isinstance(node, class_):
npathname = node._v_pathname
nclassname = node.__class__.__name__
# This error message is right since it can never be shown
# for ``classname in [None, 'Node']``.
raise NoSuchNodeError(
"could not find a ``%s`` node at ``%s``; "
"instead, a ``%s`` node has been found there"
% (classname, npathname, nclassname))
return node
| (self, where, name=None, classname=None) |
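A short sketch of the addressing modes ``get_node`` accepts, assuming an open handle ``h5file`` containing a hypothetical ``/detector/readout`` table::

    node = h5file.get_node('/detector/readout')      # full path string
    node = h5file.get_node('/detector', 'readout')   # parent path plus name
    node = h5file.get_node(h5file.root.detector, 'readout')  # Group instance
    # Constrain the expected class; a mismatch raises NoSuchNodeError.
    table = h5file.get_node('/detector/readout', classname='Table')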
728,442 | tables.file | get_node_attr | Get a PyTables attribute from the given node.
Parameters
----------
where, name
These arguments work as in :meth:`File.get_node`, referencing the
node to be acted upon.
attrname
The name of the attribute to retrieve. If the named
attribute does not exist, an AttributeError is raised.
| def get_node_attr(self, where, attrname, name=None):
"""Get a PyTables attribute from the given node.
Parameters
----------
where, name
These arguments work as in :meth:`File.get_node`, referencing the
node to be acted upon.
attrname
The name of the attribute to retrieve. If the named
attribute does not exist, an AttributeError is raised.
"""
obj = self.get_node(where, name=name)
return obj._f_getattr(attrname)
| (self, where, attrname, name=None) |
728,443 | tables.file | goto | Go to a specific mark of the database.
Returns the database to the state associated with the specified mark.
Both the identifier of a mark and its name can be used.
This method can only be called when the Undo/Redo mechanism has been
enabled. Otherwise, an UndoRedoError is raised.
| def goto(self, mark):
"""Go to a specific mark of the database.
Returns the database to the state associated with the specified mark.
Both the identifier of a mark and its name can be used.
This method can only be called when the Undo/Redo mechanism has been
enabled. Otherwise, an UndoRedoError is raised.
"""
self._check_open()
self._check_undo_enabled()
if mark == -1: # Special case
mark = self._nmarks # Go beyond the mark bounds up to the end
# Get the mark ID number
markid = self._get_mark_id(mark)
finalaction = self._get_final_action(markid)
if finalaction < self._curaction:
self.undo(mark)
else:
self.redo(mark)
| (self, mark) |
728,444 | tables.file | is_undo_enabled | Is the Undo/Redo mechanism enabled?
Returns True if the Undo/Redo mechanism has been enabled for
this file, False otherwise. Please note that this mechanism is
persistent, so a newly opened PyTables file may already have
Undo/Redo support enabled.
| def is_undo_enabled(self):
"""Is the Undo/Redo mechanism enabled?
Returns True if the Undo/Redo mechanism has been enabled for
this file, False otherwise. Please note that this mechanism is
persistent, so a newly opened PyTables file may already have
Undo/Redo support enabled.
"""
self._check_open()
return self._undoEnabled
| (self) |
728,445 | tables.file | is_visible_node | Is the node under `path` visible?
If the node does not exist, a NoSuchNodeError is raised.
| def is_visible_node(self, path):
"""Is the node under `path` visible?
If the node does not exist, a NoSuchNodeError is raised.
"""
# ``util.isvisiblepath()`` is still recommended for internal use.
return self.get_node(path)._f_isvisible()
| (self, path) |
728,446 | tables.file | iter_nodes | Iterate over children nodes hanging from where.
Parameters
----------
where
This argument works as in :meth:`File.get_node`, referencing the
node to be acted upon.
classname
If the name of a class derived from
Node (see :ref:`NodeClassDescr`) is supplied, only instances of
that class (or subclasses of it) will be returned.
Notes
-----
The returned nodes are alphanumerically sorted by their name.
This is an iterator version of :meth:`File.list_nodes`.
| def iter_nodes(self, where, classname=None):
"""Iterate over children nodes hanging from where.
Parameters
----------
where
This argument works as in :meth:`File.get_node`, referencing the
node to be acted upon.
classname
If the name of a class derived from
Node (see :ref:`NodeClassDescr`) is supplied, only instances of
that class (or subclasses of it) will be returned.
Notes
-----
The returned nodes are alphanumerically sorted by their name.
This is an iterator version of :meth:`File.list_nodes`.
"""
group = self.get_node(where) # Does the parent exist?
self._check_group(group) # Is it a group?
return group._f_iter_nodes(classname)
| (self, where, classname=None) |
728,447 | tables.file | list_nodes | Return a *list* with children nodes hanging from where.
This is a list-returning version of :meth:`File.iter_nodes`.
| def list_nodes(self, where, classname=None):
"""Return a *list* with children nodes hanging from where.
This is a list-returning version of :meth:`File.iter_nodes`.
"""
group = self.get_node(where) # Does the parent exist?
self._check_group(group) # Is it a group?
return group._f_list_nodes(classname)
| (self, where, classname=None) |
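A sketch contrasting the eager and lazy variants, under the same hypothetical ``/detector`` group as above::

    # Materialize all children at once...
    children = h5file.list_nodes('/detector')
    # ...or iterate lazily, optionally filtering by class.
    for leaf in h5file.iter_nodes('/detector', classname='Leaf'):
        print(leaf._v_pathname)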
728,448 | tables.file | mark | Mark the state of the database.
Creates a mark for the current state of the database. A unique (and
immutable) identifier for the mark is returned. An optional name (a
string) can be assigned to the mark. Both the identifier of a mark and
its name can be used in :meth:`File.undo` and :meth:`File.redo`
operations. When the name has already been used for another mark,
an UndoRedoError is raised.
This method can only be called when the Undo/Redo mechanism has been
enabled. Otherwise, an UndoRedoError is raised.
| def mark(self, name=None):
"""Mark the state of the database.
Creates a mark for the current state of the database. A unique (and
immutable) identifier for the mark is returned. An optional name (a
string) can be assigned to the mark. Both the identifier of a mark and
its name can be used in :meth:`File.undo` and :meth:`File.redo`
operations. When the name has already been used for another mark,
an UndoRedoError is raised.
This method can only be called when the Undo/Redo mechanism has been
enabled. Otherwise, an UndoRedoError is raised.
"""
self._check_open()
self._check_undo_enabled()
if name is None:
name = ''
else:
if not isinstance(name, str):
raise TypeError("Only strings are allowed as mark names. "
"You passed object: '%s'" % name)
if name in self._markers:
raise UndoRedoError("Name '%s' is already used as a marker "
"name. Try another one." % name)
# The file is going to be changed.
self._check_writable()
self._markers[name] = self._curmark + 1
# Create an explicit mark
# Insert the mark in the action log
self._log("MARK", str(self._curmark + 1), name)
self._curmark += 1
self._nmarks = self._curmark + 1
self._seqmarkers.append(self._curaction)
# Create a group for the current mark
self._create_mark(self._trans, self._curmark)
return self._curmark
| (self, name=None) |
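A sketch combining named marks with ``goto`` on an open, writable ``h5file`` (mark names hypothetical); both the returned identifier and the name address the same state::

    h5file.enable_undo()
    markid = h5file.mark('empty')             # explicit mark, id returned
    h5file.create_array('/', 'x', [1, 2, 3])
    h5file.mark('with_x')
    h5file.goto('empty')                      # roll back: /x disappears
    h5file.goto('with_x')                     # replay: /x is restored
    h5file.goto(markid)                       # ids work as well as names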
728,449 | tables.file | move_node | Move the node specified by where and name to newparent/newname.
Parameters
----------
where, name : path
These arguments work as in
:meth:`File.get_node`, referencing the node to be acted upon.
newparent
The destination group the node will be moved into (a
path name or a Group instance). If it is
not specified or None, the current parent
group is chosen as the new parent.
newname
The new name to be assigned to the node in its
destination (a string). If it is not specified or
None, the current name is chosen as the
new name.
Notes
-----
The other arguments work as in :meth:`Node._f_move`.
| def move_node(self, where, newparent=None, newname=None, name=None,
overwrite=False, createparents=False):
"""Move the node specified by where and name to newparent/newname.
Parameters
----------
where, name : path
These arguments work as in
:meth:`File.get_node`, referencing the node to be acted upon.
newparent
The destination group the node will be moved into (a
path name or a Group instance). If it is
not specified or None, the current parent
group is chosen as the new parent.
newname
The new name to be assigned to the node in its
destination (a string). If it is not specified or
None, the current name is chosen as the
new name.
Notes
-----
The other arguments work as in :meth:`Node._f_move`.
"""
obj = self.get_node(where, name=name)
obj._f_move(newparent, newname, overwrite, createparents)
| (self, where, newparent=None, newname=None, name=None, overwrite=False, createparents=False) |
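A sketch of the node-manipulation helpers, which simply delegate to the node's own ``_f_*`` methods (paths hypothetical)::

    h5file.rename_node('/detector/readout', 'readout_v2')
    h5file.move_node('/detector/readout_v2', newparent='/archive',
                     createparents=True)
    h5file.remove_node('/detector', recursive=True)  # drop the whole subtree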
728,450 | tables.file | redo | Go to a future state of the database.
Returns the database to the state associated with the specified
mark. Both the identifier of a mark and its name can be used.
If the `mark` is omitted, the next created mark is used. If
there are no future marks, or the specified mark is not newer
than the current one, an UndoRedoError is raised.
This method can only be called when the Undo/Redo mechanism has
been enabled. Otherwise, an UndoRedoError is raised.
| def redo(self, mark=None):
"""Go to a future state of the database.
Returns the database to the state associated with the specified
mark. Both the identifier of a mark and its name can be used.
If the `mark` is omitted, the next created mark is used. If
there are no future marks, or the specified mark is not newer
than the current one, an UndoRedoError is raised.
This method can only be called when the Undo/Redo mechanism has
been enabled. Otherwise, an UndoRedoError is raised.
"""
self._check_open()
self._check_undo_enabled()
# print("(pre)REDO: (curaction, curmark) = (%s, %s)" % \
# (self._curaction, self._curmark))
if self._curaction >= self._actionlog.nrows - 1:
# We are at the end of log, so no action
return
if mark is None:
mark = self._curmark + 1
elif mark == -1:
mark = int(self._nmarks) # Go beyond the mark bounds up to the end
# Get the mark ID number
markid = self._get_mark_id(mark)
finalaction = self._get_final_action(markid)
if finalaction < self._curaction + 1:
raise UndoRedoError("Mark ``%s`` is older than the current mark. "
"Use `redo()` or `goto()` instead." % (mark,))
# The file is going to be changed.
self._check_writable()
# Get the final action ID to go
self._curaction += 1
# Try to reach this mark by redoing the actions in the log
self._doundo(finalaction, 1)
# Increment the current mark only if we are not at the end of marks
if self._curmark < self._nmarks - 1:
self._curmark += 1
if self._curaction > self._actionlog.nrows - 1:
self._curaction = self._actionlog.nrows - 1
| (self, mark=None) |
728,451 | tables.file | remove_node | Remove the object node *name* under *where* location.
Parameters
----------
where, name
These arguments work as in
:meth:`File.get_node`, referencing the node to be acted upon.
recursive : bool
If not supplied or false, the node will be removed
only if it has no children; if it does, a
NodeError will be raised. If supplied
with a true value, the node and all its descendants will be
completely removed.
| def remove_node(self, where, name=None, recursive=False):
"""Remove the object node *name* under *where* location.
Parameters
----------
where, name
These arguments work as in
:meth:`File.get_node`, referencing the node to be acted upon.
recursive : bool
If not supplied or false, the node will be removed
only if it has no children; if it does, a
NodeError will be raised. If supplied
with a true value, the node and all its descendants will be
completely removed.
"""
obj = self.get_node(where, name=name)
obj._f_remove(recursive)
| (self, where, name=None, recursive=False) |
728,452 | tables.file | rename_node | Change the name of the node specified by where and name to newname.
Parameters
----------
where, name
These arguments work as in
:meth:`File.get_node`, referencing the node to be acted upon.
newname : str
The new name to be assigned to the node (a string).
overwrite : bool
Whether to recursively remove a node with the same
newname if it already exists (not done by default).
| def rename_node(self, where, newname, name=None, overwrite=False):
"""Change the name of the node specified by where and name to newname.
Parameters
----------
where, name
These arguments work as in
:meth:`File.get_node`, referencing the node to be acted upon.
newname : str
The new name to be assigned to the node (a string).
overwrite : bool
Whether to recursively remove a node with the same
newname if it already exists (not done by default).
"""
obj = self.get_node(where, name=name)
obj._f_rename(newname, overwrite)
| (self, where, newname, name=None, overwrite=False) |
728,453 | tables.file | set_node_attr | Set a PyTables attribute for the given node.
Parameters
----------
where, name
These arguments work as in
:meth:`File.get_node`, referencing the node to be acted upon.
attrname
The name of the attribute to set.
attrvalue
The value of the attribute to set. Any kind of Python
object (like strings, ints, floats, lists, tuples, dicts,
small NumPy objects ...) can be stored as an attribute.
However, if necessary, pickle is used automatically to
serialize objects that you might want to save.
See the :class:`AttributeSet` class for details.
Notes
-----
If the node already has a large number of attributes, a
PerformanceWarning is issued.
| def set_node_attr(self, where, attrname, attrvalue, name=None):
"""Set a PyTables attribute for the given node.
Parameters
----------
where, name
These arguments work as in
:meth:`File.get_node`, referencing the node to be acted upon.
attrname
The name of the attribute to set.
attrvalue
The value of the attribute to set. Any kind of Python
object (like strings, ints, floats, lists, tuples, dicts,
small NumPy objects ...) can be stored as an attribute.
However, if necessary, pickle is used automatically to
serialize objects that you might want to save.
See the :class:`AttributeSet` class for details.
Notes
-----
If the node already has a large number of attributes, a
PerformanceWarning is issued.
"""
obj = self.get_node(where, name=name)
obj._f_setattr(attrname, attrvalue)
| (self, where, attrname, attrvalue, name=None) |
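A round-trip sketch for the attribute helpers (attribute name hypothetical); the last line shows the equivalent natural-naming spelling::

    h5file.set_node_attr('/detector/readout', 'gain', 1.5)
    print(h5file.get_node_attr('/detector/readout', 'gain'))  # -> 1.5
    h5file.del_node_attr('/detector/readout', 'gain')
    # The same write, via the node's attribute set:
    h5file.root.detector.readout.attrs.gain = 1.5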
728,454 | tables.file | undo | Go to a past state of the database.
Returns the database to the state associated with the specified mark.
Both the identifier of a mark and its name can be used. If the mark is
omitted, the last created mark is used. If there are no past
marks, or the specified mark is not older than the current one, an
UndoRedoError is raised.
This method can only be called when the Undo/Redo mechanism
has been enabled. Otherwise, an UndoRedoError
is raised.
| def undo(self, mark=None):
"""Go to a past state of the database.
Returns the database to the state associated with the specified mark.
Both the identifier of a mark and its name can be used. If the mark is
omitted, the last created mark is used. If there are no past
marks, or the specified mark is not older than the current one, an
UndoRedoError is raised.
This method can only be called when the Undo/Redo mechanism
has been enabled. Otherwise, an UndoRedoError
is raised.
"""
self._check_open()
self._check_undo_enabled()
# print("(pre)UNDO: (curaction, curmark) = (%s,%s)" % \
# (self._curaction, self._curmark))
if mark is None:
markid = self._curmark
# Correction if we are settled on top of a mark
opcode = self._actionlog.cols.opcode
if opcode[self._curaction] == _op_to_code["MARK"]:
markid -= 1
else:
# Get the mark ID number
markid = self._get_mark_id(mark)
# Get the final action ID to go
finalaction = self._get_final_action(markid)
if finalaction > self._curaction:
raise UndoRedoError("Mark ``%s`` is newer than the current mark. "
"Use `redo()` or `goto()` instead." % (mark,))
# The file is going to be changed.
self._check_writable()
# Try to reach this mark by unwinding actions in the log
self._doundo(finalaction - 1, -1)
if self._curaction < self._actionlog.nrows - 1:
self._curaction += 1
self._curmark = int(self._actionlog.cols.arg1[self._curaction])
| (self, mark=None) |
728,455 | tables.file | walk_groups | Recursively iterate over groups (not leaves) hanging from where.
The where group itself is listed first (preorder), then each of its
child groups (following an alphanumerical order) is also traversed,
following the same procedure. If where is not supplied, the root
group is used.
The where argument can be a path string
or a Group instance (see :ref:`GroupClassDescr`).
| def walk_groups(self, where="/"):
"""Recursively iterate over groups (not leaves) hanging from where.
The where group itself is listed first (preorder), then each of its
child groups (following an alphanumerical order) is also traversed,
following the same procedure. If where is not supplied, the root
group is used.
The where argument can be a path string
or a Group instance (see :ref:`GroupClassDescr`).
"""
group = self.get_node(where) # Does the parent exist?
self._check_group(group) # Is it a group?
return group._f_walk_groups()
| (self, where='/') |
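A sketch pairing ``walk_groups`` with ``list_nodes`` to visit every leaf in the file, a combination the PyTables tutorials use frequently::

    for group in h5file.walk_groups('/'):
        for leaf in h5file.list_nodes(group, classname='Leaf'):
            print(leaf._v_pathname)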
728,456 | tables.file | walk_nodes | Recursively iterate over nodes hanging from where.
Parameters
----------
where : str or Group, optional
If supplied, the iteration starts from (and includes)
this group. It can be a path string or a
Group instance (see :ref:`GroupClassDescr`).
classname
If the name of a class derived from
Node (see :ref:`NodeClassDescr`) is supplied, only instances of
that class (or subclasses of it) will be returned.
Notes
-----
This version iterates over the leaves in the same group in order
to avoid keeping a list of references to them, which would
prevent the LRU cache from removing them after use.
Examples
--------
::
# Recursively print all the nodes hanging from '/detector'.
print("Nodes hanging from group '/detector':")
for node in h5file.walk_nodes('/detector', classname='EArray'):
print(node)
| def walk_nodes(self, where="/", classname=None):
"""Recursively iterate over nodes hanging from where.
Parameters
----------
where : str or Group, optional
If supplied, the iteration starts from (and includes)
this group. It can be a path string or a
Group instance (see :ref:`GroupClassDescr`).
classname
If the name of a class derived from
Node (see :ref:`NodeClassDescr`) is supplied, only instances of
that class (or subclasses of it) will be returned.
Notes
-----
This version iterates over the leaves in the same group in order
to avoid keeping a list of references to them, which would
prevent the LRU cache from removing them after use.
Examples
--------
::
# Recursively print all the nodes hanging from '/detector'.
print("Nodes hanging from group '/detector':")
for node in h5file.walk_nodes('/detector', classname='EArray'):
print(node)
"""
class_ = get_class_by_name(classname)
if class_ is Group: # only groups
yield from self.walk_groups(where)
elif class_ is Node: # all nodes
yield self.get_node(where)
for group in self.walk_groups(where):
yield from self.iter_nodes(group)
else: # only nodes of the named type
for group in self.walk_groups(where):
yield from self.iter_nodes(group, classname)
| (self, where='/', classname=None) |
728,457 | tables.exceptions | FileModeError | The operation cannot be carried out because the mode in which the
hosting file is opened is not adequate.
For instance, removing an existing leaf from a read-only file is not
allowed.
| class FileModeError(ValueError):
"""The operation can not be carried out because the mode in which the
hosting file is opened is not adequate.
For instance, removing an existing leaf from a read-only file is not
allowed.
"""
pass
| null |
728,458 | tables.filters | Filters | Container for filter properties.
This class is meant to serve as a container that keeps information about
the filter properties associated with the chunked leaves, that is Table,
CArray, EArray and VLArray.
Instances of this class can be directly compared for equality.
Parameters
----------
complevel : int
Specifies a compression level for data. The allowed
range is 0-9. A value of 0 (the default) disables
compression.
complib : str
Specifies the compression library to be used. Right now, 'zlib' (the
default), 'lzo', 'bzip2', 'blosc' and 'blosc2' are supported.
Additional compressors for Blosc like 'blosc:blosclz' ('blosclz' is
the default in case the additional compressor is not specified),
'blosc:lz4', 'blosc:lz4hc', 'blosc:zlib' and 'blosc:zstd' are
supported too.
Also, additional compressors for Blosc2 like 'blosc2:blosclz'
('blosclz' is the default in case the additional compressor is not
specified), 'blosc2:lz4', 'blosc2:lz4hc', 'blosc2:zlib' and
'blosc2:zstd' are supported too.
Specifying a compression library which is not available
in the system issues a FiltersWarning and sets the library to the
default one.
shuffle : bool
Whether to use the *Shuffle* filter in the HDF5 library.
This is normally used to improve the compression ratio.
A false value disables shuffling and a true one enables
it. The default value depends on whether compression is
enabled or not; if compression is enabled, shuffling defaults
to be enabled, else shuffling is disabled. Shuffling can only
be used when compression is enabled.
bitshuffle : bool
Whether to use the *BitShuffle* filter in the Blosc/Blosc2
libraries. This is normally used to improve the compression
ratio. A false value disables bitshuffling and a true one
enables it. The default value is disabled.
fletcher32 : bool
Whether to use the *Fletcher32* filter in the HDF5 library.
This is used to add a checksum on each data chunk. A false
value (the default) disables the checksum.
least_significant_digit : int
If specified, data will be truncated (quantized). In conjunction
with enabling compression, this produces 'lossy', but
significantly more efficient compression. For example, if
*least_significant_digit=1*, data will be quantized using
``around(scale*data)/scale``, where ``scale = 2**bits``, and
bits is determined so that a precision of 0.1 is retained (in
this case bits=4). Default is *None*, or no quantization.
.. note::
quantization is only applied if some form of compression is
enabled
Examples
--------
This is a small example on using the Filters class::
import numpy as np
import tables as tb
fileh = tb.open_file('test5.h5', mode='w')
atom = Float32Atom()
filters = Filters(complevel=1, complib='blosc', fletcher32=True)
arr = fileh.create_earray(fileh.root, 'earray', atom, (0,2),
"A growable array", filters=filters)
# Append several rows in only one call
arr.append(np.array([[1., 2.],
[2., 3.],
[3., 4.]], dtype=np.float32))
# Print information on that enlargeable array
print("Result Array:")
print(repr(arr))
fileh.close()
This enforces the use of the Blosc library, a compression level of 1, and a
Fletcher32 checksum filter. See the output of this example::
Result Array:
/earray (EArray(3, 2), fletcher32, shuffle, blosc(1)) 'A growable array'
type = float32
shape = (3, 2)
itemsize = 4
nrows = 3
extdim = 0
flavor = 'numpy'
byteorder = 'little'
.. rubric:: Filters attributes
.. attribute:: fletcher32
Whether the *Fletcher32* filter is active or not.
.. attribute:: complevel
The compression level (0 disables compression).
.. attribute:: complib
The compression filter used (irrelevant when compression is not
enabled).
.. attribute:: shuffle
Whether the *Shuffle* filter is active or not.
.. attribute:: bitshuffle
Whether the *BitShuffle* filter is active or not (Blosc/Blosc2 only).
| class Filters:
"""Container for filter properties.
This class is meant to serve as a container that keeps information about
the filter properties associated with the chunked leaves, that is Table,
CArray, EArray and VLArray.
Instances of this class can be directly compared for equality.
Parameters
----------
complevel : int
Specifies a compression level for data. The allowed
range is 0-9. A value of 0 (the default) disables
compression.
complib : str
Specifies the compression library to be used. Right now, 'zlib' (the
default), 'lzo', 'bzip2', 'blosc' and 'blosc2' are supported.
Additional compressors for Blosc like 'blosc:blosclz' ('blosclz' is
the default in case the additional compressor is not specified),
'blosc:lz4', 'blosc:lz4hc', 'blosc:zlib' and 'blosc:zstd' are
supported too.
Also, additional compressors for Blosc2 like 'blosc2:blosclz'
('blosclz' is the default in case the additional compressor is not
specified), 'blosc2:lz4', 'blosc2:lz4hc', 'blosc2:zlib' and
'blosc2:zstd' are supported too.
Specifying a compression library which is not available
in the system issues a FiltersWarning and sets the library to the
default one.
shuffle : bool
Whether to use the *Shuffle* filter in the HDF5 library.
This is normally used to improve the compression ratio.
A false value disables shuffling and a true one enables
it. The default value depends on whether compression is
enabled or not; if compression is enabled, shuffling defaults
to be enabled, else shuffling is disabled. Shuffling can only
be used when compression is enabled.
bitshuffle : bool
Whether to use the *BitShuffle* filter in the Blosc/Blosc2
libraries. This is normally used to improve the compression
ratio. A false value disables bitshuffling and a true one
enables it. The default value is disabled.
fletcher32 : bool
Whether to use the *Fletcher32* filter in the HDF5 library.
This is used to add a checksum on each data chunk. A false
value (the default) disables the checksum.
least_significant_digit : int
If specified, data will be truncated (quantized). In conjunction
with enabling compression, this produces 'lossy', but
significantly more efficient compression. For example, if
*least_significant_digit=1*, data will be quantized using
``around(scale*data)/scale``, where ``scale = 2**bits``, and
bits is determined so that a precision of 0.1 is retained (in
this case bits=4). Default is *None*, or no quantization.
.. note::
quantization is only applied if some form of compression is
enabled
Examples
--------
This is a small example on using the Filters class::
import numpy as np
import tables as tb
fileh = tb.open_file('test5.h5', mode='w')
atom = Float32Atom()
filters = Filters(complevel=1, complib='blosc', fletcher32=True)
arr = fileh.create_earray(fileh.root, 'earray', atom, (0,2),
"A growable array", filters=filters)
# Append several rows in only one call
arr.append(np.array([[1., 2.],
[2., 3.],
[3., 4.]], dtype=np.float32))
# Print information on that enlargeable array
print("Result Array:")
print(repr(arr))
fileh.close()
This enforces the use of the Blosc library, a compression level of 1, and a
Fletcher32 checksum filter. See the output of this example::
Result Array:
/earray (EArray(3, 2), fletcher32, shuffle, blosc(1)) 'A growable array'
type = float32
shape = (3, 2)
itemsize = 4
nrows = 3
extdim = 0
flavor = 'numpy'
byteorder = 'little'
.. rubric:: Filters attributes
.. attribute:: fletcher32
Whether the *Fletcher32* filter is active or not.
.. attribute:: complevel
The compression level (0 disables compression).
.. attribute:: complib
The compression filter used (irrelevant when compression is not
enabled).
.. attribute:: shuffle
Whether the *Shuffle* filter is active or not.
.. attribute:: bitshuffle
Whether the *BitShuffle* filter is active or not (Blosc/Blosc2 only).
"""
@property
def shuffle_bitshuffle(self):
"""Encode NoShuffle (0), Shuffle (1) and BitShuffle (2) filters."""
if (self.shuffle and self.bitshuffle):
raise ValueError(
"Shuffle and BitShuffle cannot be active at the same time")
if not (self.shuffle or self.bitshuffle):
return 0
if self.shuffle:
return 1
if self.bitshuffle:
return 2
@classmethod
def _from_leaf(cls, leaf):
# Get a dictionary with all the filters
parent = leaf._v_parent
filters_dict = utilsextension.get_filters(parent._v_objectid,
leaf._v_name)
if filters_dict is None:
filters_dict = {} # not chunked
# Keyword arguments are all off
kwargs = dict(complevel=0, shuffle=False, bitshuffle=False,
fletcher32=False, least_significant_digit=None,
_new=False)
for (name, values) in filters_dict.items():
if name == 'deflate':
name = 'zlib'
if name in all_complibs:
kwargs['complib'] = name
if name in ('blosc', 'blosc2'):
kwargs['complevel'] = values[4]
if values[5] == 1:
# Shuffle filter is internal to blosc/blosc2
kwargs['shuffle'] = True
elif values[5] == 2:
# Shuffle filter is internal to blosc/blosc2
kwargs['bitshuffle'] = True
# From Blosc 1.3 on, parameter 6 is used for the compressor
if len(values) > 6:
if name == "blosc":
cname = blosc_compcode_to_compname(values[6])
kwargs['complib'] = "blosc:%s" % cname
else:
cname = blosc2_compcode_to_compname(values[6])
kwargs['complib'] = "blosc2:%s" % cname
else:
kwargs['complevel'] = values[0]
elif name in foreign_complibs:
kwargs['complib'] = name
kwargs['complevel'] = 1 # any nonzero value will do
elif name in ['shuffle', 'fletcher32']:
kwargs[name] = True
return cls(**kwargs)
@classmethod
def _unpack(cls, packed):
"""Create a new `Filters` object from a packed version.
>>> Filters._unpack(0)
Filters(complevel=0, shuffle=False, bitshuffle=False, fletcher32=False, least_significant_digit=None)
>>> Filters._unpack(0x101)
Filters(complevel=1, complib='zlib', shuffle=False, bitshuffle=False, fletcher32=False, least_significant_digit=None)
>>> Filters._unpack(0x30109)
Filters(complevel=9, complib='zlib', shuffle=True, bitshuffle=False, fletcher32=True, least_significant_digit=None)
>>> Filters._unpack(0x3010A)
Traceback (most recent call last):
...
ValueError: compression level must be between 0 and 9
>>> Filters._unpack(0x1)
Traceback (most recent call last):
...
ValueError: invalid compression library id: 0
"""
kwargs = {'_new': False}
# Byte 0: compression level.
kwargs['complevel'] = complevel = packed & 0xff
packed >>= 8
# Byte 1: compression library id (0 for none).
if complevel > 0:
complib_id = int(packed & 0xff)
if not (0 < complib_id <= len(all_complibs)):
raise ValueError("invalid compression library id: %d"
% complib_id)
kwargs['complib'] = all_complibs[complib_id - 1]
packed >>= 8
# Byte 2: parameterless filters.
kwargs['shuffle'] = packed & _shuffle_flag
kwargs['bitshuffle'] = packed & _bitshuffle_flag
kwargs['fletcher32'] = packed & _fletcher32_flag
has_rounding = packed & _rounding_flag
packed >>= 8
# Byte 3: least significant digit.
if has_rounding:
kwargs['least_significant_digit'] = np.int8(packed & 0xff)
else:
kwargs['least_significant_digit'] = None
return cls(**kwargs)
def _pack(self):
"""Pack the `Filters` object into a 64-bit NumPy integer."""
packed = np.int64(0)
# Byte 3: least significant digit.
if self.least_significant_digit is not None:
# assert isinstance(self.least_significant_digit, np.int8)
packed |= self.least_significant_digit
packed <<= 8
# Byte 2: parameterless filters.
if self.shuffle:
packed |= _shuffle_flag
if self.bitshuffle:
packed |= _bitshuffle_flag
if self.fletcher32:
packed |= _fletcher32_flag
if self.least_significant_digit:
packed |= _rounding_flag
packed <<= 8
# Byte 1: compression library id (0 for none).
if self.complevel > 0:
packed |= all_complibs.index(self.complib) + 1
packed <<= 8
# Byte 0: compression level.
packed |= self.complevel
return packed
def __init__(self, complevel=0, complib=default_complib,
shuffle=True, bitshuffle=False, fletcher32=False,
least_significant_digit=None, _new=True):
if not (0 <= complevel <= 9):
raise ValueError("compression level must be between 0 and 9")
if _new and complevel > 0:
# These checks are not performed when loading filters from disk.
if complib not in all_complibs:
raise ValueError(
"compression library ``%s`` is not supported; "
"it must be one of: %s"
% (complib, ", ".join(all_complibs)))
if utilsextension.which_lib_version(complib) is None:
warnings.warn("compression library ``%s`` is not available; "
"using ``%s`` instead"
% (complib, default_complib), FiltersWarning)
complib = default_complib # always available
complevel = int(complevel)
complib = str(complib)
shuffle = bool(shuffle)
bitshuffle = bool(bitshuffle)
fletcher32 = bool(fletcher32)
if least_significant_digit is not None:
least_significant_digit = np.int8(least_significant_digit)
if complevel == 0:
# Override some inputs when compression is not enabled.
complib = None # make it clear there is no compression
shuffle = False # shuffling and not compressing makes no sense
least_significant_digit = None
elif complib not in all_complibs:
# Do not try to use a meaningful level for unsupported libs.
complevel = -1
self.complevel = complevel
"""The compression level (0 disables compression)."""
self.complib = complib
"""The compression filter used (irrelevant when compression is
not enabled).
"""
self.shuffle = shuffle
"""Whether the *Shuffle* filter is active or not."""
self.bitshuffle = bitshuffle
"""Whether the *BitShuffle* filter is active or not."""
if (self.complib and
self.bitshuffle and
not self.complib.startswith('blosc')):
raise ValueError("BitShuffle can only be used inside Blosc/Blosc2")
if self.shuffle and self.bitshuffle:
# BitShuffle has priority in case both are specified
self.shuffle = False
self.fletcher32 = fletcher32
"""Whether the *Fletcher32* filter is active or not."""
self.least_significant_digit = least_significant_digit
"""The least significant digit to which data shall be truncated."""
def __repr__(self):
args = []
if self.complevel >= 0: # meaningful compression level
args.append(f'complevel={self.complevel}')
if self.complevel != 0: # compression enabled (-1 or > 0)
args.append(f'complib={self.complib!r}')
args.append(f'shuffle={self.shuffle}')
args.append(f'bitshuffle={self.bitshuffle}')
args.append(f'fletcher32={self.fletcher32}')
args.append(f'least_significant_digit={self.least_significant_digit}')
return f'{self.__class__.__name__}({", ".join(args)})'
def __str__(self):
return repr(self)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
for attr in self.__dict__:
if getattr(self, attr) != getattr(other, attr):
return False
return True
# XXX: API incompatible change for PyTables 3 line
# Overriding __eq__ blocks inheritance of __hash__ in 3.x
# def __hash__(self):
# return hash((self.__class__, self.complevel, self.complib,
# self.shuffle, self.bitshuffle, self.fletcher32))
def copy(self, **override):
"""Get a copy of the filters, possibly overriding some arguments.
Constructor arguments to be overridden must be passed as keyword
arguments.
Using this method is recommended over replacing the attributes of an
instance, since instances of this class may become immutable in the
future::
>>> filters1 = Filters()
>>> filters2 = filters1.copy()
>>> filters1 == filters2
True
>>> filters1 is filters2
False
>>> filters3 = filters1.copy(complevel=1) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: compression library ``None`` is not supported...
>>> filters3 = filters1.copy(complevel=1, complib='zlib')
>>> print(filters1)
Filters(complevel=0, shuffle=False, bitshuffle=False, fletcher32=False, least_significant_digit=None)
>>> print(filters3)
Filters(complevel=1, complib='zlib', shuffle=False, bitshuffle=False, fletcher32=False, least_significant_digit=None)
>>> filters1.copy(foobar=42) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: ...__init__() got an unexpected keyword argument 'foobar'
"""
newargs = self.__dict__.copy()
newargs.update(override)
return self.__class__(**newargs)
| (complevel=0, complib='zlib', shuffle=True, bitshuffle=False, fletcher32=False, least_significant_digit=None, _new=True) |
728,459 | tables.filters | __eq__ | null | def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
for attr in self.__dict__:
if getattr(self, attr) != getattr(other, attr):
return False
return True
| (self, other) |
728,460 | tables.filters | __init__ | null | def __init__(self, complevel=0, complib=default_complib,
shuffle=True, bitshuffle=False, fletcher32=False,
least_significant_digit=None, _new=True):
if not (0 <= complevel <= 9):
raise ValueError("compression level must be between 0 and 9")
if _new and complevel > 0:
# These checks are not performed when loading filters from disk.
if complib not in all_complibs:
raise ValueError(
"compression library ``%s`` is not supported; "
"it must be one of: %s"
% (complib, ", ".join(all_complibs)))
if utilsextension.which_lib_version(complib) is None:
warnings.warn("compression library ``%s`` is not available; "
"using ``%s`` instead"
% (complib, default_complib), FiltersWarning)
complib = default_complib # always available
complevel = int(complevel)
complib = str(complib)
shuffle = bool(shuffle)
bitshuffle = bool(bitshuffle)
fletcher32 = bool(fletcher32)
if least_significant_digit is not None:
least_significant_digit = np.int8(least_significant_digit)
if complevel == 0:
# Override some inputs when compression is not enabled.
complib = None # make it clear there is no compression
shuffle = False # shuffling and not compressing makes no sense
least_significant_digit = None
elif complib not in all_complibs:
# Do not try to use a meaningful level for unsupported libs.
complevel = -1
self.complevel = complevel
"""The compression level (0 disables compression)."""
self.complib = complib
"""The compression filter used (irrelevant when compression is
not enabled).
"""
self.shuffle = shuffle
"""Whether the *Shuffle* filter is active or not."""
self.bitshuffle = bitshuffle
"""Whether the *BitShuffle* filter is active or not."""
if (self.complib and
self.bitshuffle and
not self.complib.startswith('blosc')):
raise ValueError("BitShuffle can only be used inside Blosc/Blosc2")
if self.shuffle and self.bitshuffle:
# BitShuffle has priority in case both are specified
self.shuffle = False
self.fletcher32 = fletcher32
"""Whether the *Fletcher32* filter is active or not."""
self.least_significant_digit = least_significant_digit
"""The least significant digit to which data shall be truncated."""
| (self, complevel=0, complib='zlib', shuffle=True, bitshuffle=False, fletcher32=False, least_significant_digit=None, _new=True) |
728,461 | tables.filters | __repr__ | null | def __repr__(self):
args = []
if self.complevel >= 0: # meaningful compression level
args.append(f'complevel={self.complevel}')
if self.complevel != 0: # compression enabled (-1 or > 0)
args.append(f'complib={self.complib!r}')
args.append(f'shuffle={self.shuffle}')
args.append(f'bitshuffle={self.bitshuffle}')
args.append(f'fletcher32={self.fletcher32}')
args.append(f'least_significant_digit={self.least_significant_digit}')
return f'{self.__class__.__name__}({", ".join(args)})'
| (self) |
728,463 | tables.filters | _pack | Pack the `Filters` object into a 64-bit NumPy integer. | def _pack(self):
"""Pack the `Filters` object into a 64-bit NumPy integer."""
packed = np.int64(0)
# Byte 3: least significant digit.
if self.least_significant_digit is not None:
# assert isinstance(self.least_significant_digit, np.int8)
packed |= self.least_significant_digit
packed <<= 8
# Byte 2: parameterless filters.
if self.shuffle:
packed |= _shuffle_flag
if self.bitshuffle:
packed |= _bitshuffle_flag
if self.fletcher32:
packed |= _fletcher32_flag
if self.least_significant_digit:
packed |= _rounding_flag
packed <<= 8
# Byte 1: compression library id (0 for none).
if self.complevel > 0:
packed |= all_complibs.index(self.complib) + 1
packed <<= 8
# Byte 0: compression level.
packed |= self.complevel
return packed
| (self) |
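The packed layout places the compression level in byte 0, the library id in byte 1, the parameterless-filter flags in byte 2, and the least significant digit in byte 3. A round-trip sketch, with the expected value taken from the ``_unpack`` doctests above (note that ``_pack``/``_unpack`` are private API)::

    import tables as tb

    f = tb.Filters(complevel=9, complib='zlib', shuffle=True, fletcher32=True)
    packed = f._pack()
    print(hex(int(packed)))                  # -> 0x30109, as in the doctests
    assert tb.Filters._unpack(packed) == f   # lossless round trip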
728,464 | tables.filters | copy | Get a copy of the filters, possibly overriding some arguments.
Constructor arguments to be overridden must be passed as keyword
arguments.
Using this method is recommended over replacing the attributes of an
instance, since instances of this class may become immutable in the
future::
>>> filters1 = Filters()
>>> filters2 = filters1.copy()
>>> filters1 == filters2
True
>>> filters1 is filters2
False
>>> filters3 = filters1.copy(complevel=1) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: compression library ``None`` is not supported...
>>> filters3 = filters1.copy(complevel=1, complib='zlib')
>>> print(filters1)
Filters(complevel=0, shuffle=False, bitshuffle=False, fletcher32=False, least_significant_digit=None)
>>> print(filters3)
Filters(complevel=1, complib='zlib', shuffle=False, bitshuffle=False, fletcher32=False, least_significant_digit=None)
>>> filters1.copy(foobar=42) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: ...__init__() got an unexpected keyword argument 'foobar'
| def copy(self, **override):
"""Get a copy of the filters, possibly overriding some arguments.
Constructor arguments to be overridden must be passed as keyword
arguments.
Using this method is recommended over replacing the attributes of an
instance, since instances of this class may become immutable in the
future::
>>> filters1 = Filters()
>>> filters2 = filters1.copy()
>>> filters1 == filters2
True
>>> filters1 is filters2
False
>>> filters3 = filters1.copy(complevel=1) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: compression library ``None`` is not supported...
>>> filters3 = filters1.copy(complevel=1, complib='zlib')
>>> print(filters1)
Filters(complevel=0, shuffle=False, bitshuffle=False, fletcher32=False, least_significant_digit=None)
>>> print(filters3)
Filters(complevel=1, complib='zlib', shuffle=False, bitshuffle=False, fletcher32=False, least_significant_digit=None)
>>> filters1.copy(foobar=42) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: ...__init__() got an unexpected keyword argument 'foobar'
"""
newargs = self.__dict__.copy()
newargs.update(override)
return self.__class__(**newargs)
| (self, **override) |
728,465 | tables.exceptions | FiltersWarning | Unavailable filters.
This warning is issued when a valid filter is specified but it is
not available in the system. It may mean that an available default
filter is to be used instead.
| class FiltersWarning(Warning):
"""Unavailable filters.
This warning is issued when a valid filter is specified but it is
not available in the system. It may mean that an available default
filter is to be used instead.
"""
pass
| null |
728,466 | tables.exceptions | FlavorError | Unsupported or unavailable flavor or flavor conversion.
This exception is raised when an unsupported or unavailable flavor
is given to a dataset, or when a conversion of data between two
given flavors is neither supported nor available.
| class FlavorError(ValueError):
"""Unsupported or unavailable flavor or flavor conversion.
This exception is raised when an unsupported or unavailable flavor
is given to a dataset, or when a conversion of data between two
given flavors is neither supported nor available.
"""
pass
| null |
728,467 | tables.exceptions | FlavorWarning | Unsupported or unavailable flavor conversion.
This warning is issued when a conversion of data between two given
flavors is neither supported nor available, and raising an error would
render the data inaccessible (e.g. on a dataset of an unavailable
flavor in a read-only file).
See the `FlavorError` class for more information.
| class FlavorWarning(Warning):
"""Unsupported or unavailable flavor conversion.
This warning is issued when a conversion of data between two given
flavors is neither supported nor available, and raising an error would
render the data inaccessible (e.g. on a dataset of an unavailable
flavor in a read-only file).
See the `FlavorError` class for more information.
"""
pass
| null |
728,468 | tables.atom | Float128Atom | Defines an atom of type ``float128``. | from tables.atom import Float128Atom
| (shape=(), dflt=0.0) |
728,470 | tables.atom | __init__ | null | def _create_numeric_class(baseclass, itemsize):
"""Create a numeric atom class with the given `baseclass` and an
`itemsize`."""
prefix = '%s%d' % (baseclass.prefix(), itemsize * 8)
type_ = prefix.lower()
classdict = {'itemsize': itemsize, 'type': type_,
'__doc__': "Defines an atom of type ``%s``." % type_}
def __init__(self, shape=(), dflt=baseclass._defvalue):
Atom.__init__(self, self.type, shape, dflt)
classdict['__init__'] = __init__
return type('%sAtom' % prefix, (baseclass,), classdict)
| (self, shape=(), dflt=0.0) |
728,476 | tables.description | Float128Col | Defines a non-nested column of a particular type.
The constructor accepts the same arguments as the equivalent
`Atom` class, plus an additional ``pos`` argument for
position information, which is assigned to the `_v_pos`
attribute, and an ``attrs`` argument for storing additional metadata
similar to `table.attrs`, which is assigned to the `_v_col_attrs`
attribute.
| from tables.description import Float128Col
| (*args, **kwargs) |
728,484 | tables.atom | Float16Atom | Defines an atom of type ``float16``. | from tables.atom import Float16Atom
| (shape=(), dflt=0.0) |
728,492 | tables.description | Float16Col | Defines a non-nested column of a particular type.
The constructor accepts the same arguments as the equivalent
`Atom` class, plus an additional ``pos`` argument for
position information, which is assigned to the `_v_pos`
attribute and an ``attrs`` argument for storing additional metadata
similar to `table.attrs`, which is assigned to the `_v_col_attrs`
attribute.
| from tables.description import Float16Col
| (*args, **kwargs) |
728,500 | tables.atom | Float32Atom | Defines an atom of type ``float32``. | from tables.atom import Float32Atom
| (shape=(), dflt=0.0) |
728,508 | tables.description | Float32Col | Defines a non-nested column of a particular type.
The constructor accepts the same arguments as the equivalent
`Atom` class, plus an additional ``pos`` argument for
position information, which is assigned to the `_v_pos`
attribute, and an ``attrs`` argument for storing additional metadata
similar to `table.attrs`, which is assigned to the `_v_col_attrs`
attribute.
| from tables.description import Float32Col
| (*args, **kwargs) |
728,516 | tables.atom | Float64Atom | Defines an atom of type ``float64``. | from tables.atom import Float64Atom
| (shape=(), dflt=0.0) |
728,524 | tables.description | Float64Col | Defines a non-nested column of a particular type.
The constructor accepts the same arguments as the equivalent
`Atom` class, plus an additional ``pos`` argument for
position information, which is assigned to the `_v_pos`
attribute, and an ``attrs`` argument for storing additional metadata
similar to `table.attrs`, which is assigned to the `_v_col_attrs`
attribute.
| from tables.description import Float64Col
| (*args, **kwargs) |
728,532 | tables.atom | FloatAtom | Defines an atom of a floating point type (float kind). | class FloatAtom(Atom):
"""Defines an atom of a floating point type (float kind)."""
kind = 'float'
_deftype = 'float64'
_defvalue = 0.0
__init__ = _abstract_atom_init(_deftype, _defvalue)
| (itemsize=8, shape=(), dflt=0.0) |
728,534 | tables.atom | __init__ | null | def _abstract_atom_init(deftype, defvalue):
"""Return a constructor for an abstract `Atom` class."""
defitemsize = split_type(deftype)[1]
def __init__(self, itemsize=defitemsize, shape=(), dflt=defvalue):
assert self.kind in atom_map
try:
atomclass = atom_map[self.kind][itemsize]
except KeyError:
raise _invalid_itemsize_error(self.kind, itemsize,
atom_map[self.kind])
self.__class__ = atomclass
atomclass.__init__(self, shape, dflt)
return __init__
| (self, itemsize=8, shape=(), dflt=0.0) |
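The practical effect of `_abstract_atom_init` is that constructing the abstract `FloatAtom` with an `itemsize` rebinds `self.__class__` to the matching concrete atom class. A quick check (assuming `tables` is installed):

```python
import tables

atom = tables.FloatAtom(itemsize=4)
# The abstract constructor dispatched to the concrete class.
print(type(atom).__name__)               # Float32Atom
print(atom.type, atom.shape, atom.dflt)  # float32 () 0.0
```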
728,540 | tables.description | FloatCol | Defines a non-nested column of a particular type.
The constructor accepts the same arguments as the equivalent
`Atom` class, plus an additional ``pos`` argument for
position information, which is assigned to the `_v_pos`
attribute, and an ``attrs`` argument for storing additional metadata
similar to `table.attrs`, which is assigned to the `_v_col_attrs`
attribute.
| from tables.description import FloatCol
| (*args, **kwargs) |
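A hedged sketch of `Col` classes in a table description, using `pos` to fix column order and `dflt` for defaults; the file and field names are invented for illustration.

```python
import tables

class Measurement(tables.IsDescription):
    sensor = tables.StringCol(16, pos=0)         # 16-byte string column
    value = tables.Float32Col(pos=1)             # float32 column
    weight = tables.Float64Col(dflt=1.0, pos=2)  # float64, default 1.0

with tables.open_file('measurements.h5', 'w') as f:
    table = f.create_table('/', 'readings', Measurement)
    table.append([('s1', 0.5, 1.0)])  # rows follow the ``pos`` order
```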
728,548 | tables.group | Group | Basic PyTables grouping structure.
Instances of this class are grouping structures containing *child*
instances of zero or more groups or leaves, together with
supporting metadata. Each group has exactly one *parent* group.
Working with groups and leaves is similar in many ways to working
with directories and files, respectively, in a Unix filesystem.
As with Unix directories and files, objects in the object tree are
often described by giving their full (or absolute) path names.
This full path can be specified either as a string (like in
'/group1/group2') or as a complete object path written in *natural
naming* schema (like in file.root.group1.group2).
A collateral effect of the *natural naming* schema is that the
names of members in the Group class and its instances must be
carefully chosen to avoid colliding with existing child node
names. For this reason and to avoid polluting the children
namespace all members in a Group start with some reserved prefix,
like _f_ (for public methods), _g_ (for private ones), _v_ (for
instance variables) or _c_ (for class variables). Any attempt to
create a new child node whose name starts with one of these
prefixes will raise a ValueError exception.
Another effect of natural naming is that children named after
Python keywords or having names not valid as Python identifiers
(e.g. class, $a or 44) cannot be accessed using the node.child
syntax. You will be forced to use node._f_get_child(child) to
access them (which is recommended for programmatic accesses).
You will also need to use _f_get_child() to access an existing
child node if you set a Python attribute in the Group with the
same name as that node (you will get a NaturalNameWarning when
doing this).
Parameters
----------
parentnode
The parent :class:`Group` object.
name : str
The name of this node in its parent group.
title
The title for this group
new
If this group is new or has to be read from disk
filters : Filters
A Filters instance
.. versionchanged:: 3.0
*parentNode* renamed into *parentnode*
Notes
-----
The following documentation includes methods that are automatically
called when a Group instance is accessed in a special way.
For instance, this class defines the __setattr__, __getattr__,
__delattr__ and __dir__ methods, and they set, get and delete
*ordinary Python attributes* as normally intended. In addition to that,
__getattr__ allows getting *child nodes* by their name for the sake of
easy interaction on the command line, as long as there is no Python
attribute with the same name. Groups also allow the interactive
completion (when using readline) of the names of child nodes.
For instance::
# get a Python attribute
nchild = group._v_nchildren
# Add a Table child called 'table' under 'group'.
h5file.create_table(group, 'table', myDescription)
table = group.table # get the table child instance
group.table = 'foo' # set a Python attribute
# (PyTables warns you here about using the name of a child node.)
foo = group.table # get a Python attribute
del group.table # delete a Python attribute
table = group.table # get the table child instance again
Additionally, on interactive Python sessions you may get autocompletions
of children named as *valid Python identifiers* by pressing the `[Tab]`
key, or by using the dir() global function.
.. rubric:: Group attributes
The following instance variables are provided in addition to those
in Node (see :ref:`NodeClassDescr`):
.. attribute:: _v_children
Dictionary with all nodes hanging from this group.
.. attribute:: _v_groups
Dictionary with all groups hanging from this group.
.. attribute:: _v_hidden
Dictionary with all hidden nodes hanging from this group.
.. attribute:: _v_leaves
Dictionary with all leaves hanging from this group.
.. attribute:: _v_links
Dictionary with all links hanging from this group.
.. attribute:: _v_unknown
Dictionary with all unknown nodes hanging from this group.
| class Group(hdf5extension.Group, Node):
"""Basic PyTables grouping structure.
Instances of this class are grouping structures containing *child*
instances of zero or more groups or leaves, together with
supporting metadata. Each group has exactly one *parent* group.
Working with groups and leaves is similar in many ways to working
with directories and files, respectively, in a Unix filesystem.
As with Unix directories and files, objects in the object tree are
often described by giving their full (or absolute) path names.
This full path can be specified either as a string (like in
'/group1/group2') or as a complete object path written in *natural
naming* schema (like in file.root.group1.group2).
A collateral effect of the *natural naming* schema is that the
names of members in the Group class and its instances must be
carefully chosen to avoid colliding with existing child node
names. For this reason and to avoid polluting the children
namespace all members in a Group start with some reserved prefix,
like _f_ (for public methods), _g_ (for private ones), _v_ (for
instance variables) or _c_ (for class variables). Any attempt to
create a new child node whose name starts with one of these
prefixes will raise a ValueError exception.
Another effect of natural naming is that children named after
Python keywords or having names not valid as Python identifiers
(e.g. class, $a or 44) cannot be accessed using the node.child
syntax. You will be forced to use node._f_get_child(child) to
access them (which is recommended for programmatic accesses).
You will also need to use _f_get_child() to access an existing
child node if you set a Python attribute in the Group with the
same name as that node (you will get a NaturalNameWarning when
doing this).
Parameters
----------
parentnode
The parent :class:`Group` object.
name : str
The name of this node in its parent group.
title
The title for this group
new
If this group is new or has to be read from disk
filters : Filters
A Filters instance
.. versionchanged:: 3.0
*parentNode* renamed into *parentnode*
Notes
-----
The following documentation includes methods that are automatically
called when a Group instance is accessed in a special way.
For instance, this class defines the __setattr__, __getattr__,
__delattr__ and __dir__ methods, and they set, get and delete
*ordinary Python attributes* as normally intended. In addition to that,
__getattr__ allows getting *child nodes* by their name for the sake of
easy interaction on the command line, as long as there is no Python
attribute with the same name. Groups also allow the interactive
completion (when using readline) of the names of child nodes.
For instance::
# get a Python attribute
nchild = group._v_nchildren
# Add a Table child called 'table' under 'group'.
h5file.create_table(group, 'table', myDescription)
table = group.table # get the table child instance
group.table = 'foo' # set a Python attribute
# (PyTables warns you here about using the name of a child node.)
foo = group.table # get a Python attribute
del group.table # delete a Python attribute
table = group.table # get the table child instance again
Additionally, on interactive Python sessions you may get autocompletions
of children named as *valid Python identifiers* by pressing the `[Tab]`
key, or by using the dir() global function.
.. rubric:: Group attributes
The following instance variables are provided in addition to those
in Node (see :ref:`NodeClassDescr`):
.. attribute:: _v_children
Dictionary with all nodes hanging from this group.
.. attribute:: _v_groups
Dictionary with all groups hanging from this group.
.. attribute:: _v_hidden
Dictionary with all hidden nodes hanging from this group.
.. attribute:: _v_leaves
Dictionary with all leaves hanging from this group.
.. attribute:: _v_links
Dictionary with all links hanging from this group.
.. attribute:: _v_unknown
Dictionary with all unknown nodes hanging from this group.
"""
# Class identifier.
_c_classid = 'GROUP'
# Children containers that should be loaded only in a lazy way.
# These are documented in the ``Group._g_add_children_names`` method.
_c_lazy_children_attrs = (
'__members__', '_v_children', '_v_groups', '_v_leaves',
'_v_links', '_v_unknown', '_v_hidden')
# `_v_nchildren` is a direct read-only shorthand
# for the number of *visible* children in a group.
def _g_getnchildren(self):
"""The number of children hanging from this group."""
return len(self._v_children)
_v_nchildren = property(_g_getnchildren)
# `_v_filters` is a direct read-write shorthand for the ``FILTERS``
# attribute with the default `Filters` instance as a default value.
def _g_getfilters(self):
filters = getattr(self._v_attrs, 'FILTERS', None)
if filters is None:
filters = Filters()
return filters
def _g_setfilters(self, value):
if not isinstance(value, Filters):
raise TypeError(
f"value is not an instance of `Filters`: {value!r}")
self._v_attrs.FILTERS = value
def _g_delfilters(self):
del self._v_attrs.FILTERS
_v_filters = property(
_g_getfilters, _g_setfilters, _g_delfilters,
"""Default filter properties for child nodes.
You can (and are encouraged to) use this property to get, set and
delete the FILTERS HDF5 attribute of the group, which stores a Filters
instance (see :ref:`FiltersClassDescr`). When the group has no such
attribute, a default Filters instance is used.
""")
def __init__(self, parentnode, name,
title="", new=False, filters=None,
_log=True):
# Remember to assign these values in the root group constructor
# if it does not use this one!
# First, set attributes belonging to group objects.
self._v_version = obversion
"""The object version of this group."""
self._v_new = new
"""Is this the first time the node has been created?"""
self._v_new_title = title
"""New title for this node."""
self._v_new_filters = filters
"""New default filter properties for child nodes."""
self._v_max_group_width = parentnode._v_file.params['MAX_GROUP_WIDTH']
"""Maximum number of children on each group before warning the user.
.. versionchanged:: 3.0
The *_v_maxGroupWidth* attribute has been renamed into
*_v_max_group_width*.
"""
# Finally, set up this object as a node.
super().__init__(parentnode, name, _log)
def _g_post_init_hook(self):
if self._v_new:
if self._v_file.params['PYTABLES_SYS_ATTRS']:
# Save some attributes for the new group on disk.
set_attr = self._v_attrs._g__setattr
# Set the title, class and version attributes.
set_attr('TITLE', self._v_new_title)
set_attr('CLASS', self._c_classid)
set_attr('VERSION', self._v_version)
# Set the default filter properties.
newfilters = self._v_new_filters
if newfilters is None:
# If no filters have been passed in the constructor,
# inherit them from the parent group, but only if they
# have been inherited or explicitly set.
newfilters = getattr(
self._v_parent._v_attrs, 'FILTERS', None)
if newfilters is not None:
set_attr('FILTERS', newfilters)
else:
# If the file has PyTables format, get the VERSION attr
if 'VERSION' in self._v_attrs._v_attrnamessys:
self._v_version = self._v_attrs.VERSION
else:
self._v_version = "0.0 (unknown)"
# We don't need to get more attributes from disk,
# since the most important ones are defined as properties.
def __del__(self):
if (self._v_isopen and
self._v_pathname in self._v_file._node_manager.registry and
'_v_children' in self.__dict__):
# The group is going to be killed. Rebuild weak references
# (that Python cancelled just before calling this method) so
# that they are still usable if the object is revived later.
selfref = weakref.ref(self)
self._v_children.containerref = selfref
self._v_groups.containerref = selfref
self._v_leaves.containerref = selfref
self._v_links.containerref = selfref
self._v_unknown.containerref = selfref
self._v_hidden.containerref = selfref
super().__del__()
def _g_get_child_group_class(self, childname):
"""Get the class of a not-yet-loaded group child.
`childname` must be the name of a *group* child.
"""
childCID = self._g_get_gchild_attr(childname, 'CLASS')
if childCID is not None and not isinstance(childCID, str):
childCID = childCID.decode('utf-8')
if childCID in class_id_dict:
return class_id_dict[childCID] # look up group class
else:
return Group # default group class
def _g_get_child_leaf_class(self, childname, warn=True):
"""Get the class of a not-yet-loaded leaf child.
`childname` must be the name of a *leaf* child. If the child
belongs to an unknown kind of leaf, or if its kind cannot be
guessed, `UnImplemented` will be returned and a warning will be
issued if `warn` is true.
"""
if self._v_file.params['PYTABLES_SYS_ATTRS']:
childCID = self._g_get_lchild_attr(childname, 'CLASS')
if childCID is not None and not isinstance(childCID, str):
childCID = childCID.decode('utf-8')
else:
childCID = None
if childCID in class_id_dict:
return class_id_dict[childCID] # look up leaf class
else:
# Unknown or no ``CLASS`` attribute, try a guess.
childCID2 = utilsextension.which_class(self._v_objectid, childname)
if childCID2 == 'UNSUPPORTED':
if warn:
if childCID is None:
warnings.warn(
"leaf ``%s`` is of an unsupported type; "
"it will become an ``UnImplemented`` node"
% self._g_join(childname))
else:
warnings.warn(
("leaf ``%s`` has an unknown class ID ``%s``; "
"it will become an ``UnImplemented`` node")
% (self._g_join(childname), childCID))
return UnImplemented
assert childCID2 in class_id_dict
return class_id_dict[childCID2] # look up leaf class
def _g_add_children_names(self):
"""Add children names to this group taking into account their
visibility and kind."""
mydict = self.__dict__
# The names of the lazy attributes
mydict['__members__'] = members = []
"""The names of visible children nodes for readline-style completion.
"""
mydict['_v_children'] = children = _ChildrenDict(self)
"""The number of children hanging from this group."""
mydict['_v_groups'] = groups = _ChildrenDict(self)
"""Dictionary with all groups hanging from this group."""
mydict['_v_leaves'] = leaves = _ChildrenDict(self)
"""Dictionary with all leaves hanging from this group."""
mydict['_v_links'] = links = _ChildrenDict(self)
"""Dictionary with all links hanging from this group."""
mydict['_v_unknown'] = unknown = _ChildrenDict(self)
"""Dictionary with all unknown nodes hanging from this group."""
mydict['_v_hidden'] = hidden = _ChildrenDict(self)
"""Dictionary with all hidden nodes hanging from this group."""
# Get the names of *all* child groups and leaves.
(group_names, leaf_names, link_names, unknown_names) = \
self._g_list_group(self._v_parent)
# Separate groups into visible groups and hidden nodes,
# and leaves into visible leaves and hidden nodes.
for (childnames, childdict) in ((group_names, groups),
(leaf_names, leaves),
(link_names, links),
(unknown_names, unknown)):
for childname in childnames:
# See whether the name implies that the node is hidden.
# (Assigned values are entirely irrelevant.)
if isvisiblename(childname):
# Visible node.
members.insert(0, childname)
children[childname] = None
childdict[childname] = None
else:
# Hidden node.
hidden[childname] = None
def _g_check_has_child(self, name):
"""Check whether 'name' is a children of 'self' and return its type."""
# Get the HDF5 name matching the PyTables name.
node_type = self._g_get_objinfo(name)
if node_type == "NoSuchNode":
raise NoSuchNodeError(
"group ``%s`` does not have a child named ``%s``"
% (self._v_pathname, name))
return node_type
def __iter__(self):
"""Iterate over the child nodes hanging directly from the group.
This iterator is *not* recursive.
Examples
--------
::
# Non-recursively list all the nodes hanging from '/detector'
print("Nodes in '/detector' group:")
for node in h5file.root.detector:
print(node)
"""
return self._f_iter_nodes()
def __contains__(self, name):
"""Is there a child with that `name`?
Returns a true value if the group has a child node (visible or
hidden) with the given `name` (a string), false otherwise.
"""
self._g_check_open()
try:
self._g_check_has_child(name)
except NoSuchNodeError:
return False
return True
def __getitem__(self, childname):
"""Return the (visible or hidden) child with that `name` ( a string).
Raise IndexError if child not exist.
"""
try:
return self._f_get_child(childname)
except NoSuchNodeError:
raise IndexError(childname)
def _f_walknodes(self, classname=None):
"""Iterate over descendant nodes.
This method recursively walks *self* top to bottom (preorder),
iterating over child groups in alphanumerical order, and yielding
nodes. If classname is supplied, only instances of the named class are
yielded.
If *classname* is Group, it behaves like :meth:`Group._f_walk_groups`,
yielding only groups. If you don't want a recursive behavior,
use :meth:`Group._f_iter_nodes` instead.
Examples
--------
::
# Recursively print all the arrays hanging from '/'
print("Arrays in the object tree '/':")
for array in h5file.root._f_walknodes('Array'):
print(array)
"""
self._g_check_open()
# For compatibility with old default arguments.
if classname == '':
classname = None
if classname == "Group":
# Recursive algorithm
yield from self._f_walk_groups()
else:
for group in self._f_walk_groups():
yield from group._f_iter_nodes(classname)
def _g_join(self, name):
"""Helper method to correctly concatenate a name child object with the
pathname of this group."""
if name == "/":
# This case can happen when doing copies
return self._v_pathname
return join_path(self._v_pathname, name)
def _g_width_warning(self):
"""Issue a :exc:`PerformanceWarning` on too many children."""
warnings.warn("""\
group ``%s`` is exceeding the recommended maximum number of children (%d); \
be ready to see PyTables asking for *lots* of memory and possibly slow I/O."""
% (self._v_pathname, self._v_max_group_width),
PerformanceWarning)
def _g_refnode(self, childnode, childname, validate=True):
"""Insert references to a `childnode` via a `childname`.
Checks that the `childname` is valid and does not exist, then
creates references to the given `childnode` by that `childname`.
The validation of the name can be omitted by setting `validate`
to a false value (this may be useful for adding already existing
nodes to the tree).
"""
# Check for name validity.
if validate:
check_name_validity(childname)
childnode._g_check_name(childname)
# Check if there is already a child with the same name.
# This can be triggered because of the user
# (via node construction or renaming/movement).
# Links are not checked here because they are copied and referenced
# using ``File.get_node`` so they already exist in `self`.
if (not isinstance(childnode, Link)) and childname in self:
raise NodeError(
"group ``%s`` already has a child node named ``%s``"
% (self._v_pathname, childname))
# Show a warning if there is an object attribute with that name.
if childname in self.__dict__:
warnings.warn(
"group ``%s`` already has an attribute named ``%s``; "
"you will not be able to use natural naming "
"to access the child node"
% (self._v_pathname, childname), NaturalNameWarning)
# Check group width limits.
if (len(self._v_children) + len(self._v_hidden) >=
self._v_max_group_width):
self._g_width_warning()
# Update members information.
# Insert references to the new child.
# (Assigned values are entirely irrelevant.)
if isvisiblename(childname):
# Visible node.
self.__members__.insert(0, childname) # enable completion
self._v_children[childname] = None # insert node
if isinstance(childnode, Unknown):
self._v_unknown[childname] = None
elif isinstance(childnode, Link):
self._v_links[childname] = None
elif isinstance(childnode, Leaf):
self._v_leaves[childname] = None
elif isinstance(childnode, Group):
self._v_groups[childname] = None
else:
# Hidden node.
self._v_hidden[childname] = None # insert node
def _g_unrefnode(self, childname):
"""Remove references to a node.
Removes all references to the named node.
"""
# This can *not* be triggered because of the user.
assert childname in self, \
("group ``%s`` does not have a child node named ``%s``"
% (self._v_pathname, childname))
# Update members information, if needed
if '_v_children' in self.__dict__:
if childname in self._v_children:
# Visible node.
members = self.__members__
member_index = members.index(childname)
del members[member_index] # disables completion
del self._v_children[childname] # remove node
self._v_unknown.pop(childname, None)
self._v_links.pop(childname, None)
self._v_leaves.pop(childname, None)
self._v_groups.pop(childname, None)
else:
# Hidden node.
del self._v_hidden[childname] # remove node
def _g_move(self, newparent, newname):
# Move the node to the new location.
oldpath = self._v_pathname
super()._g_move(newparent, newname)
newpath = self._v_pathname
# Update location information in children. This node shouldn't
# be affected since it has already been relocated.
self._v_file._update_node_locations(oldpath, newpath)
def _g_copy(self, newparent, newname, recursive, _log=True, **kwargs):
# Compute default arguments.
title = kwargs.get('title', self._v_title)
filters = kwargs.get('filters', None)
stats = kwargs.get('stats', None)
# Fix arguments with explicit None values for backwards compatibility.
if title is None:
title = self._v_title
# If no filters have been passed to the call, copy them from the
# source group, but only if inherited or explicitly set.
if filters is None:
filters = getattr(self._v_attrs, 'FILTERS', None)
# Create a copy of the object.
new_node = Group(newparent, newname,
title, new=True, filters=filters, _log=_log)
# Copy user attributes if needed.
if kwargs.get('copyuserattrs', True):
self._v_attrs._g_copy(new_node._v_attrs, copyclass=True)
# Update statistics if needed.
if stats is not None:
stats['groups'] += 1
if recursive:
# Copy child nodes if a recursive copy was requested.
# Some arguments should *not* be passed to children copy ops.
kwargs = kwargs.copy()
kwargs.pop('title', None)
self._g_copy_children(new_node, **kwargs)
return new_node
def _g_copy_children(self, newparent, **kwargs):
"""Copy child nodes.
Copies all nodes descending from this one into the specified
`newparent`. If the new parent has a child node with the same
name as one of the nodes in this group, the copy fails with a
`NodeError`, maybe resulting in a partial copy. Nothing is
logged.
"""
# Recursive version of children copy.
# for srcchild in self._v_children.itervalues():
# srcchild._g_copy_as_child(newparent, **kwargs)
# Non-recursive version of children copy.
use_hardlinks = kwargs.get('use_hardlinks', False)
if use_hardlinks:
address_map = kwargs.setdefault('address_map', {})
parentstack = [(self, newparent)] # [(source, destination), ...]
while parentstack:
(srcparent, dstparent) = parentstack.pop()
if use_hardlinks:
for srcchild in srcparent._v_children.values():
addr, rc = srcchild._get_obj_info()
if rc > 1 and addr in address_map:
where, name = address_map[addr][0]
localsrc = os.path.join(where, name)
dstparent._v_file.create_hard_link(dstparent,
srcchild.name,
localsrc)
address_map[addr].append(
(dstparent._v_pathname, srcchild.name)
)
# Update statistics if needed.
stats = kwargs.pop('stats', None)
if stats is not None:
stats['hardlinks'] += 1
else:
dstchild = srcchild._g_copy_as_child(dstparent,
**kwargs)
if isinstance(srcchild, Group):
parentstack.append((srcchild, dstchild))
if rc > 1:
address_map[addr] = [
(dstparent._v_pathname, srcchild.name)
]
else:
for srcchild in srcparent._v_children.values():
dstchild = srcchild._g_copy_as_child(dstparent, **kwargs)
if isinstance(srcchild, Group):
parentstack.append((srcchild, dstchild))
def _f_get_child(self, childname):
"""Get the child called childname of this group.
If the child exists (be it visible or not), it is returned. Else, a
NoSuchNodeError is raised.
Using this method is recommended over getattr() when doing programmatic
accesses to children if childname is unknown beforehand or when its
name is not a valid Python identifier.
"""
self._g_check_open()
self._g_check_has_child(childname)
childpath = join_path(self._v_pathname, childname)
return self._v_file._get_node(childpath)
def _f_list_nodes(self, classname=None):
"""Return a *list* with children nodes.
This is a list-returning version of :meth:`Group._f_iter_nodes()`.
"""
return list(self._f_iter_nodes(classname))
def _f_iter_nodes(self, classname=None):
"""Iterate over children nodes.
Child nodes are yielded alphanumerically sorted by node name. If the
name of a class derived from Node (see :ref:`NodeClassDescr`) is
supplied in the classname parameter, only instances of that class (or
subclasses of it) will be returned.
This is an iterator version of :meth:`Group._f_list_nodes`.
"""
self._g_check_open()
if not classname:
# Returns all the children alphanumerically sorted
for name in sorted(self._v_children):
yield self._v_children[name]
elif classname == 'Group':
# Returns all the groups alphanumerically sorted
for name in sorted(self._v_groups):
yield self._v_groups[name]
elif classname == 'Leaf':
# Returns all the leaves alphanumerically sorted
for name in sorted(self._v_leaves):
yield self._v_leaves[name]
elif classname == 'Link':
# Returns all the links alphanumerically sorted
for name in sorted(self._v_links):
yield self._v_links[name]
elif classname == 'IndexArray':
raise TypeError(
"listing ``IndexArray`` nodes is not allowed")
else:
class_ = get_class_by_name(classname)
for childname, childnode in sorted(self._v_children.items()):
if isinstance(childnode, class_):
yield childnode
def _f_walk_groups(self):
"""Recursively iterate over descendent groups (not leaves).
This method starts by yielding *self*, and then it goes on to
recursively iterate over all child groups in alphanumerical order, top
to bottom (preorder), following the same procedure.
"""
self._g_check_open()
stack = [self]
yield self
# Iterate over the descendants
while stack:
objgroup = stack.pop()
groupnames = sorted(objgroup._v_groups)
# Sort the groups before yielding them. Using the group names
# keeps the traversal order deterministic.
for groupname in groupnames:
# TODO: check recursion
stack.append(objgroup._v_groups[groupname])
yield objgroup._v_groups[groupname]
def __delattr__(self, name):
"""Delete a Python attribute called name.
This method only provides an extra warning in case the user
tries to delete a child node using __delattr__.
To remove a child node from this group use
:meth:`File.remove_node` or :meth:`Node._f_remove`. To delete
a PyTables node attribute use :meth:`File.del_node_attr`,
:meth:`Node._f_delattr` or :attr:`Node._v_attrs`.
If there is an attribute and a child node with the same name,
the child node will be made accessible again via natural naming.
"""
try:
super().__delattr__(name) # nothing particular
except AttributeError as ae:
hint = " (use ``node._f_remove()`` if you want to remove a node)"
raise ae.__class__(str(ae) + hint)
def __dir__(self):
"""Autocomplete only children named as valid python identifiers.
Only PY3 supports this special method.
"""
subnods = [c for c in self._v_children if c.isidentifier()]
return super().__dir__() + subnods
def __getattr__(self, name):
"""Get a Python attribute or child node called name.
If the node has a child node called name it is returned,
else an AttributeError is raised.
"""
if name in self._c_lazy_children_attrs:
self._g_add_children_names()
return self.__dict__[name]
return self._f_get_child(name)
def __setattr__(self, name, value):
"""Set a Python attribute called name with the given value.
This method stores an *ordinary Python attribute* in the object. It
does *not* store new child nodes under this group; for that, use the
File.create*() methods (see the File class
in :ref:`FileClassDescr`). Nor does it store a PyTables node
attribute; for that,
use :meth:`File.set_node_attr`, :meth:`Node._f_setattr`
or :attr:`Node._v_attrs`.
If there is already a child node with the same name, a
NaturalNameWarning will be issued and the child node will not be
accessible via natural naming nor getattr(). It will still be available
via :meth:`File.get_node`, :meth:`Group._f_get_child` and children
dictionaries in the group (if visible).
"""
# Show a warning if there is a child node with that name.
#
# ..note::
#
# Using ``if name in self:`` is not right since that would
# require ``_v_children`` and ``_v_hidden`` to be already set
# when the very first attribute assignments are made.
# Moreover, this warning is only concerned about clashes with
# names used in natural naming, i.e. those in ``__members__``.
#
# ..note::
#
# The check ``'__members__' in mydict`` allows attribute
# assignment to happen before calling `Group.__init__()`, by
# avoiding to look into the still not assigned ``__members__``
# attribute. This allows subclasses to set up some attributes
# and then call the constructor of the superclass. If the
# check above is disabled, that results in Python entering an
# endless loop on exit!
mydict = self.__dict__
if '__members__' in mydict and name in self.__members__:
warnings.warn(
"group ``%s`` already has a child node named ``%s``; "
"you will not be able to use natural naming "
"to access the child node"
% (self._v_pathname, name), NaturalNameWarning)
super().__setattr__(name, value)
def _f_flush(self):
"""Flush this Group."""
self._g_check_open()
self._g_flush_group()
def _g_close_descendents(self):
"""Close all the *loaded* descendent nodes of this group."""
node_manager = self._v_file._node_manager
node_manager.close_subtree(self._v_pathname)
def _g_close(self):
"""Close this (open) group."""
if self._v_isopen:
# hdf5extension operations:
# Close HDF5 group.
self._g_close_group()
# Close myself as a node.
super()._f_close()
def _f_close(self):
"""Close this group and all its descendents.
This method has the behavior described in :meth:`Node._f_close`.
It should be noted that this operation closes all the nodes
descending from this group.
You should not need to close nodes manually because they are
automatically opened/closed when they are loaded/evicted from
the integrated LRU cache.
"""
# If the group is already closed, return immediately
if not self._v_isopen:
return
# First, close all the descendants of this group, unless a) the
# group is being deleted (evicted from LRU cache) or b) the node
# is being closed during an aborted creation, in which cases
# this is not an explicit close issued by the user.
if not (self._v__deleting or self._v_objectid is None):
self._g_close_descendents()
# When all the descendants have been closed, close this group.
# This is done at the end because some nodes may still need to
# be loaded during the closing process; thus this node must be
# open until the very end.
self._g_close()
def _g_remove(self, recursive=False, force=False):
"""Remove (recursively if needed) the Group.
This version correctly handles both visible and hidden nodes.
"""
if self._v_nchildren > 0:
if not (recursive or force):
raise NodeError("group ``%s`` has child nodes; "
"please set `recursive` or `force` to true "
"to remove it"
% (self._v_pathname,))
# First close all the descendents hanging from this group,
# so that it is not possible to use a node that no longer exists.
self._g_close_descendents()
# Remove the node itself from the hierarchy.
super()._g_remove(recursive, force)
def _f_copy(self, newparent=None, newname=None,
overwrite=False, recursive=False, createparents=False,
**kwargs):
"""Copy this node and return the new one.
This method has the behavior described in :meth:`Node._f_copy`.
In addition, it recognizes the following keyword arguments:
Parameters
----------
title
The new title for the destination. If omitted or None, the
original title is used. This only applies to the topmost
node in recursive copies.
filters : Filters
Specifying this parameter overrides the original filter
properties in the source node. If specified, it must be an
instance of the Filters class (see :ref:`FiltersClassDescr`).
The default is to copy the filter properties from the source
node.
copyuserattrs
You can prevent the user attributes from being copied by setting
this parameter to False. The default is to copy them.
stats
This argument may be used to collect statistics on the copy
process. When used, it should be a dictionary with keys 'groups',
'leaves', 'links' and 'bytes' having a numeric value. Their values
will be incremented to reflect the number of groups, leaves, links and
bytes, respectively, that have been copied during the operation.
"""
return super()._f_copy(
newparent, newname,
overwrite, recursive, createparents, **kwargs)
def _f_copy_children(self, dstgroup, overwrite=False, recursive=False,
createparents=False, **kwargs):
"""Copy the children of this group into another group.
Children hanging directly from this group are copied into dstgroup,
which can be a Group (see :ref:`GroupClassDescr`) object or its
pathname in string form. If createparents is true, any missing parent
groups in the given destination path will be created.
The operation will fail with a NodeError if there is a child node
in the destination group with the same name as one of the copied
children from this one, unless overwrite is true; in this case,
the former child node is recursively removed before copying the
latter.
By default, nodes descending from children groups of this node
are not copied. If the recursive argument is true, all descendant
nodes of this node are recursively copied.
Additional keyword arguments may be passed to customize the
copying process. For instance, title and filters may be changed,
user attributes may be or may not be copied, data may be sub-sampled,
stats may be collected, etc. Arguments unknown to nodes are simply
ignored. Check the documentation for copying operations of nodes to
see which options they support.
"""
self._g_check_open()
# `dstgroup` is used instead of its path to avoid accepting
# `Node` objects when `createparents` is true. Also, note that
# there is no risk of creating parent nodes and failing later
# because of destination nodes already existing.
dstparent = self._v_file._get_or_create_path(dstgroup, createparents)
self._g_check_group(dstparent) # Is it a group?
if not overwrite:
# Abort as early as possible when destination nodes exist
# and overwriting is not enabled.
for childname in self._v_children:
if childname in dstparent:
raise NodeError(
"destination group ``%s`` already has "
"a node named ``%s``; "
"you may want to use the ``overwrite`` argument"
% (dstparent._v_pathname, childname))
use_hardlinks = kwargs.get('use_hardlinks', False)
if use_hardlinks:
address_map = kwargs.setdefault('address_map', {})
for child in self._v_children.values():
addr, rc = child._get_obj_info()
if rc > 1 and addr in address_map:
where, name = address_map[addr][0]
localsrc = os.path.join(where, name)
dstparent._v_file.create_hard_link(dstparent, child.name,
localsrc)
address_map[addr].append(
(dstparent._v_pathname, child.name)
)
# Update statistics if needed.
stats = kwargs.pop('stats', None)
if stats is not None:
stats['hardlinks'] += 1
else:
child._f_copy(dstparent, None, overwrite, recursive,
**kwargs)
if rc > 1:
address_map[addr] = [
(dstparent._v_pathname, child.name)
]
else:
for child in self._v_children.values():
child._f_copy(dstparent, None, overwrite, recursive, **kwargs)
def __str__(self):
"""Return a short string representation of the group.
Examples
--------
::
>>> import tables
>>> f = tables.open_file('tables/tests/Tables_lzo2.h5')
>>> print(f.root.group0)
/group0 (Group) ''
>>> f.close()
"""
return (f"{self._v_pathname} ({self.__class__.__name__}) "
f"{self._v_title!r}")
def __repr__(self):
"""Return a detailed string representation of the group.
Examples
--------
::
>>> import tables
>>> f = tables.open_file('tables/tests/Tables_lzo2.h5')
>>> f.root.group0
/group0 (Group) ''
children := ['group1' (Group), 'tuple1' (Table)]
>>> f.close()
"""
rep = [
f'{childname!r} ({child.__class__.__name__})'
for (childname, child) in self._v_children.items()
]
return f'{self!s}\n children := [{", ".join(rep)}]'
| (parentnode, name, title='', new=False, filters=None, _log=True) |
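A short sketch of the natural-naming behavior described above; the file and node names are illustrative.

```python
import tables

with tables.open_file('tree_demo.h5', 'w') as f:
    det = f.create_group('/', 'detector')
    f.create_array(det, 'readout', [1, 2, 3])
    # Natural naming and the explicit accessor reach the same node.
    a = f.root.detector.readout
    b = det._f_get_child('readout')
    print(a._v_pathname == b._v_pathname)  # True
    # Binding ``det.readout = 'oops'`` here would trigger a
    # NaturalNameWarning and shadow the child for natural naming.
```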
728,549 | tables.group | __contains__ | Is there a child with that `name`?
Returns a true value if the group has a child node (visible or
hidden) with the given `name` (a string), false otherwise.
| def __contains__(self, name):
"""Is there a child with that `name`?
Returns a true value if the group has a child node (visible or
hidden) with the given `name` (a string), false otherwise.
"""
self._g_check_open()
try:
self._g_check_has_child(name)
except NoSuchNodeError:
return False
return True
| (self, name) |
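Usage sketch: membership tests cover both visible and hidden children (names below are invented).

```python
import tables

with tables.open_file('contains_demo.h5', 'w') as f:
    g = f.create_group('/', 'detector')
    f.create_array(g, 'readout', [1, 2, 3])
    print('readout' in f.root.detector)  # True
    print('missing' in f.root.detector)  # False
```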
728,550 | tables.group | __del__ | null | def __del__(self):
if (self._v_isopen and
self._v_pathname in self._v_file._node_manager.registry and
'_v_children' in self.__dict__):
# The group is going to be killed. Rebuild weak references
# (that Python cancelled just before calling this method) so
# that they are still usable if the object is revived later.
selfref = weakref.ref(self)
self._v_children.containerref = selfref
self._v_groups.containerref = selfref
self._v_leaves.containerref = selfref
self._v_links.containerref = selfref
self._v_unknown.containerref = selfref
self._v_hidden.containerref = selfref
super().__del__()
| (self) |
728,551 | tables.group | __delattr__ | Delete a Python attribute called name.
This method only provides an extra warning in case the user
tries to delete a child node using __delattr__.
To remove a child node from this group use
:meth:`File.remove_node` or :meth:`Node._f_remove`. To delete
a PyTables node attribute use :meth:`File.del_node_attr`,
:meth:`Node._f_delattr` or :attr:`Node._v_attrs`.
If there is an attribute and a child node with the same name,
the child node will be made accessible again via natural naming.
| def __delattr__(self, name):
"""Delete a Python attribute called name.
This method only provides an extra warning in case the user
tries to delete a child node using __delattr__.
To remove a child node from this group use
:meth:`File.remove_node` or :meth:`Node._f_remove`. To delete
a PyTables node attribute use :meth:`File.del_node_attr`,
:meth:`Node._f_delattr` or :attr:`Node._v_attrs`.
If there is an attribute and a child node with the same name,
the child node will be made accessible again via natural naming.
"""
try:
super().__delattr__(name) # nothing particular
except AttributeError as ae:
hint = " (use ``node._f_remove()`` if you want to remove a node)"
raise ae.__class__(str(ae) + hint)
| (self, name) |
728,552 | tables.group | __dir__ | Autocomplete only children named as valid python identifiers.
Only PY3 supports this special method.
| def __dir__(self):
"""Autocomplete only children named as valid python identifiers.
Only PY3 supports this special method.
"""
subnods = [c for c in self._v_children if c.isidentifier()]
return super().__dir__() + subnods
| (self) |
728,553 | tables.group | __getattr__ | Get a Python attribute or child node called name.
If the node has a child node called name it is returned,
else an AttributeError is raised.
| def __getattr__(self, name):
"""Get a Python attribute or child node called name.
If the node has a child node called name it is returned,
else an AttributeError is raised.
"""
if name in self._c_lazy_children_attrs:
self._g_add_children_names()
return self.__dict__[name]
return self._f_get_child(name)
| (self, name) |
728,554 | tables.group | __getitem__ | Return the (visible or hidden) child with that `name` (a string).
Raises IndexError if the child does not exist.
| def __getitem__(self, childname):
"""Return the (visible or hidden) child with that `name` ( a string).
Raise IndexError if child not exist.
"""
try:
return self._f_get_child(childname)
except NoSuchNodeError:
raise IndexError(childname)
| (self, childname) |
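Usage sketch for the mapping-style access; note how the NoSuchNodeError is translated into IndexError.

```python
import tables

with tables.open_file('getitem_demo.h5', 'w') as f:
    g = f.create_group('/', 'detector')
    f.create_array(g, 'readout', [1, 2, 3])
    node = f.root.detector['readout']   # same node as _f_get_child()
    try:
        f.root.detector['missing']
    except IndexError as exc:
        print('no such child:', exc)
```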
728,555 | tables.group | __init__ | null | def __init__(self, parentnode, name,
title="", new=False, filters=None,
_log=True):
# Remember to assign these values in the root group constructor
# if it does not use this one!
# First, set attributes belonging to group objects.
self._v_version = obversion
"""The object version of this group."""
self._v_new = new
"""Is this the first time the node has been created?"""
self._v_new_title = title
"""New title for this node."""
self._v_new_filters = filters
"""New default filter properties for child nodes."""
self._v_max_group_width = parentnode._v_file.params['MAX_GROUP_WIDTH']
"""Maximum number of children on each group before warning the user.
.. versionchanged:: 3.0
The *_v_maxGroupWidth* attribute has been renamed into
*_v_max_group_width*.
"""
# Finally, set up this object as a node.
super().__init__(parentnode, name, _log)
| (self, parentnode, name, title='', new=False, filters=None, _log=True) |
728,556 | tables.group | __iter__ | Iterate over the child nodes hanging directly from the group.
This iterator is *not* recursive.
Examples
--------
::
# Non-recursively list all the nodes hanging from '/detector'
print("Nodes in '/detector' group:")
for node in h5file.root.detector:
print(node)
| def __iter__(self):
"""Iterate over the child nodes hanging directly from the group.
This iterator is *not* recursive.
Examples
--------
::
# Non-recursively list all the nodes hanging from '/detector'
print("Nodes in '/detector' group:")
for node in h5file.root.detector:
print(node)
"""
return self._f_iter_nodes()
| (self) |
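A non-recursive iteration sketch (invented names); children come back alphanumerically sorted because the iterator delegates to `_f_iter_nodes()`.

```python
import tables

with tables.open_file('iter_demo.h5', 'w') as f:
    g = f.create_group('/', 'detector')
    f.create_array(g, 'b', [2])
    f.create_array(g, 'a', [1])
    for node in f.root.detector:  # yields 'a' then 'b'
        print(node._v_name)
```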
728,557 | tables.group | __repr__ | Return a detailed string representation of the group.
Examples
--------
::
>>> import tables
>>> f = tables.open_file('tables/tests/Tables_lzo2.h5')
>>> f.root.group0
/group0 (Group) ''
children := ['group1' (Group), 'tuple1' (Table)]
>>> f.close()
| def __repr__(self):
rep = [f'{childname!r} ({child.__class__.__name__})'
for (childname, child) in self._v_children.items()]
return f'{self!s}\n children := [{", ".join(rep)}]'
| (self) |
728,558 | tables.group | __setattr__ | Set a Python attribute called name with the given value.
This method stores an *ordinary Python attribute* in the object. It
does *not* store new child nodes under this group; for that, use the
File.create*() methods (see the File class
in :ref:`FileClassDescr`). Nor does it store a PyTables node
attribute; for that,
use :meth:`File.set_node_attr`, :meth:`Node._f_setattr`
or :attr:`Node._v_attrs`.
If there is already a child node with the same name, a
NaturalNameWarning will be issued and the child node will not be
accessible via natural naming nor getattr(). It will still be available
via :meth:`File.get_node`, :meth:`Group._f_get_child` and children
dictionaries in the group (if visible).
| def __setattr__(self, name, value):
"""Set a Python attribute called name with the given value.
This method stores an *ordinary Python attribute* in the object. It
does *not* store new child nodes under this group; for that, use the
File.create*() methods (see the File class
in :ref:`FileClassDescr`). Nor does it store a PyTables node
attribute; for that,
use :meth:`File.set_node_attr`, :meth:`Node._f_setattr`
or :attr:`Node._v_attrs`.
If there is already a child node with the same name, a
NaturalNameWarning will be issued and the child node will not be
accessible via natural naming nor getattr(). It will still be available
via :meth:`File.get_node`, :meth:`Group._f_get_child` and children
dictionaries in the group (if visible).
"""
# Show a warning if there is a child node with that name.
#
# ..note::
#
# Using ``if name in self:`` is not right since that would
# require ``_v_children`` and ``_v_hidden`` to be already set
# when the very first attribute assignments are made.
# Moreover, this warning is only concerned about clashes with
# names used in natural naming, i.e. those in ``__members__``.
#
# ..note::
#
# The check ``'__members__' in mydict`` allows attribute
# assignment to happen before calling `Group.__init__()`, by
# avoiding to look into the still not assigned ``__members__``
# attribute. This allows subclasses to set up some attributes
# and then call the constructor of the superclass. If the
# check above is disabled, that results in Python entering an
# endless loop on exit!
mydict = self.__dict__
if '__members__' in mydict and name in self.__members__:
warnings.warn(
"group ``%s`` already has a child node named ``%s``; "
"you will not be able to use natural naming "
"to access the child node"
% (self._v_pathname, name), NaturalNameWarning)
super().__setattr__(name, value)
| (self, name, value) |
728,559 | tables.group | __str__ | Return a short string representation of the group.
Examples
--------
::
>>> import tables
>>> f = tables.open_file('tables/tests/Tables_lzo2.h5')
>>> print(f.root.group0)
/group0 (Group) ''
>>> f.close()
| def __str__(self):
return (f"{self._v_pathname} ({self.__class__.__name__}) "
f"{self._v_title!r}")
| (self) |
728,560 | tables.group | _f_close | Close this group and all its descendants.
This method has the behavior described in :meth:`Node._f_close`.
It should be noted that this operation closes all the nodes
descending from this group.
You should not need to close nodes manually because they are
automatically opened/closed when they are loaded/evicted from
the integrated LRU cache.
| def _f_close(self):
"""Close this group and all its descendents.
This method has the behavior described in :meth:`Node._f_close`.
It should be noted that this operation closes all the nodes
descending from this group.
You should not need to close nodes manually because they are
automatically opened/closed when they are loaded/evicted from
the integrated LRU cache.
"""
# If the group is already closed, return immediately
if not self._v_isopen:
return
# First, close all the descendants of this group, unless a) the
# group is being deleted (evicted from LRU cache) or b) the node
# is being closed during an aborted creation, in which cases
# this is not an explicit close issued by the user.
if not (self._v__deleting or self._v_objectid is None):
self._g_close_descendents()
# When all the descendants have been closed, close this group.
# This is done at the end because some nodes may still need to
# be loaded during the closing process; thus this node must be
# open until the very end.
self._g_close()
| (self) |
728,561 | tables.group | _f_copy | Copy this node and return the new one.
This method has the behavior described in :meth:`Node._f_copy`.
In addition, it recognizes the following keyword arguments:
Parameters
----------
title
The new title for the destination. If omitted or None, the
original title is used. This only applies to the topmost
node in recursive copies.
filters : Filters
Specifying this parameter overrides the original filter
properties in the source node. If specified, it must be an
instance of the Filters class (see :ref:`FiltersClassDescr`).
The default is to copy the filter properties from the source
node.
copyuserattrs
You can prevent the user attributes from being copied by setting
this parameter to False. The default is to copy them.
stats
This argument may be used to collect statistics on the copy
process. When used, it should be a dictionary with keys 'groups',
'leaves', 'links' and 'bytes' having a numeric value. Their values
will be incremented to reflect the number of groups, leaves, links and
bytes, respectively, that have been copied during the operation.
| def _f_copy(self, newparent=None, newname=None,
overwrite=False, recursive=False, createparents=False,
**kwargs):
"""Copy this node and return the new one.
This method has the behavior described in :meth:`Node._f_copy`.
In addition, it recognizes the following keyword arguments:
Parameters
----------
title
The new title for the destination. If omitted or None, the
original title is used. This only applies to the topmost
node in recursive copies.
filters : Filters
Specifying this parameter overrides the original filter
properties in the source node. If specified, it must be an
instance of the Filters class (see :ref:`FiltersClassDescr`).
The default is to copy the filter properties from the source
node.
copyuserattrs
You can prevent the user attributes from being copied by setting
this parameter to False. The default is to copy them.
stats
This argument may be used to collect statistics on the copy
process. When used, it should be a dictionary with keys 'groups',
'leaves', 'links' and 'bytes' having a numeric value. Their values
will be incremented to reflect the number of groups, leaves, links and
bytes, respectively, that have been copied during the operation.
"""
return super()._f_copy(
newparent, newname,
overwrite, recursive, createparents, **kwargs)
| (self, newparent=None, newname=None, overwrite=False, recursive=False, createparents=False, **kwargs) |
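A hedged sketch of a recursive copy that collects statistics through the `stats` keyword documented above; file and node names are invented.

```python
import tables

with tables.open_file('copy_demo.h5', 'w') as f:
    src = f.create_group('/', 'src')
    f.create_array(src, 'data', [1, 2, 3])
    stats = {'groups': 0, 'leaves': 0, 'links': 0, 'bytes': 0}
    f.root.src._f_copy(f.root, 'dst', recursive=True, stats=stats)
    # One group and one leaf were copied.
    print(stats['groups'], stats['leaves'])
```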
728,562 | tables.group | _f_copy_children | Copy the children of this group into another group.
Children hanging directly from this group are copied into dstgroup,
which can be a Group (see :ref:`GroupClassDescr`) object or its
pathname in string form. If createparents is true, any missing parent
groups in the given destination path will be created.
The operation will fail with a NodeError if there is a child node
in the destination group with the same name as one of the copied
children from this one, unless overwrite is true; in this case,
the former child node is recursively removed before copying the
latter.
By default, nodes descending from children groups of this node
are not copied. If the recursive argument is true, all descendant
nodes of this node are recursively copied.
Additional keyword arguments may be passed to customize the
copying process. For instance, title and filters may be changed,
user attributes may be or may not be copied, data may be sub-sampled,
stats may be collected, etc. Arguments unknown to nodes are simply
ignored. Check the documentation for copying operations of nodes to
see which options they support.
| def _f_copy_children(self, dstgroup, overwrite=False, recursive=False,
createparents=False, **kwargs):
"""Copy the children of this group into another group.
Children hanging directly from this group are copied into dstgroup,
which can be a Group (see :ref:`GroupClassDescr`) object or its
pathname in string form. If createparents is true, any missing parent
groups in the given destination path will be created.
The operation will fail with a NodeError if there is a child node
in the destination group with the same name as one of the copied
children from this one, unless overwrite is true; in this case,
the former child node is recursively removed before copying the
latter.
By default, nodes descending from children groups of this node
are not copied. If the recursive argument is true, all descendant
nodes of this node are recursively copied.
Additional keyword arguments may be passed to customize the
copying process. For instance, title and filters may be changed,
user attributes may be or may not be copied, data may be sub-sampled,
stats may be collected, etc. Arguments unknown to nodes are simply
ignored. Check the documentation for copying operations of nodes to
see which options they support.
"""
self._g_check_open()
# `dstgroup` is used instead of its path to avoid accepting
# `Node` objects when `createparents` is true. Also, note that
# there is no risk of creating parent nodes and failing later
# because of destination nodes already existing.
dstparent = self._v_file._get_or_create_path(dstgroup, createparents)
self._g_check_group(dstparent) # Is it a group?
if not overwrite:
# Abort as early as possible when destination nodes exist
# and overwriting is not enabled.
for childname in self._v_children:
if childname in dstparent:
raise NodeError(
"destination group ``%s`` already has "
"a node named ``%s``; "
"you may want to use the ``overwrite`` argument"
% (dstparent._v_pathname, childname))
use_hardlinks = kwargs.get('use_hardlinks', False)
if use_hardlinks:
address_map = kwargs.setdefault('address_map', {})
for child in self._v_children.values():
addr, rc = child._get_obj_info()
if rc > 1 and addr in address_map:
where, name = address_map[addr][0]
localsrc = os.path.join(where, name)
dstparent._v_file.create_hard_link(dstparent, child.name,
localsrc)
address_map[addr].append(
(dstparent._v_pathname, child.name)
)
# Update statistics if needed.
stats = kwargs.pop('stats', None)
if stats is not None:
stats['hardlinks'] += 1
else:
child._f_copy(dstparent, None, overwrite, recursive,
**kwargs)
if rc > 1:
address_map[addr] = [
(dstparent._v_pathname, child.name)
]
else:
for child in self._v_children.values():
child._f_copy(dstparent, None, overwrite, recursive, **kwargs)
| (self, dstgroup, overwrite=False, recursive=False, createparents=False, **kwargs) |
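Usage sketch (invented names): copy every direct child of one group into another.

```python
import tables

with tables.open_file('copy_children_demo.h5', 'w') as f:
    src = f.create_group('/', 'src')
    f.create_array(src, 'data', [1, 2, 3])
    dst = f.create_group('/', 'backup')
    src._f_copy_children(dst, recursive=True)
    print('data' in f.root.backup)  # True
```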
728,564 | tables.group | _f_flush | Flush this Group. | def _f_flush(self):
"""Flush this Group."""
self._g_check_open()
self._g_flush_group()
| (self) |
728,565 | tables.group | _f_get_child | Get the child called childname of this group.
If the child exists (be it visible or not), it is returned. Else, a
NoSuchNodeError is raised.
Using this method is recommended over getattr() when doing programmatic
accesses to children if childname is unknown beforehand or when its
name is not a valid Python identifier.
| def _f_get_child(self, childname):
"""Get the child called childname of this group.
If the child exists (be it visible or not), it is returned. Else, a
NoSuchNodeError is raised.
Using this method is recommended over getattr() when doing programmatic
accesses to children if childname is unknown beforehand or when its
name is not a valid Python identifier.
"""
self._g_check_open()
self._g_check_has_child(childname)
childpath = join_path(self._v_pathname, childname)
return self._v_file._get_node(childpath)
| (self, childname) |
728,568 | tables.group | _f_iter_nodes | Iterate over child nodes.
Child nodes are yielded alphanumerically sorted by node name. If the
name of a class derived from Node (see :ref:`NodeClassDescr`) is
supplied in the classname parameter, only instances of that class (or
subclasses of it) will be returned.
This is an iterator version of :meth:`Group._f_list_nodes`.
| def _f_iter_nodes(self, classname=None):
"""Iterate over children nodes.
Child nodes are yielded alphanumerically sorted by node name. If the
name of a class derived from Node (see :ref:`NodeClassDescr`) is
supplied in the classname parameter, only instances of that class (or
subclasses of it) will be returned.
This is an iterator version of :meth:`Group._f_list_nodes`.
"""
self._g_check_open()
if not classname:
# Returns all the children alphanumerically sorted
for name in sorted(self._v_children):
yield self._v_children[name]
elif classname == 'Group':
# Returns all the groups alphanumerically sorted
for name in sorted(self._v_groups):
yield self._v_groups[name]
elif classname == 'Leaf':
# Returns all the leaves alphanumerically sorted
for name in sorted(self._v_leaves):
yield self._v_leaves[name]
elif classname == 'Link':
# Returns all the links alphanumerically sorted
for name in sorted(self._v_links):
yield self._v_links[name]
elif classname == 'IndexArray':
raise TypeError(
"listing ``IndexArray`` nodes is not allowed")
else:
class_ = get_class_by_name(classname)
for childname, childnode in sorted(self._v_children.items()):
if isinstance(childnode, class_):
yield childnode
| (self, classname=None) |
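A quick sketch of class-filtered iteration; the file layout here is illustrative only:

import tables

with tables.open_file("iter_demo.h5", "w") as h5f:
    h5f.create_group("/", "g1")
    h5f.create_array("/", "arr", [0, 1])
    # Only Group instances are yielded, alphanumerically by name.
    for group in h5f.root._f_iter_nodes(classname="Group"):
        print(group._v_pathname)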
728,569 | tables.group | _f_list_nodes | Return a *list* of child nodes.
This is a list-returning version of :meth:`Group._f_iter_nodes()`.
| def _f_list_nodes(self, classname=None):
"""Return a *list* with children nodes.
This is a list-returning version of :meth:`Group._f_iter_nodes()`.
"""
return list(self._f_iter_nodes(classname))
| (self, classname=None) |
728,574 | tables.group | _f_walk_groups | Recursively iterate over descendant groups (not leaves).
This method starts by yielding *self*, and then it goes on to
recursively iterate over all child groups in alphanumerical order, top
to bottom (preorder), following the same procedure.
| def _f_walk_groups(self):
"""Recursively iterate over descendent groups (not leaves).
This method starts by yielding *self*, and then it goes on to
recursively iterate over all child groups in alphanumerical order, top
to bottom (preorder), following the same procedure.
"""
self._g_check_open()
stack = [self]
yield self
# Iterate over the descendants
while stack:
objgroup = stack.pop()
groupnames = sorted(objgroup._v_groups)
        # Deliver the child groups in alphanumerical (sorted) order.
for groupname in groupnames:
# TODO: check recursion
stack.append(objgroup._v_groups[groupname])
yield objgroup._v_groups[groupname]
| (self) |
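A minimal traversal sketch with a hypothetical tree:

import tables

with tables.open_file("walk_demo.h5", "w") as h5f:
    a = h5f.create_group("/", "a")
    h5f.create_group(a, "inner")
    h5f.create_group("/", "b")
    # '/' is yielded first, followed by every descendant group.
    for group in h5f.root._f_walk_groups():
        print(group._v_pathname)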
728,575 | tables.group | _f_walknodes | Iterate over descendant nodes.
This method recursively walks *self* top to bottom (preorder),
iterating over child groups in alphanumerical order, and yielding
nodes. If classname is supplied, only instances of the named class are
yielded.
If *classname* is Group, it behaves like :meth:`Group._f_walk_groups`,
yielding only groups. If you don't want a recursive behavior,
use :meth:`Group._f_iter_nodes` instead.
Examples
--------
::
# Recursively print all the arrays hanging from '/'
print("Arrays in the object tree '/':")
for array in h5file.root._f_walknodes('Array'):
print(array)
| def _f_walknodes(self, classname=None):
"""Iterate over descendant nodes.
This method recursively walks *self* top to bottom (preorder),
iterating over child groups in alphanumerical order, and yielding
nodes. If classname is supplied, only instances of the named class are
yielded.
If *classname* is Group, it behaves like :meth:`Group._f_walk_groups`,
yielding only groups. If you don't want a recursive behavior,
use :meth:`Group._f_iter_nodes` instead.
Examples
--------
::
# Recursively print all the arrays hanging from '/'
print("Arrays in the object tree '/':")
    for array in h5file.root._f_walknodes('Array'):
print(array)
"""
self._g_check_open()
# For compatibility with old default arguments.
if classname == '':
classname = None
if classname == "Group":
# Recursive algorithm
yield from self._f_walk_groups()
else:
for group in self._f_walk_groups():
yield from group._f_iter_nodes(classname)
| (self, classname=None) |
728,576 | tables.group | _g_add_children_names | Add the names of this group's children, taking their
visibility and kind into account. | def _g_add_children_names(self):
    """Add the names of this group's children, taking their
    visibility and kind into account."""
mydict = self.__dict__
# The names of the lazy attributes
mydict['__members__'] = members = []
"""The names of visible children nodes for readline-style completion.
"""
mydict['_v_children'] = children = _ChildrenDict(self)
"""The number of children hanging from this group."""
mydict['_v_groups'] = groups = _ChildrenDict(self)
"""Dictionary with all groups hanging from this group."""
mydict['_v_leaves'] = leaves = _ChildrenDict(self)
"""Dictionary with all leaves hanging from this group."""
mydict['_v_links'] = links = _ChildrenDict(self)
"""Dictionary with all links hanging from this group."""
mydict['_v_unknown'] = unknown = _ChildrenDict(self)
"""Dictionary with all unknown nodes hanging from this group."""
mydict['_v_hidden'] = hidden = _ChildrenDict(self)
"""Dictionary with all hidden nodes hanging from this group."""
# Get the names of *all* child groups and leaves.
(group_names, leaf_names, link_names, unknown_names) = \
self._g_list_group(self._v_parent)
# Separate groups into visible groups and hidden nodes,
# and leaves into visible leaves and hidden nodes.
for (childnames, childdict) in ((group_names, groups),
(leaf_names, leaves),
(link_names, links),
(unknown_names, unknown)):
for childname in childnames:
            # Decide from the name alone whether the node is hidden.
            # (The values assigned below are placeholders; only the
            # keys matter.)
if isvisiblename(childname):
# Visible node.
members.insert(0, childname)
children[childname] = None
childdict[childname] = None
else:
# Hidden node.
hidden[childname] = None
| (self) |
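The visible/hidden split above is decided purely by the node name. A standalone illustration using the same ``isvisiblename`` predicate that the method calls (assuming it is importable from tables.path, as the group module's own imports suggest):

from tables.path import isvisiblename

# Hidden PyTables names start with reserved prefixes such as '_i_'.
names = ["table1", "_i_table1", "group2"]
visible = [n for n in names if isvisiblename(n)]
hidden = [n for n in names if not isvisiblename(n)]
print(visible)  # expected: ['table1', 'group2']
print(hidden)   # expected: ['_i_table1']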
728,578 | tables.group | _g_check_has_child | Check whether 'name' is a child of 'self' and return its type. | def _g_check_has_child(self, name):
    """Check whether 'name' is a child of 'self' and return its type."""
# Get the HDF5 name matching the PyTables name.
node_type = self._g_get_objinfo(name)
if node_type == "NoSuchNode":
raise NoSuchNodeError(
"group ``%s`` does not have a child named ``%s``"
% (self._v_pathname, name))
return node_type
| (self, name) |
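From user code this check surfaces as a NoSuchNodeError, which is re-exported at the top-level tables namespace; a hypothetical handling sketch:

import tables

with tables.open_file("check_demo.h5", "w") as h5f:
    try:
        h5f.root._f_get_child("missing")
    except tables.NoSuchNodeError as exc:
        print("no such child:", exc)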
728,582 | tables.group | _g_close | Close this (open) group. | def _g_close(self):
"""Close this (open) group."""
if self._v_isopen:
# hdf5extension operations:
# Close HDF5 group.
self._g_close_group()
# Close myself as a node.
super()._f_close()
| (self) |
728,583 | tables.group | _g_close_descendents | Close all the *loaded* descendant nodes of this group. | def _g_close_descendents(self):
    """Close all the *loaded* descendant nodes of this group."""
node_manager = self._v_file._node_manager
node_manager.close_subtree(self._v_pathname)
| (self) |
728,584 | tables.group | _g_copy | null | def _g_copy(self, newparent, newname, recursive, _log=True, **kwargs):
# Compute default arguments.
title = kwargs.get('title', self._v_title)
filters = kwargs.get('filters', None)
stats = kwargs.get('stats', None)
# Fix arguments with explicit None values for backwards compatibility.
if title is None:
title = self._v_title
    # If no filters have been passed to the call, copy them from the
    # source group, but only if they were inherited or explicitly set.
if filters is None:
filters = getattr(self._v_attrs, 'FILTERS', None)
# Create a copy of the object.
new_node = Group(newparent, newname,
title, new=True, filters=filters, _log=_log)
# Copy user attributes if needed.
if kwargs.get('copyuserattrs', True):
self._v_attrs._g_copy(new_node._v_attrs, copyclass=True)
# Update statistics if needed.
if stats is not None:
stats['groups'] += 1
if recursive:
# Copy child nodes if a recursive copy was requested.
# Some arguments should *not* be passed to children copy ops.
kwargs = kwargs.copy()
kwargs.pop('title', None)
self._g_copy_children(new_node, **kwargs)
return new_node
| (self, newparent, newname, recursive, _log=True, **kwargs) |
728,586 | tables.group | _g_copy_children | Copy child nodes.
Copies all nodes descending from this one into the specified
`newparent`. If the new parent has a child node with the same
name as one of the nodes in this group, the copy fails with a
`NodeError`, possibly resulting in a partial copy. Nothing is
logged.
| def _g_copy_children(self, newparent, **kwargs):
"""Copy child nodes.
Copies all nodes descending from this one into the specified
`newparent`. If the new parent has a child node with the same
name as one of the nodes in this group, the copy fails with a
    `NodeError`, possibly resulting in a partial copy. Nothing is
logged.
"""
# Recursive version of children copy.
    # for srcchild in self._v_children.values():
# srcchild._g_copy_as_child(newparent, **kwargs)
# Non-recursive version of children copy.
use_hardlinks = kwargs.get('use_hardlinks', False)
if use_hardlinks:
address_map = kwargs.setdefault('address_map', {})
parentstack = [(self, newparent)] # [(source, destination), ...]
while parentstack:
(srcparent, dstparent) = parentstack.pop()
if use_hardlinks:
for srcchild in srcparent._v_children.values():
addr, rc = srcchild._get_obj_info()
if rc > 1 and addr in address_map:
where, name = address_map[addr][0]
localsrc = os.path.join(where, name)
dstparent._v_file.create_hard_link(dstparent,
srcchild.name,
localsrc)
address_map[addr].append(
(dstparent._v_pathname, srcchild.name)
)
# Update statistics if needed.
                    stats = kwargs.get('stats', None)
if stats is not None:
stats['hardlinks'] += 1
else:
dstchild = srcchild._g_copy_as_child(dstparent,
**kwargs)
if isinstance(srcchild, Group):
parentstack.append((srcchild, dstchild))
if rc > 1:
address_map[addr] = [
(dstparent._v_pathname, srcchild.name)
]
else:
for srcchild in srcparent._v_children.values():
dstchild = srcchild._g_copy_as_child(dstparent, **kwargs)
if isinstance(srcchild, Group):
parentstack.append((srcchild, dstchild))
| (self, newparent, **kwargs) |
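The ``address_map`` bookkeeping above is what deduplicates multiply-referenced nodes: the first time an HDF5 address with a reference count above 1 is copied, its destination path is recorded, and every later occurrence becomes a hard link to that first copy. A simplified, self-contained sketch of the idea (the tuples and paths are stand-ins, not the PyTables API):

# Stand-in children: (name, hdf5_address, refcount). The real code
# obtains the last two values from Node._get_obj_info().
children = [("a", 0x10, 2), ("b", 0x20, 1), ("a_link", 0x10, 2)]

address_map = {}            # hdf5 address -> first destination path
copies, links = [], []
for name, addr, rc in children:
    if rc > 1 and addr in address_map:
        # Already copied once: record a hard link instead.
        links.append((name, address_map[addr]))
    else:
        copies.append(name)               # full copy
        if rc > 1:
            address_map[addr] = "/dst/" + name
print(copies)  # ['a', 'b']
print(links)   # [('a_link', '/dst/a')]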
728,588 | tables.group | _g_delfilters | null | def _g_delfilters(self):
del self._v_attrs.FILTERS
| (self) |
728,589 | tables.group | _g_get_child_group_class | Get the class of a not-yet-loaded group child.
`childname` must be the name of a *group* child.
| def _g_get_child_group_class(self, childname):
"""Get the class of a not-yet-loaded group child.
`childname` must be the name of a *group* child.
"""
childCID = self._g_get_gchild_attr(childname, 'CLASS')
if childCID is not None and not isinstance(childCID, str):
childCID = childCID.decode('utf-8')
if childCID in class_id_dict:
return class_id_dict[childCID] # look up group class
else:
return Group # default group class
| (self, childname) |
728,590 | tables.group | _g_get_child_leaf_class | Get the class of a not-yet-loaded leaf child.
`childname` must be the name of a *leaf* child. If the child
belongs to an unknown kind of leaf, or if its kind cannot be
guessed, `UnImplemented` will be returned and a warning will be
issued if `warn` is true.
| def _g_get_child_leaf_class(self, childname, warn=True):
"""Get the class of a not-yet-loaded leaf child.
`childname` must be the name of a *leaf* child. If the child
    belongs to an unknown kind of leaf, or if its kind cannot be
guessed, `UnImplemented` will be returned and a warning will be
issued if `warn` is true.
"""
if self._v_file.params['PYTABLES_SYS_ATTRS']:
childCID = self._g_get_lchild_attr(childname, 'CLASS')
if childCID is not None and not isinstance(childCID, str):
childCID = childCID.decode('utf-8')
else:
childCID = None
if childCID in class_id_dict:
return class_id_dict[childCID] # look up leaf class
else:
# Unknown or no ``CLASS`` attribute, try a guess.
childCID2 = utilsextension.which_class(self._v_objectid, childname)
if childCID2 == 'UNSUPPORTED':
if warn:
if childCID is None:
warnings.warn(
"leaf ``%s`` is of an unsupported type; "
"it will become an ``UnImplemented`` node"
% self._g_join(childname))
else:
warnings.warn(
("leaf ``%s`` has an unknown class ID ``%s``; "
"it will become an ``UnImplemented`` node")
% (self._g_join(childname), childCID))
return UnImplemented
assert childCID2 in class_id_dict
return class_id_dict[childCID2] # look up leaf class
| (self, childname, warn=True) |
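The fallback chain above (stored CLASS attribute first, then a guess from the HDF5 object, then UnImplemented) is a plain dictionary dispatch; the mapping below is an illustrative subset, not the real class_id_dict:

class Table: ...
class UnImplemented: ...

class_id_dict = {"TABLE": Table}   # illustrative subset

def resolve_leaf_class(class_id, guessed_id):
    # Prefer the stored CLASS attribute, then fall back to the guess.
    if class_id in class_id_dict:
        return class_id_dict[class_id]
    if guessed_id == "UNSUPPORTED":
        return UnImplemented
    return class_id_dict.get(guessed_id, UnImplemented)

print(resolve_leaf_class("TABLE", None))        # Table
print(resolve_leaf_class(None, "UNSUPPORTED"))  # UnImplemented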
728,591 | tables.group | _g_getfilters | null | def _g_getfilters(self):
filters = getattr(self._v_attrs, 'FILTERS', None)
if filters is None:
filters = Filters()
return filters
| (self) |
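This getter backs the public Group._v_filters property: it returns the group's FILTERS attribute when present and a default Filters() instance otherwise. A quick hypothetical check:

import tables

with tables.open_file("filters_demo.h5", "w") as h5f:
    g = h5f.create_group("/", "g",
                         filters=tables.Filters(complevel=5))
    print(g._v_filters)         # the explicitly set filters
    print(h5f.root._v_filters)  # defaults when none were set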
728,592 | tables.group | _g_getnchildren | The number of children hanging from this group. | def _g_getnchildren(self):
"""The number of children hanging from this group."""
return len(self._v_children)
| (self) |