index | package | name | docstring | code | signature |
---|---|---|---|---|---|
728,158 | tables.table | __init__ | null | def __init__(self, table, desc):
myDict = self.__dict__
myDict['_v__tableFile'] = table._v_file
myDict['_v__tablePath'] = table._v_pathname
myDict['_v_desc'] = desc
myDict['_v_colnames'] = desc._v_names
myDict['_v_colpathnames'] = table.description._v_pathnames
# Put the column in the local dictionary
for name in desc._v_names:
if name in desc._v_types:
myDict[name] = Column(table, name, desc)
else:
myDict[name] = Cols(table, desc._v_colobjects[name])
| (self, table, desc) |
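The constructor above is what powers natural naming on ``table.cols``: every top-level name in the description becomes either a ``Column`` attribute (plain column) or a nested ``Cols`` attribute. A minimal sketch of that behaviour, using hypothetical file and column names and the standard ``tables.open_file``/``create_table`` API::

    import tables as tb

    class Particle(tb.IsDescription):
        name = tb.StringCol(16)              # plain column -> Column accessor
        class coords(tb.IsDescription):      # nested column -> Cols accessor
            x = tb.Float64Col()
            y = tb.Float64Col()

    with tb.open_file("demo.h5", mode="w") as h5:
        table = h5.create_table("/", "particles", Particle)
        print(type(table.cols.name))         # Column
        print(type(table.cols.coords))       # Cols (nested)
        print(type(table.cols.coords.x))     # Column reached through the nested Cols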
728,159 | tables.table | __len__ | Get the number of top level columns in table. | def __len__(self):
"""Get the number of top level columns in table."""
return len(self._v_colnames)
| (self) |
728,160 | tables.table | __repr__ | A detailed string representation for this object. | def __repr__(self):
"""A detailed string representation for this object."""
lines = [f'{self!s}']
for name in self._v_colnames:
# Get this class name
classname = getattr(self, name).__class__.__name__
# The type
if name in self._v_desc._v_dtypes:
tcol = self._v_desc._v_dtypes[name]
# The shape for this column
shape = (self._v_table.nrows,) + \
self._v_desc._v_dtypes[name].shape
else:
tcol = "Description"
# Description doesn't have a shape currently
shape = ()
lines.append(f" {name} ({classname}{shape}, {tcol})")
return '\n'.join(lines) + '\n'
| (self) |
728,161 | tables.table | __setitem__ | Set a row or a range of rows in a table or nested column.
If key argument is an integer, the corresponding row is set to
value. If key is a slice, the range of rows determined by it is set to
value.
Examples
--------
::
table.cols[4] = record
table.cols.Info[4:1000:2] = recarray
Those statements are equivalent to::
table.modify_rows(4, rows=record)
table.modify_column(4, 1000, 2, colname='Info', column=recarray)
Here you can see how a mix of natural naming, indexing and slicing
can be used as shorthands for the :meth:`Table.modify_rows` and
:meth:`Table.modify_column` methods.
| def __setitem__(self, key, value):
"""Set a row or a range of rows in a table or nested column.
If key argument is an integer, the corresponding row is set to
value. If key is a slice, the range of rows determined by it is set to
value.
Examples
--------
::
table.cols[4] = record
table.cols.Info[4:1000:2] = recarray
Those statements are equivalent to::
table.modify_rows(4, rows=record)
table.modify_column(4, 1000, 2, colname='Info', column=recarray)
Here you can see how a mix of natural naming, indexing and slicing
can be used as shorthands for the :meth:`Table.modify_rows` and
:meth:`Table.modify_column` methods.
"""
table = self._v_table
nrows = table.nrows
if is_idx(key):
key = operator.index(key)
# Index out of range protection
if key >= nrows:
raise IndexError("Index out of range")
if key < 0:
# To support negative values
key += nrows
(start, stop, step) = table._process_range(key, key + 1, 1)
elif isinstance(key, slice):
(start, stop, step) = table._process_range(
key.start, key.stop, key.step)
else:
raise TypeError(f"invalid index or slice: {key!r}")
# Actually modify the correct columns
colgroup = self._v_desc._v_pathname
if colgroup == "": # The root group
table.modify_rows(start, stop, step, rows=value)
else:
table.modify_column(
start, stop, step, colname=colgroup, column=value)
| (self, key, value) |
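As the docstring notes, item assignment on a ``Cols`` instance is shorthand for ``Table.modify_rows`` (at the root description) or ``Table.modify_column`` (inside a nested column). A small sketch of the root-level case, with hypothetical file and column names::

    import tables as tb

    with tb.open_file("rows.h5", mode="w") as h5:
        table = h5.create_table("/", "t", {"a": tb.Int32Col(), "b": tb.Float64Col()})
        table.append([(i, float(i)) for i in range(10)])
        # Overwrite row 4 through the Cols accessor ...
        table.cols[4] = [(40, 40.0)]
        # ... which is the same as calling modify_rows() directly:
        table.modify_rows(4, rows=[(40, 40.0)])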
728,162 | tables.table | __str__ | The string representation for this object. | def __str__(self):
"""The string representation for this object."""
# The pathname
descpathname = self._v_desc._v_pathname
if descpathname:
descpathname = "." + descpathname
return (f"{self._v__tablePath}.cols{descpathname} "
f"({self.__class__.__name__}), "
f"{len(self._v_colnames)} columns")
| (self) |
728,163 | tables.table | _f_col | Get an accessor to the column colname.
This method returns a Column instance (see :ref:`ColumnClassDescr`) if
the requested column is not nested, and a Cols instance (see
:ref:`ColsClassDescr`) if it is. You may use full column pathnames in
colname.
Calling cols._f_col('col1/col2') is equivalent to using cols.col1.col2.
However, the first syntax is more intended for programmatic use. It is
also better if you want to access columns with names that are not valid
Python identifiers.
| def _f_col(self, colname):
"""Get an accessor to the column colname.
This method returns a Column instance (see :ref:`ColumnClassDescr`) if
the requested column is not nested, and a Cols instance (see
:ref:`ColsClassDescr`) if it is. You may use full column pathnames in
colname.
Calling cols._f_col('col1/col2') is equivalent to using cols.col1.col2.
However, the first syntax is more intended for programmatic use. It is
also better if you want to access columns with names that are not valid
Python identifiers.
"""
if not isinstance(colname, str):
raise TypeError("Parameter can only be an string. You passed "
"object: %s" % colname)
if ((colname.find('/') > -1 and
colname not in self._v_colpathnames) and
colname not in self._v_colnames):
raise KeyError(("Cols accessor ``%s.cols%s`` does not have a "
"column named ``%s``")
% (self._v__tablePath, self._v_desc._v_pathname,
colname))
return self._g_col(colname)
| (self, colname) |
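``_f_col()`` is the programmatic counterpart of natural naming, and the practical way to reach columns whose names are not valid Python identifiers. A short sketch, assuming ``table`` is a Table with the hypothetical ``name`` column and nested ``coords`` column from the earlier sketch::

    col_name = table.cols._f_col('name')        # the same accessor as table.cols.name
    nested   = table.cols._f_col('coords')      # a Cols instance for the nested column
    col_x    = table.cols._f_col('coords/x')    # equivalent to table.cols.coords.x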
728,164 | tables.table | _g_close | null | def _g_close(self):
# First, close the columns (ie possible indices open)
for col in self._v_colnames:
colobj = self._g_col(col)
if isinstance(colobj, Column):
colobj.close()
# Delete the reference to column
del self.__dict__[col]
else:
colobj._g_close()
self.__dict__.clear()
| (self) |
728,165 | tables.table | _g_col | Like `self._f_col()` but it does not check arguments. | def _g_col(self, colname):
"""Like `self._f_col()` but it does not check arguments."""
# Get the Column or Description object
inames = colname.split('/')
cols = self
for iname in inames:
cols = cols.__dict__[iname]
return cols
| (self, colname) |
728,166 | tables.table | _g_update_table_location | Updates the location information about the associated `table`. | def _g_update_table_location(self, table):
"""Updates the location information about the associated `table`."""
myDict = self.__dict__
myDict['_v__tableFile'] = table._v_file
myDict['_v__tablePath'] = table._v_pathname
# Update the locations in individual columns.
for colname in self._v_colnames:
myDict[colname]._g_update_table_location(table)
| (self, table) |
728,167 | tables.table | Column | Accessor for a non-nested column in a table.
Each instance of this class is associated with one *non-nested* column of a
table. These instances are mainly used to read and write data from the
table columns using item access (like the Cols class - see
:ref:`ColsClassDescr`), but there are a few other associated methods to
deal with indexes.
.. rubric:: Column attributes
.. attribute:: descr
The Description (see :ref:`DescriptionClassDescr`) instance of the
parent table or nested column.
.. attribute:: name
The name of the associated column.
.. attribute:: pathname
The complete pathname of the associated column (the same as
Column.name if the column is not inside a nested column).
.. attribute:: attrs
Column attributes (see :ref:`ColClassDescr`).
Parameters
----------
table
The parent table instance
name
The name of the column that is associated with this object
descr
The parent description object
| class Column:
"""Accessor for a non-nested column in a table.
Each instance of this class is associated with one *non-nested* column of a
table. These instances are mainly used to read and write data from the
table columns using item access (like the Cols class - see
:ref:`ColsClassDescr`), but there are a few other associated methods to
deal with indexes.
.. rubric:: Column attributes
.. attribute:: descr
The Description (see :ref:`DescriptionClassDescr`) instance of the
parent table or nested column.
.. attribute:: name
The name of the associated column.
.. attribute:: pathname
The complete pathname of the associated column (the same as
Column.name if the column is not inside a nested column).
.. attribute:: attrs
Column attributes (see :ref:`ColClassDescr`).
Parameters
----------
table
The parent table instance
name
The name of the column that is associated with this object
descr
The parent description object
"""
@lazyattr
def dtype(self):
"""The NumPy dtype that most closely matches this column."""
return self.descr._v_dtypes[self.name].base # Get rid of shape info
@lazyattr
def type(self):
"""The PyTables type of the column (a string)."""
return self.descr._v_types[self.name]
@property
def table(self):
"""The parent Table instance (see :ref:`TableClassDescr`)."""
return self._table_file._get_node(self._table_path)
@property
def index(self):
"""The Index instance (see :ref:`IndexClassDescr`) associated with this
column (None if the column is not indexed)."""
indexPath = _index_pathname_of_column_(self._table_path, self.pathname)
try:
index = self._table_file._get_node(indexPath)
except NodeError:
index = None # The column is not indexed
return index
@lazyattr
def _itemtype(self):
return self.descr._v_dtypes[self.name]
@property
def shape(self):
"""The shape of this column."""
return (self.table.nrows,) + self.descr._v_dtypes[self.name].shape
@property
def is_indexed(self):
"""True if the column is indexed, false otherwise."""
if self.index is None:
return False
else:
return True
@property
def maindim(self):
""""The dimension along which iterators work. Its value is 0 (i.e. the
first dimension)."""
return 0
def __init__(self, table, name, descr):
self._table_file = table._v_file
self._table_path = table._v_pathname
self.name = name
"""The name of the associated column."""
self.pathname = descr._v_colobjects[name]._v_pathname
"""The complete pathname of the associated column (the same as
Column.name if the column is not inside a nested column)."""
self.descr = descr
"""The Description (see :ref:`DescriptionClassDescr`) instance of the
parent table or nested column."""
self._v_attrs = ColumnAttributeSet(self)
def _g_update_table_location(self, table):
"""Updates the location information about the associated `table`."""
self._table_file = table._v_file
self._table_path = table._v_pathname
def __len__(self):
"""Get the number of elements in the column.
This matches the length in rows of the parent table.
"""
return self.table.nrows
def __getitem__(self, key):
"""Get a row or a range of rows from a column.
If key argument is an integer, the corresponding element in the column
is returned as an object of the current flavor. If key is a slice, the
range of elements determined by it is returned as an array of the
current flavor.
Examples
--------
::
print("Column handlers:")
for name in table.colnames:
print(table.cols._f_col(name))
print("Select table.cols.name[1]-->", table.cols.name[1])
print("Select table.cols.name[1:2]-->", table.cols.name[1:2])
print("Select table.cols.name[:]-->", table.cols.name[:])
print("Select table.cols._f_col('name')[:]-->",
table.cols._f_col('name')[:])
The output of this for a certain arbitrary table is::
Column handlers:
/table.cols.name (Column(), string, idx=None)
/table.cols.lati (Column(), int32, idx=None)
/table.cols.longi (Column(), int32, idx=None)
/table.cols.vector (Column(2,), int32, idx=None)
/table.cols.matrix2D (Column(2, 2), float64, idx=None)
Select table.cols.name[1]--> Particle: 11
Select table.cols.name[1:2]--> ['Particle: 11']
Select table.cols.name[:]--> ['Particle: 10'
'Particle: 11' 'Particle: 12'
'Particle: 13' 'Particle: 14']
Select table.cols._f_col('name')[:]--> ['Particle: 10'
'Particle: 11' 'Particle: 12'
'Particle: 13' 'Particle: 14']
See the :file:`examples/table2.py` file for a more complete example.
"""
table = self.table
# Generalized key support not there yet, but at least allow
# for a tuple with one single element (the main dimension).
# (key,) --> key
if isinstance(key, tuple) and len(key) == 1:
key = key[0]
if is_idx(key):
key = operator.index(key)
# Index out of range protection
if key >= table.nrows:
raise IndexError("Index out of range")
if key < 0:
# To support negative values
key += table.nrows
(start, stop, step) = table._process_range(key, key + 1, 1)
return table.read(start, stop, step, self.pathname)[0]
elif isinstance(key, slice):
(start, stop, step) = table._process_range(
key.start, key.stop, key.step)
return table.read(start, stop, step, self.pathname)
else:
raise TypeError(
"'%s' key type is not valid in this context" % key)
def __iter__(self):
"""Iterate through all items in the column."""
table = self.table
itemsize = self.dtype.itemsize
nrowsinbuf = table._v_file.params['IO_BUFFER_SIZE'] // itemsize
buf = np.empty((nrowsinbuf, ), self._itemtype)
max_row = len(self)
for start_row in range(0, len(self), nrowsinbuf):
end_row = min(start_row + nrowsinbuf, max_row)
buf_slice = buf[0:end_row - start_row]
table.read(start_row, end_row, 1, field=self.pathname,
out=buf_slice)
yield from buf_slice
def __setitem__(self, key, value):
"""Set a row or a range of rows in a column.
If key argument is an integer, the corresponding element is set to
value. If key is a slice, the range of elements determined by it is
set to value.
Examples
--------
::
# Modify row 1
table.cols.col1[1] = -1
# Modify rows 1 and 3
table.cols.col1[1::2] = [2,3]
Which is equivalent to::
# Modify row 1
table.modify_columns(start=1, columns=[[-1]], names=['col1'])
# Modify rows 1 and 3
columns = np.rec.fromarrays([[2,3]], formats='i4')
table.modify_columns(start=1, step=2, columns=columns,
names=['col1'])
"""
table = self.table
table._v_file._check_writable()
# Generalized key support not there yet, but at least allow
# for a tuple with one single element (the main dimension).
# (key,) --> key
if isinstance(key, tuple) and len(key) == 1:
key = key[0]
if is_idx(key):
key = operator.index(key)
# Index out of range protection
if key >= table.nrows:
raise IndexError("Index out of range")
if key < 0:
# To support negative values
key += table.nrows
return table.modify_column(key, key + 1, 1,
[[value]], self.pathname)
elif isinstance(key, slice):
(start, stop, step) = table._process_range(
key.start, key.stop, key.step)
return table.modify_column(start, stop, step,
value, self.pathname)
else:
raise ValueError("Non-valid index or slice: %s" % key)
def create_index(self, optlevel=6, kind="medium", filters=None,
tmp_dir=None, _blocksizes=None, _testmode=False,
_verbose=False):
"""Create an index for this column.
.. warning::
In some situations it is useful to get a completely sorted
index (CSI). For those cases, it is best to use the
:meth:`Column.create_csindex` method instead.
Parameters
----------
optlevel : int
The optimization level for building the index. The level ranges
from 0 (no optimization) up to 9 (maximum optimization). Higher
levels of optimization mean better chances for reducing the entropy
of the index at the price of using more CPU, memory and I/O
resources for creating the index.
kind : str
The kind of the index to be built. It can take the 'ultralight',
'light', 'medium' or 'full' values. Lighter kinds ('ultralight'
and 'light') mean that the index takes less space on disk, but will
perform queries slower. Heavier kinds ('medium' and 'full') mean
better chances for reducing the entropy of the index (increasing
the query speed) at the price of using more disk space as well as
more CPU, memory and I/O resources for creating the index.
Note that selecting a full kind with an optlevel of 9 (the maximum)
guarantees the creation of an index with zero entropy, that is, a
completely sorted index (CSI) - provided that the number of rows in
the table does not exceed the 2**48 figure (that is, more than 100
trillion rows). See the :meth:`Column.create_csindex` method for a
more direct way to create a CSI index.
filters : Filters
Specify the Filters instance used to compress the index. If None,
default index filters will be used (currently, zlib level 1 with
shuffling).
tmp_dir
When kind is other than 'ultralight', a temporary file is created
during the index build process. You can use the tmp_dir argument
to specify the directory for this temporary file. The default is
to create it in the same directory as the file containing the
original table.
"""
kinds = ['ultralight', 'light', 'medium', 'full']
if kind not in kinds:
raise ValueError("Kind must have any of these values: %s" % kinds)
if (not isinstance(optlevel, int) or
(optlevel < 0 or optlevel > 9)):
raise ValueError("Optimization level must be an integer in the "
"range 0-9")
if filters is None:
filters = default_index_filters
if tmp_dir is None:
tmp_dir = str(Path(self._table_file.filename).parent)
else:
if not Path(tmp_dir).is_dir():
raise ValueError(
f"Temporary directory '{tmp_dir}' does not exist"
)
if (_blocksizes is not None and
(not isinstance(_blocksizes, tuple) or len(_blocksizes) != 4)):
raise ValueError("_blocksizes must be a tuple with exactly 4 "
"elements")
idxrows = _column__create_index(self, optlevel, kind, filters,
tmp_dir, _blocksizes, _verbose)
return SizeType(idxrows)
def create_csindex(self, filters=None, tmp_dir=None,
_blocksizes=None, _testmode=False, _verbose=False):
"""Create a completely sorted index (CSI) for this column.
This method guarantees the creation of an index with zero entropy, that
is, a completely sorted index (CSI) -- provided that the number of rows
in the table does not exceed the 2**48 figure (that is, more than 100
trillion rows). A CSI index is needed for some table methods (like
:meth:`Table.itersorted` or :meth:`Table.read_sorted`) in order to
ensure completely sorted results.
For the meaning of filters and tmp_dir arguments see
:meth:`Column.create_index`.
Notes
-----
This method is equivalent to
Column.create_index(optlevel=9, kind='full', ...).
"""
return self.create_index(
kind='full', optlevel=9, filters=filters, tmp_dir=tmp_dir,
_blocksizes=_blocksizes, _testmode=_testmode, _verbose=_verbose)
def _do_reindex(self, dirty):
"""Common code for reindex() and reindex_dirty() codes."""
index = self.index
# Guard the unindexed case first: touching ``index.dirty`` on a None
# index would fail, and the callers promise that nothing happens when
# the column is not indexed.
if index is None:
return SizeType(0)  # The column is not intended for indexing
if dirty and not index.dirty:
return SizeType(0)  # The index is already clean; nothing to do
self._table_file._check_writable()
# Get the old index parameters
kind = index.kind
optlevel = index.optlevel
filters = index.filters
# We *need* to tell the index that it is going to be undirty.
# This is needed here so as to unnail() the condition cache.
index.dirty = False
# Delete the existing Index
index._f_remove()
# Create a new Index with the previous parameters
return SizeType(self.create_index(
kind=kind, optlevel=optlevel, filters=filters))
def reindex(self):
"""Recompute the index associated with this column.
This can be useful when you suspect that, for any reason,
the index information is no longer valid and you want to rebuild it.
This method does nothing if the column is not indexed.
"""
self._do_reindex(dirty=False)
def reindex_dirty(self):
"""Recompute the associated index only if it is dirty.
This can be useful when you have set :attr:`Table.autoindex` to false
for the table and you want to update the column's index after an
invalidating index operation (like :meth:`Table.remove_rows`).
This method does nothing if the column is not indexed.
"""
self._do_reindex(dirty=True)
def remove_index(self):
"""Remove the index associated with this column.
This method does nothing if the column is not indexed. The removed
index can be created again by calling the :meth:`Column.create_index`
method.
"""
self._table_file._check_writable()
# Remove the index if existing.
if self.is_indexed:
index = self.index
index._f_remove()
self.table._set_column_indexing(self.pathname, False)
def close(self):
"""Close this column."""
self.__dict__.clear()
def __str__(self):
"""The string representation for this object."""
return (f"{self._table_path}.cols.{self.pathname.replace('/', '.')} "
f"({self.__class__.__name__}{self.shape}, "
f"{self.descr._v_types[self.name]}, idx={self.index})")
def __repr__(self):
"""A detailed string representation for this object."""
return str(self)
@lazyattr
def _v_pos(self):
return self.descr._v_colobjects[self.name]._v_pos
@lazyattr
def _v_col_attrs(self):
return self.descr._v_colobjects[self.name]._v_col_attrs
@property
def attrs(self):
return self._v_attrs
| (table, name, descr) |
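A brief usage sketch for the ``Column`` accessor defined above: reading, slicing, writing and iterating a single column. File and column names are hypothetical; the calls rely on the standard Table API (``create_table``, ``append``, ``modify_column``)::

    import tables as tb

    with tb.open_file("columns.h5", mode="w") as h5:
        table = h5.create_table("/", "t", {"pressure": tb.Float64Col(pos=0),
                                           "energy": tb.Float64Col(pos=1)})
        table.append([(float(i), float(i) ** 2) for i in range(100)])

        col = table.cols.pressure        # a Column instance
        print(len(col), col.shape, col.dtype)
        first = col[0]                   # one element (current flavor)
        chunk = col[10:20]               # a slice, read through Table.read()
        col[0] = -1.0                    # delegates to Table.modify_column()
        total = sum(col)                 # __iter__ reads in buffered chunks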
728,168 | tables.table | __getitem__ | Get a row or a range of rows from a column.
If key argument is an integer, the corresponding element in the column
is returned as an object of the current flavor. If key is a slice, the
range of elements determined by it is returned as an array of the
current flavor.
Examples
--------
::
print("Column handlers:")
for name in table.colnames:
print(table.cols._f_col(name))
print("Select table.cols.name[1]-->", table.cols.name[1])
print("Select table.cols.name[1:2]-->", table.cols.name[1:2])
print("Select table.cols.name[:]-->", table.cols.name[:])
print("Select table.cols._f_col('name')[:]-->",
table.cols._f_col('name')[:])
The output of this for a certain arbitrary table is::
Column handlers:
/table.cols.name (Column(), string, idx=None)
/table.cols.lati (Column(), int32, idx=None)
/table.cols.longi (Column(), int32, idx=None)
/table.cols.vector (Column(2,), int32, idx=None)
/table.cols.matrix2D (Column(2, 2), float64, idx=None)
Select table.cols.name[1]--> Particle: 11
Select table.cols.name[1:2]--> ['Particle: 11']
Select table.cols.name[:]--> ['Particle: 10'
'Particle: 11' 'Particle: 12'
'Particle: 13' 'Particle: 14']
Select table.cols._f_col('name')[:]--> ['Particle: 10'
'Particle: 11' 'Particle: 12'
'Particle: 13' 'Particle: 14']
See the :file:`examples/table2.py` file for a more complete example.
| def __getitem__(self, key):
"""Get a row or a range of rows from a column.
If key argument is an integer, the corresponding element in the column
is returned as an object of the current flavor. If key is a slice, the
range of elements determined by it is returned as an array of the
current flavor.
Examples
--------
::
print("Column handlers:")
for name in table.colnames:
print(table.cols._f_col(name))
print("Select table.cols.name[1]-->", table.cols.name[1])
print("Select table.cols.name[1:2]-->", table.cols.name[1:2])
print("Select table.cols.name[:]-->", table.cols.name[:])
print("Select table.cols._f_col('name')[:]-->",
table.cols._f_col('name')[:])
The output of this for a certain arbitrary table is::
Column handlers:
/table.cols.name (Column(), string, idx=None)
/table.cols.lati (Column(), int32, idx=None)
/table.cols.longi (Column(), int32, idx=None)
/table.cols.vector (Column(2,), int32, idx=None)
/table.cols.matrix2D (Column(2, 2), float64, idx=None)
Select table.cols.name[1]--> Particle: 11
Select table.cols.name[1:2]--> ['Particle: 11']
Select table.cols.name[:]--> ['Particle: 10'
'Particle: 11' 'Particle: 12'
'Particle: 13' 'Particle: 14']
Select table.cols._f_col('name')[:]--> ['Particle: 10'
'Particle: 11' 'Particle: 12'
'Particle: 13' 'Particle: 14']
See the :file:`examples/table2.py` file for a more complete example.
"""
table = self.table
# Generalized key support not there yet, but at least allow
# for a tuple with one single element (the main dimension).
# (key,) --> key
if isinstance(key, tuple) and len(key) == 1:
key = key[0]
if is_idx(key):
key = operator.index(key)
# Index out of range protection
if key >= table.nrows:
raise IndexError("Index out of range")
if key < 0:
# To support negative values
key += table.nrows
(start, stop, step) = table._process_range(key, key + 1, 1)
return table.read(start, stop, step, self.pathname)[0]
elif isinstance(key, slice):
(start, stop, step) = table._process_range(
key.start, key.stop, key.step)
return table.read(start, stop, step, self.pathname)
else:
raise TypeError(
"'%s' key type is not valid in this context" % key)
| (self, key) |
728,169 | tables.table | __init__ | null | def __init__(self, table, name, descr):
self._table_file = table._v_file
self._table_path = table._v_pathname
self.name = name
"""The name of the associated column."""
self.pathname = descr._v_colobjects[name]._v_pathname
"""The complete pathname of the associated column (the same as
Column.name if the column is not inside a nested column)."""
self.descr = descr
"""The Description (see :ref:`DescriptionClassDescr`) instance of the
parent table or nested column."""
self._v_attrs = ColumnAttributeSet(self)
| (self, table, name, descr) |
728,170 | tables.table | __iter__ | Iterate through all items in the column. | def __iter__(self):
"""Iterate through all items in the column."""
table = self.table
itemsize = self.dtype.itemsize
nrowsinbuf = table._v_file.params['IO_BUFFER_SIZE'] // itemsize
buf = np.empty((nrowsinbuf, ), self._itemtype)
max_row = len(self)
for start_row in range(0, len(self), nrowsinbuf):
end_row = min(start_row + nrowsinbuf, max_row)
buf_slice = buf[0:end_row - start_row]
table.read(start_row, end_row, 1, field=self.pathname,
out=buf_slice)
yield from buf_slice
| (self) |
728,171 | tables.table | __len__ | Get the number of elements in the column.
This matches the length in rows of the parent table.
| def __len__(self):
"""Get the number of elements in the column.
This matches the length in rows of the parent table.
"""
return self.table.nrows
| (self) |
728,172 | tables.table | __repr__ | A detailed string representation for this object. | def __repr__(self):
"""A detailed string representation for this object."""
return str(self)
| (self) |
728,173 | tables.table | __setitem__ | Set a row or a range of rows in a column.
If key argument is an integer, the corresponding element is set to
value. If key is a slice, the range of elements determined by it is
set to value.
Examples
--------
::
# Modify row 1
table.cols.col1[1] = -1
# Modify rows 1 and 3
table.cols.col1[1::2] = [2,3]
Which is equivalent to::
# Modify row 1
table.modify_columns(start=1, columns=[[-1]], names=['col1'])
# Modify rows 1 and 3
columns = np.rec.fromarrays([[2,3]], formats='i4')
table.modify_columns(start=1, step=2, columns=columns,
names=['col1'])
| def __setitem__(self, key, value):
"""Set a row or a range of rows in a column.
If key argument is an integer, the corresponding element is set to
value. If key is a slice, the range of elements determined by it is
set to value.
Examples
--------
::
# Modify row 1
table.cols.col1[1] = -1
# Modify rows 1 and 3
table.cols.col1[1::2] = [2,3]
Which is equivalent to::
# Modify row 1
table.modify_columns(start=1, columns=[[-1]], names=['col1'])
# Modify rows 1 and 3
columns = np.rec.fromarrays([[2,3]], formats='i4')
table.modify_columns(start=1, step=2, columns=columns,
names=['col1'])
"""
table = self.table
table._v_file._check_writable()
# Generalized key support not there yet, but at least allow
# for a tuple with one single element (the main dimension).
# (key,) --> key
if isinstance(key, tuple) and len(key) == 1:
key = key[0]
if is_idx(key):
key = operator.index(key)
# Index out of range protection
if key >= table.nrows:
raise IndexError("Index out of range")
if key < 0:
# To support negative values
key += table.nrows
return table.modify_column(key, key + 1, 1,
[[value]], self.pathname)
elif isinstance(key, slice):
(start, stop, step) = table._process_range(
key.start, key.stop, key.step)
return table.modify_column(start, stop, step,
value, self.pathname)
else:
raise ValueError("Non-valid index or slice: %s" % key)
| (self, key, value) |
728,174 | tables.table | __str__ | The string representation for this object. | def __str__(self):
"""The string representation for this object."""
return (f"{self._table_path}.cols.{self.pathname.replace('/', '.')} "
f"({self.__class__.__name__}{self.shape}, "
f"{self.descr._v_types[self.name]}, idx={self.index})")
| (self) |
728,175 | tables.table | _do_reindex | Common code for reindex() and reindex_dirty() codes. | def _do_reindex(self, dirty):
"""Common code for reindex() and reindex_dirty() codes."""
index = self.index
# Guard the unindexed case first: touching ``index.dirty`` on a None
# index would fail, and the callers promise that nothing happens when
# the column is not indexed.
if index is None:
return SizeType(0)  # The column is not intended for indexing
if dirty and not index.dirty:
return SizeType(0)  # The index is already clean; nothing to do
self._table_file._check_writable()
# Get the old index parameters
kind = index.kind
optlevel = index.optlevel
filters = index.filters
# We *need* to tell the index that it is going to be undirty.
# This is needed here so as to unnail() the condition cache.
index.dirty = False
# Delete the existing Index
index._f_remove()
# Create a new Index with the previous parameters
return SizeType(self.create_index(
kind=kind, optlevel=optlevel, filters=filters))
| (self, dirty) |
728,176 | tables.table | _g_update_table_location | Updates the location information about the associated `table`. | def _g_update_table_location(self, table):
"""Updates the location information about the associated `table`."""
self._table_file = table._v_file
self._table_path = table._v_pathname
| (self, table) |
728,177 | tables.table | close | Close this column. | def close(self):
"""Close this column."""
self.__dict__.clear()
| (self) |
728,178 | tables.table | create_csindex | Create a completely sorted index (CSI) for this column.
This method guarantees the creation of an index with zero entropy, that
is, a completely sorted index (CSI) -- provided that the number of rows
in the table does not exceed the 2**48 figure (that is, more than 100
trillion rows). A CSI index is needed for some table methods (like
:meth:`Table.itersorted` or :meth:`Table.read_sorted`) in order to
ensure completely sorted results.
For the meaning of filters and tmp_dir arguments see
:meth:`Column.create_index`.
Notes
-----
This method is equivalent to
Column.create_index(optlevel=9, kind='full', ...).
| def create_csindex(self, filters=None, tmp_dir=None,
_blocksizes=None, _testmode=False, _verbose=False):
"""Create a completely sorted index (CSI) for this column.
This method guarantees the creation of an index with zero entropy, that
is, a completely sorted index (CSI) -- provided that the number of rows
in the table does not exceed the 2**48 figure (that is, more than 100
trillion rows). A CSI index is needed for some table methods (like
:meth:`Table.itersorted` or :meth:`Table.read_sorted`) in order to
ensure completely sorted results.
For the meaning of filters and tmp_dir arguments see
:meth:`Column.create_index`.
Notes
-----
This method is equivalent to
Column.create_index(optlevel=9, kind='full', ...).
"""
return self.create_index(
kind='full', optlevel=9, filters=filters, tmp_dir=tmp_dir,
_blocksizes=_blocksizes, _testmode=_testmode, _verbose=_verbose)
| (self, filters=None, tmp_dir=None, _blocksizes=None, _testmode=False, _verbose=False) |
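A completely sorted index is what the sorted-access methods of Table require. A short sketch continuing the table from the previous example (the ``energy`` column name is hypothetical)::

    table.cols.energy.create_csindex()       # same as create_index(optlevel=9, kind='full')
    for row in table.itersorted('energy'):   # needs a CSI index on the sort column
        pass
    sorted_rows = table.read_sorted('energy')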
728,179 | tables.table | create_index | Create an index for this column.
.. warning::
In some situations it is useful to get a completely sorted
index (CSI). For those cases, it is best to use the
:meth:`Column.create_csindex` method instead.
Parameters
----------
optlevel : int
The optimization level for building the index. The level ranges
from 0 (no optimization) up to 9 (maximum optimization). Higher
levels of optimization mean better chances for reducing the entropy
of the index at the price of using more CPU, memory and I/O
resources for creating the index.
kind : str
The kind of the index to be built. It can take the 'ultralight',
'light', 'medium' or 'full' values. Lighter kinds ('ultralight'
and 'light') mean that the index takes less space on disk, but will
perform queries slower. Heavier kinds ('medium' and 'full') mean
better chances for reducing the entropy of the index (increasing
the query speed) at the price of using more disk space as well as
more CPU, memory and I/O resources for creating the index.
Note that selecting a full kind with an optlevel of 9 (the maximum)
guarantees the creation of an index with zero entropy, that is, a
completely sorted index (CSI) - provided that the number of rows in
the table does not exceed the 2**48 figure (that is, more than 100
trillion rows). See the :meth:`Column.create_csindex` method for a
more direct way to create a CSI index.
filters : Filters
Specify the Filters instance used to compress the index. If None,
default index filters will be used (currently, zlib level 1 with
shuffling).
tmp_dir
When kind is other than 'ultralight', a temporary file is created
during the index build process. You can use the tmp_dir argument
to specify the directory for this temporary file. The default is
to create it in the same directory as the file containing the
original table.
| def create_index(self, optlevel=6, kind="medium", filters=None,
tmp_dir=None, _blocksizes=None, _testmode=False,
_verbose=False):
"""Create an index for this column.
.. warning::
In some situations it is useful to get a completely sorted
index (CSI). For those cases, it is best to use the
:meth:`Column.create_csindex` method instead.
Parameters
----------
optlevel : int
The optimization level for building the index. The level ranges
from 0 (no optimization) up to 9 (maximum optimization). Higher
levels of optimization mean better chances for reducing the entropy
of the index at the price of using more CPU, memory and I/O
resources for creating the index.
kind : str
The kind of the index to be built. It can take the 'ultralight',
'light', 'medium' or 'full' values. Lighter kinds ('ultralight'
and 'light') mean that the index takes less space on disk, but will
perform queries slower. Heavier kinds ('medium' and 'full') mean
better chances for reducing the entropy of the index (increasing
the query speed) at the price of using more disk space as well as
more CPU, memory and I/O resources for creating the index.
Note that selecting a full kind with an optlevel of 9 (the maximum)
guarantees the creation of an index with zero entropy, that is, a
completely sorted index (CSI) - provided that the number of rows in
the table does not exceed the 2**48 figure (that is, more than 100
trillion rows). See the :meth:`Column.create_csindex` method for a
more direct way to create a CSI index.
filters : Filters
Specify the Filters instance used to compress the index. If None,
default index filters will be used (currently, zlib level 1 with
shuffling).
tmp_dir
When kind is other than 'ultralight', a temporary file is created
during the index build process. You can use the tmp_dir argument
to specify the directory for this temporary file. The default is
to create it in the same directory as the file containing the
original table.
"""
kinds = ['ultralight', 'light', 'medium', 'full']
if kind not in kinds:
raise ValueError("Kind must have any of these values: %s" % kinds)
if (not isinstance(optlevel, int) or
(optlevel < 0 or optlevel > 9)):
raise ValueError("Optimization level must be an integer in the "
"range 0-9")
if filters is None:
filters = default_index_filters
if tmp_dir is None:
tmp_dir = str(Path(self._table_file.filename).parent)
else:
if not Path(tmp_dir).is_dir():
raise ValueError(
f"Temporary directory '{tmp_dir}' does not exist"
)
if (_blocksizes is not None and
(not isinstance(_blocksizes, tuple) or len(_blocksizes) != 4)):
raise ValueError("_blocksizes must be a tuple with exactly 4 "
"elements")
idxrows = _column__create_index(self, optlevel, kind, filters,
tmp_dir, _blocksizes, _verbose)
return SizeType(idxrows)
| (self, optlevel=6, kind='medium', filters=None, tmp_dir=None, _blocksizes=None, _testmode=False, _verbose=False) |
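Once a column is indexed, ``Table.where()`` queries on that column can use the index automatically. A minimal sketch with the same hypothetical table and column names as before::

    nelements = table.cols.energy.create_index(optlevel=6, kind='medium')
    print(table.cols.energy.is_indexed, nelements)
    hits = [row['pressure'] for row in table.where('energy > 50')]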
728,180 | tables.table | reindex | Recompute the index associated with this column.
This can be useful when you suspect that, for any reason,
the index information is no longer valid and you want to rebuild it.
This method does nothing if the column is not indexed.
| def reindex(self):
"""Recompute the index associated with this column.
This can be useful when you suspect that, for any reason,
the index information is no longer valid and you want to rebuild it.
This method does nothing if the column is not indexed.
"""
self._do_reindex(dirty=False)
| (self) |
728,181 | tables.table | reindex_dirty | Recompute the associated index only if it is dirty.
This can be useful when you have set :attr:`Table.autoindex` to false
for the table and you want to update the column's index after an
invalidating index operation (like :meth:`Table.remove_rows`).
This method does nothing if the column is not indexed.
| def reindex_dirty(self):
"""Recompute the associated index only if it is dirty.
This can be useful when you have set :attr:`Table.autoindex` to false
for the table and you want to update the column's index after an
invalidating index operation (like :meth:`Table.remove_rows`).
This method does nothing if the column is not indexed.
"""
self._do_reindex(dirty=True)
| (self) |
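``reindex_dirty()`` is meant for the workflow where ``Table.autoindex`` has been switched off and an invalidating operation has left existing indexes dirty. A sketch, again with hypothetical names::

    table.autoindex = False                 # defer automatic index updates
    table.remove_rows(0, 10)                # marks existing indexes as dirty
    table.cols.energy.reindex_dirty()       # rebuilds only if the index is dirty
    table.autoindex = True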
728,182 | tables.table | remove_index | Remove the index associated with this column.
This method does nothing if the column is not indexed. The removed
index can be created again by calling the :meth:`Column.create_index`
method.
| def remove_index(self):
"""Remove the index associated with this column.
This method does nothing if the column is not indexed. The removed
index can be created again by calling the :meth:`Column.create_index`
method.
"""
self._table_file._check_writable()
# Remove the index if existing.
if self.is_indexed:
index = self.index
index._f_remove()
self.table._set_column_indexing(self.pathname, False)
| (self) |
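The methods above cover the whole index lifecycle of a ``Column``. A compact sketch (hypothetical column name)::

    col = table.cols.energy
    if col.is_indexed:
        col.reindex()         # rebuild the index unconditionally
        col.remove_index()    # drop it; col.index becomes None
    assert col.index is None
    col.create_index()        # the index can be created again at any time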
728,183 | tables.atom | _ComplexErrorAtom | Reminds the user to stop using the old complex atom names. | class _ComplexErrorAtom(ComplexAtom, metaclass=type):
"""Reminds the user to stop using the old complex atom names."""
def __init__(self, shape=(), dflt=ComplexAtom._defvalue):
raise TypeError(
"to avoid confusions with PyTables 1.X complex atom names, "
"please use ``ComplexAtom(itemsize=N)``, "
"where N=8 for single precision complex atoms, "
"and N=16 for double precision complex atoms")
| (shape=(), dflt=0j) |
728,185 | tables.atom | __init__ | null | def __init__(self, shape=(), dflt=ComplexAtom._defvalue):
raise TypeError(
"to avoid confusions with PyTables 1.X complex atom names, "
"please use ``ComplexAtom(itemsize=N)``, "
"where N=8 for single precision complex atoms, "
"and N=16 for double precision complex atoms")
| (self, shape=(), dflt=0j) |
728,191 | tables.description | Complex128Col | Defines a non-nested column of a particular type.
The constructor accepts the same arguments as the equivalent
`Atom` class, plus an additional ``pos`` argument for
position information, which is assigned to the `_v_pos`
attribute and an ``attrs`` argument for storing additional metadata
similar to `table.attrs`, which is assigned to the `_v_col_attrs`
attribute.
| from tables.description import Complex128Col
| (*args, **kwargs) |
728,207 | tables.description | Complex256Col | Defines a non-nested column of a particular type.
The constructor accepts the same arguments as the equivalent
`Atom` class, plus an additional ``pos`` argument for
position information, which is assigned to the `_v_pos`
attribute and an ``attrs`` argument for storing additional metadata
similar to `table.attrs`, which is assigned to the `_v_col_attrs`
attribute.
| from tables.description import Complex256Col
| (*args, **kwargs) |
728,223 | tables.description | Complex32Col | Defines a non-nested column of a particular type.
The constructor accepts the same arguments as the equivalent
`Atom` class, plus an additional ``pos`` argument for
position information, which is assigned to the `_v_pos`
attribute and an ``attrs`` argument for storing additional metadata
similar to `table.attrs`, which is assigned to the `_v_col_attrs`
attribute.
| from tables.description import Complex32Col
| (*args, **kwargs) |
728,239 | tables.description | Complex64Col | Defines a non-nested column of a particular type.
The constructor accepts the same arguments as the equivalent
`Atom` class, plus an additional ``pos`` argument for
position information, which is assigned to the `_v_pos`
attribute and an ``attrs`` argument for storing additional metadata
similar to `table.attrs`, which is assigned to the `_v_col_attrs`
attribute.
| from tables.description import Complex64Col
| (*args, **kwargs) |
728,247 | tables.atom | ComplexAtom | Defines an atom of kind complex.
Allowed item sizes are 8 (single precision) and 16 (double precision). This
class must be used instead of more concrete ones to avoid confusions with
numarray-like precision specifications used in PyTables 1.X.
| class ComplexAtom(Atom):
"""Defines an atom of kind complex.
Allowed item sizes are 8 (single precision) and 16 (double precision). This
class must be used instead of more concrete ones to avoid confusions with
numarray-like precision specifications used in PyTables 1.X.
"""
# This definition is a little more complex (no pun intended)
# because, although the complex kind is a normal numerical one,
# the usage of bottom-level classes is artificially forbidden.
# Everything will be back to normal when people have stopped
# using the old bottom-level complex classes.
kind = 'complex'
_deftype = 'complex128'
_defvalue = 0j
_isizes = [8, 16]
@property
def itemsize(self):
"""Size in bytes of a sigle item in the atom."""
return self.dtype.base.itemsize
# Only instances have a `type` attribute, so complex types must be
# registered by hand.
all_types.add('complex64')
all_types.add('complex128')
if hasattr(np, 'complex192'):
all_types.add('complex192')
_isizes.append(24)
if hasattr(np, 'complex256'):
all_types.add('complex256')
_isizes.append(32)
def __init__(self, itemsize, shape=(), dflt=_defvalue):
if itemsize not in self._isizes:
raise _invalid_itemsize_error('complex', itemsize, self._isizes)
self.type = '%s%d' % (self.kind, itemsize * 8)
Atom.__init__(self, self.type, shape, dflt)
| (itemsize, shape=(), dflt=0j) |
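``ComplexAtom`` is selected purely by item size (8 bytes for single precision, 16 for double, plus the extended sizes when NumPy provides them). A minimal sketch storing complex data in a chunked array; file and node names are made up::

    import numpy as np
    import tables as tb

    with tb.open_file("complex.h5", mode="w") as h5:
        atom = tb.ComplexAtom(itemsize=16)                  # complex128
        signal = h5.create_carray("/", "signal", atom, shape=(1000,))
        signal[:100] = np.exp(1j * np.linspace(0, np.pi, 100))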
728,249 | tables.atom | __init__ | null | def __init__(self, itemsize, shape=(), dflt=_defvalue):
if itemsize not in self._isizes:
raise _invalid_itemsize_error('complex', itemsize, self._isizes)
self.type = '%s%d' % (self.kind, itemsize * 8)
Atom.__init__(self, self.type, shape, dflt)
| (self, itemsize, shape=(), dflt=0j) |
728,255 | tables.description | ComplexCol | Defines a non-nested column of a particular type.
The constructor accepts the same arguments as the equivalent
`Atom` class, plus an additional ``pos`` argument for
position information, which is assigned to the `_v_pos`
attribute and an ``attrs`` argument for storing additional metadata
similar to `table.attrs`, which is assigned to the `_v_col_attrs`
attribute.
| from tables.description import ComplexCol
| (*args, **kwargs) |
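``ComplexCol`` is the column counterpart: it takes the same ``itemsize`` argument as ``ComplexAtom`` plus the ``pos`` (and optional ``attrs``) arguments described above. A small sketch with a made-up table description::

    import tables as tb

    class Measurement(tb.IsDescription):
        time      = tb.Float64Col(pos=0)
        amplitude = tb.ComplexCol(itemsize=16, pos=1)       # complex128 column

    with tb.open_file("meas.h5", mode="w") as h5:
        table = h5.create_table("/", "m", Measurement)
        row = table.row
        row['time'] = 0.0
        row['amplitude'] = 1 + 2j
        row.append()
        table.flush()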
728,263 | tables.exceptions | DataTypeWarning | Unsupported data type.
This warning is issued when an unsupported HDF5 data type is found
(normally in a file created with other tool than PyTables).
| class DataTypeWarning(Warning):
"""Unsupported data type.
This warning is issued when an unsupported HDF5 data type is found
(normally in a file created with other tool than PyTables).
"""
pass
| null |
728,264 | tables.description | Description | This class represents descriptions of the structure of tables.
An instance of this class is automatically bound to Table (see
:ref:`TableClassDescr`) objects when they are created. It provides a
browseable representation of the structure of the table, made of non-nested
(Col - see :ref:`ColClassDescr`) and nested (Description) columns.
Column definitions under a description can be accessed as attributes of it
(*natural naming*). For instance, if table.description is a Description
instance with a column named col1 under it, the latter can be accessed as
table.description.col1. If col1 is nested and contains a col2 column, this
can be accessed as table.description.col1.col2. Because of natural naming,
the names of members start with special prefixes, like in the Group class
(see :ref:`GroupClassDescr`).
.. rubric:: Description attributes
.. attribute:: _v_colobjects
A dictionary mapping the names of the columns hanging
directly from the associated table or nested column to their
respective descriptions (Col - see :ref:`ColClassDescr` or
Description - see :ref:`DescriptionClassDescr` instances).
.. versionchanged:: 3.0
The *_v_colObjects* attribute has been renamed into
*_v_colobjects*.
.. attribute:: _v_dflts
A dictionary mapping the names of non-nested columns
hanging directly from the associated table or nested column
to their respective default values.
.. attribute:: _v_dtype
The NumPy type which reflects the structure of this
table or nested column. You can use this as the
dtype argument of NumPy array factories.
.. attribute:: _v_dtypes
A dictionary mapping the names of non-nested columns
hanging directly from the associated table or nested column
to their respective NumPy types.
.. attribute:: _v_is_nested
Whether the associated table or nested column contains
further nested columns or not.
.. attribute:: _v_itemsize
The size in bytes of an item in this table or nested column.
.. attribute:: _v_name
The name of this description group. The name of the
root group is '/'.
.. attribute:: _v_names
A list of the names of the columns hanging directly
from the associated table or nested column. The order of the
names matches the order of their respective columns in the
containing table.
.. attribute:: _v_nested_descr
A nested list of pairs of (name, format) tuples for all the columns
under this table or nested column. You can use this as the dtype and
descr arguments of NumPy array factories.
.. versionchanged:: 3.0
The *_v_nestedDescr* attribute has been renamed into
*_v_nested_descr*.
.. attribute:: _v_nested_formats
A nested list of the NumPy string formats (and shapes) of all the
columns under this table or nested column. You can use this as the
formats argument of NumPy array factories.
.. versionchanged:: 3.0
The *_v_nestedFormats* attribute has been renamed into
*_v_nested_formats*.
.. attribute:: _v_nestedlvl
The level of the associated table or nested column in the nested
datatype.
.. attribute:: _v_nested_names
A nested list of the names of all the columns under this table or
nested column. You can use this as the names argument of NumPy array
factories.
.. versionchanged:: 3.0
The *_v_nestedNames* attribute has been renamed into
*_v_nested_names*.
.. attribute:: _v_pathname
Pathname of the table or nested column.
.. attribute:: _v_pathnames
A list of the pathnames of all the columns under this table or nested
column (in preorder). If it does not contain nested columns, this is
exactly the same as the :attr:`Description._v_names` attribute.
.. attribute:: _v_types
A dictionary mapping the names of non-nested columns hanging directly
from the associated table or nested column to their respective PyTables
types.
.. attribute:: _v_offsets
A list of offsets for all the columns. If the list is empty, it means
that there is no padding in the data structure. However, the support
for offsets is currently limited to flat tables; for nested tables, the
potential padding is always removed (exactly the same as in pre-3.5
versions), and this variable is set to empty.
.. versionadded:: 3.5
Previous to this version all the compound types were converted
internally to 'packed' types, i.e. with no padding between the
component types. Starting with 3.5, the holes in native HDF5
types (non-nested) are honored and replicated during dataset
and attribute copies.
| class Description:
"""This class represents descriptions of the structure of tables.
An instance of this class is automatically bound to Table (see
:ref:`TableClassDescr`) objects when they are created. It provides a
browseable representation of the structure of the table, made of non-nested
(Col - see :ref:`ColClassDescr`) and nested (Description) columns.
Column definitions under a description can be accessed as attributes of it
(*natural naming*). For instance, if table.description is a Description
instance with a column named col1 under it, the latter can be accessed as
table.description.col1. If col1 is nested and contains a col2 column, this
can be accessed as table.description.col1.col2. Because of natural naming,
the names of members start with special prefixes, like in the Group class
(see :ref:`GroupClassDescr`).
.. rubric:: Description attributes
.. attribute:: _v_colobjects
A dictionary mapping the names of the columns hanging
directly from the associated table or nested column to their
respective descriptions (Col - see :ref:`ColClassDescr` or
Description - see :ref:`DescriptionClassDescr` instances).
.. versionchanged:: 3.0
The *_v_colObjects* attribute has been renamed into
*_v_colobjects*.
.. attribute:: _v_dflts
A dictionary mapping the names of non-nested columns
hanging directly from the associated table or nested column
to their respective default values.
.. attribute:: _v_dtype
The NumPy type which reflects the structure of this
table or nested column. You can use this as the
dtype argument of NumPy array factories.
.. attribute:: _v_dtypes
A dictionary mapping the names of non-nested columns
hanging directly from the associated table or nested column
to their respective NumPy types.
.. attribute:: _v_is_nested
Whether the associated table or nested column contains
further nested columns or not.
.. attribute:: _v_itemsize
The size in bytes of an item in this table or nested column.
.. attribute:: _v_name
The name of this description group. The name of the
root group is '/'.
.. attribute:: _v_names
A list of the names of the columns hanging directly
from the associated table or nested column. The order of the
names matches the order of their respective columns in the
containing table.
.. attribute:: _v_nested_descr
A nested list of pairs of (name, format) tuples for all the columns
under this table or nested column. You can use this as the dtype and
descr arguments of NumPy array factories.
.. versionchanged:: 3.0
The *_v_nestedDescr* attribute has been renamed into
*_v_nested_descr*.
.. attribute:: _v_nested_formats
A nested list of the NumPy string formats (and shapes) of all the
columns under this table or nested column. You can use this as the
formats argument of NumPy array factories.
.. versionchanged:: 3.0
The *_v_nestedFormats* attribute has been renamed into
*_v_nested_formats*.
.. attribute:: _v_nestedlvl
The level of the associated table or nested column in the nested
datatype.
.. attribute:: _v_nested_names
A nested list of the names of all the columns under this table or
nested column. You can use this as the names argument of NumPy array
factories.
.. versionchanged:: 3.0
The *_v_nestedNames* attribute has been renamed into
*_v_nested_names*.
.. attribute:: _v_pathname
Pathname of the table or nested column.
.. attribute:: _v_pathnames
A list of the pathnames of all the columns under this table or nested
column (in preorder). If it does not contain nested columns, this is
exactly the same as the :attr:`Description._v_names` attribute.
.. attribute:: _v_types
A dictionary mapping the names of non-nested columns hanging directly
from the associated table or nested column to their respective PyTables
types.
.. attribute:: _v_offsets
A list of offsets for all the columns. If the list is empty, it means
that there is no padding in the data structure. However, the support
for offsets is currently limited to flat tables; for nested tables, the
potential padding is always removed (exactly the same as in pre-3.5
versions), and this variable is set to empty.
.. versionadded:: 3.5
Previous to this version all the compound types were converted
internally to 'packed' types, i.e. with no padding between the
component types. Starting with 3.5, the holes in native HDF5
types (non-nested) are honored and replicated during dataset
and attribute copies.
"""
def __init__(self, classdict, nestedlvl=-1, validate=True, ptparams=None):
if not classdict:
raise ValueError("cannot create an empty data type")
# Do a shallow copy of classdict just in case this is going to
# be shared by other instances
newdict = self.__dict__
newdict["_v_name"] = "/" # The name for root descriptor
newdict["_v_names"] = []
newdict["_v_dtypes"] = {}
newdict["_v_types"] = {}
newdict["_v_dflts"] = {}
newdict["_v_colobjects"] = {}
newdict["_v_is_nested"] = False
nestedFormats = []
nestedDType = []
if "_v_nestedlvl" not in newdict:
newdict["_v_nestedlvl"] = nestedlvl + 1
cols_with_pos = [] # column (position, name) pairs
cols_no_pos = [] # just column names
cols_offsets = [] # the offsets of the columns
valid_offsets = False # by default there are no valid offsets
# Check for special variables and convert column descriptions
for (name, descr) in classdict.items():
if name.startswith('_v_'):
if name in newdict:
# print("Warning!")
# special methods &c: copy to newdict, warn about conflicts
warnings.warn("Can't set attr %r in description class %r"
% (name, self))
else:
# print("Special variable!-->", name, classdict[name])
newdict[name] = descr
continue # This variable is not needed anymore
columns = None
if (type(descr) == type(IsDescription) and
issubclass(descr, IsDescription)):
# print("Nested object (type I)-->", name)
columns = descr().columns
elif (type(descr.__class__) == type(IsDescription) and
issubclass(descr.__class__, IsDescription)):
# print("Nested object (type II)-->", name)
columns = descr.columns
elif isinstance(descr, dict):
# print("Nested object (type III)-->", name)
columns = descr
else:
# print("Nested object (type IV)-->", name)
descr = copy.copy(descr)
# The copies above and below ensure that the structures
# provided by the user will remain unchanged even if we
# tamper with the values of ``_v_pos`` here.
if columns is not None:
descr = Description(copy.copy(columns), self._v_nestedlvl,
ptparams=ptparams)
classdict[name] = descr
pos = getattr(descr, '_v_pos', None)
if pos is None:
cols_no_pos.append(name)
else:
cols_with_pos.append((pos, name))
offset = getattr(descr, '_v_offset', None)
if offset is not None:
cols_offsets.append(offset)
# Sort field names:
#
# 1. Fields with explicit positions, according to their
# positions (and their names if coincident).
# 2. Fields with no position, in alphabetical order.
cols_with_pos.sort()
cols_no_pos.sort()
keys = [name for (pos, name) in cols_with_pos] + cols_no_pos
pos = 0
nested = False
# Get properties for compound types
for k in keys:
if validate:
# Check for key name validity
check_name_validity(k)
# Class variables
object = classdict[k]
newdict[k] = object # To allow natural naming
if not isinstance(object, (Col, Description)):
raise TypeError('Passing an incorrect value to a table column.'
' Expected a Col (or subclass) instance and '
'got: "%s". Please make use of the Col(), or '
'descendant, constructor to properly '
'initialize columns.' % object)
object._v_pos = pos # Set the position of this object
object._v_parent = self # The parent description
pos += 1
newdict['_v_colobjects'][k] = object
newdict['_v_names'].append(k)
object.__dict__['_v_name'] = k
if not isinstance(k, str):
# numpy only accepts "str" for field names
# Python 3.x: bytes --> str (unicode)
kk = k.decode()
else:
kk = k
if isinstance(object, Col):
dtype = object.dtype
newdict['_v_dtypes'][k] = dtype
newdict['_v_types'][k] = object.type
newdict['_v_dflts'][k] = object.dflt
nestedFormats.append(object.recarrtype)
baserecarrtype = dtype.base.str[1:]
nestedDType.append((kk, baserecarrtype, dtype.shape))
else: # A description
nestedFormats.append(object._v_nested_formats)
nestedDType.append((kk, object._v_dtype))
nested = True
# Useful for debugging purposes
# import traceback
# if ptparams is None:
# print("*** print_stack:")
# traceback.print_stack()
# Check whether we are gonna use padding or not. Two possibilities:
# 1) Make padding True by default (except if ALLOW_PADDING is set
# to False)
# 2) Make padding False by default (except if ALLOW_PADDING is set
# to True)
# Currently we choose 1) because it favours honoring padding even on
# unhandled situations (should be very few).
# However, for development, option 2) is recommended as it catches
# most of the unhandled situations.
allow_padding = ptparams is None or ptparams['ALLOW_PADDING']
# allow_padding = ptparams is not None and ptparams['ALLOW_PADDING']
if (allow_padding and
len(cols_offsets) > 1 and
len(keys) == len(cols_with_pos) and
len(keys) == len(cols_offsets) and
not nested): # TODO: support offsets with nested types
# We have to sort the offsets too, as they must follow the column
# order. As the offsets and the positions should be placed in the same
# order, a single sort is enough here.
cols_offsets.sort()
valid_offsets = True
else:
newdict['_v_offsets'] = []
# Assign the format list to _v_nested_formats
newdict['_v_nested_formats'] = nestedFormats
if self._v_nestedlvl == 0:
# Get recursively nested _v_nested_names and _v_nested_descr attrs
self._g_set_nested_names_descr()
# Get pathnames for nested groups
self._g_set_path_names()
        # Check whether _v_byteorder has been used and issue an error
if hasattr(self, "_v_byteorder"):
raise ValueError(
"Using a ``_v_byteorder`` in the description is obsolete. "
"Use the byteorder parameter in the constructor instead.")
# Compute the dtype with offsets or without
# print("offsets ->", cols_offsets, nestedDType, nested, valid_offsets)
if valid_offsets:
# TODO: support offsets within nested types
dtype_fields = {
'names': newdict['_v_names'], 'formats': nestedFormats,
'offsets': cols_offsets}
itemsize = newdict.get('_v_itemsize', None)
if itemsize is not None:
dtype_fields['itemsize'] = itemsize
dtype = np.dtype(dtype_fields)
else:
dtype = np.dtype(nestedDType)
newdict['_v_dtype'] = dtype
newdict['_v_itemsize'] = dtype.itemsize
newdict['_v_offsets'] = [dtype.fields[name][1] for name in dtype.names]
def _g_set_nested_names_descr(self):
"""Computes the nested names and descriptions for nested datatypes."""
names = self._v_names
fmts = self._v_nested_formats
self._v_nested_names = names[:] # Important to do a copy!
self._v_nested_descr = list(zip(names, fmts))
for i, name in enumerate(names):
new_object = self._v_colobjects[name]
if isinstance(new_object, Description):
new_object._g_set_nested_names_descr()
# replace the column nested name by a correct tuple
self._v_nested_names[i] = (name, new_object._v_nested_names)
self._v_nested_descr[i] = (name, new_object._v_nested_descr)
# set the _v_is_nested flag
self._v_is_nested = True
def _g_set_path_names(self):
"""Compute the pathnames for arbitrary nested descriptions.
This method sets the ``_v_pathname`` and ``_v_pathnames``
attributes of all the elements (both descriptions and columns)
in this nested description.
"""
def get_cols_in_order(description):
return [description._v_colobjects[colname]
for colname in description._v_names]
def join_paths(path1, path2):
if not path1:
return path2
return f'{path1}/{path2}'
# The top of the stack always has a nested description
# and a list of its child columns
# (be they nested ``Description`` or non-nested ``Col`` objects).
# In the end, the list contains only a list of column paths
# under this one.
#
# For instance, given this top of the stack::
#
# (<Description X>, [<Column A>, <Column B>])
#
# After computing the rest of the stack, the top is::
#
# (<Description X>, ['a', 'a/m', 'a/n', ... , 'b', ...])
stack = []
# We start by pushing the top-level description
# and its child columns.
self._v_pathname = ''
stack.append((self, get_cols_in_order(self)))
while stack:
desc, cols = stack.pop()
head = cols[0]
# What's the first child in the list?
if isinstance(head, Description):
# A nested description. We remove it from the list and
# push it with its child columns. This will be the next
# handled description.
head._v_pathname = join_paths(desc._v_pathname, head._v_name)
stack.append((desc, cols[1:])) # alter the top
stack.append((head, get_cols_in_order(head))) # new top
elif isinstance(head, Col):
# A non-nested column. We simply remove it from the
# list and append its name to it.
head._v_pathname = join_paths(desc._v_pathname, head._v_name)
cols.append(head._v_name) # alter the top
stack.append((desc, cols[1:])) # alter the top
else:
# Since paths and names are appended *to the end* of
# children lists, a string signals that no more children
# remain to be processed, so we are done with the
# description at the top of the stack.
assert isinstance(head, str)
# Assign the computed set of descendent column paths.
desc._v_pathnames = cols
if len(stack) > 0:
# Compute the paths with respect to the parent node
# (including the path of the current description)
# and append them to its list.
descName = desc._v_name
colPaths = [join_paths(descName, path) for path in cols]
colPaths.insert(0, descName)
parentCols = stack[-1][1]
parentCols.extend(colPaths)
# (Nothing is pushed, we are done with this description.)
def _f_walk(self, type='All'):
"""Iterate over nested columns.
If type is 'All' (the default), all column description objects (Col and
Description instances) are yielded in top-to-bottom order (preorder).
If type is 'Col' or 'Description', only column descriptions of that
type are yielded.
"""
if type not in ["All", "Col", "Description"]:
raise ValueError("""\
type can only take the parameters 'All', 'Col' or 'Description'.""")
stack = [self]
while stack:
object = stack.pop(0) # pop at the front so as to ensure the order
if type in ["All", "Description"]:
yield object # yield description
for name in object._v_names:
new_object = object._v_colobjects[name]
if isinstance(new_object, Description):
stack.append(new_object)
else:
if type in ["All", "Col"]:
yield new_object # yield column
def __repr__(self):
"""Gives a detailed Description column representation."""
rep = ['%s\"%s\": %r' %
(" " * self._v_nestedlvl, k, self._v_colobjects[k])
for k in self._v_names]
return '{\n %s}' % (',\n '.join(rep))
def __str__(self):
"""Gives a brief Description representation."""
return f'Description({self._v_nested_descr})'
| (classdict, nestedlvl=-1, validate=True, ptparams=None) |
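A hedged usage sketch of the constructor above: besides IsDescription subclasses, a plain dictionary of column definitions is accepted (the "type III" branch), and nested dictionaries become child descriptions. The column names below are hypothetical::
    import tables as tb
    from tables.description import Description
    # 'id' is forced to the front with an explicit position; the remaining
    # top-level names, having no position, are sorted alphabetically.
    classdict = {
        'id': tb.Int32Col(pos=0),
        'info': {
            'x': tb.Float64Col(),
            'y': tb.Float64Col(),
        },
    }
    desc = Description(classdict)
    print(desc._v_names)      # e.g. ['id', 'info']
    print(desc._v_is_nested)  # True, because 'info' is itself a Description
    print(desc._v_dtype)      # the compound NumPy dtype assembled at the end of __init__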
728,265 | tables.description | __init__ | null | def __init__(self, classdict, nestedlvl=-1, validate=True, ptparams=None):
if not classdict:
raise ValueError("cannot create an empty data type")
# Do a shallow copy of classdict just in case this is going to
# be shared by other instances
newdict = self.__dict__
newdict["_v_name"] = "/" # The name for root descriptor
newdict["_v_names"] = []
newdict["_v_dtypes"] = {}
newdict["_v_types"] = {}
newdict["_v_dflts"] = {}
newdict["_v_colobjects"] = {}
newdict["_v_is_nested"] = False
nestedFormats = []
nestedDType = []
if not hasattr(newdict, "_v_nestedlvl"):
newdict["_v_nestedlvl"] = nestedlvl + 1
    cols_with_pos = [] # column (position, name) pairs
    cols_no_pos = [] # just column names
    cols_offsets = [] # the offsets of the columns
    valid_offsets = False # by default there are no valid offsets
# Check for special variables and convert column descriptions
for (name, descr) in classdict.items():
if name.startswith('_v_'):
if name in newdict:
# print("Warning!")
# special methods &c: copy to newdict, warn about conflicts
warnings.warn("Can't set attr %r in description class %r"
% (name, self))
else:
# print("Special variable!-->", name, classdict[name])
newdict[name] = descr
continue # This variable is not needed anymore
columns = None
if (type(descr) == type(IsDescription) and
issubclass(descr, IsDescription)):
# print("Nested object (type I)-->", name)
columns = descr().columns
elif (type(descr.__class__) == type(IsDescription) and
issubclass(descr.__class__, IsDescription)):
# print("Nested object (type II)-->", name)
columns = descr.columns
elif isinstance(descr, dict):
# print("Nested object (type III)-->", name)
columns = descr
else:
# print("Nested object (type IV)-->", name)
descr = copy.copy(descr)
# The copies above and below ensure that the structures
# provided by the user will remain unchanged even if we
# tamper with the values of ``_v_pos`` here.
if columns is not None:
descr = Description(copy.copy(columns), self._v_nestedlvl,
ptparams=ptparams)
classdict[name] = descr
pos = getattr(descr, '_v_pos', None)
if pos is None:
cols_no_pos.append(name)
else:
cols_with_pos.append((pos, name))
offset = getattr(descr, '_v_offset', None)
if offset is not None:
cols_offsets.append(offset)
# Sort field names:
#
# 1. Fields with explicit positions, according to their
# positions (and their names if coincident).
# 2. Fields with no position, in alphabetical order.
cols_with_pos.sort()
cols_no_pos.sort()
keys = [name for (pos, name) in cols_with_pos] + cols_no_pos
pos = 0
nested = False
# Get properties for compound types
for k in keys:
if validate:
# Check for key name validity
check_name_validity(k)
# Class variables
object = classdict[k]
newdict[k] = object # To allow natural naming
if not isinstance(object, (Col, Description)):
raise TypeError('Passing an incorrect value to a table column.'
' Expected a Col (or subclass) instance and '
'got: "%s". Please make use of the Col(), or '
'descendant, constructor to properly '
'initialize columns.' % object)
object._v_pos = pos # Set the position of this object
object._v_parent = self # The parent description
pos += 1
newdict['_v_colobjects'][k] = object
newdict['_v_names'].append(k)
object.__dict__['_v_name'] = k
if not isinstance(k, str):
# numpy only accepts "str" for field names
# Python 3.x: bytes --> str (unicode)
kk = k.decode()
else:
kk = k
if isinstance(object, Col):
dtype = object.dtype
newdict['_v_dtypes'][k] = dtype
newdict['_v_types'][k] = object.type
newdict['_v_dflts'][k] = object.dflt
nestedFormats.append(object.recarrtype)
baserecarrtype = dtype.base.str[1:]
nestedDType.append((kk, baserecarrtype, dtype.shape))
else: # A description
nestedFormats.append(object._v_nested_formats)
nestedDType.append((kk, object._v_dtype))
nested = True
# Useful for debugging purposes
# import traceback
# if ptparams is None:
# print("*** print_stack:")
# traceback.print_stack()
    # Check whether we are going to use padding or not. Two possibilities:
# 1) Make padding True by default (except if ALLOW_PADDING is set
# to False)
# 2) Make padding False by default (except if ALLOW_PADDING is set
# to True)
# Currently we choose 1) because it favours honoring padding even on
# unhandled situations (should be very few).
# However, for development, option 2) is recommended as it catches
# most of the unhandled situations.
allow_padding = ptparams is None or ptparams['ALLOW_PADDING']
# allow_padding = ptparams is not None and ptparams['ALLOW_PADDING']
if (allow_padding and
len(cols_offsets) > 1 and
len(keys) == len(cols_with_pos) and
len(keys) == len(cols_offsets) and
not nested): # TODO: support offsets with nested types
# We have to sort the offsets too, as they must follow the column
        # order. As the offsets and the pos should be placed in the same
# order, a single sort is enough here.
cols_offsets.sort()
valid_offsets = True
else:
newdict['_v_offsets'] = []
# Assign the format list to _v_nested_formats
newdict['_v_nested_formats'] = nestedFormats
if self._v_nestedlvl == 0:
# Get recursively nested _v_nested_names and _v_nested_descr attrs
self._g_set_nested_names_descr()
# Get pathnames for nested groups
self._g_set_path_names()
        # Check whether _v_byteorder has been used and issue an error
if hasattr(self, "_v_byteorder"):
raise ValueError(
"Using a ``_v_byteorder`` in the description is obsolete. "
"Use the byteorder parameter in the constructor instead.")
# Compute the dtype with offsets or without
# print("offsets ->", cols_offsets, nestedDType, nested, valid_offsets)
if valid_offsets:
# TODO: support offsets within nested types
dtype_fields = {
'names': newdict['_v_names'], 'formats': nestedFormats,
'offsets': cols_offsets}
itemsize = newdict.get('_v_itemsize', None)
if itemsize is not None:
dtype_fields['itemsize'] = itemsize
dtype = np.dtype(dtype_fields)
else:
dtype = np.dtype(nestedDType)
newdict['_v_dtype'] = dtype
newdict['_v_itemsize'] = dtype.itemsize
newdict['_v_offsets'] = [dtype.fields[name][1] for name in dtype.names]
| (self, classdict, nestedlvl=-1, validate=True, ptparams=None) |
728,266 | tables.description | __repr__ | Gives a detailed Description column representation. | def __repr__(self):
"""Gives a detailed Description column representation."""
rep = ['%s\"%s\": %r' %
(" " * self._v_nestedlvl, k, self._v_colobjects[k])
for k in self._v_names]
return '{\n %s}' % (',\n '.join(rep))
| (self) |
728,267 | tables.description | __str__ | Gives a brief Description representation. | def __str__(self):
"""Gives a brief Description representation."""
return f'Description({self._v_nested_descr})'
| (self) |
728,268 | tables.description | _f_walk | Iterate over nested columns.
If type is 'All' (the default), all column description objects (Col and
Description instances) are yielded in top-to-bottom order (preorder).
If type is 'Col' or 'Description', only column descriptions of that
type are yielded.
| def _f_walk(self, type='All'):
"""Iterate over nested columns.
If type is 'All' (the default), all column description objects (Col and
Description instances) are yielded in top-to-bottom order (preorder).
If type is 'Col' or 'Description', only column descriptions of that
type are yielded.
"""
if type not in ["All", "Col", "Description"]:
raise ValueError("""\
type can only take the parameters 'All', 'Col' or 'Description'.""")
stack = [self]
while stack:
object = stack.pop(0) # pop at the front so as to ensure the order
if type in ["All", "Description"]:
yield object # yield description
for name in object._v_names:
new_object = object._v_colobjects[name]
if isinstance(new_object, Description):
stack.append(new_object)
else:
if type in ["All", "Col"]:
yield new_object # yield column
| (self, type='All') |
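A small, hedged sketch of ``_f_walk`` in practice (assuming an already open file handle ``h5file`` with a table node at ``/mytable``; both names are hypothetical), listing only the leaf columns of a table description::
    # Table.description is a Description instance; 'Col' restricts the walk
    # to non-nested columns.
    table = h5file.root.mytable
    for col in table.description._f_walk(type='Col'):
        print(col._v_pathname, col.dtype)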
728,269 | tables.description | _g_set_nested_names_descr | Computes the nested names and descriptions for nested datatypes. | def _g_set_nested_names_descr(self):
"""Computes the nested names and descriptions for nested datatypes."""
names = self._v_names
fmts = self._v_nested_formats
self._v_nested_names = names[:] # Important to do a copy!
self._v_nested_descr = list(zip(names, fmts))
for i, name in enumerate(names):
new_object = self._v_colobjects[name]
if isinstance(new_object, Description):
new_object._g_set_nested_names_descr()
# replace the column nested name by a correct tuple
self._v_nested_names[i] = (name, new_object._v_nested_names)
self._v_nested_descr[i] = (name, new_object._v_nested_descr)
# set the _v_is_nested flag
self._v_is_nested = True
| (self) |
728,270 | tables.description | _g_set_path_names | Compute the pathnames for arbitrary nested descriptions.
This method sets the ``_v_pathname`` and ``_v_pathnames``
attributes of all the elements (both descriptions and columns)
in this nested description.
| def _g_set_path_names(self):
"""Compute the pathnames for arbitrary nested descriptions.
This method sets the ``_v_pathname`` and ``_v_pathnames``
attributes of all the elements (both descriptions and columns)
in this nested description.
"""
def get_cols_in_order(description):
return [description._v_colobjects[colname]
for colname in description._v_names]
def join_paths(path1, path2):
if not path1:
return path2
return f'{path1}/{path2}'
# The top of the stack always has a nested description
# and a list of its child columns
# (be they nested ``Description`` or non-nested ``Col`` objects).
# In the end, the list contains only a list of column paths
# under this one.
#
# For instance, given this top of the stack::
#
# (<Description X>, [<Column A>, <Column B>])
#
# After computing the rest of the stack, the top is::
#
# (<Description X>, ['a', 'a/m', 'a/n', ... , 'b', ...])
stack = []
# We start by pushing the top-level description
# and its child columns.
self._v_pathname = ''
stack.append((self, get_cols_in_order(self)))
while stack:
desc, cols = stack.pop()
head = cols[0]
# What's the first child in the list?
if isinstance(head, Description):
# A nested description. We remove it from the list and
# push it with its child columns. This will be the next
# handled description.
head._v_pathname = join_paths(desc._v_pathname, head._v_name)
stack.append((desc, cols[1:])) # alter the top
stack.append((head, get_cols_in_order(head))) # new top
elif isinstance(head, Col):
# A non-nested column. We simply remove it from the
# list and append its name to it.
head._v_pathname = join_paths(desc._v_pathname, head._v_name)
cols.append(head._v_name) # alter the top
stack.append((desc, cols[1:])) # alter the top
else:
# Since paths and names are appended *to the end* of
# children lists, a string signals that no more children
# remain to be processed, so we are done with the
# description at the top of the stack.
assert isinstance(head, str)
# Assign the computed set of descendent column paths.
desc._v_pathnames = cols
if len(stack) > 0:
# Compute the paths with respect to the parent node
# (including the path of the current description)
# and append them to its list.
descName = desc._v_name
colPaths = [join_paths(descName, path) for path in cols]
colPaths.insert(0, descName)
parentCols = stack[-1][1]
parentCols.extend(colPaths)
# (Nothing is pushed, we are done with this description.)
| (self) |
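For illustration, the pathname bookkeeping computed by this method can be inspected on any top-level description; a hedged sketch reusing the hypothetical ``desc`` built in the earlier Description example::
    print(desc._v_pathname)       # '' for the root description
    print(desc.info._v_pathname)  # 'info'
    print(desc._v_pathnames)      # e.g. ['id', 'info', 'info/x', 'info/y']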
728,271 | tables.earray | EArray | This class represents extendable, homogeneous datasets in an HDF5 file.
The main difference between an EArray and a CArray (see
:ref:`CArrayClassDescr`), from which it inherits, is that the former
can be enlarged along one of its dimensions, the *enlargeable
dimension*. That means that the :attr:`Leaf.extdim` attribute (see
:class:`Leaf`) of any EArray instance will always be non-negative.
Multiple enlargeable dimensions might be supported in the future.
New rows can be added to the end of an enlargeable array by using the
:meth:`EArray.append` method.
Parameters
----------
parentnode
The parent :class:`Group` object.
.. versionchanged:: 3.0
Renamed from *parentNode* to *parentnode*.
name : str
The name of this node in its parent group.
atom
An `Atom` instance representing the *type* and *shape*
of the atomic objects to be saved.
shape
The shape of the new array. One (and only one) of
the shape dimensions *must* be 0. The dimension being 0
means that the resulting `EArray` object can be extended
along it. Multiple enlargeable dimensions are not supported
right now.
title
A description for this node (it sets the ``TITLE``
HDF5 attribute on disk).
filters
An instance of the `Filters` class that provides information
about the desired I/O filters to be applied during the life
of this object.
expectedrows
A user estimate about the number of row elements that will
be added to the growable dimension in the `EArray` node.
If not provided, the default value is ``EXPECTED_ROWS_EARRAY``
(see ``tables/parameters.py``). If you plan to create either
a much smaller or a much bigger `EArray` try providing a guess;
this will optimize the HDF5 B-Tree creation and management
process time and the amount of memory used.
chunkshape
The shape of the data chunk to be read or written in a single
HDF5 I/O operation. Filters are applied to those chunks of data.
The dimensionality of `chunkshape` must be the same as that of
`shape` (beware: no dimension should be 0 this time!).
If ``None``, a sensible value is calculated based on the
`expectedrows` parameter (which is recommended).
byteorder
The byteorder of the data *on disk*, specified as 'little' or
'big'. If this is not specified, the byteorder is that of the
platform.
track_times
Whether time data associated with the leaf are recorded (object
access time, raw data modification time, metadata change time, object
birth time); default True. Semantics of these times depend on their
implementation in the HDF5 library: refer to documentation of the
H5O_info_t data structure. As of HDF5 1.8.15, only ctime (metadata
change time) is implemented.
.. versionadded:: 3.4.3
Examples
--------
See below a small example of the use of the `EArray` class. The
code is available in ``examples/earray1.py``::
import numpy as np
import tables as tb
fileh = tb.open_file('earray1.h5', mode='w')
a = tb.StringAtom(itemsize=8)
# Use ``a`` as the object type for the enlargeable array.
array_c = fileh.create_earray(fileh.root, 'array_c', a, (0,),
"Chars")
array_c.append(np.array(['a'*2, 'b'*4], dtype='S8'))
array_c.append(np.array(['a'*6, 'b'*8, 'c'*10], dtype='S8'))
# Read the string ``EArray`` we have created on disk.
for s in array_c:
print('array_c[%s] => %r' % (array_c.nrow, s))
# Close the file.
fileh.close()
The output for the previous script is something like::
array_c[0] => 'aa'
array_c[1] => 'bbbb'
array_c[2] => 'aaaaaa'
array_c[3] => 'bbbbbbbb'
array_c[4] => 'cccccccc'
| class EArray(CArray):
"""This class represents extendable, homogeneous datasets in an HDF5 file.
The main difference between an EArray and a CArray (see
:ref:`CArrayClassDescr`), from which it inherits, is that the former
can be enlarged along one of its dimensions, the *enlargeable
dimension*. That means that the :attr:`Leaf.extdim` attribute (see
:class:`Leaf`) of any EArray instance will always be non-negative.
Multiple enlargeable dimensions might be supported in the future.
New rows can be added to the end of an enlargeable array by using the
:meth:`EArray.append` method.
Parameters
----------
parentnode
The parent :class:`Group` object.
.. versionchanged:: 3.0
Renamed from *parentNode* to *parentnode*.
name : str
The name of this node in its parent group.
atom
An `Atom` instance representing the *type* and *shape*
of the atomic objects to be saved.
shape
The shape of the new array. One (and only one) of
the shape dimensions *must* be 0. The dimension being 0
means that the resulting `EArray` object can be extended
along it. Multiple enlargeable dimensions are not supported
right now.
title
A description for this node (it sets the ``TITLE``
HDF5 attribute on disk).
filters
An instance of the `Filters` class that provides information
about the desired I/O filters to be applied during the life
of this object.
expectedrows
A user estimate about the number of row elements that will
be added to the growable dimension in the `EArray` node.
If not provided, the default value is ``EXPECTED_ROWS_EARRAY``
(see ``tables/parameters.py``). If you plan to create either
a much smaller or a much bigger `EArray` try providing a guess;
this will optimize the HDF5 B-Tree creation and management
process time and the amount of memory used.
chunkshape
The shape of the data chunk to be read or written in a single
HDF5 I/O operation. Filters are applied to those chunks of data.
The dimensionality of `chunkshape` must be the same as that of
`shape` (beware: no dimension should be 0 this time!).
If ``None``, a sensible value is calculated based on the
`expectedrows` parameter (which is recommended).
byteorder
The byteorder of the data *on disk*, specified as 'little' or
'big'. If this is not specified, the byteorder is that of the
platform.
track_times
Whether time data associated with the leaf are recorded (object
access time, raw data modification time, metadata change time, object
birth time); default True. Semantics of these times depend on their
implementation in the HDF5 library: refer to documentation of the
H5O_info_t data structure. As of HDF5 1.8.15, only ctime (metadata
change time) is implemented.
.. versionadded:: 3.4.3
Examples
--------
See below a small example of the use of the `EArray` class. The
code is available in ``examples/earray1.py``::
import numpy as np
import tables as tb
fileh = tb.open_file('earray1.h5', mode='w')
a = tb.StringAtom(itemsize=8)
# Use ``a`` as the object type for the enlargeable array.
array_c = fileh.create_earray(fileh.root, 'array_c', a, (0,),
\"Chars\")
array_c.append(np.array(['a'*2, 'b'*4], dtype='S8'))
array_c.append(np.array(['a'*6, 'b'*8, 'c'*10], dtype='S8'))
# Read the string ``EArray`` we have created on disk.
for s in array_c:
print('array_c[%s] => %r' % (array_c.nrow, s))
# Close the file.
fileh.close()
The output for the previous script is something like::
array_c[0] => 'aa'
array_c[1] => 'bbbb'
array_c[2] => 'aaaaaa'
array_c[3] => 'bbbbbbbb'
array_c[4] => 'cccccccc'
"""
# Class identifier.
_c_classid = 'EARRAY'
def __init__(self, parentnode, name,
atom=None, shape=None, title="",
filters=None, expectedrows=None,
chunkshape=None, byteorder=None,
_log=True, track_times=True):
# Specific of EArray
if expectedrows is None:
expectedrows = parentnode._v_file.params['EXPECTED_ROWS_EARRAY']
self._v_expectedrows = expectedrows
"""The expected number of rows to be stored in the array."""
# Call the parent (CArray) init code
super().__init__(parentnode, name, atom, shape, title, filters,
chunkshape, byteorder, _log, track_times)
def _g_create(self):
"""Create a new array in file (specific part)."""
# Pre-conditions and extdim computation
zerodims = np.sum(np.array(self.shape) == 0)
if zerodims > 0:
if zerodims == 1:
self.extdim = list(self.shape).index(0)
else:
raise NotImplementedError(
"Multiple enlargeable (0-)dimensions are not "
"supported.")
else:
raise ValueError(
"When creating EArrays, you need to set one of "
"the dimensions of the Atom instance to zero.")
# Finish the common part of the creation process
return self._g_create_common(self._v_expectedrows)
def _check_shape_append(self, nparr):
"""Test that nparr shape is consistent with underlying EArray."""
        # Does the array conform to the expandability of self?
myrank = len(self.shape)
narank = len(nparr.shape) - len(self.atom.shape)
if myrank != narank:
raise ValueError(("the ranks of the appended object (%d) and the "
"``%s`` EArray (%d) differ")
% (narank, self._v_pathname, myrank))
for i in range(myrank):
if i != self.extdim and self.shape[i] != nparr.shape[i]:
raise ValueError(("the shapes of the appended object and the "
"``%s`` EArray differ in non-enlargeable "
"dimension %d") % (self._v_pathname, i))
def append(self, sequence):
"""Add a sequence of data to the end of the dataset.
The sequence must have the same type as the array; otherwise a
TypeError is raised. In the same way, the dimensions of the
sequence must conform to the shape of the array, that is, all
dimensions must match, with the exception of the enlargeable
dimension, which can be of any length (even 0!). If the shape
of the sequence is invalid, a ValueError is raised.
"""
self._g_check_open()
self._v_file._check_writable()
# Convert the sequence into a NumPy object
nparr = convert_to_np_atom2(sequence, self.atom)
# Check if it has a consistent shape with underlying EArray
self._check_shape_append(nparr)
# If the size of the nparr is zero, don't do anything else
if nparr.size > 0:
self._append(nparr)
def _g_copy_with_stats(self, group, name, start, stop, step,
title, filters, chunkshape, _log, **kwargs):
"""Private part of Leaf.copy() for each kind of leaf."""
(start, stop, step) = self._process_range_read(start, stop, step)
# Build the new EArray object
maindim = self.maindim
shape = list(self.shape)
shape[maindim] = 0
# The number of final rows
nrows = len(range(start, stop, step))
# Build the new EArray object
object = EArray(
group, name, atom=self.atom, shape=shape, title=title,
filters=filters, expectedrows=nrows, chunkshape=chunkshape,
_log=_log)
# Now, fill the new earray with values from source
nrowsinbuf = self.nrowsinbuf
# The slices parameter for self.__getitem__
slices = [slice(0, dim, 1) for dim in self.shape]
# This is a hack to prevent doing unnecessary conversions
# when copying buffers
self._v_convert = False
# Start the copy itself
for start2 in range(start, stop, step * nrowsinbuf):
# Save the records on disk
stop2 = start2 + step * nrowsinbuf
if stop2 > stop:
stop2 = stop
# Set the proper slice in the extensible dimension
slices[maindim] = slice(start2, stop2, step)
object._append(self.__getitem__(tuple(slices)))
        # Activate the conversion again (default)
self._v_convert = True
nbytes = np.prod(self.shape, dtype=SizeType) * self.atom.itemsize
return (object, nbytes)
| (parentnode, name, atom=None, shape=None, title='', filters=None, expectedrows=None, chunkshape=None, byteorder=None, _log=True, track_times=True) |
728,274 | tables.earray | __init__ | null | def __init__(self, parentnode, name,
atom=None, shape=None, title="",
filters=None, expectedrows=None,
chunkshape=None, byteorder=None,
_log=True, track_times=True):
# Specific of EArray
if expectedrows is None:
expectedrows = parentnode._v_file.params['EXPECTED_ROWS_EARRAY']
self._v_expectedrows = expectedrows
"""The expected number of rows to be stored in the array."""
# Call the parent (CArray) init code
super().__init__(parentnode, name, atom, shape, title, filters,
chunkshape, byteorder, _log, track_times)
| (self, parentnode, name, atom=None, shape=None, title='', filters=None, expectedrows=None, chunkshape=None, byteorder=None, _log=True, track_times=True) |
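EArray nodes are normally created through ``File.create_earray``, which forwards ``expectedrows`` to this constructor. A hedged sketch (file and node names are hypothetical)::
    import numpy as np
    import tables as tb
    f = tb.open_file('growing.h5', mode='w')
    # One (and only one) zero dimension marks the enlargeable axis.
    ea = f.create_earray(f.root, 'data', tb.Float64Atom(), (0, 3),
                         expectedrows=1_000_000)
    ea.append(np.zeros((10, 3)))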
728,284 | tables.earray | _check_shape_append | Test that nparr shape is consistent with underlying EArray. | def _check_shape_append(self, nparr):
"""Test that nparr shape is consistent with underlying EArray."""
    # Does the array conform to the expandability of self?
myrank = len(self.shape)
narank = len(nparr.shape) - len(self.atom.shape)
if myrank != narank:
raise ValueError(("the ranks of the appended object (%d) and the "
"``%s`` EArray (%d) differ")
% (narank, self._v_pathname, myrank))
for i in range(myrank):
if i != self.extdim and self.shape[i] != nparr.shape[i]:
raise ValueError(("the shapes of the appended object and the "
"``%s`` EArray differ in non-enlargeable "
"dimension %d") % (self._v_pathname, i))
| (self, nparr) |
728,301 | tables.earray | _g_copy_with_stats | Private part of Leaf.copy() for each kind of leaf. | def _g_copy_with_stats(self, group, name, start, stop, step,
title, filters, chunkshape, _log, **kwargs):
"""Private part of Leaf.copy() for each kind of leaf."""
(start, stop, step) = self._process_range_read(start, stop, step)
# Build the new EArray object
maindim = self.maindim
shape = list(self.shape)
shape[maindim] = 0
# The number of final rows
nrows = len(range(start, stop, step))
# Build the new EArray object
object = EArray(
group, name, atom=self.atom, shape=shape, title=title,
filters=filters, expectedrows=nrows, chunkshape=chunkshape,
_log=_log)
# Now, fill the new earray with values from source
nrowsinbuf = self.nrowsinbuf
# The slices parameter for self.__getitem__
slices = [slice(0, dim, 1) for dim in self.shape]
# This is a hack to prevent doing unnecessary conversions
# when copying buffers
self._v_convert = False
# Start the copy itself
for start2 in range(start, stop, step * nrowsinbuf):
# Save the records on disk
stop2 = start2 + step * nrowsinbuf
if stop2 > stop:
stop2 = stop
# Set the proper slice in the extensible dimension
slices[maindim] = slice(start2, stop2, step)
object._append(self.__getitem__(tuple(slices)))
# Active the conversion again (default)
self._v_convert = True
nbytes = np.prod(self.shape, dtype=SizeType) * self.atom.itemsize
return (object, nbytes)
| (self, group, name, start, stop, step, title, filters, chunkshape, _log, **kwargs) |
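This private helper backs ``Leaf.copy`` for EArrays; a hedged usage sketch, reusing the hypothetical ``f`` and ``ea`` from the earlier create_earray example::
    # Copy every other row of the first ten into a new, still enlargeable node.
    ea_copy = ea.copy(f.root, 'data_copy', start=0, stop=10, step=2)
    print(ea_copy.shape)  # e.g. (5, 3); the first dimension remains enlargeable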
728,302 | tables.earray | _g_create | Create a new array in file (specific part). | def _g_create(self):
"""Create a new array in file (specific part)."""
# Pre-conditions and extdim computation
zerodims = np.sum(np.array(self.shape) == 0)
if zerodims > 0:
if zerodims == 1:
self.extdim = list(self.shape).index(0)
else:
raise NotImplementedError(
"Multiple enlargeable (0-)dimensions are not "
"supported.")
else:
raise ValueError(
"When creating EArrays, you need to set one of "
"the dimensions of the Atom instance to zero.")
# Finish the common part of the creation process
return self._g_create_common(self._v_expectedrows)
| (self) |
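The precondition enforced here is what users hit when the requested shape has no zero dimension; a small sketch (hypothetical node name, reusing ``f`` from the earlier example)::
    try:
        f.create_earray(f.root, 'not_growable', tb.Float64Atom(), (5, 3))
    except ValueError as exc:
        print(exc)  # one of the dimensions must be set to zero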
728,333 | tables.earray | append | Add a sequence of data to the end of the dataset.
The sequence must have the same type as the array; otherwise a
TypeError is raised. In the same way, the dimensions of the
sequence must conform to the shape of the array, that is, all
dimensions must match, with the exception of the enlargeable
dimension, which can be of any length (even 0!). If the shape
of the sequence is invalid, a ValueError is raised.
| def append(self, sequence):
"""Add a sequence of data to the end of the dataset.
The sequence must have the same type as the array; otherwise a
TypeError is raised. In the same way, the dimensions of the
sequence must conform to the shape of the array, that is, all
dimensions must match, with the exception of the enlargeable
dimension, which can be of any length (even 0!). If the shape
of the sequence is invalid, a ValueError is raised.
"""
self._g_check_open()
self._v_file._check_writable()
# Convert the sequence into a NumPy object
nparr = convert_to_np_atom2(sequence, self.atom)
# Check if it has a consistent shape with underlying EArray
self._check_shape_append(nparr)
# If the size of the nparr is zero, don't do anything else
if nparr.size > 0:
self._append(nparr)
| (self, sequence) |
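A minimal sketch of the shape rules described above, reusing the hypothetical ``ea`` of shape ``(0, 3)`` from the earlier example::
    ea.append(np.ones((5, 3)))   # OK: only the enlargeable axis may differ
    ea.append(np.empty((0, 3)))  # OK: a zero-length append is accepted and does nothing
    try:
        ea.append(np.ones((5, 4)))  # mismatch in a non-enlargeable dimension
    except ValueError as exc:
        print(exc)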
728,348 | tables.misc.enum | Enum | Enumerated type.
Each instance of this class represents an enumerated type. The
values of the type must be declared
*exhaustively* and named with
*strings*, and they might be given explicit
concrete values, though this is not compulsory. Once the type is
defined, it can not be modified.
There are three ways of defining an enumerated type. Each one
of them corresponds to the type of the only argument in the
constructor of Enum:
- *Sequence of names*: each enumerated
value is named using a string, and its order is determined by
its position in the sequence; the concrete value is assigned
automatically::
>>> boolEnum = Enum(['True', 'False'])
- *Mapping of names*: each enumerated
value is named by a string and given an explicit concrete value.
All of the concrete values must be different, or a
ValueError will be raised::
>>> priority = Enum({'red': 20, 'orange': 10, 'green': 0})
>>> colors = Enum({'red': 1, 'blue': 1})
Traceback (most recent call last):
...
ValueError: enumerated values contain duplicate concrete values: 1
- *Enumerated type*: in that case, a copy
of the original enumerated type is created. Both enumerated
types are considered equal::
>>> prio2 = Enum(priority)
>>> priority == prio2
True
Please note that names starting with _ are
not allowed, since they are reserved for internal usage::
>>> prio2 = Enum(['_xx'])
Traceback (most recent call last):
...
ValueError: name of enumerated value can not start with ``_``: '_xx'
The concrete value of an enumerated value is obtained by
getting its name as an attribute of the Enum
instance (see __getattr__()) or as an item (see
__getitem__()). This allows comparisons between
enumerated values and assigning them to ordinary Python
variables::
>>> redv = priority.red
>>> redv == priority['red']
True
>>> redv > priority.green
True
>>> priority.red == priority.orange
False
The name of the enumerated value corresponding to a concrete
value can also be obtained by using the
__call__() method of the enumerated type. In this
way you get the symbolic name to use it later with
__getitem__()::
>>> priority(redv)
'red'
>>> priority.red == priority[priority(priority.red)]
True
(If you ask, the __getitem__() method is
not used for this purpose to avoid ambiguity in the case of using
strings as concrete values.)
| class Enum:
"""Enumerated type.
Each instance of this class represents an enumerated type. The
values of the type must be declared
*exhaustively* and named with
*strings*, and they might be given explicit
concrete values, though this is not compulsory. Once the type is
defined, it can not be modified.
There are three ways of defining an enumerated type. Each one
of them corresponds to the type of the only argument in the
constructor of Enum:
- *Sequence of names*: each enumerated
value is named using a string, and its order is determined by
its position in the sequence; the concrete value is assigned
automatically::
>>> boolEnum = Enum(['True', 'False'])
- *Mapping of names*: each enumerated
value is named by a string and given an explicit concrete value.
All of the concrete values must be different, or a
ValueError will be raised::
>>> priority = Enum({'red': 20, 'orange': 10, 'green': 0})
>>> colors = Enum({'red': 1, 'blue': 1})
Traceback (most recent call last):
...
ValueError: enumerated values contain duplicate concrete values: 1
- *Enumerated type*: in that case, a copy
of the original enumerated type is created. Both enumerated
types are considered equal::
>>> prio2 = Enum(priority)
>>> priority == prio2
True
Please note that names starting with _ are
not allowed, since they are reserved for internal usage::
>>> prio2 = Enum(['_xx'])
Traceback (most recent call last):
...
ValueError: name of enumerated value can not start with ``_``: '_xx'
The concrete value of an enumerated value is obtained by
getting its name as an attribute of the Enum
instance (see __getattr__()) or as an item (see
__getitem__()). This allows comparisons between
enumerated values and assigning them to ordinary Python
variables::
>>> redv = priority.red
>>> redv == priority['red']
True
>>> redv > priority.green
True
>>> priority.red == priority.orange
False
The name of the enumerated value corresponding to a concrete
value can also be obtained by using the
__call__() method of the enumerated type. In this
way you get the symbolic name to use it later with
__getitem__()::
>>> priority(redv)
'red'
>>> priority.red == priority[priority(priority.red)]
True
(If you ask, the __getitem__() method is
not used for this purpose to avoid ambiguity in the case of using
strings as concrete values.)
"""
def __init__(self, enum):
mydict = self.__dict__
mydict['_names'] = {}
mydict['_values'] = {}
if isinstance(enum, list) or isinstance(enum, tuple):
for (value, name) in enumerate(enum): # values become 0, 1, 2...
self._check_and_set_pair(name, value)
elif isinstance(enum, dict):
for (name, value) in enum.items():
self._check_and_set_pair(name, value)
elif isinstance(enum, Enum):
for (name, value) in enum._names.items():
self._check_and_set_pair(name, value)
else:
raise TypeError("""\
enumerations can only be created from \
sequences, mappings and other enumerations""")
def _check_and_set_pair(self, name, value):
"""Check validity of enumerated value and insert it into type."""
names = self._names
values = self._values
if not isinstance(name, str):
raise TypeError(
f"name of enumerated value is not a string: {name!r}")
if name.startswith('_'):
raise ValueError(
"name of enumerated value can not start with ``_``: %r"
% name)
# This check is only necessary with a sequence base object.
if name in names:
raise ValueError(
"enumerated values contain duplicate names: %r" % name)
# This check is only necessary with a mapping base object.
if value in values:
raise ValueError(
"enumerated values contain duplicate concrete values: %r"
% value)
names[name] = value
values[value] = name
self.__dict__[name] = value
def __getitem__(self, name):
"""Get the concrete value of the enumerated value with that name.
The name of the enumerated value must be a string. If there is no value
with that name in the enumeration, a KeyError is raised.
Examples
--------
Let ``enum`` be an enumerated type defined as:
>>> enum = Enum({'T0': 0, 'T1': 2, 'T2': 5})
then:
>>> enum['T1']
2
>>> enum['foo']
Traceback (most recent call last):
...
KeyError: "no enumerated value with that name: 'foo'"
"""
try:
return self._names[name]
except KeyError:
raise KeyError(f"no enumerated value with that name: {name!r}")
def __setitem__(self, name, value):
"""This operation is forbidden."""
raise IndexError("operation not allowed")
def __delitem__(self, name):
"""This operation is forbidden."""
raise IndexError("operation not allowed")
def __getattr__(self, name):
"""Get the concrete value of the enumerated value with that name.
The name of the enumerated value must be a string. If there is no value
with that name in the enumeration, an AttributeError is raised.
Examples
--------
Let ``enum`` be an enumerated type defined as:
>>> enum = Enum({'T0': 0, 'T1': 2, 'T2': 5})
then:
>>> enum.T1
2
>>> enum.foo
Traceback (most recent call last):
...
AttributeError: no enumerated value with that name: 'foo'
"""
try:
return self[name]
except KeyError as ke:
raise AttributeError(*ke.args)
def __setattr__(self, name, value):
"""This operation is forbidden."""
raise AttributeError("operation not allowed")
def __delattr__(self, name):
"""This operation is forbidden."""
raise AttributeError("operation not allowed")
def __contains__(self, name):
"""Is there an enumerated value with that name in the type?
If the enumerated type has an enumerated value with that name, True is
returned. Otherwise, False is returned. The name must be a string.
This method does *not* check for concrete values matching a value in an
enumerated type. For that, please use the :meth:`Enum.__call__` method.
Examples
--------
Let ``enum`` be an enumerated type defined as:
>>> enum = Enum({'T0': 0, 'T1': 2, 'T2': 5})
then:
>>> 'T1' in enum
True
>>> 'foo' in enum
False
>>> 0 in enum
Traceback (most recent call last):
...
TypeError: name of enumerated value is not a string: 0
>>> enum.T1 in enum # Be careful with this!
Traceback (most recent call last):
...
TypeError: name of enumerated value is not a string: 2
"""
if not isinstance(name, str):
raise TypeError(
f"name of enumerated value is not a string: {name!r}")
return name in self._names
def __call__(self, value, *default):
"""Get the name of the enumerated value with that concrete value.
If there is no value with that concrete value in the enumeration and a
second argument is given as a default, this is returned. Else, a
ValueError is raised.
This method can be used for checking that a concrete value belongs to
the set of concrete values in an enumerated type.
Examples
--------
Let ``enum`` be an enumerated type defined as:
>>> enum = Enum({'T0': 0, 'T1': 2, 'T2': 5})
then:
>>> enum(5)
'T2'
>>> enum(42, None) is None
True
>>> enum(42)
Traceback (most recent call last):
...
ValueError: no enumerated value with that concrete value: 42
"""
try:
return self._values[value]
except KeyError:
if len(default) > 0:
return default[0]
raise ValueError(
f"no enumerated value with that concrete value: {value!r}")
def __len__(self):
"""Return the number of enumerated values in the enumerated type.
Examples
--------
>>> len(Enum(['e%d' % i for i in range(10)]))
10
"""
return len(self._names)
def __iter__(self):
"""Iterate over the enumerated values.
Enumerated values are returned as (name, value) pairs *in no particular
order*.
Examples
--------
>>> enumvals = {'red': 4, 'green': 2, 'blue': 1}
>>> enum = Enum(enumvals)
>>> enumdict = dict([(name, value) for (name, value) in enum])
>>> enumvals == enumdict
True
"""
yield from self._names.items()
def __eq__(self, other):
"""Is the other enumerated type equivalent to this one?
Two enumerated types are equivalent if they have exactly the same
enumerated values (i.e. with the same names and concrete values).
Examples
--------
Let ``enum*`` be enumerated types defined as:
>>> enum1 = Enum({'T0': 0, 'T1': 2})
>>> enum2 = Enum(enum1)
>>> enum3 = Enum({'T1': 2, 'T0': 0})
>>> enum4 = Enum({'T0': 0, 'T1': 2, 'T2': 5})
>>> enum5 = Enum({'T0': 0})
>>> enum6 = Enum({'T0': 10, 'T1': 20})
then:
>>> enum1 == enum1
True
>>> enum1 == enum2 == enum3
True
>>> enum1 == enum4
False
>>> enum5 == enum1
False
>>> enum1 == enum6
False
Comparing enumerated types with other kinds of objects produces
a false result:
>>> enum1 == {'T0': 0, 'T1': 2}
False
>>> enum1 == ['T0', 'T1']
False
>>> enum1 == 2
False
"""
if not isinstance(other, Enum):
return False
return self._names == other._names
def __ne__(self, other):
"""Is the `other` enumerated type different from this one?
Two enumerated types are different if they don't have exactly
the same enumerated values (i.e. with the same names and
concrete values).
Examples
--------
Let ``enum*`` be enumerated types defined as:
>>> enum1 = Enum({'T0': 0, 'T1': 2})
>>> enum2 = Enum(enum1)
>>> enum3 = Enum({'T1': 2, 'T0': 0})
>>> enum4 = Enum({'T0': 0, 'T1': 2, 'T2': 5})
>>> enum5 = Enum({'T0': 0})
>>> enum6 = Enum({'T0': 10, 'T1': 20})
then:
>>> enum1 != enum1
False
>>> enum1 != enum2 != enum3
False
>>> enum1 != enum4
True
>>> enum5 != enum1
True
>>> enum1 != enum6
True
"""
return not self.__eq__(other)
# XXX: API incompatible change for PyTables 3 line
# Overriding __eq__ blocks inheritance of __hash__ in 3.x
# def __hash__(self):
# return hash((self.__class__, tuple(self._names.items())))
def __repr__(self):
"""Return the canonical string representation of the enumeration. The
output of this method can be evaluated to give a new enumeration object
that will compare equal to this one.
Examples
--------
>>> repr(Enum({'name': 10}))
"Enum({'name': 10})"
"""
return 'Enum(%s)' % self._names
| (enum) |
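Beyond standalone use, an Enum usually reaches a file through an enumerated column. A hedged sketch of declaring and filling one (file, table and column names are hypothetical)::
    import tables as tb
    from tables.misc.enum import Enum
    colors = Enum({'red': 1, 'green': 2, 'blue': 3})
    class Particle(tb.IsDescription):
        name = tb.StringCol(16)
        color = tb.EnumCol(colors, 'red', base='uint8')
    with tb.open_file('enum_demo.h5', 'w') as f:
        table = f.create_table(f.root, 'particles', Particle)
        row = table.row
        row['name'] = b'p0'
        row['color'] = colors.green  # store the concrete value; map back with colors(value)
        row.append()
        table.flush()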
728,349 | tables.misc.enum | __call__ | Get the name of the enumerated value with that concrete value.
If there is no value with that concrete value in the enumeration and a
second argument is given as a default, this is returned. Else, a
ValueError is raised.
This method can be used for checking that a concrete value belongs to
the set of concrete values in an enumerated type.
Examples
--------
Let ``enum`` be an enumerated type defined as:
>>> enum = Enum({'T0': 0, 'T1': 2, 'T2': 5})
then:
>>> enum(5)
'T2'
>>> enum(42, None) is None
True
>>> enum(42)
Traceback (most recent call last):
...
ValueError: no enumerated value with that concrete value: 42
| def __call__(self, value, *default):
"""Get the name of the enumerated value with that concrete value.
If there is no value with that concrete value in the enumeration and a
second argument is given as a default, this is returned. Else, a
ValueError is raised.
This method can be used for checking that a concrete value belongs to
the set of concrete values in an enumerated type.
Examples
--------
Let ``enum`` be an enumerated type defined as:
>>> enum = Enum({'T0': 0, 'T1': 2, 'T2': 5})
then:
>>> enum(5)
'T2'
>>> enum(42, None) is None
True
>>> enum(42)
Traceback (most recent call last):
...
ValueError: no enumerated value with that concrete value: 42
"""
try:
return self._values[value]
except KeyError:
if len(default) > 0:
return default[0]
raise ValueError(
f"no enumerated value with that concrete value: {value!r}")
| (self, value, *default) |
728,350 | tables.misc.enum | __contains__ | Is there an enumerated value with that name in the type?
If the enumerated type has an enumerated value with that name, True is
returned. Otherwise, False is returned. The name must be a string.
This method does *not* check for concrete values matching a value in an
enumerated type. For that, please use the :meth:`Enum.__call__` method.
Examples
--------
Let ``enum`` be an enumerated type defined as:
>>> enum = Enum({'T0': 0, 'T1': 2, 'T2': 5})
then:
>>> 'T1' in enum
True
>>> 'foo' in enum
False
>>> 0 in enum
Traceback (most recent call last):
...
TypeError: name of enumerated value is not a string: 0
>>> enum.T1 in enum # Be careful with this!
Traceback (most recent call last):
...
TypeError: name of enumerated value is not a string: 2
| def __contains__(self, name):
"""Is there an enumerated value with that name in the type?
If the enumerated type has an enumerated value with that name, True is
returned. Otherwise, False is returned. The name must be a string.
This method does *not* check for concrete values matching a value in an
enumerated type. For that, please use the :meth:`Enum.__call__` method.
Examples
--------
Let ``enum`` be an enumerated type defined as:
>>> enum = Enum({'T0': 0, 'T1': 2, 'T2': 5})
then:
>>> 'T1' in enum
True
>>> 'foo' in enum
False
>>> 0 in enum
Traceback (most recent call last):
...
TypeError: name of enumerated value is not a string: 0
>>> enum.T1 in enum # Be careful with this!
Traceback (most recent call last):
...
TypeError: name of enumerated value is not a string: 2
"""
if not isinstance(name, str):
raise TypeError(
f"name of enumerated value is not a string: {name!r}")
return name in self._names
| (self, name) |
728,351 | tables.misc.enum | __delattr__ | This operation is forbidden. | def __delattr__(self, name):
"""This operation is forbidden."""
raise AttributeError("operation not allowed")
| (self, name) |
728,352 | tables.misc.enum | __delitem__ | This operation is forbidden. | def __delitem__(self, name):
"""This operation is forbidden."""
raise IndexError("operation not allowed")
| (self, name) |
728,353 | tables.misc.enum | __eq__ | Is the other enumerated type equivalent to this one?
Two enumerated types are equivalent if they have exactly the same
enumerated values (i.e. with the same names and concrete values).
Examples
--------
Let ``enum*`` be enumerated types defined as:
>>> enum1 = Enum({'T0': 0, 'T1': 2})
>>> enum2 = Enum(enum1)
>>> enum3 = Enum({'T1': 2, 'T0': 0})
>>> enum4 = Enum({'T0': 0, 'T1': 2, 'T2': 5})
>>> enum5 = Enum({'T0': 0})
>>> enum6 = Enum({'T0': 10, 'T1': 20})
then:
>>> enum1 == enum1
True
>>> enum1 == enum2 == enum3
True
>>> enum1 == enum4
False
>>> enum5 == enum1
False
>>> enum1 == enum6
False
Comparing enumerated types with other kinds of objects produces
a false result:
>>> enum1 == {'T0': 0, 'T1': 2}
False
>>> enum1 == ['T0', 'T1']
False
>>> enum1 == 2
False
| def __eq__(self, other):
"""Is the other enumerated type equivalent to this one?
Two enumerated types are equivalent if they have exactly the same
enumerated values (i.e. with the same names and concrete values).
Examples
--------
Let ``enum*`` be enumerated types defined as:
>>> enum1 = Enum({'T0': 0, 'T1': 2})
>>> enum2 = Enum(enum1)
>>> enum3 = Enum({'T1': 2, 'T0': 0})
>>> enum4 = Enum({'T0': 0, 'T1': 2, 'T2': 5})
>>> enum5 = Enum({'T0': 0})
>>> enum6 = Enum({'T0': 10, 'T1': 20})
then:
>>> enum1 == enum1
True
>>> enum1 == enum2 == enum3
True
>>> enum1 == enum4
False
>>> enum5 == enum1
False
>>> enum1 == enum6
False
Comparing enumerated types with other kinds of objects produces
a false result:
>>> enum1 == {'T0': 0, 'T1': 2}
False
>>> enum1 == ['T0', 'T1']
False
>>> enum1 == 2
False
"""
if not isinstance(other, Enum):
return False
return self._names == other._names
| (self, other) |
728,354 | tables.misc.enum | __getattr__ | Get the concrete value of the enumerated value with that name.
The name of the enumerated value must be a string. If there is no value
with that name in the enumeration, an AttributeError is raised.
Examples
--------
Let ``enum`` be an enumerated type defined as:
>>> enum = Enum({'T0': 0, 'T1': 2, 'T2': 5})
then:
>>> enum.T1
2
>>> enum.foo
Traceback (most recent call last):
...
AttributeError: no enumerated value with that name: 'foo'
| def __getattr__(self, name):
"""Get the concrete value of the enumerated value with that name.
The name of the enumerated value must be a string. If there is no value
with that name in the enumeration, an AttributeError is raised.
Examples
--------
Let ``enum`` be an enumerated type defined as:
>>> enum = Enum({'T0': 0, 'T1': 2, 'T2': 5})
then:
>>> enum.T1
2
>>> enum.foo
Traceback (most recent call last):
...
AttributeError: no enumerated value with that name: 'foo'
"""
try:
return self[name]
except KeyError as ke:
raise AttributeError(*ke.args)
| (self, name) |
728,355 | tables.misc.enum | __getitem__ | Get the concrete value of the enumerated value with that name.
The name of the enumerated value must be a string. If there is no value
with that name in the enumeration, a KeyError is raised.
Examples
--------
Let ``enum`` be an enumerated type defined as:
>>> enum = Enum({'T0': 0, 'T1': 2, 'T2': 5})
then:
>>> enum['T1']
2
>>> enum['foo']
Traceback (most recent call last):
...
KeyError: "no enumerated value with that name: 'foo'"
| def __getitem__(self, name):
"""Get the concrete value of the enumerated value with that name.
The name of the enumerated value must be a string. If there is no value
with that name in the enumeration, a KeyError is raised.
Examples
--------
Let ``enum`` be an enumerated type defined as:
>>> enum = Enum({'T0': 0, 'T1': 2, 'T2': 5})
then:
>>> enum['T1']
2
>>> enum['foo']
Traceback (most recent call last):
...
KeyError: "no enumerated value with that name: 'foo'"
"""
try:
return self._names[name]
except KeyError:
raise KeyError(f"no enumerated value with that name: {name!r}")
| (self, name) |
728,356 | tables.misc.enum | __init__ | null | def __init__(self, enum):
mydict = self.__dict__
mydict['_names'] = {}
mydict['_values'] = {}
if isinstance(enum, list) or isinstance(enum, tuple):
for (value, name) in enumerate(enum): # values become 0, 1, 2...
self._check_and_set_pair(name, value)
elif isinstance(enum, dict):
for (name, value) in enum.items():
self._check_and_set_pair(name, value)
elif isinstance(enum, Enum):
for (name, value) in enum._names.items():
self._check_and_set_pair(name, value)
else:
raise TypeError("""\
enumerations can only be created from \
sequences, mappings and other enumerations""")
| (self, enum) |
728,357 | tables.misc.enum | __iter__ | Iterate over the enumerated values.
Enumerated values are returned as (name, value) pairs *in no particular
order*.
Examples
--------
>>> enumvals = {'red': 4, 'green': 2, 'blue': 1}
>>> enum = Enum(enumvals)
>>> enumdict = dict([(name, value) for (name, value) in enum])
>>> enumvals == enumdict
True
| def __iter__(self):
"""Iterate over the enumerated values.
Enumerated values are returned as (name, value) pairs *in no particular
order*.
Examples
--------
>>> enumvals = {'red': 4, 'green': 2, 'blue': 1}
>>> enum = Enum(enumvals)
>>> enumdict = dict([(name, value) for (name, value) in enum])
>>> enumvals == enumdict
True
"""
yield from self._names.items()
| (self) |
728,358 | tables.misc.enum | __len__ | Return the number of enumerated values in the enumerated type.
Examples
--------
>>> len(Enum(['e%d' % i for i in range(10)]))
10
| def __len__(self):
"""Return the number of enumerated values in the enumerated type.
Examples
--------
>>> len(Enum(['e%d' % i for i in range(10)]))
10
"""
return len(self._names)
| (self) |
728,359 | tables.misc.enum | __ne__ | Is the `other` enumerated type different from this one?
Two enumerated types are different if they don't have exactly
the same enumerated values (i.e. with the same names and
concrete values).
Examples
--------
Let ``enum*`` be enumerated types defined as:
>>> enum1 = Enum({'T0': 0, 'T1': 2})
>>> enum2 = Enum(enum1)
>>> enum3 = Enum({'T1': 2, 'T0': 0})
>>> enum4 = Enum({'T0': 0, 'T1': 2, 'T2': 5})
>>> enum5 = Enum({'T0': 0})
>>> enum6 = Enum({'T0': 10, 'T1': 20})
then:
>>> enum1 != enum1
False
>>> enum1 != enum2 != enum3
False
>>> enum1 != enum4
True
>>> enum5 != enum1
True
>>> enum1 != enum6
True
| def __ne__(self, other):
"""Is the `other` enumerated type different from this one?
Two enumerated types are different if they don't have exactly
the same enumerated values (i.e. with the same names and
concrete values).
Examples
--------
Let ``enum*`` be enumerated types defined as:
>>> enum1 = Enum({'T0': 0, 'T1': 2})
>>> enum2 = Enum(enum1)
>>> enum3 = Enum({'T1': 2, 'T0': 0})
>>> enum4 = Enum({'T0': 0, 'T1': 2, 'T2': 5})
>>> enum5 = Enum({'T0': 0})
>>> enum6 = Enum({'T0': 10, 'T1': 20})
then:
>>> enum1 != enum1
False
>>> enum1 != enum2 != enum3
False
>>> enum1 != enum4
True
>>> enum5 != enum1
True
>>> enum1 != enum6
True
"""
return not self.__eq__(other)
| (self, other) |
728,360 | tables.misc.enum | __repr__ | Return the canonical string representation of the enumeration. The
output of this method can be evaluated to give a new enumeration object
that will compare equal to this one.
Examples
--------
>>> repr(Enum({'name': 10}))
"Enum({'name': 10})"
| def __repr__(self):
"""Return the canonical string representation of the enumeration. The
output of this method can be evaluated to give a new enumeration object
that will compare equal to this one.
Examples
--------
>>> repr(Enum({'name': 10}))
"Enum({'name': 10})"
"""
return 'Enum(%s)' % self._names
| (self) |
728,361 | tables.misc.enum | __setattr__ | This operation is forbidden. | def __setattr__(self, name, value):
"""This operation is forbidden."""
raise AttributeError("operation not allowed")
| (self, name, value) |
728,362 | tables.misc.enum | __setitem__ | This operation is forbidden. | def __setitem__(self, name, value):
"""This operation is forbidden."""
raise IndexError("operation not allowed")
| (self, name, value) |
728,363 | tables.misc.enum | _check_and_set_pair | Check validity of enumerated value and insert it into type. | def _check_and_set_pair(self, name, value):
"""Check validity of enumerated value and insert it into type."""
names = self._names
values = self._values
if not isinstance(name, str):
raise TypeError(
f"name of enumerated value is not a string: {name!r}")
if name.startswith('_'):
raise ValueError(
"name of enumerated value can not start with ``_``: %r"
% name)
# This check is only necessary with a sequence base object.
if name in names:
raise ValueError(
"enumerated values contain duplicate names: %r" % name)
# This check is only necessary with a mapping base object.
if value in values:
raise ValueError(
"enumerated values contain duplicate concrete values: %r"
% value)
names[name] = value
values[value] = name
self.__dict__[name] = value
| (self, name, value) |
728,364 | tables.atom | EnumAtom | Description of an atom of an enumerated type.
Instances of this class describe the atom type used to store enumerated
values. Those values belong to an enumerated type, defined by the first
argument (enum) in the constructor of the atom, which accepts the same
kinds of arguments as the Enum class (see :ref:`EnumClassDescr`). The
enumerated type is stored in the enum attribute of the atom.
A default value must be specified as the second argument (dflt) in the
constructor; it must be the *name* (a string) of one of the enumerated
values in the enumerated type. When the atom is created, the corresponding
concrete value is broadcast and stored in the dflt attribute (setting
different default values for items in a multidimensional atom is not
supported yet). If the name does not match any value in the enumerated
type, a KeyError is raised.
Another atom must be specified as the base argument in order to determine
the base type used for storing the values of enumerated values in memory
and disk. This *storage atom* is kept in the base attribute of the created
atom. As a shorthand, you may specify a PyTables type instead of the
storage atom, implying that this has a scalar shape.
The storage atom should be able to represent each and every concrete value
in the enumeration. If it is not, a TypeError is raised. The default value
of the storage atom is ignored.
The type attribute of enumerated atoms is always enum.
Enumerated atoms also support comparisons with other objects::
>>> enum = ['T0', 'T1', 'T2']
>>> atom1 = EnumAtom(enum, 'T0', 'int8') # same as ``atom2``
>>> atom2 = EnumAtom(enum, 'T0', Int8Atom()) # same as ``atom1``
>>> atom3 = EnumAtom(enum, 'T0', 'int16')
>>> atom4 = Int8Atom()
>>> atom1 == enum
False
>>> atom1 == atom2
True
>>> atom2 != atom1
False
>>> atom1 == atom3
False
>>> atom1 == atom4
False
>>> atom4 != atom1
True
Examples
--------
The next C enum construction::
enum myEnum {
T0,
T1,
T2
};
would correspond to the following PyTables
declaration::
>>> my_enum_atom = EnumAtom(['T0', 'T1', 'T2'], 'T0', 'int32')
Please note the dflt argument with a value of 'T0'. Since the concrete
value matching T0 is unknown right now (we have not used explicit concrete
values), using the name is the only option left for defining a default
value for the atom.
The chosen representation of values for this enumerated atom uses 32-bit
integers, which surely wastes quite a lot of memory. Another size
could be selected by using the base argument (this time with a full-blown
storage atom)::
>>> my_enum_atom = EnumAtom(['T0', 'T1', 'T2'], 'T0', UInt8Atom())
You can also define multidimensional arrays for data elements::
>>> my_enum_atom = EnumAtom(
... ['T0', 'T1', 'T2'], 'T0', base='uint32', shape=(3,2))
for 3x2 arrays of uint32.
| class EnumAtom(Atom):
"""Description of an atom of an enumerated type.
Instances of this class describe the atom type used to store enumerated
values. Those values belong to an enumerated type, defined by the first
argument (enum) in the constructor of the atom, which accepts the same
kinds of arguments as the Enum class (see :ref:`EnumClassDescr`). The
enumerated type is stored in the enum attribute of the atom.
A default value must be specified as the second argument (dflt) in the
constructor; it must be the *name* (a string) of one of the enumerated
values in the enumerated type. When the atom is created, the corresponding
concrete value is broadcast and stored in the dflt attribute (setting
different default values for items in a multidimensional atom is not
supported yet). If the name does not match any value in the enumerated
type, a KeyError is raised.
Another atom must be specified as the base argument in order to determine
the base type used for storing the values of enumerated values in memory
and disk. This *storage atom* is kept in the base attribute of the created
atom. As a shorthand, you may specify a PyTables type instead of the
storage atom, implying that this has a scalar shape.
The storage atom should be able to represent each and every concrete value
in the enumeration. If it is not, a TypeError is raised. The default value
of the storage atom is ignored.
The type attribute of enumerated atoms is always enum.
Enumerated atoms also support comparisons with other objects::
>>> enum = ['T0', 'T1', 'T2']
>>> atom1 = EnumAtom(enum, 'T0', 'int8') # same as ``atom2``
>>> atom2 = EnumAtom(enum, 'T0', Int8Atom()) # same as ``atom1``
>>> atom3 = EnumAtom(enum, 'T0', 'int16')
>>> atom4 = Int8Atom()
>>> atom1 == enum
False
>>> atom1 == atom2
True
>>> atom2 != atom1
False
>>> atom1 == atom3
False
>>> atom1 == atom4
False
>>> atom4 != atom1
True
Examples
--------
The next C enum construction::
enum myEnum {
T0,
T1,
T2
};
would correspond to the following PyTables
declaration::
>>> my_enum_atom = EnumAtom(['T0', 'T1', 'T2'], 'T0', 'int32')
Please note the dflt argument with a value of 'T0'. Since the concrete
value matching T0 is unknown right now (we have not used explicit concrete
values), using the name is the only option left for defining a default
value for the atom.
The chosen representation of values for this enumerated atom uses 32-bit
integers, which surely wastes quite a lot of memory. Another size
could be selected by using the base argument (this time with a full-blown
storage atom)::
>>> my_enum_atom = EnumAtom(['T0', 'T1', 'T2'], 'T0', UInt8Atom())
You can also define multidimensional arrays for data elements::
>>> my_enum_atom = EnumAtom(
... ['T0', 'T1', 'T2'], 'T0', base='uint32', shape=(3,2))
for 3x2 arrays of uint32.
"""
# Registering this class in the class map may be a little wrong,
# since the ``Atom.from_kind()`` method fails miserably with
# enumerations, as they don't support an ``itemsize`` argument.
# However, resetting ``__metaclass__`` to ``type`` doesn't seem to
# work and I don't feel like creating a subclass of ``MetaAtom``.
kind = 'enum'
type = 'enum'
@property
def itemsize(self):
"""Size in bytes of a single item in the atom."""
return self.dtype.base.itemsize
def _checkbase(self, base):
"""Check the `base` storage atom."""
if base.kind == 'enum':
raise TypeError("can not use an enumerated atom "
"as a storage atom: %r" % base)
# Check whether the storage atom can represent concrete values
# in the enumeration...
basedtype = base.dtype
pyvalues = [value for (name, value) in self.enum]
try:
npgenvalues = np.array(pyvalues)
except ValueError:
raise TypeError("concrete values are not uniformly-shaped")
try:
npvalues = np.array(npgenvalues, dtype=basedtype.base)
except ValueError:
raise TypeError("storage atom type is incompatible with "
"concrete values in the enumeration")
if npvalues.shape[1:] != basedtype.shape:
raise TypeError("storage atom shape does not match that of "
"concrete values in the enumeration")
if npvalues.tolist() != npgenvalues.tolist():
raise TypeError("storage atom type lacks precision for "
"concrete values in the enumeration")
# ...with some implementation limitations.
if npvalues.dtype.kind not in ['i', 'u']:
raise NotImplementedError("only integer concrete values "
"are supported for the moment, sorry")
if len(npvalues.shape) > 1:
raise NotImplementedError("only scalar concrete values "
"are supported for the moment, sorry")
def _get_init_args(self):
"""Get a dictionary of instance constructor arguments."""
return dict(enum=self.enum, dflt=self._defname,
base=self.base, shape=self.shape)
def _is_equal_to_atom(self, atom):
"""Is this object equal to the given `atom`?"""
return False
def _is_equal_to_enumatom(self, enumatom):
"""Is this object equal to the given `enumatom`?"""
return (self.enum == enumatom.enum and self.shape == enumatom.shape
and np.all(self.dflt == enumatom.dflt)
and self.base == enumatom.base)
def __init__(self, enum, dflt, base, shape=()):
if not isinstance(enum, Enum):
enum = Enum(enum)
self.enum = enum
if isinstance(base, str):
base = Atom.from_type(base)
self._checkbase(base)
self.base = base
default = enum[dflt] # check default value
self._defname = dflt # kept for representation purposes
# These are kept to ease dumping this particular
# representation of the enumeration to storage.
names, values = [], []
for (name, value) in enum:
names.append(name)
values.append(value)
basedtype = self.base.dtype
self._names = names
self._values = np.array(values, dtype=basedtype.base)
Atom.__init__(self, basedtype, shape, default)
def __repr__(self):
return ('EnumAtom(enum=%r, dflt=%r, base=%r, shape=%r)'
% (self.enum, self._defname, self.base, self.shape))
__eq__ = _cmp_dispatcher('_is_equal_to_enumatom')
# XXX: API incompatible change for PyTables 3 line
# Overriding __eq__ blocks inheritance of __hash__ in 3.x
# def __hash__(self):
# return hash((self.__class__, self.enum, self.shape, self.dflt,
# self.base))
| (enum, dflt, base, shape=()) |
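A quick sanity check of the attributes described above (enum, dflt and base); the comments assume the usual 0, 1, 2 assignment that Enum makes for a plain sequence:

import tables as tb

atom = tb.EnumAtom(['T0', 'T1', 'T2'], 'T0', base='uint8')
print(atom.type)    # 'enum'
print(atom.kind)    # 'enum'
print(atom.base)    # the storage atom, a UInt8Atom here
print(atom.enum)    # the Enum instance kept by the atom
print(atom.dflt)    # concrete value assigned to the 'T0' name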
728,366 | tables.atom | __init__ | null | def __init__(self, enum, dflt, base, shape=()):
if not isinstance(enum, Enum):
enum = Enum(enum)
self.enum = enum
if isinstance(base, str):
base = Atom.from_type(base)
self._checkbase(base)
self.base = base
default = enum[dflt] # check default value
self._defname = dflt # kept for representation purposes
# These are kept to ease dumping this particular
# representation of the enumeration to storage.
names, values = [], []
for (name, value) in enum:
names.append(name)
values.append(value)
basedtype = self.base.dtype
self._names = names
self._values = np.array(values, dtype=basedtype.base)
Atom.__init__(self, basedtype, shape, default)
| (self, enum, dflt, base, shape=()) |
728,368 | tables.atom | __repr__ | null | def __repr__(self):
return ('EnumAtom(enum=%r, dflt=%r, base=%r, shape=%r)'
% (self.enum, self._defname, self.base, self.shape))
| (self) |
728,369 | tables.atom | _checkbase | Check the `base` storage atom. | def _checkbase(self, base):
"""Check the `base` storage atom."""
if base.kind == 'enum':
raise TypeError("can not use an enumerated atom "
"as a storage atom: %r" % base)
# Check whether the storage atom can represent concrete values
# in the enumeration...
basedtype = base.dtype
pyvalues = [value for (name, value) in self.enum]
try:
npgenvalues = np.array(pyvalues)
except ValueError:
raise TypeError("concrete values are not uniformly-shaped")
try:
npvalues = np.array(npgenvalues, dtype=basedtype.base)
except ValueError:
raise TypeError("storage atom type is incompatible with "
"concrete values in the enumeration")
if npvalues.shape[1:] != basedtype.shape:
raise TypeError("storage atom shape does not match that of "
"concrete values in the enumeration")
if npvalues.tolist() != npgenvalues.tolist():
raise TypeError("storage atom type lacks precision for "
"concrete values in the enumeration")
# ...with some implementation limitations.
if npvalues.dtype.kind not in ['i', 'u']:
raise NotImplementedError("only integer concrete values "
"are supported for the moment, sorry")
if len(npvalues.shape) > 1:
raise NotImplementedError("only scalar concrete values "
"are supported for the moment, sorry")
| (self, base) |
728,370 | tables.atom | _get_init_args | Get a dictionary of instance constructor arguments. | def _get_init_args(self):
"""Get a dictionary of instance constructor arguments."""
return dict(enum=self.enum, dflt=self._defname,
base=self.base, shape=self.shape)
| (self) |
728,371 | tables.atom | _is_equal_to_atom | Is this object equal to the given `atom`? | def _is_equal_to_atom(self, atom):
"""Is this object equal to the given `atom`?"""
return False
| (self, atom) |
728,372 | tables.atom | _is_equal_to_enumatom | Is this object equal to the given `enumatom`? | def _is_equal_to_enumatom(self, enumatom):
"""Is this object equal to the given `enumatom`?"""
return (self.enum == enumatom.enum and self.shape == enumatom.shape
and np.all(self.dflt == enumatom.dflt)
and self.base == enumatom.base)
| (self, enumatom) |
728,374 | tables.description | EnumCol | Defines a non-nested column of a particular type.
The constructor accepts the same arguments as the equivalent
`Atom` class, plus an additional ``pos`` argument for
position information, which is assigned to the `_v_pos`
attribute, and an ``attrs`` argument for storing additional metadata
similar to `table.attrs`, which is assigned to the `_v_col_attrs`
attribute.
| from tables.description import EnumCol
| (*args, **kwargs) |
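A hedged sketch of EnumCol inside a table description; the file name, column names and the ``pos`` values are made up for illustration, and the enumerated value is written through Table.get_enum():

import tables as tb

class Reading(tb.IsDescription):
    kind = tb.EnumCol(['ambient', 'cpu', 'gpu'], 'ambient', base='uint8', pos=0)
    value = tb.Float32Col(pos=1)

with tb.open_file('sensors.h5', 'w') as h5:
    table = h5.create_table('/', 'readings', Reading)
    kinds = table.get_enum('kind')      # recover the Enum attached to the column
    row = table.row
    row['kind'] = kinds['cpu']          # store the concrete value, not the name
    row['value'] = 42.5
    row.append()
    table.flush()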
728,382 | tables.description | _is_equal_to_enumatom | Is this object equal to the given `enumatom`? | def same_position(oldmethod):
"""Decorate `oldmethod` to also compare the `_v_pos` attribute."""
def newmethod(self, other):
try:
other._v_pos
except AttributeError:
return False # not a column definition
return self._v_pos == other._v_pos and oldmethod(self, other)
newmethod.__name__ = oldmethod.__name__
newmethod.__doc__ = oldmethod.__doc__
return newmethod
| (self, other) |
728,384 | tables.exceptions | ExperimentalFeatureWarning | Generic warning for experimental features.
This warning is issued when using a functionality that is still
experimental and that users have to use with care.
| class ExperimentalFeatureWarning(Warning):
"""Generic warning for experimental features.
This warning is issued when using a functionality that is still
experimental and that users have to use with care.
"""
pass
| null |
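Since this is a plain Warning subclass, it can be managed with the standard warnings machinery; a minimal sketch:

import warnings
from tables.exceptions import ExperimentalFeatureWarning

# Silence the warning, or turn it into an exception during testing.
warnings.filterwarnings('ignore', category=ExperimentalFeatureWarning)
# warnings.simplefilter('error', ExperimentalFeatureWarning)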
728,385 | tables.expression | Expr | A class for evaluating expressions with arbitrary array-like objects.
Expr is a class for evaluating expressions containing array-like objects.
With it, you can evaluate expressions (like "3 * a + 4 * b") that
operate on arbitrary large arrays while optimizing the resources
required to perform them (basically main memory and CPU cache memory).
It is similar to the Numexpr package (see :ref:`[NUMEXPR] <NUMEXPR>`),
but in addition to NumPy objects, it also accepts disk-based homogeneous
arrays, like the Array, CArray, EArray and Column PyTables objects.
.. warning::
Expr class only offers a subset of the Numexpr features due to the
complexity of implementing some of them when dealing with huge amounts of
data.
All the internal computations are performed via the Numexpr package,
so all the broadcast and upcasting rules of Numexpr apply here too.
These rules are very similar to the NumPy ones, but with some exceptions
due to the particularities of having to deal with potentially very large
disk-based arrays. Be sure to read the documentation of the Expr
constructor and methods as well as that of Numexpr, if you want to fully
grasp these particularities.
Parameters
----------
expr : str
This specifies the expression to be evaluated, such as "2 * a + 3 * b".
uservars : dict
This can be used to define the variable names appearing in *expr*.
This mapping should consist of identifier-like strings pointing to any
`Array`, `CArray`, `EArray`, `Column` or NumPy ndarray instances (or
even other objects that PyTables will try to convert to ndarrays). When
`uservars` is not provided or `None`, the current local and global
namespace is sought instead of `uservars`. It is also possible to pass
just some of the variables in expression via the `uservars` mapping,
and the rest will be retrieved from the current local and global
namespaces.
kwargs : dict
This is meant to pass additional parameters to the Numexpr kernel.
This is basically the same as the kwargs argument in
Numexpr.evaluate(), and is mainly meant for advanced use.
Examples
--------
The following shows an example of using Expr::
>>> f = tb.open_file('/tmp/test_expr.h5', 'w')
>>> a = f.create_array('/', 'a', np.array([1,2,3]))
>>> b = f.create_array('/', 'b', np.array([3,4,5]))
>>> c = np.array([4,5,6])
>>> expr = tb.Expr("2 * a + b * c") # initialize the expression
>>> expr.eval() # evaluate it
array([14, 24, 36], dtype=int64)
>>> sum(expr) # use as an iterator
74
where you can see that you can mix different containers in
the expression (whenever shapes are consistent).
You can also work with multidimensional arrays::
>>> a2 = f.create_array('/', 'a2', np.array([[1,2],[3,4]]))
>>> b2 = f.create_array('/', 'b2', np.array([[3,4],[5,6]]))
>>> c2 = np.array([4,5]) # This will be broadcasted
>>> expr = tb.Expr("2 * a2 + b2-c2")
>>> expr.eval()
array([[1, 3],
[7, 9]], dtype=int64)
>>> sum(expr)
array([ 8, 12], dtype=int64)
>>> f.close()
.. rubric:: Expr attributes
.. attribute:: append_mode
The append mode for user-provided output containers.
.. attribute:: maindim
Common main dimension for inputs in expression.
.. attribute:: names
The names of variables in expression (list).
.. attribute:: out
The user-provided container (if any) for the expression outcome.
.. attribute:: o_start
The start range selection for the user-provided output.
.. attribute:: o_stop
The stop range selection for the user-provided output.
.. attribute:: o_step
The step range selection for the user-provided output.
.. attribute:: shape
Common shape for the arrays in expression.
.. attribute:: values
The values of variables in expression (list).
| class Expr:
"""A class for evaluating expressions with arbitrary array-like objects.
Expr is a class for evaluating expressions containing array-like objects.
With it, you can evaluate expressions (like "3 * a + 4 * b") that
operate on arbitrary large arrays while optimizing the resources
required to perform them (basically main memory and CPU cache memory).
It is similar to the Numexpr package (see :ref:`[NUMEXPR] <NUMEXPR>`),
but in addition to NumPy objects, it also accepts disk-based homogeneous
arrays, like the Array, CArray, EArray and Column PyTables objects.
.. warning::
Expr class only offers a subset of the Numexpr features due to the
complexity of implementing some of them when dealing with huge amounts of
data.
All the internal computations are performed via the Numexpr package,
so all the broadcast and upcasting rules of Numexpr apply here too.
These rules are very similar to the NumPy ones, but with some exceptions
due to the particularities of having to deal with potentially very large
disk-based arrays. Be sure to read the documentation of the Expr
constructor and methods as well as that of Numexpr, if you want to fully
grasp these particularities.
Parameters
----------
expr : str
This specifies the expression to be evaluated, such as "2 * a + 3 * b".
uservars : dict
This can be used to define the variable names appearing in *expr*.
This mapping should consist of identifier-like strings pointing to any
`Array`, `CArray`, `EArray`, `Column` or NumPy ndarray instances (or
even other objects that PyTables will try to convert to ndarrays). When
`uservars` is not provided or `None`, the current local and global
namespace is sought instead of `uservars`. It is also possible to pass
just some of the variables in expression via the `uservars` mapping,
and the rest will be retrieved from the current local and global
namespaces.
kwargs : dict
This is meant to pass additional parameters to the Numexpr kernel.
This is basically the same as the kwargs argument in
Numexpr.evaluate(), and is mainly meant for advanced use.
Examples
--------
The following shows an example of using Expr::
>>> f = tb.open_file('/tmp/test_expr.h5', 'w')
>>> a = f.create_array('/', 'a', np.array([1,2,3]))
>>> b = f.create_array('/', 'b', np.array([3,4,5]))
>>> c = np.array([4,5,6])
>>> expr = tb.Expr("2 * a + b * c") # initialize the expression
>>> expr.eval() # evaluate it
array([14, 24, 36], dtype=int64)
>>> sum(expr) # use as an iterator
74
where you can see that you can mix different containers in
the expression (whenever shapes are consistent).
You can also work with multidimensional arrays::
>>> a2 = f.create_array('/', 'a2', np.array([[1,2],[3,4]]))
>>> b2 = f.create_array('/', 'b2', np.array([[3,4],[5,6]]))
>>> c2 = np.array([4,5]) # This will be broadcasted
>>> expr = tb.Expr("2 * a2 + b2-c2")
>>> expr.eval()
array([[1, 3],
[7, 9]], dtype=int64)
>>> sum(expr)
array([ 8, 12], dtype=int64)
>>> f.close()
.. rubric:: Expr attributes
.. attribute:: append_mode
The append mode for user-provided output containers.
.. attribute:: maindim
Common main dimension for inputs in expression.
.. attribute:: names
The names of variables in expression (list).
.. attribute:: out
The user-provided container (if any) for the expression outcome.
.. attribute:: o_start
The start range selection for the user-provided output.
.. attribute:: o_stop
The stop range selection for the user-provided output.
.. attribute:: o_step
The step range selection for the user-provided output.
.. attribute:: shape
Common shape for the arrays in expression.
.. attribute:: values
The values of variables in expression (list).
"""
_exprvars_cache = {}
"""Cache of variables participating in expressions.
.. versionadded:: 3.0
"""
def __init__(self, expr, uservars=None, **kwargs):
self.append_mode = False
"""The append mode for user-provided output containers."""
self.maindim = 0
"""Common main dimension for inputs in expression."""
self.names = []
"""The names of variables in expression (list)."""
self.out = None
"""The user-provided container (if any) for the expression outcome."""
self.o_start = None
"""The start range selection for the user-provided output."""
self.o_stop = None
"""The stop range selection for the user-provided output."""
self.o_step = None
"""The step range selection for the user-provided output."""
self.shape = None
"""Common shape for the arrays in expression."""
self.start, self.stop, self.step = (None,) * 3
self.start = None
"""The start range selection for the input."""
self.stop = None
"""The stop range selection for the input."""
self.step = None
"""The step range selection for the input."""
self.values = []
"""The values of variables in expression (list)."""
self._compiled_expr = None
"""The compiled expression."""
self._single_row_out = None
"""A sample of the output with just a single row."""
# First, get the signature for the arrays in expression
vars_ = self._required_expr_vars(expr, uservars)
context = ne.necompiler.getContext(kwargs)
self.names, _ = ne.necompiler.getExprNames(expr, context)
# Raise a TypeError in case we have unsupported objects
for name, var in vars_.items():
if type(var) in (int, float, str):
continue
if not isinstance(var, (tb.Leaf, tb.Column)):
if hasattr(var, "dtype"):
# Quacks like a NumPy object
continue
raise TypeError("Unsupported variable type: %r" % var)
objname = var.__class__.__name__
if objname not in ("Array", "CArray", "EArray", "Column"):
raise TypeError("Unsupported variable type: %r" % var)
# NumPy arrays to be copied? (we don't need to worry about
# PyTables objects, as the reads always return contiguous and
# aligned objects, or at least I think so).
for name, var in vars_.items():
if isinstance(var, np.ndarray):
# See numexpr.necompiler.evaluate for a rationale
# of the code below
if not var.flags.aligned:
if var.ndim != 1:
# Do a copy of this variable
var = var.copy()
# Update the vars_ dictionary
vars_[name] = var
# Get the variables and types
values = self.values
types_ = []
for name in self.names:
value = vars_[name]
if hasattr(value, 'atom'):
types_.append(value.atom)
elif hasattr(value, 'dtype'):
types_.append(value)
else:
# try to convert into a NumPy array
value = np.array(value)
types_.append(value)
values.append(value)
# Create a signature for the expression
signature = [(name, ne.necompiler.getType(type_))
for (name, type_) in zip(self.names, types_)]
# Compile the expression
self._compiled_expr = ne.necompiler.NumExpr(expr, signature, **kwargs)
# Guess the shape for the outcome and the maindim of inputs
self.shape, self.maindim = self._guess_shape()
# The next method is similar to their counterpart in `Table`, but
# adapted to the `Expr` own requirements.
def _required_expr_vars(self, expression, uservars, depth=2):
"""Get the variables required by the `expression`.
A new dictionary defining the variables used in the `expression`
is returned. Required variables are first looked up in the
`uservars` mapping, then in the set of top-level columns of the
table. Unknown variables cause a `NameError` to be raised.
When `uservars` is `None`, the local and global namespace where
the API callable which uses this method is called is sought
instead. To disable this mechanism, just specify a mapping as
`uservars`.
Nested columns and variables with an ``uint64`` type are not
allowed (`TypeError` and `NotImplementedError` are raised,
respectively).
`depth` specifies the depth of the frame in order to reach local
or global variables.
"""
# Get the names of variables used in the expression.
exprvars_cache = self._exprvars_cache
if expression not in exprvars_cache:
# Protection against growing the cache too much
if len(exprvars_cache) > 256:
# Remove 10 (arbitrary) elements from the cache
for k in list(exprvars_cache)[:10]:
del exprvars_cache[k]
cexpr = compile(expression, '<string>', 'eval')
exprvars = [var for var in cexpr.co_names
if var not in ['None', 'False', 'True']
and var not in ne.expressions.functions]
exprvars_cache[expression] = exprvars
else:
exprvars = exprvars_cache[expression]
# Get the local and global variable mappings of the user frame
# if no mapping has been explicitly given for user variables.
user_locals, user_globals = {}, {}
if uservars is None:
user_frame = sys._getframe(depth)
user_locals = user_frame.f_locals
user_globals = user_frame.f_globals
# Look for the required variables first among the ones
# explicitly provided by the user.
reqvars = {}
for var in exprvars:
# Get the value.
if uservars is not None and var in uservars:
val = uservars[var]
elif uservars is None and var in user_locals:
val = user_locals[var]
elif uservars is None and var in user_globals:
val = user_globals[var]
else:
raise NameError("name ``%s`` is not defined" % var)
# Check the value.
if hasattr(val, 'dtype') and val.dtype.str[1:] == 'u8':
raise NotImplementedError(
"variable ``%s`` refers to "
"a 64-bit unsigned integer object, that is "
"not yet supported in expressions, sorry; " % var)
elif hasattr(val, '_v_colpathnames'): # nested column
# This branch is never reached because the compile step
# above already raises a ``TypeError`` for nested
# columns, but that could change in the future. So it
# is best to leave this here.
raise TypeError(
"variable ``%s`` refers to a nested column, "
"not allowed in expressions" % var)
reqvars[var] = val
return reqvars
def set_inputs_range(self, start=None, stop=None, step=None):
"""Define a range for all inputs in expression.
The computation will only take place for the range defined by
the start, stop and step parameters in the main dimension of
inputs (or the leading one, if the object lacks the concept of
main dimension, like a NumPy container). If no common main
dimension exists for all inputs, the leading dimension will be
used instead.
"""
self.start = start
self.stop = stop
self.step = step
def set_output(self, out, append_mode=False):
"""Set out as container for output as well as the append_mode.
The out must be a container that is meant to keep the outcome of
the expression. It should be a homogeneous type container and
can typically be an Array, CArray, EArray, Column or a NumPy ndarray.
The append_mode specifies the way in which the output is filled.
If true, the rows of the outcome are *appended* to the out container.
Of course, for doing this it is necessary that out have an
append() method (like an EArray, for example).
If append_mode is false, the output is set via the __setitem__()
method (see the Expr.set_output_range() for info on how to select
the rows to be updated). If out is smaller than what is required
by the expression, only the computations that are needed to fill
up the container are carried out. If it is larger, the excess
elements are unaffected.
"""
if not (hasattr(out, "shape") and hasattr(out, "__setitem__")):
raise ValueError(
"You need to pass a settable multidimensional container "
"as output")
self.out = out
if append_mode and not hasattr(out, "append"):
raise ValueError(
"For activating the ``append`` mode, you need a container "
"with an `append()` method (like the `EArray`)")
self.append_mode = append_mode
def set_output_range(self, start=None, stop=None, step=None):
"""Define a range for user-provided output object.
The output object will only be modified in the range specified by the
start, stop and step parameters in the main dimension of output (or the
leading one, if the object does not have the concept of main dimension,
like a NumPy container).
"""
if self.out is None:
raise IndexError(
"You need to pass an output object to `setOut()` first")
self.o_start = start
self.o_stop = stop
self.o_step = step
# Although the next code is similar to the method in `Leaf`, it
# allows the use of pure NumPy objects.
def _calc_nrowsinbuf(self, object_):
"""Calculate the number of rows that will fit in a buffer."""
# Compute the rowsize for the *leading* dimension
shape_ = list(object_.shape)
if shape_:
shape_[0] = 1
rowsize = np.prod(shape_) * object_.dtype.itemsize
# Compute the nrowsinbuf
# Multiplying the I/O buffer size by 4 gives optimal results
# in my benchmarks with `tables.Expr` (see ``bench/poly.py``)
buffersize = IO_BUFFER_SIZE * 4
nrowsinbuf = buffersize // rowsize
# Safeguard against row sizes being extremely large
if nrowsinbuf == 0:
nrowsinbuf = 1
# If rowsize is too large, issue a Performance warning
maxrowsize = BUFFER_TIMES * buffersize
if rowsize > maxrowsize:
warnings.warn("""\
The object ``%s`` is exceeding the maximum recommended rowsize (%d
bytes); be ready to see PyTables asking for *lots* of memory and
possibly slow I/O. You may want to reduce the rowsize by trimming the
value of dimensions that are orthogonal (and preferably close) to the
*leading* dimension of this object."""
% (object_, maxrowsize),
PerformanceWarning)
return nrowsinbuf
def _guess_shape(self):
"""Guess the shape of the output of the expression."""
# First, compute the maximum dimension of inputs and maindim
# (if it exists)
maxndim = 0
maindims = []
for val in self.values:
# Track the maximum number of dimensions seen so far
if len(val.shape) > maxndim:
maxndim = len(val.shape)
if hasattr(val, "maindim"):
maindims.append(val.maindim)
if maxndim == 0:
self._single_row_out = out = self._compiled_expr(*self.values)
return (), None
if maindims and [maindims[0]] * len(maindims) == maindims:
# If all maindims detected are the same, use this as maindim
maindim = maindims[0]
else:
# If not, the main dimension will be the default one
maindim = 0
# The slices parameter for inputs
slices = (slice(None),) * maindim + (0,)
# Now, collect the values in first row of arrays with maximum dims
vals = []
lens = []
for val in self.values:
shape = val.shape
# Warning: don't use len(val) below or it will raise an
# `Overflow` error on 32-bit platforms for large enough arrays.
if shape != () and shape[maindim] == 0:
vals.append(val[:])
lens.append(0)
elif len(shape) < maxndim:
vals.append(val)
else:
vals.append(val.__getitem__(slices))
lens.append(shape[maindim])
minlen = min(lens)
self._single_row_out = out = self._compiled_expr(*vals)
shape = list(out.shape)
if minlen > 0:
shape.insert(maindim, minlen)
return shape, maindim
def _get_info(self, shape, maindim, itermode=False):
"""Return various info needed for evaluating the computation loop."""
# Compute the shape of the resulting container having
# in account new possible values of start, stop and step in
# the inputs range
if maindim is not None:
(start, stop, step) = slice(
self.start, self.stop, self.step).indices(shape[maindim])
shape[maindim] = min(
shape[maindim], len(range(start, stop, step)))
i_nrows = shape[maindim]
else:
start, stop, step = 0, 0, None
i_nrows = 0
if not itermode:
# Create a container for output if not defined yet
o_maindim = 0 # Default maindim
if self.out is None:
out = np.empty(shape, dtype=self._single_row_out.dtype)
# Get the trivial values for start, stop and step
if maindim is not None:
(o_start, o_stop, o_step) = (0, shape[maindim], 1)
else:
(o_start, o_stop, o_step) = (0, 0, 1)
else:
out = self.out
# Out container already provided. Do some sanity checks.
if hasattr(out, "maindim"):
o_maindim = out.maindim
# Refine the shape of the resulting container having in
# account new possible values of start, stop and step in
# the output range
o_shape = list(out.shape)
s = slice(self.o_start, self.o_stop, self.o_step)
o_start, o_stop, o_step = s.indices(o_shape[o_maindim])
o_shape[o_maindim] = min(o_shape[o_maindim],
len(range(o_start, o_stop, o_step)))
# Check that the shape of output is consistent with inputs
tr_oshape = list(o_shape) # this implies a copy
olen_ = tr_oshape.pop(o_maindim)
tr_shape = list(shape) # do a copy
if maindim is not None:
len_ = tr_shape.pop(o_maindim)
else:
len_ = 1
if tr_oshape != tr_shape:
raise ValueError(
"Shape for out container does not match expression")
# Force the input length to fit in `out`
if not self.append_mode and olen_ < len_:
shape[o_maindim] = olen_
stop = start + olen_
# Get the positions of inputs that should be sliced (the others
# will be broadcasted)
ndim = len(shape)
slice_pos = [i for i, val in enumerate(self.values)
if len(val.shape) == ndim]
# The size of the I/O buffer
nrowsinbuf = 1
for i, val in enumerate(self.values):
# Skip scalar values in variables
if i in slice_pos:
nrows = self._calc_nrowsinbuf(val)
if nrows > nrowsinbuf:
nrowsinbuf = nrows
if not itermode:
return (i_nrows, slice_pos, start, stop, step, nrowsinbuf,
out, o_maindim, o_start, o_stop, o_step)
else:
# For itermode, we don't need the out info
return (i_nrows, slice_pos, start, stop, step, nrowsinbuf)
def eval(self):
"""Evaluate the expression and return the outcome.
For performance reasons, the computation order tries to go along
the common main dimension of all inputs. If no such common main
dimension is found, the iteration will go along the leading dimension
instead.
For non-consistent shapes in inputs (i.e. shapes having a different
number of dimensions), the regular NumPy broadcast rules apply.
There is one exception to this rule though: when the dimensions
orthogonal to the main dimension of the expression are consistent, but
the main dimension itself differs among the inputs, then the shortest
one is chosen for doing the computations. This is so because trying to
expand very large on-disk arrays could be too expensive or simply not
possible.
Also, the regular Numexpr casting rules (which are similar to those of
NumPy, although you should check the Numexpr manual for the exceptions)
are applied to determine the output type.
Finally, if the set_output() method specifying a user container has
already been called, the output is sent to this user-provided
container. If not, a fresh NumPy container is returned instead.
.. warning::
When dealing with large on-disk inputs, failing to specify an
on-disk container may consume all your available memory.
"""
values, shape, maindim = self.values, self.shape, self.maindim
# Get different info we need for the main computation loop
(i_nrows, slice_pos, start, stop, step, nrowsinbuf,
out, o_maindim, o_start, o_stop, o_step) = \
self._get_info(shape, maindim)
if i_nrows == 0:
# No elements to compute
if start >= stop and self.start is not None:
return out
else:
return self._single_row_out
# Create a key that selects every element in inputs and output
# (including the main dimension)
i_slices = [slice(None)] * (maindim + 1)
o_slices = [slice(None)] * (o_maindim + 1)
# This is a hack to prevent doing unnecessary flavor conversions
# while reading buffers
for val in values:
if hasattr(val, 'maindim'):
val._v_convert = False
# Start the computation itself
for start2 in range(start, stop, step * nrowsinbuf):
stop2 = start2 + step * nrowsinbuf
if stop2 > stop:
stop2 = stop
# Set the proper slice for inputs
i_slices[maindim] = slice(start2, stop2, step)
# Get the input values
vals = []
for i, val in enumerate(values):
if i in slice_pos:
vals.append(val.__getitem__(tuple(i_slices)))
else:
# A read of values is apparently not needed, as PyTables
# leaves seem to work just fine inside Numexpr
vals.append(val)
# Do the actual computation for this slice
rout = self._compiled_expr(*vals)
# Set the values into the out buffer
if self.append_mode:
out.append(rout)
else:
# Compute the slice to be filled in output
start3 = o_start + (start2 - start) // step
stop3 = start3 + nrowsinbuf * o_step
if stop3 > o_stop:
stop3 = o_stop
o_slices[o_maindim] = slice(start3, stop3, o_step)
# Set the slice
out[tuple(o_slices)] = rout
# Activate the conversion again (default)
for val in values:
if hasattr(val, 'maindim'):
val._v_convert = True
return out
def __iter__(self):
"""Iterate over the rows of the outcome of the expression.
This iterator always returns rows as NumPy objects, so a possible out
container specified in :meth:`Expr.set_output` method is ignored here.
"""
values, shape, maindim = self.values, self.shape, self.maindim
# Get different info we need for the main computation loop
(i_nrows, slice_pos, start, stop, step, nrowsinbuf) = \
self._get_info(shape, maindim, itermode=True)
if i_nrows == 0:
# No elements to compute
return
# Create a key that selects every element in inputs
# (including the main dimension)
i_slices = [slice(None)] * (maindim + 1)
# This is a hack to prevent doing unnecessary flavor conversions
# while reading buffers
for val in values:
if hasattr(val, 'maindim'):
val._v_convert = False
# Start the computation itself
for start2 in range(start, stop, step * nrowsinbuf):
stop2 = start2 + step * nrowsinbuf
if stop2 > stop:
stop2 = stop
# Set the proper slice in the main dimension
i_slices[maindim] = slice(start2, stop2, step)
# Get the values for computing the buffer
vals = []
for i, val in enumerate(values):
if i in slice_pos:
vals.append(val.__getitem__(tuple(i_slices)))
else:
# A read of values is apparently not needed, as PyTables
# leaves seem to work just fine inside Numexpr
vals.append(val)
# Do the actual computation
rout = self._compiled_expr(*vals)
# Return one row per call
yield from rout
# Activate the conversion again (default)
for val in values:
if hasattr(val, 'maindim'):
val._v_convert = True
| (expr, uservars=None, **kwargs) |
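The uservars mapping described above can bind expression names explicitly instead of relying on the caller's namespace; a minimal sketch with made-up names:

import numpy as np
import tables as tb

a = np.arange(10)
b = np.arange(10, 20)

# 'x' and 'y' only exist inside the expression; they are resolved
# through the explicit mapping, not through locals() or globals().
expr = tb.Expr('2 * x + y', uservars={'x': a, 'y': b})
result = expr.eval()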
728,386 | tables.expression | __init__ | null | def __init__(self, expr, uservars=None, **kwargs):
self.append_mode = False
"""The append mode for user-provided output containers."""
self.maindim = 0
"""Common main dimension for inputs in expression."""
self.names = []
"""The names of variables in expression (list)."""
self.out = None
"""The user-provided container (if any) for the expression outcome."""
self.o_start = None
"""The start range selection for the user-provided output."""
self.o_stop = None
"""The stop range selection for the user-provided output."""
self.o_step = None
"""The step range selection for the user-provided output."""
self.shape = None
"""Common shape for the arrays in expression."""
self.start, self.stop, self.step = (None,) * 3
self.start = None
"""The start range selection for the input."""
self.stop = None
"""The stop range selection for the input."""
self.step = None
"""The step range selection for the input."""
self.values = []
"""The values of variables in expression (list)."""
self._compiled_expr = None
"""The compiled expression."""
self._single_row_out = None
"""A sample of the output with just a single row."""
# First, get the signature for the arrays in expression
vars_ = self._required_expr_vars(expr, uservars)
context = ne.necompiler.getContext(kwargs)
self.names, _ = ne.necompiler.getExprNames(expr, context)
# Raise a TypeError in case we have unsupported objects
for name, var in vars_.items():
if type(var) in (int, float, str):
continue
if not isinstance(var, (tb.Leaf, tb.Column)):
if hasattr(var, "dtype"):
# Quacks like a NumPy object
continue
raise TypeError("Unsupported variable type: %r" % var)
objname = var.__class__.__name__
if objname not in ("Array", "CArray", "EArray", "Column"):
raise TypeError("Unsupported variable type: %r" % var)
# NumPy arrays to be copied? (we don't need to worry about
# PyTables objects, as the reads always return contiguous and
# aligned objects, or at least I think so).
for name, var in vars_.items():
if isinstance(var, np.ndarray):
# See numexpr.necompiler.evaluate for a rationale
# of the code below
if not var.flags.aligned:
if var.ndim != 1:
# Do a copy of this variable
var = var.copy()
# Update the vars_ dictionary
vars_[name] = var
# Get the variables and types
values = self.values
types_ = []
for name in self.names:
value = vars_[name]
if hasattr(value, 'atom'):
types_.append(value.atom)
elif hasattr(value, 'dtype'):
types_.append(value)
else:
# try to convert into a NumPy array
value = np.array(value)
types_.append(value)
values.append(value)
# Create a signature for the expression
signature = [(name, ne.necompiler.getType(type_))
for (name, type_) in zip(self.names, types_)]
# Compile the expression
self._compiled_expr = ne.necompiler.NumExpr(expr, signature, **kwargs)
# Guess the shape for the outcome and the maindim of inputs
self.shape, self.maindim = self._guess_shape()
| (self, expr, uservars=None, **kwargs) |
728,387 | tables.expression | __iter__ | Iterate over the rows of the outcome of the expression.
This iterator always returns rows as NumPy objects, so a possible out
container specified in :meth:`Expr.set_output` method is ignored here.
| def __iter__(self):
"""Iterate over the rows of the outcome of the expression.
This iterator always returns rows as NumPy objects, so a possible out
container specified in :meth:`Expr.set_output` method is ignored here.
"""
values, shape, maindim = self.values, self.shape, self.maindim
# Get different info we need for the main computation loop
(i_nrows, slice_pos, start, stop, step, nrowsinbuf) = \
self._get_info(shape, maindim, itermode=True)
if i_nrows == 0:
# No elements to compute
return
# Create a key that selects every element in inputs
# (including the main dimension)
i_slices = [slice(None)] * (maindim + 1)
# This is a hack to prevent doing unnecessary flavor conversions
# while reading buffers
for val in values:
if hasattr(val, 'maindim'):
val._v_convert = False
# Start the computation itself
for start2 in range(start, stop, step * nrowsinbuf):
stop2 = start2 + step * nrowsinbuf
if stop2 > stop:
stop2 = stop
# Set the proper slice in the main dimension
i_slices[maindim] = slice(start2, stop2, step)
# Get the values for computing the buffer
vals = []
for i, val in enumerate(values):
if i in slice_pos:
vals.append(val.__getitem__(tuple(i_slices)))
else:
# A read of values is apparently not needed, as PyTables
# leaves seem to work just fine inside Numexpr
vals.append(val)
# Do the actual computation
rout = self._compiled_expr(*vals)
# Return one row per call
yield from rout
# Activate the conversion again (default)
for val in values:
if hasattr(val, 'maindim'):
val._v_convert = True
| (self) |
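Because the iterator yields NumPy rows computed one buffer at a time, a running reduction never needs the full outcome in memory; a small sketch:

import numpy as np
import tables as tb

x = np.arange(100_000, dtype='float64')
expr = tb.Expr('x ** 2')

total = 0.0
for row in expr:        # rows come back as NumPy scalars here (1-D input)
    total += row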
728,388 | tables.expression | _calc_nrowsinbuf | Calculate the number of rows that will fit in a buffer. | def _calc_nrowsinbuf(self, object_):
"""Calculate the number of rows that will fit in a buffer."""
# Compute the rowsize for the *leading* dimension
shape_ = list(object_.shape)
if shape_:
shape_[0] = 1
rowsize = np.prod(shape_) * object_.dtype.itemsize
# Compute the nrowsinbuf
# Multiplying the I/O buffer size by 4 gives optimal results
# in my benchmarks with `tables.Expr` (see ``bench/poly.py``)
buffersize = IO_BUFFER_SIZE * 4
nrowsinbuf = buffersize // rowsize
# Safeguard against row sizes being extremely large
if nrowsinbuf == 0:
nrowsinbuf = 1
# If rowsize is too large, issue a Performance warning
maxrowsize = BUFFER_TIMES * buffersize
if rowsize > maxrowsize:
warnings.warn("""\
The object ``%s`` is exceeding the maximum recommended rowsize (%d
bytes); be ready to see PyTables asking for *lots* of memory and
possibly slow I/O. You may want to reduce the rowsize by trimming the
value of dimensions that are orthogonal (and preferably close) to the
*leading* dimension of this object."""
% (object_, maxrowsize),
PerformanceWarning)
return nrowsinbuf
| (self, object_) |
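A worked version of the computation above; the buffer size used here is an arbitrary illustration value, the real one comes from the IO_BUFFER_SIZE parameter:

import numpy as np

io_buffer_size = 1024 * 1024                      # assumed 1 MiB, for illustration only
arr = np.empty((50_000, 1000), dtype='float64')

shape_ = list(arr.shape)
shape_[0] = 1                                     # rowsize of a single leading-dim row
rowsize = np.prod(shape_) * arr.dtype.itemsize    # 1000 * 8 = 8000 bytes
nrowsinbuf = max((io_buffer_size * 4) // rowsize, 1)
print(rowsize, nrowsinbuf)                        # 8000 524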
728,389 | tables.expression | _get_info | Return various info needed for evaluating the computation loop. | def _get_info(self, shape, maindim, itermode=False):
"""Return various info needed for evaluating the computation loop."""
# Compute the shape of the resulting container having
# in account new possible values of start, stop and step in
# the inputs range
if maindim is not None:
(start, stop, step) = slice(
self.start, self.stop, self.step).indices(shape[maindim])
shape[maindim] = min(
shape[maindim], len(range(start, stop, step)))
i_nrows = shape[maindim]
else:
start, stop, step = 0, 0, None
i_nrows = 0
if not itermode:
# Create a container for output if not defined yet
o_maindim = 0 # Default maindim
if self.out is None:
out = np.empty(shape, dtype=self._single_row_out.dtype)
# Get the trivial values for start, stop and step
if maindim is not None:
(o_start, o_stop, o_step) = (0, shape[maindim], 1)
else:
(o_start, o_stop, o_step) = (0, 0, 1)
else:
out = self.out
# Out container already provided. Do some sanity checks.
if hasattr(out, "maindim"):
o_maindim = out.maindim
# Refine the shape of the resulting container having in
# account new possible values of start, stop and step in
# the output range
o_shape = list(out.shape)
s = slice(self.o_start, self.o_stop, self.o_step)
o_start, o_stop, o_step = s.indices(o_shape[o_maindim])
o_shape[o_maindim] = min(o_shape[o_maindim],
len(range(o_start, o_stop, o_step)))
# Check that the shape of output is consistent with inputs
tr_oshape = list(o_shape) # this implies a copy
olen_ = tr_oshape.pop(o_maindim)
tr_shape = list(shape) # do a copy
if maindim is not None:
len_ = tr_shape.pop(o_maindim)
else:
len_ = 1
if tr_oshape != tr_shape:
raise ValueError(
"Shape for out container does not match expression")
# Force the input length to fit in `out`
if not self.append_mode and olen_ < len_:
shape[o_maindim] = olen_
stop = start + olen_
# Get the positions of inputs that should be sliced (the others
# will be broadcasted)
ndim = len(shape)
slice_pos = [i for i, val in enumerate(self.values)
if len(val.shape) == ndim]
# The size of the I/O buffer
nrowsinbuf = 1
for i, val in enumerate(self.values):
# Skip scalar values in variables
if i in slice_pos:
nrows = self._calc_nrowsinbuf(val)
if nrows > nrowsinbuf:
nrowsinbuf = nrows
if not itermode:
return (i_nrows, slice_pos, start, stop, step, nrowsinbuf,
out, o_maindim, o_start, o_stop, o_step)
else:
# For itermode, we don't need the out info
return (i_nrows, slice_pos, start, stop, step, nrowsinbuf)
| (self, shape, maindim, itermode=False) |
728,390 | tables.expression | _guess_shape | Guess the shape of the output of the expression. | def _guess_shape(self):
"""Guess the shape of the output of the expression."""
# First, compute the maximum dimension of inputs and maindim
# (if it exists)
maxndim = 0
maindims = []
for val in self.values:
# Track the maximum number of dimensions seen so far
if len(val.shape) > maxndim:
maxndim = len(val.shape)
if hasattr(val, "maindim"):
maindims.append(val.maindim)
if maxndim == 0:
self._single_row_out = out = self._compiled_expr(*self.values)
return (), None
if maindims and [maindims[0]] * len(maindims) == maindims:
# If all maindims detected are the same, use this as maindim
maindim = maindims[0]
else:
# If not, the main dimension will be the default one
maindim = 0
# The slices parameter for inputs
slices = (slice(None),) * maindim + (0,)
# Now, collect the values in first row of arrays with maximum dims
vals = []
lens = []
for val in self.values:
shape = val.shape
# Warning: don't use len(val) below or it will raise an
# `Overflow` error on 32-bit platforms for large enough arrays.
if shape != () and shape[maindim] == 0:
vals.append(val[:])
lens.append(0)
elif len(shape) < maxndim:
vals.append(val)
else:
vals.append(val.__getitem__(slices))
lens.append(shape[maindim])
minlen = min(lens)
self._single_row_out = out = self._compiled_expr(*vals)
shape = list(out.shape)
if minlen > 0:
shape.insert(maindim, minlen)
return shape, maindim
| (self) |
728,391 | tables.expression | _required_expr_vars | Get the variables required by the `expression`.
A new dictionary defining the variables used in the `expression`
is returned. Required variables are first looked up in the
`uservars` mapping, then in the set of top-level columns of the
table. Unknown variables cause a `NameError` to be raised.
When `uservars` is `None`, the local and global namespace where
the API callable which uses this method is called is sought
instead. To disable this mechanism, just specify a mapping as
`uservars`.
Nested columns and variables with an ``uint64`` type are not
allowed (`TypeError` and `NotImplementedError` are raised,
respectively).
`depth` specifies the depth of the frame in order to reach local
or global variables.
| def _required_expr_vars(self, expression, uservars, depth=2):
"""Get the variables required by the `expression`.
A new dictionary defining the variables used in the `expression`
is returned. Required variables are first looked up in the
`uservars` mapping, then in the set of top-level columns of the
table. Unknown variables cause a `NameError` to be raised.
When `uservars` is `None`, the local and global namespace where
the API callable which uses this method is called is sought
instead. To disable this mechanism, just specify a mapping as
`uservars`.
Nested columns and variables with an ``uint64`` type are not
allowed (`TypeError` and `NotImplementedError` are raised,
respectively).
`depth` specifies the depth of the frame in order to reach local
or global variables.
"""
# Get the names of variables used in the expression.
exprvars_cache = self._exprvars_cache
if expression not in exprvars_cache:
# Protection against growing the cache too much
if len(exprvars_cache) > 256:
# Remove 10 (arbitrary) elements from the cache
for k in list(exprvars_cache)[:10]:
del exprvars_cache[k]
cexpr = compile(expression, '<string>', 'eval')
exprvars = [var for var in cexpr.co_names
if var not in ['None', 'False', 'True']
and var not in ne.expressions.functions]
exprvars_cache[expression] = exprvars
else:
exprvars = exprvars_cache[expression]
# Get the local and global variable mappings of the user frame
# if no mapping has been explicitly given for user variables.
user_locals, user_globals = {}, {}
if uservars is None:
user_frame = sys._getframe(depth)
user_locals = user_frame.f_locals
user_globals = user_frame.f_globals
# Look for the required variables first among the ones
# explicitly provided by the user.
reqvars = {}
for var in exprvars:
# Get the value.
if uservars is not None and var in uservars:
val = uservars[var]
elif uservars is None and var in user_locals:
val = user_locals[var]
elif uservars is None and var in user_globals:
val = user_globals[var]
else:
raise NameError("name ``%s`` is not defined" % var)
# Check the value.
if hasattr(val, 'dtype') and val.dtype.str[1:] == 'u8':
raise NotImplementedError(
"variable ``%s`` refers to "
"a 64-bit unsigned integer object, that is "
"not yet supported in expressions, sorry; " % var)
elif hasattr(val, '_v_colpathnames'): # nested column
# This branch is never reached because the compile step
# above already raises a ``TypeError`` for nested
# columns, but that could change in the future. So it
# is best to leave this here.
raise TypeError(
"variable ``%s`` refers to a nested column, "
"not allowed in expressions" % var)
reqvars[var] = val
return reqvars
| (self, expression, uservars, depth=2) |
728,392 | tables.expression | eval | Evaluate the expression and return the outcome.
For performance reasons, the computation order tries to go along
the common main dimension of all inputs. If no such common main
dimension is found, the iteration will go along the leading dimension
instead.
For non-consistent shapes in inputs (i.e. shapes having a different
number of dimensions), the regular NumPy broadcast rules apply.
There is one exception to this rule though: when the dimensions
orthogonal to the main dimension of the expression are consistent, but
the main dimension itself differs among the inputs, then the shortest
one is chosen for doing the computations. This is so because trying to
expand very large on-disk arrays could be too expensive or simply not
possible.
Also, the regular Numexpr casting rules (which are similar to those of
NumPy, although you should check the Numexpr manual for the exceptions)
are applied to determine the output type.
Finally, if the set_output() method specifying a user container has
already been called, the output is sent to this user-provided
container. If not, a fresh NumPy container is returned instead.
.. warning::
When dealing with large on-disk inputs, failing to specify an
on-disk container may consume all your available memory.
| def eval(self):
"""Evaluate the expression and return the outcome.
For performance reasons, the computation order tries to go along
the common main dimension of all inputs. If no such common main
dimension is found, the iteration will go along the leading dimension
instead.
For non-consistent shapes in inputs (i.e. shapes having a different
number of dimensions), the regular NumPy broadcast rules apply.
There is one exception to this rule though: when the dimensions
orthogonal to the main dimension of the expression are consistent, but
the main dimension itself differs among the inputs, then the shortest
one is chosen for doing the computations. This is so because trying to
expand very large on-disk arrays could be too expensive or simply not
possible.
Also, the regular Numexpr casting rules (which are similar to those of
NumPy, although you should check the Numexpr manual for the exceptions)
are applied to determine the output type.
Finally, if the set_output() method specifying a user container has
already been called, the output is sent to this user-provided
container. If not, a fresh NumPy container is returned instead.
.. warning::
When dealing with large on-disk inputs, failing to specify an
on-disk container may consume all your available memory.
"""
values, shape, maindim = self.values, self.shape, self.maindim
# Get different info we need for the main computation loop
(i_nrows, slice_pos, start, stop, step, nrowsinbuf,
out, o_maindim, o_start, o_stop, o_step) = \
self._get_info(shape, maindim)
if i_nrows == 0:
# No elements to compute
if start >= stop and self.start is not None:
return out
else:
return self._single_row_out
# Create a key that selects every element in inputs and output
# (including the main dimension)
i_slices = [slice(None)] * (maindim + 1)
o_slices = [slice(None)] * (o_maindim + 1)
# This is a hack to prevent doing unnecessary flavor conversions
# while reading buffers
for val in values:
if hasattr(val, 'maindim'):
val._v_convert = False
# Start the computation itself
for start2 in range(start, stop, step * nrowsinbuf):
stop2 = start2 + step * nrowsinbuf
if stop2 > stop:
stop2 = stop
# Set the proper slice for inputs
i_slices[maindim] = slice(start2, stop2, step)
# Get the input values
vals = []
for i, val in enumerate(values):
if i in slice_pos:
vals.append(val.__getitem__(tuple(i_slices)))
else:
# A read of values is apparently not needed, as PyTables
# leaves seem to work just fine inside Numexpr
vals.append(val)
# Do the actual computation for this slice
rout = self._compiled_expr(*vals)
# Set the values into the out buffer
if self.append_mode:
out.append(rout)
else:
# Compute the slice to be filled in output
start3 = o_start + (start2 - start) // step
stop3 = start3 + nrowsinbuf * o_step
if stop3 > o_stop:
stop3 = o_stop
o_slices[o_maindim] = slice(start3, stop3, o_step)
# Set the slice
out[tuple(o_slices)] = rout
# Activate the conversion again (default)
for val in values:
if hasattr(val, 'maindim'):
val._v_convert = True
return out
| (self) |
728,393 | tables.expression | set_inputs_range | Define a range for all inputs in expression.
The computation will only take place for the range defined by
the start, stop and step parameters in the main dimension of
inputs (or the leading one, if the object lacks the concept of
main dimension, like a NumPy container). If no common main
dimension exists for all inputs, the leading dimension will be
used instead.
| def set_inputs_range(self, start=None, stop=None, step=None):
"""Define a range for all inputs in expression.
The computation will only take place for the range defined by
the start, stop and step parameters in the main dimension of
inputs (or the leading one, if the object lacks the concept of
main dimension, like a NumPy container). If no common main
dimension exists for all inputs, the leading dimension will be
used instead.
"""
self.start = start
self.stop = stop
self.step = step
| (self, start=None, stop=None, step=None) |
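A minimal sketch of restricting the computation to a slice of the inputs, per the semantics above:

import numpy as np
import tables as tb

a = np.arange(100)
b = np.arange(100, 200)

expr = tb.Expr('a + b')
expr.set_inputs_range(start=10, stop=50, step=2)   # only rows 10, 12, ..., 48
partial = expr.eval()                              # 20 values, one per selected row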
728,394 | tables.expression | set_output | Set out as container for output as well as the append_mode.
The out must be a container that is meant to keep the outcome of
the expression. It should be a homogeneous type container and
can typically be an Array, CArray, EArray, Column or a NumPy ndarray.
The append_mode specifies the way in which the output is filled.
If true, the rows of the outcome are *appended* to the out container.
Of course, for doing this it is necessary that out have an
append() method (like an EArray, for example).
If append_mode is false, the output is set via the __setitem__()
method (see the Expr.set_output_range() for info on how to select
the rows to be updated). If out is smaller than what is required
by the expression, only the computations that are needed to fill
up the container are carried out. If it is larger, the excess
elements are unaffected.
| def set_output(self, out, append_mode=False):
"""Set out as container for output as well as the append_mode.
The out must be a container that is meant to keep the outcome of
the expression. It should be a homogeneous type container and
can typically be an Array, CArray, EArray, Column or a NumPy ndarray.
The append_mode specifies the way in which the output is filled.
If true, the rows of the outcome are *appended* to the out container.
Of course, for this to work, out must have an
append() method (like an EArray, for example).
If append_mode is false, the output is set via the __setitem__()
method (see the Expr.set_output_range() for info on how to select
the rows to be updated). If out is smaller than what is required
by the expression, only the computations that are needed to fill
up the container are carried out. If it is larger, the excess
elements are unaffected.
"""
if not (hasattr(out, "shape") and hasattr(out, "__setitem__")):
raise ValueError(
"You need to pass a settable multidimensional container "
"as output")
self.out = out
if append_mode and not hasattr(out, "append"):
raise ValueError(
"For activating the ``append`` mode, you need a container "
"with an `append()` method (like the `EArray`)")
self.append_mode = append_mode
| (self, out, append_mode=False) |
728,395 | tables.expression | set_output_range | Define a range for user-provided output object.
The output object will only be modified in the range specified by the
start, stop and step parameters in the main dimension of output (or the
leading one, if the object does not have the concept of main dimension,
like a NumPy container).
| def set_output_range(self, start=None, stop=None, step=None):
"""Define a range for user-provided output object.
The output object will only be modified in the range specified by the
start, stop and step parameters in the main dimension of output (or the
leading one, if the object does not have the concept of main dimension,
like a NumPy container).
"""
if self.out is None:
raise IndexError(
"You need to pass an output object to `setOut()` first")
self.o_start = start
self.o_stop = stop
self.o_step = step
| (self, start=None, stop=None, step=None) |
728,396 | tables.file | File | The in-memory representation of a PyTables file.
An instance of this class is returned when a PyTables file is
opened with the :func:`tables.open_file` function. It offers methods
to manipulate (create, rename, delete...) nodes and handle their
attributes, as well as methods to traverse the object tree.
The *user entry point* to the object tree attached to the HDF5 file
is represented in the root_uep attribute.
Other attributes are available.
File objects support an *Undo/Redo mechanism* which can be enabled
with the :meth:`File.enable_undo` method. Once the Undo/Redo
mechanism is enabled, explicit *marks* (with an optional unique
name) can be set on the state of the database using the
:meth:`File.mark`
method. There are two implicit marks which are always available:
the initial mark (0) and the final mark (-1). Both the identifier
of a mark and its name can be used in *undo* and *redo* operations.
Hierarchy manipulation operations (node creation, movement and
removal) and attribute handling operations (setting and deleting)
made after a mark can be undone by using the :meth:`File.undo`
method, which returns the database to the state of a past mark.
If undo() is not followed by operations that modify the hierarchy
or attributes, the :meth:`File.redo` method can be used to return
the database to the state of a future mark. Else, future states of
the database are forgotten.
Note that data handling operations cannot be undone or redone
yet. Also, hierarchy manipulation operations on nodes that do not
support the Undo/Redo mechanism issue an UndoRedoWarning *before*
changing the database.
The Undo/Redo mechanism is persistent between sessions and can
only be disabled by calling the :meth:`File.disable_undo` method.
File objects can also act as context managers when using the with
statement introduced in Python 2.5. When exiting a context, the
file is automatically closed.
Parameters
----------
filename : str
The name of the file (supports environment variable expansion).
It is suggested that file names have any of the .h5, .hdf or
.hdf5 extensions, although this is not mandatory.
mode : str
The mode to open the file. It can be one of the
following:
* *'r'*: Read-only; no data can be modified.
* *'w'*: Write; a new file is created (an existing file
with the same name would be deleted).
* *'a'*: Append; an existing file is opened for reading
and writing, and if the file does not exist it is created.
* *'r+'*: It is similar to 'a', but the file must already
exist.
title : str
If the file is to be created, a TITLE string attribute will be
set on the root group with the given value. Otherwise, the
title will be read from disk, and this will not have any effect.
root_uep : str
The root User Entry Point. This is a group in the HDF5 hierarchy
which will be taken as the starting point to create the object
tree. It can be any existing group in the file, named by
its HDF5 path. If it does not exist, an HDF5ExtError is issued.
Use this if you do not want to build the *entire* object tree,
but rather only a *subtree* of it.
.. versionchanged:: 3.0
The *rootUEP* parameter has been renamed into *root_uep*.
filters : Filters
An instance of the Filters (see :ref:`FiltersClassDescr`) class that
provides information about the desired I/O filters applicable to the
leaves that hang directly from the *root group*, unless other filter
properties are specified for these leaves. Besides, if you do not
specify filter properties for child groups, they will inherit these
ones, which will in turn propagate to child nodes.
Notes
-----
In addition, it recognizes the (lowercase) names of parameters
present in :file:`tables/parameters.py` as additional keyword
arguments.
See :ref:`parameter_files` for detailed info on the supported
parameters.
.. rubric:: File attributes
.. attribute:: filename
The name of the opened file.
.. attribute:: format_version
The PyTables version number of this file.
.. attribute:: isopen
True if the underlying file is open, false otherwise.
.. attribute:: mode
The mode in which the file was opened.
.. attribute:: root
The *root* of the object tree hierarchy (a Group instance).
.. attribute:: root_uep
The UEP (user entry point) group name in the file (see
the :func:`open_file` function).
.. versionchanged:: 3.0
The *rootUEP* attribute has been renamed into *root_uep*.
| class File(hdf5extension.File):
"""The in-memory representation of a PyTables file.
An instance of this class is returned when a PyTables file is
opened with the :func:`tables.open_file` function. It offers methods
to manipulate (create, rename, delete...) nodes and handle their
attributes, as well as methods to traverse the object tree.
The *user entry point* to the object tree attached to the HDF5 file
is represented in the root_uep attribute.
Other attributes are available.
File objects support an *Undo/Redo mechanism* which can be enabled
with the :meth:`File.enable_undo` method. Once the Undo/Redo
mechanism is enabled, explicit *marks* (with an optional unique
name) can be set on the state of the database using the
:meth:`File.mark`
method. There are two implicit marks which are always available:
the initial mark (0) and the final mark (-1). Both the identifier
of a mark and its name can be used in *undo* and *redo* operations.
Hierarchy manipulation operations (node creation, movement and
removal) and attribute handling operations (setting and deleting)
made after a mark can be undone by using the :meth:`File.undo`
method, which returns the database to the state of a past mark.
If undo() is not followed by operations that modify the hierarchy
or attributes, the :meth:`File.redo` method can be used to return
the database to the state of a future mark. Else, future states of
the database are forgotten.
Note that data handling operations cannot be undone or redone
yet. Also, hierarchy manipulation operations on nodes that do not
support the Undo/Redo mechanism issue an UndoRedoWarning *before*
changing the database.
The Undo/Redo mechanism is persistent between sessions and can
only be disabled by calling the :meth:`File.disable_undo` method.
File objects can also act as context managers when using the with
statement introduced in Python 2.5. When exiting a context, the
file is automatically closed.
Parameters
----------
filename : str
The name of the file (supports environment variable expansion).
It is suggested that file names have any of the .h5, .hdf or
.hdf5 extensions, although this is not mandatory.
mode : str
The mode to open the file. It can be one of the
following:
* *'r'*: Read-only; no data can be modified.
* *'w'*: Write; a new file is created (an existing file
with the same name would be deleted).
* *'a'*: Append; an existing file is opened for reading
and writing, and if the file does not exist it is created.
* *'r+'*: It is similar to 'a', but the file must already
exist.
title : str
If the file is to be created, a TITLE string attribute will be
set on the root group with the given value. Otherwise, the
title will be read from disk, and this will not have any effect.
root_uep : str
The root User Entry Point. This is a group in the HDF5 hierarchy
which will be taken as the starting point to create the object
tree. It can be any existing group in the file, named by
its HDF5 path. If it does not exist, an HDF5ExtError is issued.
Use this if you do not want to build the *entire* object tree,
but rather only a *subtree* of it.
.. versionchanged:: 3.0
The *rootUEP* parameter has been renamed into *root_uep*.
filters : Filters
An instance of the Filters (see :ref:`FiltersClassDescr`) class that
provides information about the desired I/O filters applicable to the
leaves that hang directly from the *root group*, unless other filter
properties are specified for these leaves. Besides, if you do not
specify filter properties for child groups, they will inherit these
ones, which will in turn propagate to child nodes.
Notes
-----
In addition, it recognizes the (lowercase) names of parameters
present in :file:`tables/parameters.py` as additional keyword
arguments.
See :ref:`parameter_files` for detailed info on the supported
parameters.
.. rubric:: File attributes
.. attribute:: filename
The name of the opened file.
.. attribute:: format_version
The PyTables version number of this file.
.. attribute:: isopen
True if the underlying file is open, false otherwise.
.. attribute:: mode
The mode in which the file was opened.
.. attribute:: root
The *root* of the object tree hierarchy (a Group instance).
.. attribute:: root_uep
The UEP (user entry point) group name in the file (see
the :func:`open_file` function).
.. versionchanged:: 3.0
The *rootUEP* attribute has been renamed into *root_uep*.
"""
# The top level kinds. Group must go first!
_node_kinds = ('Group', 'Leaf', 'Link', 'Unknown')
@property
def title(self):
"""The title of the root group in the file."""
return self.root._v_title
@title.setter
def title(self, title):
self.root._v_title = title
@title.deleter
def title(self):
del self.root._v_title
@property
def filters(self):
"""Default filter properties for the root group
(see :ref:`FiltersClassDescr`)."""
return self.root._v_filters
@filters.setter
def filters(self, filters):
self.root._v_filters = filters
@filters.deleter
def filters(self):
del self.root._v_filters
def __init__(self, filename, mode="r", title="",
root_uep="/", filters=None, **kwargs):
self.filename = os.fspath(filename)
"""The name of the opened file."""
self.mode = mode
"""The mode in which the file was opened."""
if mode not in ('r', 'r+', 'a', 'w'):
raise ValueError("invalid mode string ``%s``. Allowed modes are: "
"'r', 'r+', 'a' and 'w'" % mode)
# Get all the parameters in parameter file(s)
params = {k: v for k, v in parameters.__dict__.items()
if k.isupper() and not k.startswith('_')}
# Update them with possible keyword arguments
if [k for k in kwargs if k.isupper()]:
warnings.warn("The use of uppercase keyword parameters is "
"deprecated", DeprecationWarning)
kwargs = {k.upper(): v for k, v in kwargs.items()}
params.update(kwargs)
# If MAX_ * _THREADS is not set yet, set it to the number of cores
# on this machine.
if params['MAX_NUMEXPR_THREADS'] is None:
params['MAX_NUMEXPR_THREADS'] = detect_number_of_cores()
if params['MAX_BLOSC_THREADS'] is None:
params['MAX_BLOSC_THREADS'] = detect_number_of_cores()
self.params = params
# Now, it is time to initialize the File extension
self._g_new(filename, mode, **params)
# Check filters and set PyTables format version for new files.
new = self._v_new
if new:
_checkfilters(filters)
self.format_version = format_version
"""The PyTables version number of this file."""
# The node manager must be initialized before the root group
# initialization but the node_factory attribute is set only later
# because it is a bound method of the root group itself.
node_cache_slots = params['NODE_CACHE_SLOTS']
self._node_manager = NodeManager(nslots=node_cache_slots)
# For the moment Undo/Redo is not enabled.
self._undoEnabled = False
# Set the flag to indicate that the file has been opened.
# It must be set before opening the root group
# to allow some basic access to its attributes.
self.isopen = 1
"""True if the underlying file os open, False otherwise."""
# Append the name of the file to the global dict of files opened.
_open_files.add(self)
# Set the number of times this file has been opened to 1
self._open_count = 1
# Get the root group from this file
self.root = root = self.__get_root_group(root_uep, title, filters)
"""The *root* of the object tree hierarchy (a Group instance)."""
# Complete the creation of the root node
# (see the explanation in ``RootGroup.__init__()``.
root._g_post_init_hook()
self._node_manager.node_factory = self.root._g_load_child
# Save the PyTables format version for this file.
if new:
if params['PYTABLES_SYS_ATTRS']:
root._v_attrs._g__setattr(
'PYTABLES_FORMAT_VERSION', format_version)
# If the file is old, and not opened in "read-only" mode,
# check if it has a transaction log
if not new and self.mode != "r" and _trans_group_path in self:
# It does. Enable the undo.
self.enable_undo()
# Set the maximum number of threads for Numexpr
ne.set_vml_num_threads(params['MAX_NUMEXPR_THREADS'])
def __get_root_group(self, root_uep, title, filters):
"""Returns a Group instance which will act as the root group in the
hierarchical tree.
If file is opened in "r", "r+" or "a" mode, and the file already
exists, this method dynamically builds a python object tree
emulating the structure present on file.
"""
self._v_objectid = self._get_file_id()
if root_uep in [None, ""]:
root_uep = "/"
# Save the User Entry Point in a variable class
self.root_uep = root_uep
new = self._v_new
# Get format version *before* getting the object tree
if not new:
# Firstly, get the PyTables format version for this file
self.format_version = utilsextension.read_f_attr(
self._v_objectid, 'PYTABLES_FORMAT_VERSION')
if not self.format_version:
# PYTABLES_FORMAT_VERSION attribute is not present
self.format_version = "unknown"
self._isPTFile = False
elif not isinstance(self.format_version, str):
# system attributes should always be str
self.format_version = self.format_version.decode('utf-8')
# Create new attributes for the root Group instance and
# create the object tree
return RootGroup(self, root_uep, title=title, new=new, filters=filters)
def _get_or_create_path(self, path, create):
"""Get the given `path` or create it if `create` is true.
If `create` is true, `path` *must* be a string path and not a
node, otherwise a `TypeError` will be raised.
"""
if create:
return self._create_path(path)
else:
return self.get_node(path)
def _create_path(self, path):
"""Create the groups needed for the `path` to exist.
The group associated with the given `path` is returned.
"""
if not hasattr(path, 'split'):
raise TypeError("when creating parents, parent must be a path")
if path == '/':
return self.root
parent, create_group = self.root, self.create_group
for pcomp in path.split('/')[1:]:
try:
child = parent._f_get_child(pcomp)
except NoSuchNodeError:
child = create_group(parent, pcomp)
parent = child
return parent
def create_group(self, where, name, title="", filters=None,
createparents=False):
"""Create a new group.
Parameters
----------
where : str or Group
The parent group from which the new group will hang. It can be a
path string (for example '/level1/leaf5'), or a Group instance
(see :ref:`GroupClassDescr`).
name : str
The name of the new group.
title : str, optional
A description for this node (it sets the TITLE HDF5 attribute on
disk).
filters : Filters
An instance of the Filters class (see :ref:`FiltersClassDescr`)
that provides information about the desired I/O filters applicable
to the leaves that hang directly from this new group (unless other
filter properties are specified for these leaves). Besides, if you
do not specify filter properties for its child groups, they will
inherit these ones.
createparents : bool
Whether to create the needed groups for the parent
path to exist (not done by default).
See Also
--------
Group : for more information on groups
"""
parentnode = self._get_or_create_path(where, createparents)
_checkfilters(filters)
return Group(parentnode, name,
title=title, new=True, filters=filters)
def create_table(self, where, name, description=None, title="",
filters=None, expectedrows=10_000,
chunkshape=None, byteorder=None,
createparents=False, obj=None, track_times=True):
"""Create a new table with the given name in where location.
Parameters
----------
where : str or Group
The parent group from which the new table will hang. It can be a
path string (for example '/level1/leaf5'), or a Group instance
(see :ref:`GroupClassDescr`).
name : str
The name of the new table.
description : Description
This is an object that describes the table, i.e. how
many columns it has, their names, types, shapes, etc. It
can be any of the following:
* *A user-defined class*: This should inherit from the
IsDescription class (see :ref:`IsDescriptionClassDescr`)
where table fields are specified.
* *A dictionary*: For example, when you do not know
beforehand which structure your table will have).
* *A Description instance*: You can use the description
attribute of another table to create a new one with the
same structure.
* *A NumPy dtype*: A completely general structured NumPy
dtype.
* *A NumPy (structured) array instance*: The dtype of
this structured array will be used as the description.
Also, in case the array has actual data, it will be
injected into the newly created table.
.. versionchanged:: 3.0
The *description* parameter can be None (default) if *obj* is
provided. In that case the structure of the table is deduced
by *obj*.
title : str
A description for this node (it sets the TITLE HDF5 attribute
on disk).
filters : Filters
An instance of the Filters class (see :ref:`FiltersClassDescr`)
that provides information about the desired I/O filters to be
applied during the life of this object.
expectedrows : int
A user estimate of the number of records that will be in the table.
If not provided, the default value is EXPECTED_ROWS_TABLE (see
:file:`tables/parameters.py`). If you plan to create a bigger
table try providing a guess; this will optimize the HDF5 B-Tree
creation and management process time and memory used.
chunkshape
The shape of the data chunk to be read or written in a
single HDF5 I/O operation. Filters are applied to those
chunks of data. The rank of the chunkshape for tables must
be 1. If None, a sensible value is calculated based on the
expectedrows parameter (which is recommended).
byteorder : str
The byteorder of data *on disk*, specified as 'little' or 'big'.
If this is not specified, the byteorder is that of the platform,
unless you passed an array as the description, in which case
its byteorder will be used.
createparents : bool
Whether to create the needed groups for the parent path to exist
(not done by default).
obj : python object
The recarray to be saved. Accepted types are NumPy record
arrays.
The *obj* parameter is optional and it can be provided in
alternative to the *description* parameter.
If both *obj* and *description* are provided they must
be consistent with each other.
.. versionadded:: 3.0
track_times
Whether time data associated with the leaf are recorded (object
access time, raw data modification time, metadata change time,
object birth time); default True. Semantics of these times
depend on their implementation in the HDF5 library: refer to
documentation of the H5O_info_t data structure. As of HDF5
1.8.15, only ctime (metadata change time) is implemented.
.. versionadded:: 3.4.3
See Also
--------
Table : for more information on tables
"""
if obj is not None:
if not isinstance(obj, np.ndarray):
raise TypeError('invalid obj parameter %r' % obj)
descr, _ = descr_from_dtype(obj.dtype, ptparams=self.params)
if (description is not None and
dtype_from_descr(description,
ptparams=self.params) != obj.dtype):
raise TypeError('the description parameter is not consistent '
'with the data type of the obj parameter')
elif description is None:
description = descr
parentnode = self._get_or_create_path(where, createparents)
if description is None:
raise ValueError("invalid table description: None")
_checkfilters(filters)
ptobj = Table(parentnode, name,
description=description, title=title,
filters=filters, expectedrows=expectedrows,
chunkshape=chunkshape, byteorder=byteorder,
track_times=track_times)
if obj is not None:
ptobj.append(obj)
return ptobj
def create_array(self, where, name, obj=None, title="",
byteorder=None, createparents=False,
atom=None, shape=None, track_times=True):
"""Create a new array.
Parameters
----------
where : str or Group
The parent group from which the new array will hang. It can be a
path string (for example '/level1/leaf5'), or a Group instance
(see :ref:`GroupClassDescr`).
name : str
The name of the new array
obj : python object
The array or scalar to be saved. Accepted types are NumPy
arrays and scalars, as well as native Python sequences and
scalars, provided that values are regular (i.e. they are
not like ``[[1,2],2]``) and homogeneous (i.e. all the
elements are of the same type).
Also, objects that have some of their dimensions equal to 0
are not supported (use an EArray node (see
:ref:`EArrayClassDescr`) if you want to store an array with
one of its dimensions equal to 0).
.. versionchanged:: 3.0
The *object* parameter has been renamed into *obj*.
title : str
A description for this node (it sets the TITLE HDF5 attribute on
disk).
byteorder : str
The byteorder of the data *on disk*, specified as 'little' or
'big'. If this is not specified, the byteorder is that of the
given object.
createparents : bool, optional
Whether to create the needed groups for the parent path to exist
(not done by default).
atom : Atom
An Atom (see :ref:`AtomClassDescr`) instance representing
the *type* and *shape* of the atomic objects to be saved.
.. versionadded:: 3.0
shape : tuple of ints
The shape of the stored array.
.. versionadded:: 3.0
track_times
Whether time data associated with the leaf are recorded (object
access time, raw data modification time, metadata change time,
object birth time); default True. Semantics of these times
depend on their implementation in the HDF5 library: refer to
documentation of the H5O_info_t data structure. As of HDF5
1.8.15, only ctime (metadata change time) is implemented.
.. versionadded:: 3.4.3
See Also
--------
Array : for more information on arrays
create_table : for more information on the rest of parameters
"""
if obj is None:
if atom is None or shape is None:
raise TypeError('if the obj parameter is not specified '
'(or None) then both the atom and shape '
'parameters should be provided.')
else:
# Making strides=(0,...) below is a trick to create the
# array fast and without memory consumption
dflt = np.zeros((), dtype=atom.dtype)
obj = np.ndarray(shape, dtype=atom.dtype, buffer=dflt,
strides=(0,)*len(shape))
else:
flavor = flavor_of(obj)
# use a temporary object because converting obj at this stage
# breaks some tests. This solution performs a double,
# potentially expensive, conversion of the obj parameter.
_obj = array_as_internal(obj, flavor)
if shape is not None and shape != _obj.shape:
raise TypeError('the shape parameter does not match obj.shape')
if atom is not None and atom.dtype != _obj.dtype:
raise TypeError('the atom parameter is not consistent with '
'the data type of the obj parameter')
parentnode = self._get_or_create_path(where, createparents)
return Array(parentnode, name,
obj=obj, title=title, byteorder=byteorder,
track_times=track_times)
def create_carray(self, where, name, atom=None, shape=None, title="",
filters=None, chunkshape=None,
byteorder=None, createparents=False, obj=None,
track_times=True):
"""Create a new chunked array.
Parameters
----------
where : str or Group
The parent group from which the new array will hang. It can
be a path string (for example '/level1/leaf5'), or a Group
instance (see :ref:`GroupClassDescr`).
name : str
The name of the new array
atom : Atom
An Atom (see :ref:`AtomClassDescr`) instance representing
the *type* and *shape* of the atomic objects to be saved.
.. versionchanged:: 3.0
The *atom* parameter can be None (default) if *obj* is
provided.
shape : tuple
The shape of the new array.
.. versionchanged:: 3.0
The *shape* parameter can be None (default) if *obj* is
provided.
title : str, optional
A description for this node (it sets the TITLE HDF5 attribute
on disk).
filters : Filters, optional
An instance of the Filters class (see :ref:`FiltersClassDescr`)
that provides information about the desired I/O filters to
be applied during the life of this object.
chunkshape : tuple or number or None, optional
The shape of the data chunk to be read or written in a
single HDF5 I/O operation. Filters are applied to those
chunks of data. The dimensionality of chunkshape must be
the same as that of shape. If None, a sensible value is
calculated (which is recommended).
byteorder : str, optional
The byteorder of the data *on disk*, specified as 'little'
or 'big'. If this is not specified, the byteorder is that
of the given object.
createparents : bool, optional
Whether to create the needed groups for the parent path to
exist (not done by default).
obj : python object
The array or scalar to be saved. Accepted types are NumPy
arrays and scalars, as well as native Python sequences and
scalars, provided that values are regular (i.e. they are
not like ``[[1,2],2]``) and homogeneous (i.e. all the
elements are of the same type).
Also, objects that have some of their dimensions equal to 0
are not supported. Please use an EArray node (see
:ref:`EArrayClassDescr`) if you want to store an array with
one of its dimensions equal to 0.
The *obj* parameter is optional and it can be provided in
alternative to the *atom* and *shape* parameters.
If both *obj* and *atom* and/or *shape* are provided they must
be consistent with each other.
.. versionadded:: 3.0
track_times
Whether time data associated with the leaf are recorded (object
access time, raw data modification time, metadata change time,
object birth time); default True. Semantics of these times
depend on their implementation in the HDF5 library: refer to
documentation of the H5O_info_t data structure. As of HDF5
1.8.15, only ctime (metadata change time) is implemented.
.. versionadded:: 3.4.3
See Also
--------
CArray : for more information on chunked arrays
"""
if obj is not None:
flavor = flavor_of(obj)
obj = array_as_internal(obj, flavor)
if shape is not None and shape != obj.shape:
raise TypeError('the shape parameter does not match obj.shape')
else:
shape = obj.shape
if atom is not None and atom.dtype != obj.dtype:
raise TypeError("the 'atom' parameter is not consistent with "
"the data type of the 'obj' parameter")
elif atom is None:
atom = Atom.from_dtype(obj.dtype)
else:
if atom is None and shape is None:
raise TypeError(
"the 'atom' and 'shape' parameters or the 'obj' parameter "
"must be provided")
parentnode = self._get_or_create_path(where, createparents)
_checkfilters(filters)
ptobj = CArray(parentnode, name,
atom=atom, shape=shape, title=title, filters=filters,
chunkshape=chunkshape, byteorder=byteorder,
track_times=track_times)
if obj is not None:
ptobj[...] = obj
return ptobj
def create_earray(self, where, name, atom=None, shape=None, title="",
filters=None, expectedrows=1000,
chunkshape=None, byteorder=None,
createparents=False, obj=None, track_times=True):
"""Create a new enlargeable array.
Parameters
----------
where : str or Group
The parent group from which the new array will hang. It can be a
path string (for example '/level1/leaf5'), or a Group instance
(see :ref:`GroupClassDescr`).
name : str
The name of the new array
atom : Atom
An Atom (see :ref:`AtomClassDescr`) instance representing the
*type* and *shape* of the atomic objects to be saved.
.. versionchanged:: 3.0
The *atom* parameter can be None (default) if *obj* is
provided.
shape : tuple
The shape of the new array. One (and only one) of the shape
dimensions *must* be 0. The dimension being 0 means that the
resulting EArray object can be extended along it. Multiple
enlargeable dimensions are not supported right now.
.. versionchanged:: 3.0
The *shape* parameter can be None (default) if *obj* is
provided.
title : str, optional
A description for this node (it sets the TITLE HDF5 attribute on
disk).
expectedrows : int, optional
A user estimate about the number of row elements that will be added
to the growable dimension in the EArray node. If not provided, the
default value is EXPECTED_ROWS_EARRAY (see tables/parameters.py).
If you plan to create either a much smaller or a much bigger array
try providing a guess; this will optimize the HDF5 B-Tree creation
and management process time and the amount of memory used.
chunkshape : tuple, numeric, or None, optional
The shape of the data chunk to be read or written in a single HDF5
I/O operation. Filters are applied to those chunks of data. The
dimensionality of chunkshape must be the same as that of shape
(beware: no dimension should be 0 this time!). If None, a sensible
value is calculated based on the expectedrows parameter (which is
recommended).
byteorder : str, optional
The byteorder of the data *on disk*, specified as 'little' or
'big'. If this is not specified, the byteorder is that of the
platform.
createparents : bool, optional
Whether to create the needed groups for the parent path to exist
(not done by default).
obj : python object
The array or scalar to be saved. Accepted types are NumPy
arrays and scalars, as well as native Python sequences and
scalars, provided that values are regular (i.e. they are
not like ``[[1,2],2]``) and homogeneous (i.e. all the
elements are of the same type).
The *obj* parameter is optional and it can be provided in
alternative to the *atom* and *shape* parameters.
If both *obj* and *atom* and/or *shape* are provided they must
be consistent with each other.
.. versionadded:: 3.0
track_times
Whether time data associated with the leaf are recorded (object
access time, raw data modification time, metadata change time,
object birth time); default True. Semantics of these times
depend on their implementation in the HDF5 library: refer to
documentation of the H5O_info_t data structure. As of HDF5
1.8.15, only ctime (metadata change time) is implemented.
.. versionadded:: 3.4.3
See Also
--------
EArray : for more information on enlargeable arrays
"""
if obj is not None:
flavor = flavor_of(obj)
obj = array_as_internal(obj, flavor)
earray_shape = (0,) + obj.shape[1:]
if shape is not None and shape != earray_shape:
raise TypeError('the shape parameter is not compatible '
'with obj.shape.')
else:
shape = earray_shape
if atom is not None and atom.dtype != obj.dtype:
raise TypeError('the atom parameter is not consistent with '
'the data type of the obj parameter')
elif atom is None:
atom = Atom.from_dtype(obj.dtype)
parentnode = self._get_or_create_path(where, createparents)
_checkfilters(filters)
ptobj = EArray(parentnode, name,
atom=atom, shape=shape, title=title,
filters=filters, expectedrows=expectedrows,
chunkshape=chunkshape, byteorder=byteorder,
track_times=track_times)
if obj is not None:
ptobj.append(obj)
return ptobj
def create_vlarray(self, where, name, atom=None, title="",
filters=None, expectedrows=None,
chunkshape=None, byteorder=None,
createparents=False, obj=None,
track_times=True):
"""Create a new variable-length array.
Parameters
----------
where : str or Group
The parent group from which the new array will hang. It can
be a path string (for example '/level1/leaf5'), or a Group
instance (see :ref:`GroupClassDescr`).
name : str
The name of the new array
atom : Atom
An Atom (see :ref:`AtomClassDescr`) instance representing
the *type* and *shape* of the atomic objects to be saved.
.. versionchanged:: 3.0
The *atom* parameter can be None (default) if *obj* is
provided.
title : str, optional
A description for this node (it sets the TITLE HDF5 attribute
on disk).
filters : Filters
An instance of the Filters class (see :ref:`FiltersClassDescr`)
that provides information about the desired I/O filters to
be applied during the life of this object.
expectedrows : int, optional
A user estimate about the number of row elements that will
be added to the growable dimension in the `VLArray` node.
If not provided, the default value is ``EXPECTED_ROWS_VLARRAY``
(see ``tables/parameters.py``). If you plan to create either
a much smaller or a much bigger `VLArray` try providing a guess;
this will optimize the HDF5 B-Tree creation and management
process time and the amount of memory used.
.. versionadded:: 3.0
chunkshape : int or tuple of int, optional
The shape of the data chunk to be read or written in a
single HDF5 I/O operation. Filters are applied to those
chunks of data. The dimensionality of chunkshape must be 1.
If None, a sensible value is calculated (which is recommended).
byteorder : str, optional
The byteorder of the data *on disk*, specified as 'little' or
'big'. If this is not specified, the byteorder is that of the
platform.
createparents : bool, optional
Whether to create the needed groups for the parent path to
exist (not done by default).
obj : python object
The array or scalar to be saved. Accepted types are NumPy
arrays and scalars, as well as native Python sequences and
scalars, provided that values are regular (i.e. they are
not like ``[[1,2],2]``) and homogeneous (i.e. all the
elements are of the same type).
The *obj* parameter is optional and it can be provided in
alternative to the *atom* parameter.
If both *obj* and *atom* are provided they must
be consistent with each other.
.. versionadded:: 3.0
track_times
Whether time data associated with the leaf are recorded (object
access time, raw data modification time, metadata change time,
object birth time); default True. Semantics of these times
depend on their implementation in the HDF5 library: refer to
documentation of the H5O_info_t data structure. As of HDF5
1.8.15, only ctime (metadata change time) is implemented.
.. versionadded:: 3.4.3
See Also
--------
VLArray : for more information on variable-length arrays
.. versionchanged:: 3.0
The *expectedsizeinMB* parameter has been replaced by
*expectedrows*.
"""
if obj is not None:
flavor = flavor_of(obj)
obj = array_as_internal(obj, flavor)
if atom is not None and atom.dtype != obj.dtype:
raise TypeError('the atom parameter is not consistent with '
'the data type of the obj parameter')
if atom is None:
atom = Atom.from_dtype(obj.dtype)
elif atom is None:
raise ValueError('atom parameter cannot be None')
parentnode = self._get_or_create_path(where, createparents)
_checkfilters(filters)
ptobj = VLArray(parentnode, name,
atom=atom, title=title, filters=filters,
expectedrows=expectedrows,
chunkshape=chunkshape, byteorder=byteorder,
track_times=track_times)
if obj is not None:
ptobj.append(obj)
return ptobj
def create_hard_link(self, where, name, target, createparents=False):
"""Create a hard link.
Create a hard link to a `target` node with the given `name` in
`where` location. `target` can be a node object or a path
string. If `createparents` is true, the intermediate groups
required for reaching `where` are created (the default is not
doing so).
The returned node is a regular `Group` or `Leaf` instance.
"""
targetnode = self.get_node(target)
parentnode = self._get_or_create_path(where, createparents)
linkextension._g_create_hard_link(parentnode, name, targetnode)
# Refresh children names in link's parent node
parentnode._g_add_children_names()
# Return the target node
return self.get_node(parentnode, name)
def create_soft_link(self, where, name, target, createparents=False):
"""Create a soft link (aka symbolic link) to a `target` node.
Create a soft link (aka symbolic link) to a `target` node with
the given `name` in `where` location. `target` can be a node
object or a path string. If `createparents` is true, the
intermediate groups required for reaching `where` are created
(the default is not doing so).
The returned node is a SoftLink instance. See the SoftLink
class (in :ref:`SoftLinkClassDescr`) for more information on
soft links.
"""
if not isinstance(target, str):
if hasattr(target, '_v_pathname'): # quacks like a Node
target = target._v_pathname
else:
raise ValueError(
"`target` has to be a string or a node object")
parentnode = self._get_or_create_path(where, createparents)
slink = SoftLink(parentnode, name, target)
# Refresh children names in link's parent node
parentnode._g_add_children_names()
return slink
def create_external_link(self, where, name, target, createparents=False):
"""Create an external link.
Create an external link to a *target* node with the given *name*
in *where* location. *target* can be a node object in another
file or a path string in the form 'file:/path/to/node'. If
*createparents* is true, the intermediate groups required for
reaching *where* are created (the default is not doing so).
The returned node is an :class:`ExternalLink` instance.
"""
if not isinstance(target, str):
if hasattr(target, '_v_pathname'): # quacks like a Node
target = target._v_file.filename + ':' + target._v_pathname
else:
raise ValueError(
"`target` has to be a string or a node object")
elif target.find(':/') == -1:
raise ValueError(
"`target` must expressed as 'file:/path/to/node'")
parentnode = self._get_or_create_path(where, createparents)
elink = ExternalLink(parentnode, name, target)
# Refresh children names in link's parent node
parentnode._g_add_children_names()
return elink
def _get_node(self, nodepath):
# The root node is always at hand.
if nodepath == '/':
return self.root
node = self._node_manager.get_node(nodepath)
assert node is not None, "unable to instantiate node ``%s``" % nodepath
return node
def get_node(self, where, name=None, classname=None):
"""Get the node under where with the given name.
Parameters
----------
where : str or Node
This can be a path string leading to a node or a Node instance (see
:ref:`NodeClassDescr`). If no name is specified, that node is
returned.
.. note::
If where is a Node instance from a different file than the one
on which this function is called, the returned node will also
be from that other file.
name : str, optional
If a name is specified, this must be a string with the name of
a node under where. In this case the where argument can only
lead to a Group (see :ref:`GroupClassDescr`) instance (else a
TypeError is raised). The node called name under the group
where is returned.
classname : str, optional
If the classname argument is specified, it must be the name of
a class derived from Node (e.g. Table). If the node is found but it
is not an instance of that class, a NoSuchNodeError is also raised.
Notes
-----
If the node to be returned does not exist, a NoSuchNodeError is
raised. Please note that hidden nodes are also considered.
"""
self._check_open()
if isinstance(where, Node):
where._g_check_open()
basepath = where._v_pathname
nodepath = join_path(basepath, name or '') or '/'
node = where._v_file._get_node(nodepath)
elif isinstance(where, (str, np.str_)):
if not where.startswith('/'):
raise NameError("``where`` must start with a slash ('/')")
basepath = where
nodepath = join_path(basepath, name or '') or '/'
node = self._get_node(nodepath)
else:
raise TypeError(
f"``where`` must be a string or a node: {where!r}")
# Finally, check whether the desired node is an instance
# of the expected class.
if classname:
class_ = get_class_by_name(classname)
if not isinstance(node, class_):
npathname = node._v_pathname
nclassname = node.__class__.__name__
# This error message is right since it can never be shown
# for ``classname in [None, 'Node']``.
raise NoSuchNodeError(
"could not find a ``%s`` node at ``%s``; "
"instead, a ``%s`` node has been found there"
% (classname, npathname, nclassname))
return node
def is_visible_node(self, path):
"""Is the node under `path` visible?
If the node does not exist, a NoSuchNodeError is raised.
"""
# ``util.isvisiblepath()`` is still recommended for internal use.
return self.get_node(path)._f_isvisible()
def rename_node(self, where, newname, name=None, overwrite=False):
"""Change the name of the node specified by where and name to newname.
Parameters
----------
where, name
These arguments work as in
:meth:`File.get_node`, referencing the node to be acted upon.
newname : str
The new name to be assigned to the node (a string).
overwrite : bool
Whether to recursively remove a node with the same
newname if it already exists (not done by default).
"""
obj = self.get_node(where, name=name)
obj._f_rename(newname, overwrite)
def move_node(self, where, newparent=None, newname=None, name=None,
overwrite=False, createparents=False):
"""Move the node specified by where and name to newparent/newname.
Parameters
----------
where, name : path
These arguments work as in
:meth:`File.get_node`, referencing the node to be acted upon.
newparent
The destination group the node will be moved into (a
path name or a Group instance). If it is
not specified or None, the current parent
group is chosen as the new parent.
newname
The new name to be assigned to the node in its
destination (a string). If it is not specified or
None, the current name is chosen as the
new name.
Notes
-----
The other arguments work as in :meth:`Node._f_move`.
"""
obj = self.get_node(where, name=name)
obj._f_move(newparent, newname, overwrite, createparents)
def copy_node(self, where, newparent=None, newname=None, name=None,
overwrite=False, recursive=False, createparents=False,
**kwargs):
"""Copy the node specified by where and name to newparent/newname.
Parameters
----------
where : str
These arguments work as in
:meth:`File.get_node`, referencing the node to be acted
upon.
newparent : str or Group
The destination group that the node will be copied
into (a path name or a Group
instance). If not specified or None, the
current parent group is chosen as the new parent.
newname : str
The name to be assigned to the new copy in its
destination (a string). If it is not specified or
None, the current name is chosen as the
new name.
name : str
These arguments work as in
:meth:`File.get_node`, referencing the node to be acted
upon.
overwrite : bool, optional
If True, the destination group will be overwritten if it already
exists. Defaults to False.
recursive : bool, optional
If True, all descendant nodes of srcgroup are recursively copied.
Defaults to False.
createparents : bool, optional
If True, any necessary parents of dstgroup will be created.
Defaults to False.
kwargs
Additional keyword arguments can be used to customize the copying
process. See the documentation of :meth:`Group._f_copy`
for a description of those arguments.
Returns
-------
node : Node
The newly created copy of the source node (i.e. the destination
node). See :meth:`.Node._f_copy` for further details on the
semantics of copying nodes.
"""
obj = self.get_node(where, name=name)
if obj._v_depth == 0 and newparent and not newname:
npobj = self.get_node(newparent)
if obj._v_file is not npobj._v_file:
# Special case for copying file1:/ --> file2:/path
self.root._f_copy_children(npobj, overwrite=overwrite,
recursive=recursive, **kwargs)
return npobj
else:
raise OSError(
"You cannot copy a root group over the same file")
return obj._f_copy(newparent, newname,
overwrite, recursive, createparents, **kwargs)
def remove_node(self, where, name=None, recursive=False):
"""Remove the object node *name* under *where* location.
Parameters
----------
where, name
These arguments work as in
:meth:`File.get_node`, referencing the node to be acted upon.
recursive : bool
If not supplied or false, the node will be removed
only if it has no children; if it does, a
NodeError will be raised. If supplied
with a true value, the node and all its descendants will be
completely removed.
"""
obj = self.get_node(where, name=name)
obj._f_remove(recursive)
def get_node_attr(self, where, attrname, name=None):
"""Get a PyTables attribute from the given node.
Parameters
----------
where, name
These arguments work as in :meth:`File.get_node`, referencing the
node to be acted upon.
attrname
The name of the attribute to retrieve. If the named
attribute does not exist, an AttributeError is raised.
"""
obj = self.get_node(where, name=name)
return obj._f_getattr(attrname)
def set_node_attr(self, where, attrname, attrvalue, name=None):
"""Set a PyTables attribute for the given node.
Parameters
----------
where, name
These arguments work as in
:meth:`File.get_node`, referencing the node to be acted upon.
attrname
The name of the attribute to set.
attrvalue
The value of the attribute to set. Any kind of Python
object (like strings, ints, floats, lists, tuples, dicts,
small NumPy objects ...) can be stored as an attribute.
However, if necessary, pickle is automatically used so as
to serialize objects that you might want to save.
See the :class:`AttributeSet` class for details.
Notes
-----
If the node already has a large number of attributes, a
PerformanceWarning is issued.
"""
obj = self.get_node(where, name=name)
obj._f_setattr(attrname, attrvalue)
def del_node_attr(self, where, attrname, name=None):
"""Delete a PyTables attribute from the given node.
Parameters
----------
where, name
These arguments work as in :meth:`File.get_node`, referencing the
node to be acted upon.
attrname
The name of the attribute to delete. If the named
attribute does not exist, an AttributeError is raised.
"""
obj = self.get_node(where, name=name)
obj._f_delattr(attrname)
def copy_node_attrs(self, where, dstnode, name=None):
"""Copy PyTables attributes from one node to another.
Parameters
----------
where, name
These arguments work as in :meth:`File.get_node`, referencing the
node to be acted upon.
dstnode
The destination node where the attributes will be copied to. It can
be a path string or a Node instance (see :ref:`NodeClassDescr`).
"""
srcobject = self.get_node(where, name=name)
dstobject = self.get_node(dstnode)
srcobject._v_attrs._f_copy(dstobject)
def copy_children(self, srcgroup, dstgroup,
overwrite=False, recursive=False,
createparents=False, **kwargs):
"""Copy the children of a group into another group.
Parameters
----------
srcgroup : str
The group to copy from.
dstgroup : str
The destination group.
overwrite : bool, optional
If True, the destination group will be overwritten if it already
exists. Defaults to False.
recursive : bool, optional
If True, all descendant nodes of srcgroup are recursively copied.
Defaults to False.
createparents : bool, optional
If True, any necessary parents of dstgroup will be created.
Defaults to False.
kwargs : dict
Additional keyword arguments can be used to customize the copying
process. See the documentation of :meth:`Group._f_copy_children`
for a description of those arguments.
"""
srcgroup = self.get_node(srcgroup) # Does the source node exist?
self._check_group(srcgroup) # Is it a group?
srcgroup._f_copy_children(
dstgroup, overwrite, recursive, createparents, **kwargs)
def copy_file(self, dstfilename, overwrite=False, **kwargs):
"""Copy the contents of this file to dstfilename.
Parameters
----------
dstfilename : str
A path string indicating the name of the destination file. If
it already exists, the copy will fail with an IOError, unless
the overwrite argument is true.
overwrite : bool, optional
If true, the destination file will be overwritten if it already
exists. In this case, the destination file must be closed, or
errors will occur. Defaults to False.
kwargs
Additional keyword arguments discussed below.
Notes
-----
Additional keyword arguments may be passed to customize the
copying process. For instance, title and filters may be changed,
user attributes may be or may not be copied, data may be
sub-sampled, stats may be collected, etc. Arguments unknown to
nodes are simply ignored. Check the documentation for copying
operations of nodes to see which options they support.
In addition, it recognizes the names of parameters present in
:file:`tables/parameters.py` as additional keyword arguments.
See :ref:`parameter_files` for detailed info on the supported
parameters.
Copying a file usually has the beneficial side effect of
creating a more compact and cleaner version of the original
file.
"""
self._check_open()
# Check that we are not copying the file onto itself
if Path(self.filename).resolve() == Path(dstfilename).resolve():
raise OSError("You cannot copy a file over itself")
# Compute default arguments.
# These are *not* passed on.
filters = kwargs.pop('filters', None)
if filters is None:
# By checking the HDF5 attribute, we avoid setting filters
# in the destination file if not explicitly set in the
# source file. Just by assigning ``self.filters`` we would
# not be able to tell.
filters = getattr(self.root._v_attrs, 'FILTERS', None)
copyuserattrs = kwargs.get('copyuserattrs', True)
title = kwargs.pop('title', self.title)
if Path(dstfilename).is_file() and not overwrite:
raise OSError(
f"file ``{dstfilename}`` already exists; you may want to "
f"use the ``overwrite`` argument"
)
# Create destination file, overwriting it.
dstfileh = open_file(
dstfilename, mode="w", title=title, filters=filters, **kwargs)
try:
# Maybe copy the user attributes of the root group.
if copyuserattrs:
self.root._v_attrs._f_copy(dstfileh.root)
# Copy the rest of the hierarchy.
self.root._f_copy_children(dstfileh.root, recursive=True, **kwargs)
finally:
dstfileh.close()
def list_nodes(self, where, classname=None):
"""Return a *list* with children nodes hanging from where.
This is a list-returning version of :meth:`File.iter_nodes`.
"""
group = self.get_node(where) # Does the parent exist?
self._check_group(group) # Is it a group?
return group._f_list_nodes(classname)
def iter_nodes(self, where, classname=None):
"""Iterate over children nodes hanging from where.
Parameters
----------
where
This argument works as in :meth:`File.get_node`, referencing the
node to be acted upon.
classname
If the name of a class derived from
Node (see :ref:`NodeClassDescr`) is supplied, only instances of
that class (or subclasses of it) will be returned.
Notes
-----
The returned nodes are alphanumerically sorted by their name.
This is an iterator version of :meth:`File.list_nodes`.
"""
group = self.get_node(where) # Does the parent exist?
self._check_group(group) # Is it a group?
return group._f_iter_nodes(classname)
def __contains__(self, path):
"""Is there a node with that path?
Returns True if the file has a node with the given path (a
string), False otherwise.
"""
try:
self.get_node(path)
except NoSuchNodeError:
return False
else:
return True
def __iter__(self):
"""Recursively iterate over the nodes in the tree.
This is equivalent to calling :meth:`File.walk_nodes` with no
arguments.
Examples
--------
::
# Recursively list all the nodes in the object tree.
h5file = tables.open_file('vlarray1.h5')
print("All nodes in the object tree:")
for node in h5file:
print(node)
"""
return self.walk_nodes('/')
def walk_nodes(self, where="/", classname=None):
"""Recursively iterate over nodes hanging from where.
Parameters
----------
where : str or Group, optional
If supplied, the iteration starts from (and includes)
this group. It can be a path string or a
Group instance (see :ref:`GroupClassDescr`).
classname
If the name of a class derived from
Node (see :ref:`GroupClassDescr`) is supplied, only instances of
that class (or subclasses of it) will be returned.
Notes
-----
This version iterates over the leaves in the same group in order
to avoid keeping a list that references them, which would prevent
the LRU cache from removing them after use.
Examples
--------
::
# Recursively print all the nodes hanging from '/detector'.
print("Nodes hanging from group '/detector':")
for node in h5file.walk_nodes('/detector', classname='EArray'):
print(node)
"""
class_ = get_class_by_name(classname)
if class_ is Group: # only groups
yield from self.walk_groups(where)
elif class_ is Node: # all nodes
yield self.get_node(where)
for group in self.walk_groups(where):
yield from self.iter_nodes(group)
else: # only nodes of the named type
for group in self.walk_groups(where):
yield from self.iter_nodes(group, classname)
def walk_groups(self, where="/"):
"""Recursively iterate over groups (not leaves) hanging from where.
The where group itself is listed first (preorder), then each of its
child groups (following an alphanumerical order) is also traversed,
following the same procedure. If where is not supplied, the root
group is used.
The where argument can be a path string
or a Group instance (see :ref:`GroupClassDescr`).
"""
group = self.get_node(where) # Does the parent exist?
self._check_group(group) # Is it a group?
return group._f_walk_groups()
def _check_open(self):
"""Check the state of the file.
If the file is closed, a `ClosedFileError` is raised.
"""
if not self.isopen:
raise ClosedFileError("the file object is closed")
def _iswritable(self):
"""Is this file writable?"""
return self.mode in ('w', 'a', 'r+')
def _check_writable(self):
"""Check whether the file is writable.
If the file is not writable, a `FileModeError` is raised.
"""
if not self._iswritable():
raise FileModeError("the file is not writable")
def _check_group(self, node):
# `node` must already be a node.
if not isinstance(node, Group):
raise TypeError(f"node ``{node._v_pathname}`` is not a group")
def is_undo_enabled(self):
"""Is the Undo/Redo mechanism enabled?
Returns True if the Undo/Redo mechanism has been enabled for
this file, False otherwise. Please note that this mechanism is
persistent, so a newly opened PyTables file may already have
Undo/Redo support enabled.
"""
self._check_open()
return self._undoEnabled
def _check_undo_enabled(self):
if not self._undoEnabled:
raise UndoRedoError("Undo/Redo feature is currently disabled!")
def _create_transaction_group(self):
tgroup = TransactionGroupG(
self.root, _trans_group_name,
"Transaction information container", new=True)
# The format of the transaction container.
tgroup._v_attrs._g__setattr('FORMATVERSION', _trans_version)
return tgroup
def _create_transaction(self, troot, tid):
return TransactionG(
troot, _trans_name % tid,
"Transaction number %d" % tid, new=True)
def _create_mark(self, trans, mid):
return MarkG(
trans, _markName % mid,
"Mark number %d" % mid, new=True)
def enable_undo(self, filters=Filters(complevel=1)):
"""Enable the Undo/Redo mechanism.
This operation prepares the database for undoing and redoing
modifications in the node hierarchy. This
allows :meth:`File.mark`, :meth:`File.undo`, :meth:`File.redo` and
other methods to be called.
The filters argument, when specified,
must be an instance of class Filters (see :ref:`FiltersClassDescr`) and
is meant for setting the compression values for the action log. The
default is having compression enabled, as the gains in terms of
space can be considerable. You may want to disable compression if
you want maximum speed for Undo/Redo operations.
Calling this method when the Undo/Redo mechanism is already
enabled raises an UndoRedoError.
"""
maxundo = self.params['MAX_UNDO_PATH_LENGTH']
class ActionLog(NotLoggedMixin, Table):
pass
class ActionLogDesc(IsDescription):
opcode = UInt8Col(pos=0)
arg1 = StringCol(maxundo, pos=1, dflt=b"")
arg2 = StringCol(maxundo, pos=2, dflt=b"")
self._check_open()
# Enabling several times is not allowed to avoid the user having
# the illusion that a new implicit mark has been created
# when calling enable_undo for the second time.
if self.is_undo_enabled():
raise UndoRedoError("Undo/Redo feature is already enabled!")
self._markers = {}
self._seqmarkers = []
self._nmarks = 0
self._curtransaction = 0
self._curmark = -1 # No marks yet
# Get the Group for keeping user actions
try:
tgroup = self.get_node(_trans_group_path)
except NodeError:
# The file is going to be changed.
self._check_writable()
# A transaction log group does not exist. Create it
tgroup = self._create_transaction_group()
# Create a transaction.
self._trans = self._create_transaction(
tgroup, self._curtransaction)
# Create an action log
self._actionlog = ActionLog(
tgroup, _action_log_name, ActionLogDesc, "Action log",
filters=filters)
# Create an implicit mark
self._actionlog.append([(_op_to_code["MARK"], str(0), '')])
self._nmarks += 1
self._seqmarkers.append(0) # current action is 0
# Create a group for mark 0
self._create_mark(self._trans, 0)
# Initialize the marker pointer
self._curmark = int(self._nmarks - 1)
# Initialize the action pointer
self._curaction = self._actionlog.nrows - 1
else:
# The group seems to exist already
# Get the default transaction
self._trans = tgroup._f_get_child(
_trans_name % self._curtransaction)
# Open the action log and go to the end of it
self._actionlog = tgroup.actionlog
for row in self._actionlog:
if row["opcode"] == _op_to_code["MARK"]:
name = row["arg2"].decode('utf-8')
self._markers[name] = self._nmarks
self._seqmarkers.append(row.nrow)
self._nmarks += 1
# Get the current mark and current action
self._curmark = int(self._actionlog.attrs.CURMARK)
self._curaction = self._actionlog.attrs.CURACTION
# The Undo/Redo mechanism has been enabled.
self._undoEnabled = True
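# Sketch of enabling Undo/Redo on a writable file object ``h5f`` (the
# Filters settings shown are illustrative, not required):
#
#   from tables import Filters
#   h5f.enable_undo()                              # default: zlib, complevel=1
#   # or, trading log size for speed:
#   # h5f.enable_undo(filters=Filters(complevel=0))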
def disable_undo(self):
"""Disable the Undo/Redo mechanism.
Disabling the Undo/Redo mechanism leaves the database in the
current state and forgets past and future database states. This
makes :meth:`File.mark`, :meth:`File.undo`, :meth:`File.redo` and other
methods fail with an UndoRedoError.
Calling this method when the Undo/Redo mechanism is already
disabled raises an UndoRedoError.
"""
self._check_open()
if not self.is_undo_enabled():
raise UndoRedoError("Undo/Redo feature is already disabled!")
# The file is going to be changed.
self._check_writable()
del self._markers
del self._seqmarkers
del self._curmark
del self._curaction
del self._curtransaction
del self._nmarks
del self._actionlog
# Recursively delete the transaction group
tnode = self.get_node(_trans_group_path)
tnode._g_remove(recursive=1)
# The Undo/Redo mechanism has been disabled.
self._undoEnabled = False
def mark(self, name=None):
"""Mark the state of the database.
Creates a mark for the current state of the database. A unique (and
immutable) identifier for the mark is returned. An optional name (a
string) can be assigned to the mark. Both the identifier of a mark and
its name can be used in :meth:`File.undo` and :meth:`File.redo`
operations. When the name has already been used for another mark,
an UndoRedoError is raised.
This method can only be called when the Undo/Redo mechanism has been
enabled. Otherwise, an UndoRedoError is raised.
"""
self._check_open()
self._check_undo_enabled()
if name is None:
name = ''
else:
if not isinstance(name, str):
raise TypeError("Only strings are allowed as mark names. "
"You passed object: '%s'" % name)
if name in self._markers:
raise UndoRedoError("Name '%s' is already used as a marker "
"name. Try another one." % name)
# The file is going to be changed.
self._check_writable()
self._markers[name] = self._curmark + 1
# Create an explicit mark
# Insert the mark in the action log
self._log("MARK", str(self._curmark + 1), name)
self._curmark += 1
self._nmarks = self._curmark + 1
self._seqmarkers.append(self._curaction)
# Create a group for the current mark
self._create_mark(self._trans, self._curmark)
return self._curmark
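# Sketch: marks may be anonymous or named; both the returned integer id
# and the name are accepted later by undo()/redo()/goto() (assumes
# Undo/Redo is already enabled on ``h5f``):
#
#   mid = h5f.mark()       # anonymous mark, returns an integer id
#   h5f.mark("loaded")     # named mark; reusing a name raises UndoRedoError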
def _log(self, action, *args):
"""Log an action.
The `action` must be an all-uppercase string identifying it.
Arguments must also be strings.
This method should be called once the action has been completed.
This method can only be called when the Undo/Redo mechanism has
been enabled. Otherwise, an `UndoRedoError` is raised.
"""
assert self.is_undo_enabled()
maxundo = self.params['MAX_UNDO_PATH_LENGTH']
# Check whether we are at the end of the action log or not
if self._curaction != self._actionlog.nrows - 1:
# We are not, so delete the trailing actions
self._actionlog.remove_rows(self._curaction + 1,
self._actionlog.nrows)
# Reset the current marker group
mnode = self.get_node(_markPath % (self._curtransaction,
self._curmark))
mnode._g_reset()
# Delete the marker groups with backup objects
for mark in range(self._curmark + 1, self._nmarks):
mnode = self.get_node(_markPath % (self._curtransaction, mark))
mnode._g_remove(recursive=1)
# Update the new number of marks
self._nmarks = self._curmark + 1
self._seqmarkers = self._seqmarkers[:self._nmarks]
if action not in _op_to_code: # INTERNAL
raise UndoRedoError("Action ``%s`` not in ``_op_to_code`` "
"dictionary: %r" % (action, _op_to_code))
arg1 = ""
arg2 = ""
if len(args) <= 1:
arg1 = args[0]
elif len(args) <= 2:
arg1 = args[0]
arg2 = args[1]
else: # INTERNAL
raise UndoRedoError("Too many parameters for action log: "
"%r").with_traceback(args)
if (len(arg1) > maxundo
or len(arg2) > maxundo): # INTERNAL
raise UndoRedoError("Parameter arg1 or arg2 is too long: "
"(%r, %r)" % (arg1, arg2))
# print("Logging-->", (action, arg1, arg2))
self._actionlog.append([(_op_to_code[action],
arg1.encode('utf-8'),
arg2.encode('utf-8'))])
self._curaction += 1
def _get_mark_id(self, mark):
"""Get an integer markid from a mark sequence number or name."""
if isinstance(mark, int):
markid = mark
elif isinstance(mark, str):
if mark not in self._markers:
lmarkers = sorted(self._markers)
raise UndoRedoError("The mark that you have specified has not "
"been found in the internal marker list: "
"%r" % lmarkers)
markid = self._markers[mark]
else:
raise TypeError("Parameter mark can only be an integer or a "
"string, and you passed a type <%s>" % type(mark))
# print("markid, self._nmarks:", markid, self._nmarks)
return markid
def _get_final_action(self, markid):
"""Get the action to go.
It does not touch the self private attributes
"""
if markid > self._nmarks - 1:
# The required mark is beyond the end of the action log
# The final action is the last row
return self._actionlog.nrows
elif markid <= 0:
# The required mark is the first one
# return the first row
return 0
return self._seqmarkers[markid]
def _doundo(self, finalaction, direction):
"""Undo/Redo actions up to final action in the specificed direction."""
if direction < 0:
actionlog = \
self._actionlog[finalaction + 1:self._curaction + 1][::-1]
else:
actionlog = self._actionlog[self._curaction:finalaction]
# Uncomment this for debugging
# print("curaction, finalaction, direction", \
# self._curaction, finalaction, direction)
for i in range(len(actionlog)):
if actionlog['opcode'][i] != _op_to_code["MARK"]:
# undo/redo the action
if direction > 0:
# Uncomment this for debugging
# print("redo-->", \
# _code_to_op[actionlog['opcode'][i]],\
# actionlog['arg1'][i],\
# actionlog['arg2'][i])
undoredo.redo(self,
# _code_to_op[actionlog['opcode'][i]],
# The next is a workaround for python < 2.5
_code_to_op[int(actionlog['opcode'][i])],
actionlog['arg1'][i].decode('utf8'),
actionlog['arg2'][i].decode('utf8'))
else:
# Uncomment this for debugging
# print("undo-->", \
# _code_to_op[actionlog['opcode'][i]],\
# actionlog['arg1'][i].decode('utf8'),\
# actionlog['arg2'][i].decode('utf8'))
undoredo.undo(self,
# _code_to_op[actionlog['opcode'][i]],
# The next is a workaround for python < 2.5
_code_to_op[int(actionlog['opcode'][i])],
actionlog['arg1'][i].decode('utf8'),
actionlog['arg2'][i].decode('utf8'))
else:
if direction > 0:
self._curmark = int(actionlog['arg1'][i])
else:
self._curmark = int(actionlog['arg1'][i]) - 1
# Protection against negative marks
if self._curmark < 0:
self._curmark = 0
self._curaction += direction
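# Illustration with hypothetical action-log contents:
#   row 0: MARK 0 ''   row 1: CREATE /g1   row 2: MARK 1 'm1'   row 3: REMOVE /g1
# An undo() from action 3 back to mark 1 walks row 3 with direction -1
# and reverses the REMOVE; a later redo() walks the same rows with
# direction +1 and re-applies it, while MARK rows only move _curmark.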
def undo(self, mark=None):
"""Go to a past state of the database.
Returns the database to the state associated with the specified mark.
Both the identifier of a mark and its name can be used. If the mark is
omitted, the last created mark is used. If there are no past
marks, or the specified mark is not older than the current one, an
UndoRedoError is raised.
This method can only be called when the Undo/Redo mechanism
has been enabled. Otherwise, an UndoRedoError
is raised.
"""
self._check_open()
self._check_undo_enabled()
# print("(pre)UNDO: (curaction, curmark) = (%s,%s)" % \
# (self._curaction, self._curmark))
if mark is None:
markid = self._curmark
# Correction if we are settled on top of a mark
opcode = self._actionlog.cols.opcode
if opcode[self._curaction] == _op_to_code["MARK"]:
markid -= 1
else:
# Get the mark ID number
markid = self._get_mark_id(mark)
# Get the final action ID to go
finalaction = self._get_final_action(markid)
if finalaction > self._curaction:
raise UndoRedoError("Mark ``%s`` is newer than the current mark. "
"Use `redo()` or `goto()` instead." % (mark,))
# The file is going to be changed.
self._check_writable()
# Try to reach this mark by unwinding actions in the log
self._doundo(finalaction - 1, -1)
if self._curaction < self._actionlog.nrows - 1:
self._curaction += 1
self._curmark = int(self._actionlog.cols.arg1[self._curaction])
# print("(post)UNDO: (curaction, curmark) = (%s,%s)" % \
# (self._curaction, self._curmark))
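# Round-trip sketch (assumes Undo/Redo is enabled on ``h5f``):
#
#   h5f.create_group("/", "g1")
#   h5f.mark("after-g1")
#   h5f.remove_node("/g1")
#   h5f.undo("after-g1")   # "/g1" exists again
#   h5f.redo()             # replays the removal: "/g1" is gone again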
def redo(self, mark=None):
"""Go to a future state of the database.
Returns the database to the state associated with the specified
mark. Both the identifier of a mark and its name can be used.
If the `mark` is omitted, the next created mark is used. If
there are no future marks, or the specified mark is not newer
than the current one, an UndoRedoError is raised.
This method can only be called when the Undo/Redo mechanism has
been enabled. Otherwise, an UndoRedoError is raised.
"""
self._check_open()
self._check_undo_enabled()
# print("(pre)REDO: (curaction, curmark) = (%s, %s)" % \
# (self._curaction, self._curmark))
if self._curaction >= self._actionlog.nrows - 1:
# We are at the end of log, so no action
return
if mark is None:
mark = self._curmark + 1
elif mark == -1:
mark = int(self._nmarks) # Go beyond the mark bounds up to the end
# Get the mark ID number
markid = self._get_mark_id(mark)
finalaction = self._get_final_action(markid)
if finalaction < self._curaction + 1:
raise UndoRedoError("Mark ``%s`` is older than the current mark. "
"Use `redo()` or `goto()` instead." % (mark,))
# The file is going to be changed.
self._check_writable()
# Get the final action ID to go
self._curaction += 1
# Try to reach this mark by redoing the actions in the log
self._doundo(finalaction, 1)
# Increment the current mark only if we are not at the end of marks
if self._curmark < self._nmarks - 1:
self._curmark += 1
if self._curaction > self._actionlog.nrows - 1:
self._curaction = self._actionlog.nrows - 1
# print("(post)REDO: (curaction, curmark) = (%s,%s)" % \
# (self._curaction, self._curmark))
def goto(self, mark):
"""Go to a specific mark of the database.
Returns the database to the state associated with the specified mark.
Both the identifier of a mark and its name can be used.
This method can only be called when the Undo/Redo mechanism has been
enabled. Otherwise, an UndoRedoError is raised.
"""
self._check_open()
self._check_undo_enabled()
if mark == -1: # Special case
mark = self._nmarks # Go beyond the mark bounds up to the end
# Get the mark ID number
markid = self._get_mark_id(mark)
finalaction = self._get_final_action(markid)
if finalaction < self._curaction:
self.undo(mark)
else:
self.redo(mark)
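# Sketch: goto() dispatches to undo() or redo() as needed; the special
# value -1 jumps to the most recent recorded state (assumes Undo/Redo
# is enabled):
#
#   h5f.goto("after-g1")   # back (or forward) to the named mark
#   h5f.goto(-1)           # forward to the latest state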
def get_current_mark(self):
"""Get the identifier of the current mark.
Returns the identifier of the current mark. This can be used
to know the state of a database after an application crash, or to
get the identifier of the initial implicit mark after a call
to :meth:`File.enable_undo`.
This method can only be called when the Undo/Redo mechanism
has been enabled. Otherwise, an UndoRedoError
is raised.
"""
self._check_open()
self._check_undo_enabled()
return self._curmark
def _shadow_name(self):
"""Compute and return a shadow name.
Computes the current shadow name according to the current
transaction, mark and action. It returns a tuple with the
shadow parent node and the name of the shadow in it.
"""
parent = self.get_node(
_shadow_parent % (self._curtransaction, self._curmark))
name = _shadow_name % (self._curaction,)
return (parent, name)
def flush(self):
"""Flush all the alive leaves in the object tree."""
self._check_open()
# Flush the cache to disk
self._node_manager.flush_nodes()
self._flush_file(0) # 0 means local scope, 1 global (virtual) scope
def close(self):
"""Flush all the alive leaves in object tree and close the file."""
# If the file is already closed, return immediately
if not self.isopen:
return
# If this file has been opened more than once, decrease the
# counter and return
if self._open_count > 1:
self._open_count -= 1
return
filename = self.filename
if self._undoEnabled and self._iswritable():
# Save the current mark and current action
self._actionlog.attrs._g__setattr("CURMARK", self._curmark)
self._actionlog.attrs._g__setattr("CURACTION", self._curaction)
# Close all loaded nodes.
self.root._f_close()
self._node_manager.shutdown()
# Post-conditions
assert len(self._node_manager.cache) == 0, \
("cached nodes remain after closing: %s"
% list(self._node_manager.cache))
# No other nodes should have been revived.
assert len(self._node_manager.registry) == 0, \
("alive nodes remain after closing: %s"
% list(self._node_manager.registry))
# Close the file
self._close_file()
# After the objects are disconnected, destroy the
# object dictionary using the brute force ;-)
# This should help to the garbage collector
self.__dict__.clear()
# Set the flag to indicate that the file is closed
self.isopen = 0
# Restore the filename attribute that is used by _FileRegistry
self.filename = filename
# Delete the entry from the registry of opened files
_open_files.remove(self)
def __enter__(self):
"""Enter a context and return the same file."""
return self
def __exit__(self, *exc_info):
"""Exit a context and close the file."""
self.close()
return False # do not hide exceptions
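# Context-manager sketch: the file is closed even if the body raises
# (file and node names below are hypothetical):
#
#   import tables
#   with tables.open_file("demo.h5", mode="a") as h5f:
#       h5f.create_array("/", "x", [1, 2, 3])
#   # here h5f.isopen is 0; any exception raised inside the block propagates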
def __str__(self):
"""Return a short string representation of the object tree.
Examples
--------
::
>>> import tables
>>> f = tables.open_file('tables/tests/Tables_lzo2.h5')
>>> print(f)
tables/tests/Tables_lzo2.h5 (File) 'Table Benchmark'
Last modif.: '...'
Object Tree:
/ (RootGroup) 'Table Benchmark'
/tuple0 (Table(100,)lzo(1)) 'This is the table title'
/group0 (Group) ''
/group0/tuple1 (Table(100,)lzo(1)) 'This is the table title'
/group0/group1 (Group) ''
/group0/group1/tuple2 (Table(100,)lzo(1)) 'This is the table title'
/group0/group1/group2 (Group) ''
>>> f.close()
"""
if not self.isopen:
return "<closed File>"
# Print all the nodes (Group and Leaf objects) on object tree
try:
date = datetime.datetime.fromtimestamp(
Path(self.filename).stat().st_mtime, datetime.timezone.utc
).isoformat(timespec='seconds')
except OSError:
# in-memory file
date = "<in-memory file>"
lines = [f'{self.filename} (File) {self.title!r}',
f'Last modif.: {date!r}',
'Object Tree: ']
for group in self.walk_groups("/"):
lines.append(f'{group}')
for kind in self._node_kinds[1:]:
for node in self.list_nodes(group, kind):
lines.append(f'{node}')
return '\n'.join(lines) + '\n'
def __repr__(self):
"""Return a detailed string representation of the object tree."""
if not self.isopen:
return "<closed File>"
# Print all the nodes (Group and Leaf objects) on object tree
lines = [
f'File(filename={self.filename!s}, title={self.title!r}, '
f'mode={self.mode!r}, root_uep={self.root_uep!r}, '
f'filters={self.filters!r})']
for group in self.walk_groups("/"):
lines.append(f'{group}')
for kind in self._node_kinds[1:]:
for node in self.list_nodes(group, kind):
lines.append(f'{node!r}')
return '\n'.join(lines) + '\n'
def _update_node_locations(self, oldpath, newpath):
"""Update location information of nodes under `oldpath`.
This only affects *already loaded* nodes.
"""
oldprefix = oldpath + '/' # root node can not be renamed, anyway
oldprefix_len = len(oldprefix)
# Update alive and dead descendents.
for cache in [self._node_manager.cache, self._node_manager.registry]:
for nodepath in list(cache):
if nodepath.startswith(oldprefix) and nodepath != oldprefix:
nodesuffix = nodepath[oldprefix_len:]
newnodepath = join_path(newpath, nodesuffix)
newnodeppath = split_path(newnodepath)[0]
descendent_node = self._get_node(nodepath)
descendent_node._g_update_location(newnodeppath)
| (filename, mode='r', title='', root_uep='/', filters=None, **kwargs) |
728,397 | tables.file | __get_root_group | Returns a Group instance which will act as the root group in the
hierarchical tree.
If file is opened in "r", "r+" or "a" mode, and the file already
exists, this method dynamically builds a python object tree
emulating the structure present on file.
| def __get_root_group(self, root_uep, title, filters):
"""Returns a Group instance which will act as the root group in the
hierarchical tree.
If file is opened in "r", "r+" or "a" mode, and the file already
exists, this method dynamically builds a python object tree
emulating the structure present on file.
"""
self._v_objectid = self._get_file_id()
if root_uep in [None, ""]:
root_uep = "/"
# Save the User Entry Point in a variable class
self.root_uep = root_uep
new = self._v_new
# Get format version *before* getting the object tree
if not new:
# Firstly, get the PyTables format version for this file
self.format_version = utilsextension.read_f_attr(
self._v_objectid, 'PYTABLES_FORMAT_VERSION')
if not self.format_version:
# PYTABLES_FORMAT_VERSION attribute is not present
self.format_version = "unknown"
self._isPTFile = False
elif not isinstance(self.format_version, str):
# system attributes should always be str
self.format_version = self.format_version.decode('utf-8')
# Create new attributes for the root Group instance and
# create the object tree
return RootGroup(self, root_uep, title=title, new=new, filters=filters)
| (self, root_uep, title, filters) |
728,398 | tables.file | __contains__ | Is there a node with that path?
Returns True if the file has a node with the given path (a
string), False otherwise.
| def __contains__(self, path):
"""Is there a node with that path?
Returns True if the file has a node with the given path (a
string), False otherwise.
"""
try:
self.get_node(path)
except NoSuchNodeError:
return False
else:
return True
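# Membership sketch (paths below are hypothetical):
#
#   "/group0/tuple1" in h5f    # True if such a node exists
#   "/no/such/node" in h5f     # False; no exception is raised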
| (self, path) |
728,399 | tables.file | __enter__ | Enter a context and return the same file. | def __enter__(self):
"""Enter a context and return the same file."""
return self
| (self) |
728,400 | tables.file | __exit__ | Exit a context and close the file. | def __exit__(self, *exc_info):
"""Exit a context and close the file."""
self.close()
return False # do not hide exceptions
| (self, *exc_info) |
728,401 | tables.file | __init__ | null | def __init__(self, filename, mode="r", title="",
root_uep="/", filters=None, **kwargs):
self.filename = os.fspath(filename)
"""The name of the opened file."""
self.mode = mode
"""The mode in which the file was opened."""
if mode not in ('r', 'r+', 'a', 'w'):
raise ValueError("invalid mode string ``%s``. Allowed modes are: "
"'r', 'r+', 'a' and 'w'" % mode)
# Get all the parameters in parameter file(s)
params = {k: v for k, v in parameters.__dict__.items()
if k.isupper() and not k.startswith('_')}
# Update them with possible keyword arguments
if [k for k in kwargs if k.isupper()]:
warnings.warn("The use of uppercase keyword parameters is "
"deprecated", DeprecationWarning)
kwargs = {k.upper(): v for k, v in kwargs.items()}
params.update(kwargs)
# If MAX_ * _THREADS is not set yet, set it to the number of cores
# on this machine.
if params['MAX_NUMEXPR_THREADS'] is None:
params['MAX_NUMEXPR_THREADS'] = detect_number_of_cores()
if params['MAX_BLOSC_THREADS'] is None:
params['MAX_BLOSC_THREADS'] = detect_number_of_cores()
self.params = params
# Now, it is time to initialize the File extension
self._g_new(filename, mode, **params)
# Check filters and set PyTables format version for new files.
new = self._v_new
if new:
_checkfilters(filters)
self.format_version = format_version
"""The PyTables version number of this file."""
# The node manager must be initialized before the root group
# initialization, but the node_factory attribute is set only later
# because it is a bound method of the root group itself.
node_cache_slots = params['NODE_CACHE_SLOTS']
self._node_manager = NodeManager(nslots=node_cache_slots)
# For the moment Undo/Redo is not enabled.
self._undoEnabled = False
# Set the flag to indicate that the file has been opened.
# It must be set before opening the root group
# to allow some basic access to its attributes.
self.isopen = 1
"""True if the underlying file os open, False otherwise."""
# Append the name of the file to the global dict of files opened.
_open_files.add(self)
# Set the number of times this file has been opened to 1
self._open_count = 1
# Get the root group from this file
self.root = root = self.__get_root_group(root_uep, title, filters)
"""The *root* of the object tree hierarchy (a Group instance)."""
# Complete the creation of the root node
# (see the explanation in ``RootGroup.__init__()``).
root._g_post_init_hook()
self._node_manager.node_factory = self.root._g_load_child
# Save the PyTables format version for this file.
if new:
if params['PYTABLES_SYS_ATTRS']:
root._v_attrs._g__setattr(
'PYTABLES_FORMAT_VERSION', format_version)
# If the file is old, and not opened in "read-only" mode,
# check if it has a transaction log
if not new and self.mode != "r" and _trans_group_path in self:
# It does. Enable the undo.
self.enable_undo()
# Set the maximum number of threads for Numexpr
ne.set_vml_num_threads(params['MAX_NUMEXPR_THREADS'])
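# Parameter-override sketch: entries from tables/parameters.py can be
# overridden per file through keyword arguments (the value shown is
# illustrative only):
#
#   h5f = tables.open_file("demo.h5", node_cache_slots=128)
#   h5f.params['NODE_CACHE_SLOTS']   # -> 128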
| (self, filename, mode='r', title='', root_uep='/', filters=None, **kwargs) |
728,402 | tables.file | __iter__ | Recursively iterate over the nodes in the tree.
This is equivalent to calling :meth:`File.walk_nodes` with no
arguments.
Examples
--------
::
# Recursively list all the nodes in the object tree.
h5file = tables.open_file('vlarray1.h5')
print("All nodes in the object tree:")
for node in h5file:
print(node)
| def __iter__(self):
"""Recursively iterate over the nodes in the tree.
This is equivalent to calling :meth:`File.walk_nodes` with no
arguments.
Examples
--------
::
# Recursively list all the nodes in the object tree.
h5file = tables.open_file('vlarray1.h5')
print("All nodes in the object tree:")
for node in h5file:
print(node)
"""
return self.walk_nodes('/')
| (self) |
728,403 | tables.file | __repr__ | Return a detailed string representation of the object tree. | def __repr__(self):
"""Return a detailed string representation of the object tree."""
if not self.isopen:
return "<closed File>"
# Print all the nodes (Group and Leaf objects) on object tree
lines = [
f'File(filename={self.filename!s}, title={self.title!r}, '
f'mode={self.mode!r}, root_uep={self.root_uep!r}, '
f'filters={self.filters!r})']
for group in self.walk_groups("/"):
lines.append(f'{group}')
for kind in self._node_kinds[1:]:
for node in self.list_nodes(group, kind):
lines.append(f'{node!r}')
return '\n'.join(lines) + '\n'
| (self) |
728,404 | tables.file | __str__ | Return a short string representation of the object tree.
Examples
--------
::
>>> import tables
>>> f = tables.open_file('tables/tests/Tables_lzo2.h5')
>>> print(f)
tables/tests/Tables_lzo2.h5 (File) 'Table Benchmark'
Last modif.: '...'
Object Tree:
/ (RootGroup) 'Table Benchmark'
/tuple0 (Table(100,)lzo(1)) 'This is the table title'
/group0 (Group) ''
/group0/tuple1 (Table(100,)lzo(1)) 'This is the table title'
/group0/group1 (Group) ''
/group0/group1/tuple2 (Table(100,)lzo(1)) 'This is the table title'
/group0/group1/group2 (Group) ''
>>> f.close()
| def __str__(self):
"""Return a short string representation of the object tree.
Examples
--------
::
>>> import tables
>>> f = tables.open_file('tables/tests/Tables_lzo2.h5')
>>> print(f)
tables/tests/Tables_lzo2.h5 (File) 'Table Benchmark'
Last modif.: '...'
Object Tree:
/ (RootGroup) 'Table Benchmark'
/tuple0 (Table(100,)lzo(1)) 'This is the table title'
/group0 (Group) ''
/group0/tuple1 (Table(100,)lzo(1)) 'This is the table title'
/group0/group1 (Group) ''
/group0/group1/tuple2 (Table(100,)lzo(1)) 'This is the table title'
/group0/group1/group2 (Group) ''
>>> f.close()
"""
if not self.isopen:
return "<closed File>"
# Print all the nodes (Group and Leaf objects) on object tree
try:
date = datetime.datetime.fromtimestamp(
Path(self.filename).stat().st_mtime, datetime.timezone.utc
).isoformat(timespec='seconds')
except OSError:
# in-memory file
date = "<in-memory file>"
lines = [f'{self.filename} (File) {self.title!r}',
f'Last modif.: {date!r}',
'Object Tree: ']
for group in self.walk_groups("/"):
lines.append(f'{group}')
for kind in self._node_kinds[1:]:
for node in self.list_nodes(group, kind):
lines.append(f'{node}')
return '\n'.join(lines) + '\n'
| (self) |
728,405 | tables.file | _check_group | null | def _check_group(self, node):
# `node` must already be a node.
if not isinstance(node, Group):
raise TypeError(f"node ``{node._v_pathname}`` is not a group")
| (self, node) |
728,406 | tables.file | _check_open | Check the state of the file.
If the file is closed, a `ClosedFileError` is raised.
| def _check_open(self):
"""Check the state of the file.
If the file is closed, a `ClosedFileError` is raised.
"""
if not self.isopen:
raise ClosedFileError("the file object is closed")
| (self) |
728,407 | tables.file | _check_undo_enabled | null | def _check_undo_enabled(self):
if not self._undoEnabled:
raise UndoRedoError("Undo/Redo feature is currently disabled!")
| (self) |
728,408 | tables.file | _check_writable | Check whether the file is writable.
If the file is not writable, a `FileModeError` is raised.
| def _check_writable(self):
"""Check whether the file is writable.
If the file is not writable, a `FileModeError` is raised.
"""
if not self._iswritable():
raise FileModeError("the file is not writable")
| (self) |
728,409 | tables.file | _create_mark | null | def _create_mark(self, trans, mid):
return MarkG(
trans, _markName % mid,
"Mark number %d" % mid, new=True)
| (self, trans, mid) |
728,410 | tables.file | _create_path | Create the groups needed for the `path` to exist.
The group associated with the given `path` is returned.
| def _create_path(self, path):
"""Create the groups needed for the `path` to exist.
The group associated with the given `path` is returned.
"""
if not hasattr(path, 'split'):
raise TypeError("when creating parents, parent must be a path")
if path == '/':
return self.root
parent, create_group = self.root, self.create_group
for pcomp in path.split('/')[1:]:
try:
child = parent._f_get_child(pcomp)
except NoSuchNodeError:
child = create_group(parent, pcomp)
parent = child
return parent
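# Sketch: this helper is what the ``createparents=True`` option of the
# ``create_*`` methods relies on; e.g. (paths hypothetical):
#
#   h5f.create_group("/a/b", "c", createparents=True)   # builds /a and /a/b first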
| (self, path) |