Dataset schema (field name, type, value statistics):

  repo              stringclasses    679 values
  path              stringlengths    6 - 122
  func_name         stringlengths    2 - 76
  original_string   stringlengths    87 - 70.9k
  language          stringclasses    1 value
  code              stringlengths    87 - 70.9k
  code_tokens       sequencelengths  20 - 6.91k
  docstring         stringlengths    1 - 21.7k
  docstring_tokens  sequencelengths  1 - 1.6k
  sha               stringclasses    679 values
  url               stringlengths    92 - 213
  partition         stringclasses    1 value

Each record below gives the repository, file path, function name, function source, language, commit sha, source URL, and partition.
numenta/nupic
src/nupic/encoders/coordinate.py
CoordinateEncoder._topWCoordinates
def _topWCoordinates(cls, coordinates, w):
  """
  Returns the top W coordinates by order.

  @param coordinates (numpy.array) A 2D numpy array, where each element
                                   is a coordinate
  @param w (int) Number of top coordinates to return
  @return (numpy.array) A subset of `coordinates`, containing only the
                        top ones by order
  """
  orders = numpy.array([cls._orderForCoordinate(c)
                        for c in coordinates.tolist()])
  indices = numpy.argsort(orders)[-w:]
  return coordinates[indices]
python
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/encoders/coordinate.py#L139-L152
valid
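The selection idiom above, argsort ascending and keep the last w indices, works with any deterministic scoring function. A minimal standalone sketch, with a hypothetical stand-in for _orderForCoordinate:

import numpy

def orderFor(coordinate):
  # Hypothetical scorer: any deterministic map from a coordinate to [0, 1).
  return (hash(tuple(coordinate)) % 1000) / 1000.0

coordinates = numpy.array([[0, 0], [0, 1], [1, 0], [1, 1]])
orders = numpy.array([orderFor(c) for c in coordinates.tolist()])
print(coordinates[numpy.argsort(orders)[-2:]])  # the two rows with highest order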
numenta/nupic
src/nupic/encoders/coordinate.py
CoordinateEncoder._hashCoordinate
def _hashCoordinate(coordinate):
  """Hash a coordinate to a 64 bit integer."""
  coordinateStr = ",".join(str(v) for v in coordinate)
  # Compute the hash and convert to 64 bit int.
  hash = int(int(hashlib.md5(coordinateStr).hexdigest(), 16) % (2 ** 64))
  return hash
python
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/encoders/coordinate.py#L156-L161
valid
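This hash anchors the determinism of the encoder's other helpers. The same scheme in standalone Python 3 form (the record's code is Python 2; under Python 3 the string must be encoded to bytes before hashing):

import hashlib

def hashCoordinate(coordinate):
  coordinateStr = ",".join(str(v) for v in coordinate)
  # md5 yields 128 bits; reduce modulo 2**64 to fit a 64 bit integer.
  return int(hashlib.md5(coordinateStr.encode("utf-8")).hexdigest(), 16) % (2 ** 64)

print(hashCoordinate([3, 7]) == hashCoordinate([3, 7]))  # True: stable across runs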
numenta/nupic
src/nupic/encoders/coordinate.py
CoordinateEncoder._orderForCoordinate
def _orderForCoordinate(cls, coordinate):
  """
  Returns the order for a coordinate.

  @param coordinate (numpy.array) Coordinate
  @return (float) A value in the interval [0, 1), representing the
                  order of the coordinate
  """
  seed = cls._hashCoordinate(coordinate)
  rng = Random(seed)
  return rng.getReal64()
python
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/encoders/coordinate.py#L165-L175
valid
numenta/nupic
src/nupic/encoders/coordinate.py
CoordinateEncoder._bitForCoordinate
def _bitForCoordinate(cls, coordinate, n):
  """
  Maps the coordinate to a bit in the SDR.

  @param coordinate (numpy.array) Coordinate
  @param n (int) The number of available bits in the SDR
  @return (int) The index to a bit in the SDR
  """
  seed = cls._hashCoordinate(coordinate)
  rng = Random(seed)
  return rng.getUInt32(n)
python
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/encoders/coordinate.py#L179-L189
valid
numenta/nupic
src/nupic/algorithms/connections.py
binSearch
def binSearch(arr, val):
  """ Function for running binary search on a sorted list.

  :param arr: (list) a sorted list of integers to search
  :param val: (int) an integer to search for in the sorted array
  :returns: (int) the index of the element if it is found and -1 otherwise.
  """
  i = bisect_left(arr, val)
  if i != len(arr) and arr[i] == val:
    return i
  return -1
python
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/connections.py#L124-L135
valid
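A quick standalone check of the contract: the index when the value is present, -1 otherwise (rather than bisect_left's insertion point):

from bisect import bisect_left

def binSearch(arr, val):
  i = bisect_left(arr, val)
  return i if i != len(arr) and arr[i] == val else -1

assert binSearch([2, 4, 8, 16], 8) == 2    # found: its index
assert binSearch([2, 4, 8, 16], 5) == -1   # missing: -1, not insertion point 2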
numenta/nupic
src/nupic/algorithms/connections.py
Connections.createSegment
def createSegment(self, cell):
  """
  Adds a new segment on a cell.

  :param cell: (int) Cell index
  :returns: (:class:`Segment`) the newly created segment
  """
  cellData = self._cells[cell]

  if len(self._freeFlatIdxs) > 0:
    flatIdx = self._freeFlatIdxs.pop()
  else:
    flatIdx = self._nextFlatIdx
    self._segmentForFlatIdx.append(None)
    self._nextFlatIdx += 1

  ordinal = self._nextSegmentOrdinal
  self._nextSegmentOrdinal += 1

  segment = Segment(cell, flatIdx, ordinal)
  cellData._segments.append(segment)
  self._segmentForFlatIdx[flatIdx] = segment

  return segment
python
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/connections.py#L260-L283
valid
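createSegment pairs with destroySegment below through a free list: flat indices freed by destroyed segments are reused before the flat table grows. The pattern in miniature (plain lists standing in for the Connections bookkeeping):

# Hypothetical mini allocator illustrating the _freeFlatIdxs pattern.
table = ["a", "b", None]   # flat table; slot 2 was freed earlier
free = [2]
nextIdx = 3

def alloc(value):
  global nextIdx
  if free:
    idx = free.pop()        # reuse a hole first
  else:
    idx = nextIdx           # otherwise grow the table
    table.append(None)
    nextIdx += 1
  table[idx] = value
  return idx

print(alloc("c"))  # 2: the recycled slot
print(alloc("d"))  # 3: fresh growth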
numenta/nupic
src/nupic/algorithms/connections.py
Connections.destroySegment
def destroySegment(self, segment):
  """
  Destroys a segment.

  :param segment: (:class:`Segment`) representing the segment to be destroyed.
  """
  # Remove the synapses from all data structures outside this Segment.
  for synapse in segment._synapses:
    self._removeSynapseFromPresynapticMap(synapse)
  self._numSynapses -= len(segment._synapses)

  # Remove the segment from the cell's list.
  segments = self._cells[segment.cell]._segments
  i = segments.index(segment)
  del segments[i]

  # Free the flatIdx and remove the final reference so the Segment can be
  # garbage-collected.
  self._freeFlatIdxs.append(segment.flatIdx)
  self._segmentForFlatIdx[segment.flatIdx] = None
python
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/connections.py#L286-L305
valid
numenta/nupic
src/nupic/algorithms/connections.py
Connections.createSynapse
def createSynapse(self, segment, presynapticCell, permanence):
  """
  Creates a new synapse on a segment.

  :param segment: (:class:`Segment`) Segment object for synapse to be synapsed
         to.
  :param presynapticCell: (int) Source cell index.
  :param permanence: (float) Initial permanence of synapse.
  :returns: (:class:`Synapse`) created synapse
  """
  idx = len(segment._synapses)
  synapse = Synapse(segment, presynapticCell, permanence,
                    self._nextSynapseOrdinal)
  self._nextSynapseOrdinal += 1
  segment._synapses.add(synapse)

  self._synapsesForPresynapticCell[presynapticCell].add(synapse)

  self._numSynapses += 1

  return synapse
python
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/connections.py#L308-L328
valid
numenta/nupic
src/nupic/algorithms/connections.py
Connections.destroySynapse
def destroySynapse(self, synapse):
  """
  Destroys a synapse.

  :param synapse: (:class:`Synapse`) synapse to destroy
  """
  self._numSynapses -= 1

  self._removeSynapseFromPresynapticMap(synapse)

  synapse.segment._synapses.remove(synapse)
python
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/connections.py#L340-L351
valid
numenta/nupic
src/nupic/algorithms/connections.py
Connections.computeActivity
def computeActivity(self, activePresynapticCells, connectedPermanence):
  """
  Compute each segment's number of active synapses for a given input.
  In the returned lists, a segment's active synapse count is stored at index
  ``segment.flatIdx``.

  :param activePresynapticCells: (iter) Active cells.
  :param connectedPermanence: (float) Permanence threshold for a synapse to
         be considered connected

  :returns: (tuple) (``numActiveConnectedSynapsesForSegment`` [list],
            ``numActivePotentialSynapsesForSegment`` [list])
  """
  numActiveConnectedSynapsesForSegment = [0] * self._nextFlatIdx
  numActivePotentialSynapsesForSegment = [0] * self._nextFlatIdx

  threshold = connectedPermanence - EPSILON
  for cell in activePresynapticCells:
    for synapse in self._synapsesForPresynapticCell[cell]:
      flatIdx = synapse.segment.flatIdx
      numActivePotentialSynapsesForSegment[flatIdx] += 1
      if synapse.permanence > threshold:
        numActiveConnectedSynapsesForSegment[flatIdx] += 1

  return (numActiveConnectedSynapsesForSegment,
          numActivePotentialSynapsesForSegment)
python
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/connections.py#L365-L392
valid
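The double count above (potential vs. connected synapses) is the heart of the activity computation. A self-contained sketch of the same pattern, with hypothetical tuple synapses in place of the nupic classes:

from collections import defaultdict

# (segment flatIdx, presynaptic cell, permanence): illustrative data only.
synapses = [(0, 10, 0.6), (0, 11, 0.3), (1, 10, 0.7), (1, 12, 0.9)]
synapsesForCell = defaultdict(list)
for seg, cell, perm in synapses:
  synapsesForCell[cell].append((seg, perm))

activeCells, connectedPerm, numSegments = {10, 12}, 0.5, 2
connected = [0] * numSegments
potential = [0] * numSegments
for cell in activeCells:
  for seg, perm in synapsesForCell[cell]:
    potential[seg] += 1          # any synapse from an active cell counts
    if perm > connectedPerm:     # only above-threshold synapses count here
      connected[seg] += 1

print(connected, potential)  # [1, 2] [1, 2]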
numenta/nupic
src/nupic/algorithms/connections.py
Connections.numSegments
def numSegments(self, cell=None):
  """
  Returns the number of segments.

  :param cell: (int) Optional parameter to get the number of segments on a
         cell.
  :returns: (int) Number of segments on all cells if cell is not specified,
            or on the specified cell
  """
  if cell is not None:
    return len(self._cells[cell]._segments)

  return self._nextFlatIdx - len(self._freeFlatIdxs)
python
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/connections.py#L395-L407
valid
numenta/nupic
src/nupic/algorithms/connections.py
Connections.segmentPositionSortKey
def segmentPositionSortKey(self, segment):
  """
  Return a numeric key for sorting this segment. This can be used with the
  python built-in ``sorted()`` function.

  :param segment: (:class:`Segment`) within this :class:`Connections`
         instance.
  :returns: (float) A numeric key for sorting.
  """
  return segment.cell + (segment._ordinal / float(self._nextSegmentOrdinal))
python
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/connections.py#L425-L434
valid
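The key packs (cell, creation order) into one float: the integer part is the cell index, and the fraction ordinal/nextOrdinal is always below 1, so segments sort by cell first and by age within a cell. A toy check with tuples standing in for segments:

# (cell, ordinal) pairs; 100 ordinals issued so far (hypothetical values).
segments = [(3, 40), (1, 90), (3, 10), (1, 5)]
nextOrdinal = 100.0

print(sorted(segments, key=lambda s: s[0] + s[1] / nextOrdinal))
# [(1, 5), (1, 90), (3, 10), (3, 40)]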
numenta/nupic
src/nupic/algorithms/connections.py
Connections.write
def write(self, proto):
  """
  Writes serialized data to proto object.

  :param proto: (DynamicStructBuilder) Proto object
  """
  protoCells = proto.init('cells', self.numCells)

  for i in xrange(self.numCells):
    segments = self._cells[i]._segments
    protoSegments = protoCells[i].init('segments', len(segments))

    for j, segment in enumerate(segments):
      synapses = segment._synapses
      protoSynapses = protoSegments[j].init('synapses', len(synapses))

      for k, synapse in enumerate(sorted(synapses, key=lambda s: s._ordinal)):
        protoSynapses[k].presynapticCell = synapse.presynapticCell
        protoSynapses[k].permanence = synapse.permanence
python
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/connections.py#L437-L455
valid
numenta/nupic
src/nupic/algorithms/connections.py
Connections.read
def read(cls, proto):
  """
  Reads deserialized data from proto object

  :param proto: (DynamicStructBuilder) Proto object

  :returns: (:class:`Connections`) instance
  """
  #pylint: disable=W0212
  protoCells = proto.cells
  connections = cls(len(protoCells))

  for cellIdx, protoCell in enumerate(protoCells):
    protoCell = protoCells[cellIdx]
    protoSegments = protoCell.segments
    connections._cells[cellIdx] = CellData()
    segments = connections._cells[cellIdx]._segments

    for segmentIdx, protoSegment in enumerate(protoSegments):
      segment = Segment(cellIdx, connections._nextFlatIdx,
                        connections._nextSegmentOrdinal)
      segments.append(segment)
      connections._segmentForFlatIdx.append(segment)
      connections._nextFlatIdx += 1
      connections._nextSegmentOrdinal += 1

      synapses = segment._synapses
      protoSynapses = protoSegment.synapses

      for synapseIdx, protoSynapse in enumerate(protoSynapses):
        presynapticCell = protoSynapse.presynapticCell
        synapse = Synapse(segment, presynapticCell, protoSynapse.permanence,
                          ordinal=connections._nextSynapseOrdinal)
        connections._nextSynapseOrdinal += 1
        synapses.add(synapse)
        connections._synapsesForPresynapticCell[presynapticCell].add(synapse)
        connections._numSynapses += 1

  #pylint: enable=W0212
  return connections
python
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/connections.py#L464-L505
valid
numenta/nupic
src/nupic/support/configuration_base.py
Configuration.getString
def getString(cls, prop):
  """ Retrieve the requested property as a string. If property does not exist,
  then KeyError will be raised.

  :param prop: (string) name of the property
  :raises: KeyError
  :returns: (string) property value
  """
  if cls._properties is None:
    cls._readStdConfigFiles()

  # Allow configuration properties to be overridden via environment variables
  envValue = os.environ.get("%s%s" % (cls.envPropPrefix,
                                      prop.replace('.', '_')), None)
  if envValue is not None:
    return envValue

  return cls._properties[prop]
python
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/support/configuration_base.py#L75-L92
valid
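The override rule: an environment variable named envPropPrefix plus the property name with dots turned into underscores wins over the file value. A standalone sketch (the prefix and property name here are illustrative, not taken from nupic):

import os

ENV_PREFIX = "NTA_CONF_PROP_"           # assumed prefix value
properties = {"swarming.workers": "4"}  # values loaded from config files

def getString(prop):
  envValue = os.environ.get(ENV_PREFIX + prop.replace(".", "_"))
  return envValue if envValue is not None else properties[prop]  # KeyError if unknown

os.environ[ENV_PREFIX + "swarming_workers"] = "8"
print(getString("swarming.workers"))  # "8": the environment wins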
numenta/nupic
src/nupic/support/configuration_base.py
Configuration.getBool
def getBool(cls, prop):
  """ Retrieve the requested property and return it as a bool. If property
  does not exist, then KeyError will be raised. If the property value is
  neither 0 nor 1, then ValueError will be raised

  :param prop: (string) name of the property
  :raises: KeyError, ValueError
  :returns: (bool) property value
  """
  value = cls.getInt(prop)

  if value not in (0, 1):
    raise ValueError("Expected 0 or 1, but got %r in config property %s" % (
      value, prop))

  return bool(value)
python
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/support/configuration_base.py#L96-L112
valid
numenta/nupic
src/nupic/support/configuration_base.py
Configuration.set
def set(cls, prop, value):
  """ Set the value of the given configuration property.

  :param prop: (string) name of the property
  :param value: (object) value to set
  """
  if cls._properties is None:
    cls._readStdConfigFiles()

  cls._properties[prop] = str(value)
python
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/support/configuration_base.py#L163-L173
valid
numenta/nupic
src/nupic/support/configuration_base.py
Configuration.dict
def dict(cls):
  """ Return a dict containing all of the configuration properties

  :returns: (dict) containing all configuration properties.
  """
  if cls._properties is None:
    cls._readStdConfigFiles()

  # Make a copy so we can update any current values obtained from environment
  # variables
  result = dict(cls._properties)
  keys = os.environ.keys()
  replaceKeys = filter(lambda x: x.startswith(cls.envPropPrefix), keys)
  for envKey in replaceKeys:
    key = envKey[len(cls.envPropPrefix):]
    key = key.replace('_', '.')
    result[key] = os.environ[envKey]

  return result
python
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/support/configuration_base.py#L177-L197
valid
numenta/nupic
src/nupic/support/configuration_base.py
Configuration.readConfigFile
def readConfigFile(cls, filename, path=None):
  """ Parse the given XML file and store all properties it describes.

  :param filename: (string) name of XML file to parse (no path)
  :param path: (string) path of the XML file. If None, then use the standard
        configuration search path.
  """
  properties = cls._readConfigFile(filename, path)

  # Create properties dict if necessary
  if cls._properties is None:
    cls._properties = dict()

  for name in properties:
    if 'value' in properties[name]:
      cls._properties[name] = properties[name]['value']
python
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/support/configuration_base.py#L201-L216
valid
numenta/nupic
src/nupic/support/configuration_base.py
Configuration.findConfigFile
def findConfigFile(cls, filename):
  """ Search the configuration path (specified via the NTA_CONF_PATH
  environment variable) for the given filename. If found, return the complete
  path to the file.

  :param filename: (string) name of file to locate
  """
  paths = cls.getConfigPaths()
  for p in paths:
    testPath = os.path.join(p, filename)
    if os.path.isfile(testPath):
      return os.path.join(p, filename)
python
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/support/configuration_base.py#L359-L371
valid
numenta/nupic
src/nupic/support/configuration_base.py
Configuration.getConfigPaths
def getConfigPaths(cls):
  """ Return the list of paths to search for configuration files.

  :returns: (list) of paths
  """
  configPaths = []
  if cls._configPaths is not None:
    return cls._configPaths

  else:
    if 'NTA_CONF_PATH' in os.environ:
      configVar = os.environ['NTA_CONF_PATH']
      # Return as a list of paths
      configPaths = configVar.split(os.pathsep)

    return configPaths
python
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/support/configuration_base.py#L375-L390
valid
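NTA_CONF_PATH follows the usual search-path convention: entries joined with the platform separator (':' on POSIX, ';' on Windows) and split with os.pathsep. A quick sketch (the directories are illustrative):

import os

os.environ["NTA_CONF_PATH"] = os.pathsep.join(["/etc/nupic", "/opt/nupic/conf"])
print(os.environ["NTA_CONF_PATH"].split(os.pathsep))
# ['/etc/nupic', '/opt/nupic/conf']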
numenta/nupic
src/nupic/algorithms/fdrutilities.py
addNoise
def addNoise(input, noise=0.1, doForeground=True, doBackground=True):
  """
  Add noise to the given input.

  Parameters:
  -----------------------------------------------
  input:         the input to add noise to
  noise:         how much noise to add
  doForeground:  If true, turn off some of the 1 bits in the input
  doBackground:  If true, turn on some of the 0 bits in the input
  """
  if doForeground and doBackground:
    return numpy.abs(input - (numpy.random.random(input.shape) < noise))
  else:
    if doForeground:
      return numpy.logical_and(input, numpy.random.random(input.shape) > noise)
    if doBackground:
      return numpy.logical_or(input, numpy.random.random(input.shape) < noise)
  return input
python
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/fdrutilities.py#L42-L61
valid
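When both flags are set, the abs-of-difference line is a vectorized XOR: subtracting a random 0/1 mask from a 0/1 input gives -1, 0, or 1, and the absolute value flips exactly the masked bits. A runnable check:

import numpy

numpy.random.seed(42)
dense = (numpy.random.random(20) < 0.5).astype(int)   # random 0/1 vector
mask = (numpy.random.random(dense.shape) < 0.1)       # ~10% of positions chosen
noisy = numpy.abs(dense - mask.astype(int))           # masked bits are flipped
print(int((noisy != dense).sum()), "bits flipped")    # equals mask.sum()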
numenta/nupic
src/nupic/algorithms/fdrutilities.py
generateCoincMatrix
def generateCoincMatrix(nCoinc=10, length=500, activity=50):
  """
  Generate a coincidence matrix. This is used to generate random inputs to the
  temporal learner and to compare the predicted output against.

  It generates a matrix of nCoinc rows, each row has length 'length' and has
  a total of 'activity' bits on.

  Parameters:
  -----------------------------------------------
  nCoinc:    the number of rows to generate
  length:    the length of each row
  activity:  the number of ones to put into each row.
  """
  coincMatrix0 = SM32(int(nCoinc), int(length))
  theOnes = numpy.array([1.0] * activity, dtype=numpy.float32)
  for rowIdx in xrange(nCoinc):
    coinc = numpy.array(random.sample(xrange(length), activity),
                        dtype=numpy.uint32)
    coinc.sort()
    coincMatrix0.setRowFromSparse(rowIdx, coinc, theOnes)

  # This is the right code to use, it's faster, but it derails the unit
  # testing of the pooling for now.
  coincMatrix = SM32(int(nCoinc), int(length))
  coincMatrix.initializeWithFixedNNZR(activity)

  return coincMatrix0
python
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/fdrutilities.py#L65-L94
valid
numenta/nupic
src/nupic/algorithms/fdrutilities.py
generateVectors
def generateVectors(numVectors=100, length=500, activity=50):
  """
  Generate a list of random sparse distributed vectors. This is used to
  generate training vectors to the spatial or temporal learner and to compare
  the predicted output against.

  It generates a list of 'numVectors' elements, each element has length
  'length' and has a total of 'activity' bits on.

  Parameters:
  -----------------------------------------------
  numVectors: the number of vectors to generate
  length:     the length of each row
  activity:   the number of ones to put into each row.
  """
  vectors = []
  coinc = numpy.zeros(length, dtype='int32')
  indexList = range(length)
  for i in xrange(numVectors):
    coinc[:] = 0
    coinc[random.sample(indexList, activity)] = 1
    vectors.append(coinc.copy())

  return vectors
python
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/fdrutilities.py#L98-L124
valid
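A Python 3 rendering of the same generator (random.sample accepts a range directly, and each vector is copied so rows do not alias):

import random
import numpy

def generateVectors(numVectors=3, length=10, activity=3):
  vectors = []
  for _ in range(numVectors):
    v = numpy.zeros(length, dtype="int32")
    v[random.sample(range(length), activity)] = 1  # exactly `activity` ones
    vectors.append(v)
  return vectors

for v in generateVectors():
  print(v, int(v.sum()))  # every row sums to `activity`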
numenta/nupic
src/nupic/algorithms/fdrutilities.py
generateSimpleSequences
def generateSimpleSequences(nCoinc=10, seqLength=[5,6,7], nSeq=100):
  """
  Generate a set of simple sequences. The elements of the sequences will be
  integers from 0 to 'nCoinc'-1. The length of each sequence will be randomly
  chosen from the 'seqLength' list.

  Parameters:
  -----------------------------------------------
  nCoinc:      the number of elements available to use in the sequences
  seqLength:   a list of possible sequence lengths. The length of each
               sequence will be randomly chosen from here.
  nSeq:        The number of sequences to generate

  retval:      a list of sequences. Each sequence is itself a list
               containing the coincidence indices for that sequence.
  """
  coincList = range(nCoinc)
  seqList = []

  for i in xrange(nSeq):
    if max(seqLength) <= nCoinc:
      seqList.append(random.sample(coincList, random.choice(seqLength)))
    else:
      # Renamed from `len` to avoid shadowing the builtin.
      seqLen = random.choice(seqLength)
      seq = []
      for x in xrange(seqLen):
        seq.append(random.choice(coincList))
      seqList.append(seq)

  return seqList
python
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/fdrutilities.py#L128-L158
valid
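A short usage sketch may help orient readers of this record; it assumes a Python 2 environment (the code relies on xrange and a list-returning range) and that the function is imported from nupic.algorithms.fdrutilities, the path this record points at. The seed is purely illustrative.

import random
random.seed(42)  # illustrative seed, only for repeatable output

# With max(seqLength) <= nCoinc, each sequence is a sample without replacement.
seqs = generateSimpleSequences(nCoinc=5, seqLength=[3, 4], nSeq=2)
print seqs       # e.g. [[1, 4, 0], [3, 0, 2, 1]]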
numenta/nupic
src/nupic/algorithms/fdrutilities.py
generateHubSequences
def generateHubSequences(nCoinc=10, hubs = [2,6], seqLength=[5,6,7], nSeq=100):
  """
  Generate a set of hub sequences. These are sequences which contain a hub
  element in the middle. The elements of the sequences will be integers from
  0 to 'nCoinc'-1. The hub elements will only appear in the middle of each
  sequence. The length of each sequence will be randomly chosen from the
  'seqLength' list.

  Parameters:
  -----------------------------------------------
  nCoinc:       the number of elements available to use in the sequences
  hubs:         which of the elements will be used as hubs.
  seqLength:    a list of possible sequence lengths. The length of each
                sequence will be randomly chosen from here.
  nSeq:         The number of sequences to generate

  retval:       a list of sequences. Each sequence is itself a list containing
                the coincidence indices for that sequence.
  """

  coincList = range(nCoinc)
  for hub in hubs:
    coincList.remove(hub)

  seqList = []
  for i in xrange(nSeq):
    length = random.choice(seqLength)-1
    seq = random.sample(coincList,length)
    seq.insert(length//2, random.choice(hubs))
    seqList.append(seq)

  return seqList
python
def generateHubSequences(nCoinc=10, hubs = [2,6], seqLength=[5,6,7], nSeq=100):
  """
  Generate a set of hub sequences. These are sequences which contain a hub
  element in the middle. The elements of the sequences will be integers from
  0 to 'nCoinc'-1. The hub elements will only appear in the middle of each
  sequence. The length of each sequence will be randomly chosen from the
  'seqLength' list.

  Parameters:
  -----------------------------------------------
  nCoinc:       the number of elements available to use in the sequences
  hubs:         which of the elements will be used as hubs.
  seqLength:    a list of possible sequence lengths. The length of each
                sequence will be randomly chosen from here.
  nSeq:         The number of sequences to generate

  retval:       a list of sequences. Each sequence is itself a list containing
                the coincidence indices for that sequence.
  """

  coincList = range(nCoinc)
  for hub in hubs:
    coincList.remove(hub)

  seqList = []
  for i in xrange(nSeq):
    length = random.choice(seqLength)-1
    seq = random.sample(coincList,length)
    seq.insert(length//2, random.choice(hubs))
    seqList.append(seq)

  return seqList
[ "def", "generateHubSequences", "(", "nCoinc", "=", "10", ",", "hubs", "=", "[", "2", ",", "6", "]", ",", "seqLength", "=", "[", "5", ",", "6", ",", "7", "]", ",", "nSeq", "=", "100", ")", ":", "coincList", "=", "range", "(", "nCoinc", ")", "for", "hub", "in", "hubs", ":", "coincList", ".", "remove", "(", "hub", ")", "seqList", "=", "[", "]", "for", "i", "in", "xrange", "(", "nSeq", ")", ":", "length", "=", "random", ".", "choice", "(", "seqLength", ")", "-", "1", "seq", "=", "random", ".", "sample", "(", "coincList", ",", "length", ")", "seq", ".", "insert", "(", "length", "//", "2", ",", "random", ".", "choice", "(", "hubs", ")", ")", "seqList", ".", "append", "(", "seq", ")", "return", "seqList" ]
Generate a set of hub sequences. These are sequences which contain a hub
element in the middle. The elements of the sequences will be integers from
0 to 'nCoinc'-1. The hub elements will only appear in the middle of each
sequence. The length of each sequence will be randomly chosen from the
'seqLength' list.

Parameters:
-----------------------------------------------
nCoinc:       the number of elements available to use in the sequences
hubs:         which of the elements will be used as hubs.
seqLength:    a list of possible sequence lengths. The length of each
              sequence will be randomly chosen from here.
nSeq:         The number of sequences to generate

retval:       a list of sequences. Each sequence is itself a list containing
              the coincidence indices for that sequence.
[ "Generate", "a", "set", "of", "hub", "sequences", ".", "These", "are", "sequences", "which", "contain", "a", "hub", "element", "in", "the", "middle", ".", "The", "elements", "of", "the", "sequences", "will", "be", "integers", "from", "0", "to", "nCoinc", "-", "1", ".", "The", "hub", "elements", "will", "only", "appear", "in", "the", "middle", "of", "each", "sequence", ".", "The", "length", "of", "each", "sequence", "will", "be", "randomly", "chosen", "from", "the", "seqLength", "list", "." ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/fdrutilities.py#L162-L194
valid
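An illustrative check of the hub property, under the same Python 2 and import assumptions as the sketch above:

import random
random.seed(0)
for seq in generateHubSequences(nCoinc=8, hubs=[2, 6], seqLength=[5, 7], nSeq=3):
  assert seq[(len(seq) - 1) // 2] in (2, 6)  # a hub lands at the middle position
  assert seq.count(2) + seq.count(6) == 1    # and appears exactly once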
numenta/nupic
src/nupic/algorithms/fdrutilities.py
generateSimpleCoincMatrix
def generateSimpleCoincMatrix(nCoinc=10, length=500, activity=50):
  """
  Generate a non overlapping coincidence matrix. This is used to generate random
  inputs to the temporal learner and to compare the predicted output against.

  It generates a matrix of nCoinc rows, each row has length 'length' and has
  a total of 'activity' bits on.

  Parameters:
  -----------------------------------------------
  nCoinc:       the number of rows to generate
  length:       the length of each row
  activity:     the number of ones to put into each row.
  """
  assert nCoinc*activity<=length, "can't generate non-overlapping coincidences"
  coincMatrix = SM32(0, length)
  coinc = numpy.zeros(length, dtype='int32')

  for i in xrange(nCoinc):
    coinc[:] = 0
    coinc[i*activity:(i+1)*activity] = 1
    coincMatrix.addRow(coinc)

  return coincMatrix
python
def generateSimpleCoincMatrix(nCoinc=10, length=500, activity=50):
  """
  Generate a non overlapping coincidence matrix. This is used to generate random
  inputs to the temporal learner and to compare the predicted output against.

  It generates a matrix of nCoinc rows, each row has length 'length' and has
  a total of 'activity' bits on.

  Parameters:
  -----------------------------------------------
  nCoinc:       the number of rows to generate
  length:       the length of each row
  activity:     the number of ones to put into each row.
  """
  assert nCoinc*activity<=length, "can't generate non-overlapping coincidences"
  coincMatrix = SM32(0, length)
  coinc = numpy.zeros(length, dtype='int32')

  for i in xrange(nCoinc):
    coinc[:] = 0
    coinc[i*activity:(i+1)*activity] = 1
    coincMatrix.addRow(coinc)

  return coincMatrix
[ "def", "generateSimpleCoincMatrix", "(", "nCoinc", "=", "10", ",", "length", "=", "500", ",", "activity", "=", "50", ")", ":", "assert", "nCoinc", "*", "activity", "<=", "length", ",", "\"can't generate non-overlapping coincidences\"", "coincMatrix", "=", "SM32", "(", "0", ",", "length", ")", "coinc", "=", "numpy", ".", "zeros", "(", "length", ",", "dtype", "=", "'int32'", ")", "for", "i", "in", "xrange", "(", "nCoinc", ")", ":", "coinc", "[", ":", "]", "=", "0", "coinc", "[", "i", "*", "activity", ":", "(", "i", "+", "1", ")", "*", "activity", "]", "=", "1", "coincMatrix", ".", "addRow", "(", "coinc", ")", "return", "coincMatrix" ]
Generate a non overlapping coincidence matrix. This is used to generate random
inputs to the temporal learner and to compare the predicted output against.

It generates a matrix of nCoinc rows, each row has length 'length' and has
a total of 'activity' bits on.

Parameters:
-----------------------------------------------
nCoinc:       the number of rows to generate
length:       the length of each row
activity:     the number of ones to put into each row.
[ "Generate", "a", "non", "overlapping", "coincidence", "matrix", ".", "This", "is", "used", "to", "generate", "random", "inputs", "to", "the", "temporal", "learner", "and", "to", "compare", "the", "predicted", "output", "against", "." ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/fdrutilities.py#L250-L274
valid
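The non-overlap guarantee is easy to verify; the sketch below assumes getRow returns a dense numpy row, which is how it is used later in this same file:

m = generateSimpleCoincMatrix(nCoinc=4, length=40, activity=10)
row0, row1 = m.getRow(0), m.getRow(1)
print (row0 * row1).sum()   # 0 -- rows occupy disjoint blocks of bits
print row0.sum()            # 10 -- 'activity' bits on per row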
numenta/nupic
src/nupic/algorithms/fdrutilities.py
generateSequences
def generateSequences(nPatterns=10, patternLen=500, patternActivity=50,
                      hubs=[2,6], seqLength=[5,6,7],
                      nSimpleSequences=50, nHubSequences=50):
  """
  Generate a set of simple and hub sequences. A simple sequence contains
  a randomly chosen set of elements from 0 to 'nCoinc-1'. A hub sequence
  always contains a hub element in the middle of it.

  Parameters:
  -----------------------------------------------
  nPatterns:        the number of patterns to use in the sequences.
  patternLen:       The number of elements in each pattern
  patternActivity:  The number of elements that should be active in each pattern
  hubs:             which of the elements will be used as hubs.
  seqLength:        a list of possible sequence lengths. The length of each
                    sequence will be randomly chosen from here.
  nSimpleSequences: The number of simple sequences to generate
  nHubSequences:    The number of hub sequences to generate

  retval:           (seqList, patterns)
                    seqList: a list of sequences. Each sequence is itself a list
                    containing the input pattern indices for that sequence.
                    patterns: the input patterns used in the seqList.
  """

  # Create the input patterns
  patterns = generateCoincMatrix(nCoinc=nPatterns, length=patternLen,
                                 activity=patternActivity)

  # Create the raw sequences
  seqList = generateSimpleSequences(nCoinc=nPatterns, seqLength=seqLength,
                                    nSeq=nSimpleSequences) + \
            generateHubSequences(nCoinc=nPatterns, hubs=hubs,
                                 seqLength=seqLength, nSeq=nHubSequences)

  # Return results
  return (seqList, patterns)
python
def generateSequences(nPatterns=10, patternLen=500, patternActivity=50,
                      hubs=[2,6], seqLength=[5,6,7],
                      nSimpleSequences=50, nHubSequences=50):
  """
  Generate a set of simple and hub sequences. A simple sequence contains
  a randomly chosen set of elements from 0 to 'nCoinc-1'. A hub sequence
  always contains a hub element in the middle of it.

  Parameters:
  -----------------------------------------------
  nPatterns:        the number of patterns to use in the sequences.
  patternLen:       The number of elements in each pattern
  patternActivity:  The number of elements that should be active in each pattern
  hubs:             which of the elements will be used as hubs.
  seqLength:        a list of possible sequence lengths. The length of each
                    sequence will be randomly chosen from here.
  nSimpleSequences: The number of simple sequences to generate
  nHubSequences:    The number of hub sequences to generate

  retval:           (seqList, patterns)
                    seqList: a list of sequences. Each sequence is itself a list
                    containing the input pattern indices for that sequence.
                    patterns: the input patterns used in the seqList.
  """

  # Create the input patterns
  patterns = generateCoincMatrix(nCoinc=nPatterns, length=patternLen,
                                 activity=patternActivity)

  # Create the raw sequences
  seqList = generateSimpleSequences(nCoinc=nPatterns, seqLength=seqLength,
                                    nSeq=nSimpleSequences) + \
            generateHubSequences(nCoinc=nPatterns, hubs=hubs,
                                 seqLength=seqLength, nSeq=nHubSequences)

  # Return results
  return (seqList, patterns)
[ "def", "generateSequences", "(", "nPatterns", "=", "10", ",", "patternLen", "=", "500", ",", "patternActivity", "=", "50", ",", "hubs", "=", "[", "2", ",", "6", "]", ",", "seqLength", "=", "[", "5", ",", "6", ",", "7", "]", ",", "nSimpleSequences", "=", "50", ",", "nHubSequences", "=", "50", ")", ":", "# Create the input patterns", "patterns", "=", "generateCoincMatrix", "(", "nCoinc", "=", "nPatterns", ",", "length", "=", "patternLen", ",", "activity", "=", "patternActivity", ")", "# Create the raw sequences", "seqList", "=", "generateSimpleSequences", "(", "nCoinc", "=", "nPatterns", ",", "seqLength", "=", "seqLength", ",", "nSeq", "=", "nSimpleSequences", ")", "+", "generateHubSequences", "(", "nCoinc", "=", "nPatterns", ",", "hubs", "=", "hubs", ",", "seqLength", "=", "seqLength", ",", "nSeq", "=", "nHubSequences", ")", "# Return results", "return", "(", "seqList", ",", "patterns", ")" ]
Generate a set of simple and hub sequences. A simple sequence contains
a randomly chosen set of elements from 0 to 'nCoinc-1'. A hub sequence
always contains a hub element in the middle of it.

Parameters:
-----------------------------------------------
nPatterns:        the number of patterns to use in the sequences.
patternLen:       The number of elements in each pattern
patternActivity:  The number of elements that should be active in each pattern
hubs:             which of the elements will be used as hubs.
seqLength:        a list of possible sequence lengths. The length of each
                  sequence will be randomly chosen from here.
nSimpleSequences: The number of simple sequences to generate
nHubSequences:    The number of hub sequences to generate

retval:           (seqList, patterns)
                  seqList: a list of sequences. Each sequence is itself a list
                  containing the input pattern indices for that sequence.
                  patterns: the input patterns used in the seqList.
[ "Generate", "a", "set", "of", "simple", "and", "hub", "sequences", ".", "A", "simple", "sequence", "contains", "a", "randomly", "chosen", "set", "of", "elements", "from", "0", "to", "nCoinc", "-", "1", ".", "A", "hub", "sequence", "always", "contains", "a", "hub", "element", "in", "the", "middle", "of", "it", "." ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/fdrutilities.py#L278-L315
valid
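A usage sketch, assuming generateCoincMatrix (called by the code but defined elsewhere in this module) is available:

seqList, patterns = generateSequences(nPatterns=10, patternLen=100,
                                      patternActivity=10, hubs=[2, 6],
                                      nSimpleSequences=5, nHubSequences=5)
print len(seqList)       # 10 -- five simple plus five hub sequences
print patterns.nRows()   # 10 -- one row per input pattern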
numenta/nupic
src/nupic/algorithms/fdrutilities.py
generateL2Sequences
def generateL2Sequences(nL1Patterns=10, l1Hubs=[2,6], l1SeqLength=[5,6,7],
                        nL1SimpleSequences=50, nL1HubSequences=50,
                        l1Pooling=4, perfectStability=False,
                        spHysteresisFactor=1.0, patternLen=500,
                        patternActivity=50):
  """
  Generate the simulated output from a spatial pooler that's sitting
  on top of another spatial pooler / temporal memory pair. The average on-time
  of the outputs from the simulated TM is given by the l1Pooling argument.

  In this routine, L1 refers to the first spatial and temporal memory and L2
  refers to the spatial pooler above that.

  Parameters:
  -----------------------------------------------
  nL1Patterns:        the number of patterns to use in the L1 sequences.
  l1Hubs:             which of the elements will be used as hubs.
  l1SeqLength:        a list of possible sequence lengths. The length of each
                      sequence will be randomly chosen from here.
  nL1SimpleSequences: The number of simple sequences to generate for L1
  nL1HubSequences:    The number of hub sequences to generate for L1
  l1Pooling:          The number of time steps to pool over in the L1 temporal
                      pooler
  perfectStability:   If true, then the input patterns represented by the
                      sequences generated will have perfect stability over
                      l1Pooling time steps. This is the best case ideal input
                      to a TM. In actual situations, with an actual SP providing
                      input, the stability will always be less than this.
  spHysteresisFactor: The hysteresisFactor to use in the L2 spatial pooler.
                      Only used when perfectStability is False
  patternLen:         The number of elements in each pattern output by L2
  patternActivity:    The number of elements that should be active in each
                      pattern

  @retval: (seqList, patterns)
  seqList: a list of sequences output from L2. Each sequence is itself a list
           containing the input pattern indices for that sequence.
  patterns: the input patterns used in the L2 seqList.
  """

  # First, generate the L1 sequences
  l1SeqList = generateSimpleSequences(nCoinc=nL1Patterns, seqLength=l1SeqLength,
                                      nSeq=nL1SimpleSequences) + \
              generateHubSequences(nCoinc=nL1Patterns, hubs=l1Hubs,
                                   seqLength=l1SeqLength, nSeq=nL1HubSequences)

  # Generate the L2 SP output from those
  spOutput = generateSlowSPOutput(seqListBelow = l1SeqList,
                                  poolingTimeBelow=l1Pooling,
                                  outputWidth=patternLen,
                                  activity=patternActivity,
                                  perfectStability=perfectStability,
                                  spHysteresisFactor=spHysteresisFactor)

  # Map the spOutput patterns into indices into a pattern matrix which we
  # generate now.
  outSeq = None
  outSeqList = []
  outPatterns = SM32(0, patternLen)
  for pattern in spOutput:
    # If we have a reset vector start a new sequence
    if pattern.sum() == 0:
      if outSeq is not None:
        outSeqList.append(outSeq)
      outSeq = []
      continue

    # See if this vector matches a pattern we've already seen before
    patternIdx = None
    if outPatterns.nRows() > 0:
      # Find most matching 1's.
      matches = outPatterns.rightVecSumAtNZ(pattern)
      outCoinc = matches.argmax().astype('uint32')
      # See if its number of 1's is the same in the pattern and in the
      # coincidence row. If so, it is an exact match
      numOnes = pattern.sum()
      if matches[outCoinc] == numOnes \
          and outPatterns.getRow(int(outCoinc)).sum() == numOnes:
        patternIdx = outCoinc

    # If no match, add this pattern to our matrix
    if patternIdx is None:
      outPatterns.addRow(pattern)
      patternIdx = outPatterns.nRows() - 1

    # Store the pattern index into the sequence
    outSeq.append(patternIdx)

  # Put in last finished sequence
  if outSeq is not None:
    outSeqList.append(outSeq)

  # Return with the seqList and patterns matrix
  return (outSeqList, outPatterns)
python
def generateL2Sequences(nL1Patterns=10, l1Hubs=[2,6], l1SeqLength=[5,6,7],
                        nL1SimpleSequences=50, nL1HubSequences=50,
                        l1Pooling=4, perfectStability=False,
                        spHysteresisFactor=1.0, patternLen=500,
                        patternActivity=50):
  """
  Generate the simulated output from a spatial pooler that's sitting
  on top of another spatial pooler / temporal memory pair. The average on-time
  of the outputs from the simulated TM is given by the l1Pooling argument.

  In this routine, L1 refers to the first spatial and temporal memory and L2
  refers to the spatial pooler above that.

  Parameters:
  -----------------------------------------------
  nL1Patterns:        the number of patterns to use in the L1 sequences.
  l1Hubs:             which of the elements will be used as hubs.
  l1SeqLength:        a list of possible sequence lengths. The length of each
                      sequence will be randomly chosen from here.
  nL1SimpleSequences: The number of simple sequences to generate for L1
  nL1HubSequences:    The number of hub sequences to generate for L1
  l1Pooling:          The number of time steps to pool over in the L1 temporal
                      pooler
  perfectStability:   If true, then the input patterns represented by the
                      sequences generated will have perfect stability over
                      l1Pooling time steps. This is the best case ideal input
                      to a TM. In actual situations, with an actual SP providing
                      input, the stability will always be less than this.
  spHysteresisFactor: The hysteresisFactor to use in the L2 spatial pooler.
                      Only used when perfectStability is False
  patternLen:         The number of elements in each pattern output by L2
  patternActivity:    The number of elements that should be active in each
                      pattern

  @retval: (seqList, patterns)
  seqList: a list of sequences output from L2. Each sequence is itself a list
           containing the input pattern indices for that sequence.
  patterns: the input patterns used in the L2 seqList.
  """

  # First, generate the L1 sequences
  l1SeqList = generateSimpleSequences(nCoinc=nL1Patterns, seqLength=l1SeqLength,
                                      nSeq=nL1SimpleSequences) + \
              generateHubSequences(nCoinc=nL1Patterns, hubs=l1Hubs,
                                   seqLength=l1SeqLength, nSeq=nL1HubSequences)

  # Generate the L2 SP output from those
  spOutput = generateSlowSPOutput(seqListBelow = l1SeqList,
                                  poolingTimeBelow=l1Pooling,
                                  outputWidth=patternLen,
                                  activity=patternActivity,
                                  perfectStability=perfectStability,
                                  spHysteresisFactor=spHysteresisFactor)

  # Map the spOutput patterns into indices into a pattern matrix which we
  # generate now.
  outSeq = None
  outSeqList = []
  outPatterns = SM32(0, patternLen)
  for pattern in spOutput:
    # If we have a reset vector start a new sequence
    if pattern.sum() == 0:
      if outSeq is not None:
        outSeqList.append(outSeq)
      outSeq = []
      continue

    # See if this vector matches a pattern we've already seen before
    patternIdx = None
    if outPatterns.nRows() > 0:
      # Find most matching 1's.
      matches = outPatterns.rightVecSumAtNZ(pattern)
      outCoinc = matches.argmax().astype('uint32')
      # See if its number of 1's is the same in the pattern and in the
      # coincidence row. If so, it is an exact match
      numOnes = pattern.sum()
      if matches[outCoinc] == numOnes \
          and outPatterns.getRow(int(outCoinc)).sum() == numOnes:
        patternIdx = outCoinc

    # If no match, add this pattern to our matrix
    if patternIdx is None:
      outPatterns.addRow(pattern)
      patternIdx = outPatterns.nRows() - 1

    # Store the pattern index into the sequence
    outSeq.append(patternIdx)

  # Put in last finished sequence
  if outSeq is not None:
    outSeqList.append(outSeq)

  # Return with the seqList and patterns matrix
  return (outSeqList, outPatterns)
[ "def", "generateL2Sequences", "(", "nL1Patterns", "=", "10", ",", "l1Hubs", "=", "[", "2", ",", "6", "]", ",", "l1SeqLength", "=", "[", "5", ",", "6", ",", "7", "]", ",", "nL1SimpleSequences", "=", "50", ",", "nL1HubSequences", "=", "50", ",", "l1Pooling", "=", "4", ",", "perfectStability", "=", "False", ",", "spHysteresisFactor", "=", "1.0", ",", "patternLen", "=", "500", ",", "patternActivity", "=", "50", ")", ":", "# First, generate the L1 sequences", "l1SeqList", "=", "generateSimpleSequences", "(", "nCoinc", "=", "nL1Patterns", ",", "seqLength", "=", "l1SeqLength", ",", "nSeq", "=", "nL1SimpleSequences", ")", "+", "generateHubSequences", "(", "nCoinc", "=", "nL1Patterns", ",", "hubs", "=", "l1Hubs", ",", "seqLength", "=", "l1SeqLength", ",", "nSeq", "=", "nL1HubSequences", ")", "# Generate the L2 SP output from those", "spOutput", "=", "generateSlowSPOutput", "(", "seqListBelow", "=", "l1SeqList", ",", "poolingTimeBelow", "=", "l1Pooling", ",", "outputWidth", "=", "patternLen", ",", "activity", "=", "patternActivity", ",", "perfectStability", "=", "perfectStability", ",", "spHysteresisFactor", "=", "spHysteresisFactor", ")", "# Map the spOutput patterns into indices into a pattern matrix which we", "# generate now.", "outSeq", "=", "None", "outSeqList", "=", "[", "]", "outPatterns", "=", "SM32", "(", "0", ",", "patternLen", ")", "for", "pattern", "in", "spOutput", ":", "# If we have a reset vector start a new sequence", "if", "pattern", ".", "sum", "(", ")", "==", "0", ":", "if", "outSeq", "is", "not", "None", ":", "outSeqList", ".", "append", "(", "outSeq", ")", "outSeq", "=", "[", "]", "continue", "# See if this vector matches a pattern we've already seen before", "patternIdx", "=", "None", "if", "outPatterns", ".", "nRows", "(", ")", ">", "0", ":", "# Find most matching 1's.", "matches", "=", "outPatterns", ".", "rightVecSumAtNZ", "(", "pattern", ")", "outCoinc", "=", "matches", ".", "argmax", "(", ")", ".", "astype", "(", "'uint32'", ")", "# See if its number of 1's is the same in the pattern and in the", "# coincidence row. If so, it is an exact match", "numOnes", "=", "pattern", ".", "sum", "(", ")", "if", "matches", "[", "outCoinc", "]", "==", "numOnes", "and", "outPatterns", ".", "getRow", "(", "int", "(", "outCoinc", ")", ")", ".", "sum", "(", ")", "==", "numOnes", ":", "patternIdx", "=", "outCoinc", "# If no match, add this pattern to our matrix", "if", "patternIdx", "is", "None", ":", "outPatterns", ".", "addRow", "(", "pattern", ")", "patternIdx", "=", "outPatterns", ".", "nRows", "(", ")", "-", "1", "# Store the pattern index into the sequence", "outSeq", ".", "append", "(", "patternIdx", ")", "# Put in last finished sequence", "if", "outSeq", "is", "not", "None", ":", "outSeqList", ".", "append", "(", "outSeq", ")", "# Return with the seqList and patterns matrix", "return", "(", "outSeqList", ",", "outPatterns", ")" ]
Generate the simulated output from a spatial pooler that's sitting
on top of another spatial pooler / temporal memory pair. The average on-time
of the outputs from the simulated TM is given by the l1Pooling argument.

In this routine, L1 refers to the first spatial and temporal memory and L2
refers to the spatial pooler above that.

Parameters:
-----------------------------------------------
nL1Patterns:        the number of patterns to use in the L1 sequences.
l1Hubs:             which of the elements will be used as hubs.
l1SeqLength:        a list of possible sequence lengths. The length of each
                    sequence will be randomly chosen from here.
nL1SimpleSequences: The number of simple sequences to generate for L1
nL1HubSequences:    The number of hub sequences to generate for L1
l1Pooling:          The number of time steps to pool over in the L1 temporal
                    pooler
perfectStability:   If true, then the input patterns represented by the
                    sequences generated will have perfect stability over
                    l1Pooling time steps. This is the best case ideal input
                    to a TM. In actual situations, with an actual SP providing
                    input, the stability will always be less than this.
spHysteresisFactor: The hysteresisFactor to use in the L2 spatial pooler.
                    Only used when perfectStability is False
patternLen:         The number of elements in each pattern output by L2
patternActivity:    The number of elements that should be active in each
                    pattern

@retval: (seqList, patterns)
seqList: a list of sequences output from L2. Each sequence is itself a list
         containing the input pattern indices for that sequence.
patterns: the input patterns used in the L2 seqList.
[ "Generate", "the", "simulated", "output", "from", "a", "spatial", "pooler", "that", "s", "sitting", "on", "top", "of", "another", "spatial", "pooler", "/", "temporal", "memory", "pair", ".", "The", "average", "on", "-", "time", "of", "the", "outputs", "from", "the", "simulated", "TM", "is", "given", "by", "the", "l1Pooling", "argument", "." ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/fdrutilities.py#L319-L411
valid
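A small-scale usage sketch; it assumes generateSlowSPOutput (called by the code, defined elsewhere in this module) is importable:

outSeqList, outPatterns = generateL2Sequences(nL1Patterns=10, l1Pooling=2,
                                              patternLen=100, patternActivity=10)
print len(outSeqList)      # one entry per completed L2 sequence
print outPatterns.nRows()  # number of distinct L2 patterns discovered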
numenta/nupic
src/nupic/algorithms/fdrutilities.py
vectorsFromSeqList
def vectorsFromSeqList(seqList, patternMatrix):
  """
  Convert a list of sequences of pattern indices, and a pattern lookup table
  into an array of patterns

  Parameters:
  -----------------------------------------------
  seqList:       the sequences, given as indices into the patternMatrix
  patternMatrix: a SparseMatrix containing the possible patterns used in
                 the sequences.
  """

  totalLen = 0
  for seq in seqList:
    totalLen += len(seq)

  vectors = numpy.zeros((totalLen, patternMatrix.shape[1]), dtype='bool')
  vecOffset = 0
  for seq in seqList:
    seq = numpy.array(seq, dtype='uint32')
    for idx,coinc in enumerate(seq):
      vectors[vecOffset] = patternMatrix.getRow(int(coinc))
      vecOffset += 1

  return vectors
python
def vectorsFromSeqList(seqList, patternMatrix):
  """
  Convert a list of sequences of pattern indices, and a pattern lookup table
  into an array of patterns

  Parameters:
  -----------------------------------------------
  seqList:       the sequences, given as indices into the patternMatrix
  patternMatrix: a SparseMatrix containing the possible patterns used in
                 the sequences.
  """

  totalLen = 0
  for seq in seqList:
    totalLen += len(seq)

  vectors = numpy.zeros((totalLen, patternMatrix.shape[1]), dtype='bool')
  vecOffset = 0
  for seq in seqList:
    seq = numpy.array(seq, dtype='uint32')
    for idx,coinc in enumerate(seq):
      vectors[vecOffset] = patternMatrix.getRow(int(coinc))
      vecOffset += 1

  return vectors
[ "def", "vectorsFromSeqList", "(", "seqList", ",", "patternMatrix", ")", ":", "totalLen", "=", "0", "for", "seq", "in", "seqList", ":", "totalLen", "+=", "len", "(", "seq", ")", "vectors", "=", "numpy", ".", "zeros", "(", "(", "totalLen", ",", "patternMatrix", ".", "shape", "[", "1", "]", ")", ",", "dtype", "=", "'bool'", ")", "vecOffset", "=", "0", "for", "seq", "in", "seqList", ":", "seq", "=", "numpy", ".", "array", "(", "seq", ",", "dtype", "=", "'uint32'", ")", "for", "idx", ",", "coinc", "in", "enumerate", "(", "seq", ")", ":", "vectors", "[", "vecOffset", "]", "=", "patternMatrix", ".", "getRow", "(", "int", "(", "coinc", ")", ")", "vecOffset", "+=", "1", "return", "vectors" ]
Convert a list of sequences of pattern indices, and a pattern lookup table
into an array of patterns

Parameters:
-----------------------------------------------
seqList:       the sequences, given as indices into the patternMatrix
patternMatrix: a SparseMatrix containing the possible patterns used in
               the sequences.
[ "Convert", "a", "list", "of", "sequences", "of", "pattern", "indices", "and", "a", "pattern", "lookup", "table", "into", "a", "an", "array", "of", "patterns" ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/fdrutilities.py#L415-L439
valid
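A round-trip sketch that reuses generateSimpleCoincMatrix from the record above:

patterns = generateSimpleCoincMatrix(nCoinc=3, length=30, activity=5)
vectors = vectorsFromSeqList([[0, 2], [1]], patterns)
print vectors.shape      # (3, 30) -- one boolean row per sequence element
print vectors[0].sum()   # 5 -- the activity of pattern 0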
numenta/nupic
src/nupic/algorithms/fdrutilities.py
sameTMParams
def sameTMParams(tp1, tp2):
  """Given two TM instances, see if any parameters are different."""
  result = True
  for param in ["numberOfCols", "cellsPerColumn", "initialPerm", "connectedPerm",
                "minThreshold", "newSynapseCount", "permanenceInc",
                "permanenceDec", "permanenceMax", "globalDecay",
                "activationThreshold", "doPooling", "segUpdateValidDuration",
                "burnIn", "pamLength", "maxAge"]:
    if getattr(tp1, param) != getattr(tp2,param):
      print param,"is different"
      print getattr(tp1, param), "vs", getattr(tp2,param)
      result = False
  return result
python
def sameTMParams(tp1, tp2):
  """Given two TM instances, see if any parameters are different."""
  result = True
  for param in ["numberOfCols", "cellsPerColumn", "initialPerm", "connectedPerm",
                "minThreshold", "newSynapseCount", "permanenceInc",
                "permanenceDec", "permanenceMax", "globalDecay",
                "activationThreshold", "doPooling", "segUpdateValidDuration",
                "burnIn", "pamLength", "maxAge"]:
    if getattr(tp1, param) != getattr(tp2,param):
      print param,"is different"
      print getattr(tp1, param), "vs", getattr(tp2,param)
      result = False
  return result
[ "def", "sameTMParams", "(", "tp1", ",", "tp2", ")", ":", "result", "=", "True", "for", "param", "in", "[", "\"numberOfCols\"", ",", "\"cellsPerColumn\"", ",", "\"initialPerm\"", ",", "\"connectedPerm\"", ",", "\"minThreshold\"", ",", "\"newSynapseCount\"", ",", "\"permanenceInc\"", ",", "\"permanenceDec\"", ",", "\"permanenceMax\"", ",", "\"globalDecay\"", ",", "\"activationThreshold\"", ",", "\"doPooling\"", ",", "\"segUpdateValidDuration\"", ",", "\"burnIn\"", ",", "\"pamLength\"", ",", "\"maxAge\"", "]", ":", "if", "getattr", "(", "tp1", ",", "param", ")", "!=", "getattr", "(", "tp2", ",", "param", ")", ":", "print", "param", ",", "\"is different\"", "print", "getattr", "(", "tp1", ",", "param", ")", ",", "\"vs\"", ",", "getattr", "(", "tp2", ",", "param", ")", "result", "=", "False", "return", "result" ]
Given two TM instances, see if any parameters are different.
[ "Given", "two", "TM", "instances", "see", "if", "any", "parameters", "are", "different", "." ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/fdrutilities.py#L445-L457
valid
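Any pair of objects exposing the listed attributes can be compared this way; FakeTM below is a hypothetical stand-in, not a nupic class:

class FakeTM(object):
  pass  # hypothetical container for the compared attributes

tmA, tmB = FakeTM(), FakeTM()
for name in ["numberOfCols", "cellsPerColumn", "initialPerm", "connectedPerm",
             "minThreshold", "newSynapseCount", "permanenceInc", "permanenceDec",
             "permanenceMax", "globalDecay", "activationThreshold", "doPooling",
             "segUpdateValidDuration", "burnIn", "pamLength", "maxAge"]:
  setattr(tmA, name, 1)
  setattr(tmB, name, 1)
tmB.pamLength = 2
print sameTMParams(tmA, tmB)  # False, after printing "pamLength is different"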
numenta/nupic
src/nupic/algorithms/fdrutilities.py
sameSynapse
def sameSynapse(syn, synapses):
  """Given a synapse and a list of synapses, check whether this synapse
  exists in the list. A synapse is represented as [col, cell, permanence].
  A synapse matches if col and cell are identical and the permanence value is
  within 0.001."""
  for s in synapses:
    if (s[0]==syn[0]) and (s[1]==syn[1]) and (abs(s[2]-syn[2]) <= 0.001):
      return True
  return False
python
def sameSynapse(syn, synapses):
  """Given a synapse and a list of synapses, check whether this synapse
  exists in the list. A synapse is represented as [col, cell, permanence].
  A synapse matches if col and cell are identical and the permanence value is
  within 0.001."""
  for s in synapses:
    if (s[0]==syn[0]) and (s[1]==syn[1]) and (abs(s[2]-syn[2]) <= 0.001):
      return True
  return False
[ "def", "sameSynapse", "(", "syn", ",", "synapses", ")", ":", "for", "s", "in", "synapses", ":", "if", "(", "s", "[", "0", "]", "==", "syn", "[", "0", "]", ")", "and", "(", "s", "[", "1", "]", "==", "syn", "[", "1", "]", ")", "and", "(", "abs", "(", "s", "[", "2", "]", "-", "syn", "[", "2", "]", ")", "<=", "0.001", ")", ":", "return", "True", "return", "False" ]
Given a synapse and a list of synapses, check whether this synapse exists in the list. A synapse is represented as [col, cell, permanence]. A synapse matches if col and cell are identical and the permanence value is within 0.001.
[ "Given", "a", "synapse", "and", "a", "list", "of", "synapses", "check", "whether", "this", "synapse", "exist", "in", "the", "list", ".", "A", "synapse", "is", "represented", "as", "[", "col", "cell", "permanence", "]", ".", "A", "synapse", "matches", "if", "col", "and", "cell", "are", "identical", "and", "the", "permanence", "value", "is", "within", "0", ".", "001", "." ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/fdrutilities.py#L459-L467
valid
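The 0.001 permanence tolerance is the detail worth remembering; the synapse values below are made up for illustration:

synapses = [[0, 1, 0.5000], [3, 2, 0.2500]]
print sameSynapse([0, 1, 0.5004], synapses)  # True  (permanence off by 0.0004)
print sameSynapse([0, 1, 0.5020], synapses)  # False (off by 0.0020 > 0.001)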
numenta/nupic
src/nupic/algorithms/fdrutilities.py
sameSegment
def sameSegment(seg1, seg2):
  """Return True if seg1 and seg2 are identical, ignoring order of synapses"""
  result = True

  # check sequence segment, total activations etc. In case any are floats,
  # check that they are within 0.001.
  for field in [1, 2, 3, 4, 5, 6]:
    if abs(seg1[0][field] - seg2[0][field]) > 0.001:
      result = False

  # Compare number of synapses
  if len(seg1[1:]) != len(seg2[1:]):
    result = False

  # Now compare synapses, ignoring order of synapses
  for syn in seg2[1:]:
    if syn[2] <= 0:
      print "A synapse with zero permanence encountered"
      result = False
  if result == True:
    for syn in seg1[1:]:
      if syn[2] <= 0:
        print "A synapse with zero permanence encountered"
        result = False
      res = sameSynapse(syn, seg2[1:])
      if res == False:
        result = False

  return result
python
def sameSegment(seg1, seg2):
  """Return True if seg1 and seg2 are identical, ignoring order of synapses"""
  result = True

  # check sequence segment, total activations etc. In case any are floats,
  # check that they are within 0.001.
  for field in [1, 2, 3, 4, 5, 6]:
    if abs(seg1[0][field] - seg2[0][field]) > 0.001:
      result = False

  # Compare number of synapses
  if len(seg1[1:]) != len(seg2[1:]):
    result = False

  # Now compare synapses, ignoring order of synapses
  for syn in seg2[1:]:
    if syn[2] <= 0:
      print "A synapse with zero permanence encountered"
      result = False
  if result == True:
    for syn in seg1[1:]:
      if syn[2] <= 0:
        print "A synapse with zero permanence encountered"
        result = False
      res = sameSynapse(syn, seg2[1:])
      if res == False:
        result = False

  return result
[ "def", "sameSegment", "(", "seg1", ",", "seg2", ")", ":", "result", "=", "True", "# check sequence segment, total activations etc. In case any are floats,", "# check that they are within 0.001.", "for", "field", "in", "[", "1", ",", "2", ",", "3", ",", "4", ",", "5", ",", "6", "]", ":", "if", "abs", "(", "seg1", "[", "0", "]", "[", "field", "]", "-", "seg2", "[", "0", "]", "[", "field", "]", ")", ">", "0.001", ":", "result", "=", "False", "# Compare number of synapses", "if", "len", "(", "seg1", "[", "1", ":", "]", ")", "!=", "len", "(", "seg2", "[", "1", ":", "]", ")", ":", "result", "=", "False", "# Now compare synapses, ignoring order of synapses", "for", "syn", "in", "seg2", "[", "1", ":", "]", ":", "if", "syn", "[", "2", "]", "<=", "0", ":", "print", "\"A synapse with zero permanence encountered\"", "result", "=", "False", "if", "result", "==", "True", ":", "for", "syn", "in", "seg1", "[", "1", ":", "]", ":", "if", "syn", "[", "2", "]", "<=", "0", ":", "print", "\"A synapse with zero permanence encountered\"", "result", "=", "False", "res", "=", "sameSynapse", "(", "syn", ",", "seg2", "[", "1", ":", "]", ")", "if", "res", "==", "False", ":", "result", "=", "False", "return", "result" ]
Return True if seg1 and seg2 are identical, ignoring order of synapses
[ "Return", "True", "if", "seg1", "and", "seg2", "are", "identical", "ignoring", "order", "of", "synapses" ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/fdrutilities.py#L469-L497
valid
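A segment here is [header, synapse, synapse, ...]; header fields 1-6 are compared numerically while field 0 is ignored. The values below are hypothetical:

segA = [[0, 1, 2, 3, 4, 5, 6], [0, 1, 0.50], [2, 3, 0.40]]
segB = [[9, 1, 2, 3, 4, 5, 6], [2, 3, 0.40], [0, 1, 0.50]]
print sameSegment(segA, segB)  # True: synapse order and header field 0 don't matter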
numenta/nupic
src/nupic/algorithms/fdrutilities.py
tmDiff
def tmDiff(tm1, tm2, verbosity = 0, relaxSegmentTests =True):
  """
  Given two TM instances, lists the differences between them and returns False
  if there is a difference. This function checks the major parameters first.
  If these pass, it checks the number of segments on each cell, and if that
  passes, it checks each synapse on each segment.
  When comparing C++ and Py, the segments are usually in different orders in the
  cells. tmDiff ignores segment order when comparing TM's.
  """

  # First check basic parameters. If we fail here, don't continue
  if sameTMParams(tm1, tm2) == False:
    print "Two TM's have different parameters"
    return False

  result = True

  # Compare states at t first, they usually diverge before the structure of the
  # cells starts diverging
  if (tm1.activeState['t'] != tm2.activeState['t']).any():
    print 'Active states diverge', numpy.where(tm1.activeState['t'] != tm2.activeState['t'])
    result = False

  if (tm1.predictedState['t'] - tm2.predictedState['t']).any():
    print 'Predicted states diverge', numpy.where(tm1.predictedState['t'] != tm2.predictedState['t'])
    result = False

  # TODO: check confidence at T (confT)

  # Now check some high level learned parameters.
  if tm1.getNumSegments() != tm2.getNumSegments():
    print "Number of segments are different", tm1.getNumSegments(), tm2.getNumSegments()
    result = False

  if tm1.getNumSynapses() != tm2.getNumSynapses():
    print "Number of synapses are different", tm1.getNumSynapses(), tm2.getNumSynapses()
    tm1.printCells()
    tm2.printCells()
    result = False

  # Check that each cell has the same number of segments and synapses
  for c in xrange(tm1.numberOfCols):
    for i in xrange(tm2.cellsPerColumn):
      if tm1.getNumSegmentsInCell(c, i) != tm2.getNumSegmentsInCell(c, i):
        print "Num segments different in cell:",c,i,
        print tm1.getNumSegmentsInCell(c, i), tm2.getNumSegmentsInCell(c, i)
        result = False

  # If the above tests pass, then check each segment and report differences
  # Note that segments in tm1 can be in a different order than tm2. Here we
  # make sure that, for each segment in tm1, there is an identical segment
  # in tm2.
  if result == True and not relaxSegmentTests:
    for c in xrange(tm1.numberOfCols):
      for i in xrange(tm2.cellsPerColumn):
        nSegs = tm1.getNumSegmentsInCell(c, i)
        for segIdx in xrange(nSegs):
          tm1seg = tm1.getSegmentOnCell(c, i, segIdx)

          # Loop through all segments in tm2seg and see if any of them match tm1seg
          res = False
          for tm2segIdx in xrange(nSegs):
            tm2seg = tm2.getSegmentOnCell(c, i, tm2segIdx)
            if sameSegment(tm1seg, tm2seg) == True:
              res = True
              break
          if res == False:
            print "\nSegments are different for cell:",c,i
            if verbosity >= 1:
              print "C++"
              tm1.printCell(c, i)
              print "Py"
              tm2.printCell(c, i)
            result = False

  if result == True and (verbosity > 1):
    print "TM's match"

  return result
python
def tmDiff(tm1, tm2, verbosity = 0, relaxSegmentTests =True):
  """
  Given two TM instances, lists the differences between them and returns False
  if there is a difference. This function checks the major parameters first.
  If these pass, it checks the number of segments on each cell, and if that
  passes, it checks each synapse on each segment.
  When comparing C++ and Py, the segments are usually in different orders in the
  cells. tmDiff ignores segment order when comparing TM's.
  """

  # First check basic parameters. If we fail here, don't continue
  if sameTMParams(tm1, tm2) == False:
    print "Two TM's have different parameters"
    return False

  result = True

  # Compare states at t first, they usually diverge before the structure of the
  # cells starts diverging
  if (tm1.activeState['t'] != tm2.activeState['t']).any():
    print 'Active states diverge', numpy.where(tm1.activeState['t'] != tm2.activeState['t'])
    result = False

  if (tm1.predictedState['t'] - tm2.predictedState['t']).any():
    print 'Predicted states diverge', numpy.where(tm1.predictedState['t'] != tm2.predictedState['t'])
    result = False

  # TODO: check confidence at T (confT)

  # Now check some high level learned parameters.
  if tm1.getNumSegments() != tm2.getNumSegments():
    print "Number of segments are different", tm1.getNumSegments(), tm2.getNumSegments()
    result = False

  if tm1.getNumSynapses() != tm2.getNumSynapses():
    print "Number of synapses are different", tm1.getNumSynapses(), tm2.getNumSynapses()
    tm1.printCells()
    tm2.printCells()
    result = False

  # Check that each cell has the same number of segments and synapses
  for c in xrange(tm1.numberOfCols):
    for i in xrange(tm2.cellsPerColumn):
      if tm1.getNumSegmentsInCell(c, i) != tm2.getNumSegmentsInCell(c, i):
        print "Num segments different in cell:",c,i,
        print tm1.getNumSegmentsInCell(c, i), tm2.getNumSegmentsInCell(c, i)
        result = False

  # If the above tests pass, then check each segment and report differences
  # Note that segments in tm1 can be in a different order than tm2. Here we
  # make sure that, for each segment in tm1, there is an identical segment
  # in tm2.
  if result == True and not relaxSegmentTests:
    for c in xrange(tm1.numberOfCols):
      for i in xrange(tm2.cellsPerColumn):
        nSegs = tm1.getNumSegmentsInCell(c, i)
        for segIdx in xrange(nSegs):
          tm1seg = tm1.getSegmentOnCell(c, i, segIdx)

          # Loop through all segments in tm2seg and see if any of them match tm1seg
          res = False
          for tm2segIdx in xrange(nSegs):
            tm2seg = tm2.getSegmentOnCell(c, i, tm2segIdx)
            if sameSegment(tm1seg, tm2seg) == True:
              res = True
              break
          if res == False:
            print "\nSegments are different for cell:",c,i
            if verbosity >= 1:
              print "C++"
              tm1.printCell(c, i)
              print "Py"
              tm2.printCell(c, i)
            result = False

  if result == True and (verbosity > 1):
    print "TM's match"

  return result
[ "def", "tmDiff", "(", "tm1", ",", "tm2", ",", "verbosity", "=", "0", ",", "relaxSegmentTests", "=", "True", ")", ":", "# First check basic parameters. If we fail here, don't continue", "if", "sameTMParams", "(", "tm1", ",", "tm2", ")", "==", "False", ":", "print", "\"Two TM's have different parameters\"", "return", "False", "result", "=", "True", "# Compare states at t first, they usually diverge before the structure of the", "# cells starts diverging", "if", "(", "tm1", ".", "activeState", "[", "'t'", "]", "!=", "tm2", ".", "activeState", "[", "'t'", "]", ")", ".", "any", "(", ")", ":", "print", "'Active states diverge'", ",", "numpy", ".", "where", "(", "tm1", ".", "activeState", "[", "'t'", "]", "!=", "tm2", ".", "activeState", "[", "'t'", "]", ")", "result", "=", "False", "if", "(", "tm1", ".", "predictedState", "[", "'t'", "]", "-", "tm2", ".", "predictedState", "[", "'t'", "]", ")", ".", "any", "(", ")", ":", "print", "'Predicted states diverge'", ",", "numpy", ".", "where", "(", "tm1", ".", "predictedState", "[", "'t'", "]", "!=", "tm2", ".", "predictedState", "[", "'t'", "]", ")", "result", "=", "False", "# TODO: check confidence at T (confT)", "# Now check some high level learned parameters.", "if", "tm1", ".", "getNumSegments", "(", ")", "!=", "tm2", ".", "getNumSegments", "(", ")", ":", "print", "\"Number of segments are different\"", ",", "tm1", ".", "getNumSegments", "(", ")", ",", "tm2", ".", "getNumSegments", "(", ")", "result", "=", "False", "if", "tm1", ".", "getNumSynapses", "(", ")", "!=", "tm2", ".", "getNumSynapses", "(", ")", ":", "print", "\"Number of synapses are different\"", ",", "tm1", ".", "getNumSynapses", "(", ")", ",", "tm2", ".", "getNumSynapses", "(", ")", "tm1", ".", "printCells", "(", ")", "tm2", ".", "printCells", "(", ")", "result", "=", "False", "# Check that each cell has the same number of segments and synapses", "for", "c", "in", "xrange", "(", "tm1", ".", "numberOfCols", ")", ":", "for", "i", "in", "xrange", "(", "tm2", ".", "cellsPerColumn", ")", ":", "if", "tm1", ".", "getNumSegmentsInCell", "(", "c", ",", "i", ")", "!=", "tm2", ".", "getNumSegmentsInCell", "(", "c", ",", "i", ")", ":", "print", "\"Num segments different in cell:\"", ",", "c", ",", "i", ",", "print", "tm1", ".", "getNumSegmentsInCell", "(", "c", ",", "i", ")", ",", "tm2", ".", "getNumSegmentsInCell", "(", "c", ",", "i", ")", "result", "=", "False", "# If the above tests pass, then check each segment and report differences", "# Note that segments in tm1 can be in a different order than tm2. 
Here we", "# make sure that, for each segment in tm1, there is an identical segment", "# in tm2.", "if", "result", "==", "True", "and", "not", "relaxSegmentTests", ":", "for", "c", "in", "xrange", "(", "tm1", ".", "numberOfCols", ")", ":", "for", "i", "in", "xrange", "(", "tm2", ".", "cellsPerColumn", ")", ":", "nSegs", "=", "tm1", ".", "getNumSegmentsInCell", "(", "c", ",", "i", ")", "for", "segIdx", "in", "xrange", "(", "nSegs", ")", ":", "tm1seg", "=", "tm1", ".", "getSegmentOnCell", "(", "c", ",", "i", ",", "segIdx", ")", "# Loop through all segments in tm2seg and see if any of them match tm1seg", "res", "=", "False", "for", "tm2segIdx", "in", "xrange", "(", "nSegs", ")", ":", "tm2seg", "=", "tm2", ".", "getSegmentOnCell", "(", "c", ",", "i", ",", "tm2segIdx", ")", "if", "sameSegment", "(", "tm1seg", ",", "tm2seg", ")", "==", "True", ":", "res", "=", "True", "break", "if", "res", "==", "False", ":", "print", "\"\\nSegments are different for cell:\"", ",", "c", ",", "i", "if", "verbosity", ">=", "1", ":", "print", "\"C++\"", "tm1", ".", "printCell", "(", "c", ",", "i", ")", "print", "\"Py\"", "tm2", ".", "printCell", "(", "c", ",", "i", ")", "result", "=", "False", "if", "result", "==", "True", "and", "(", "verbosity", ">", "1", ")", ":", "print", "\"TM's match\"", "return", "result" ]
Given two TM instances, lists the differences between them and returns False
if there is a difference. This function checks the major parameters first.
If these pass, it checks the number of segments on each cell, and if that
passes, it checks each synapse on each segment.
When comparing C++ and Py, the segments are usually in different orders in the
cells. tmDiff ignores segment order when comparing TM's.
[ "Given", "two", "TM", "instances", "list", "the", "difference", "between", "them", "and", "returns", "False", "if", "there", "is", "a", "difference", ".", "This", "function", "checks", "the", "major", "parameters", ".", "If", "this", "passes", "(", "and", "checkLearn", "is", "true", ")", "it", "checks", "the", "number", "of", "segments", "on", "each", "cell", ".", "If", "this", "passes", "checks", "each", "synapse", "on", "each", "segment", ".", "When", "comparing", "C", "++", "and", "Py", "the", "segments", "are", "usually", "in", "different", "orders", "in", "the", "cells", ".", "tmDiff", "ignores", "segment", "order", "when", "comparing", "TM", "s", "." ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/fdrutilities.py#L499-L579
valid
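A usage sketch; tm1 and tm2 stand for two previously constructed, identically trained TM instances (for example the C++ and Python implementations), which this record does not build:

if not tmDiff(tm1, tm2, verbosity=1, relaxSegmentTests=False):
  print "TM implementations diverged"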
numenta/nupic
src/nupic/algorithms/fdrutilities.py
tmDiff2
def tmDiff2(tm1, tm2, verbosity = 0, relaxSegmentTests =True,
            checkLearn = True, checkStates = True):
  """
  Given two TM instances, lists the differences between them and returns False
  if there is a difference. This function checks the major parameters. If this
  passes (and checkLearn is true) it checks the number of segments on each
  cell. If this passes, it checks each synapse on each segment.
  When comparing C++ and Py, the segments are usually in different orders in the
  cells. tmDiff ignores segment order when comparing TM's.

  If checkLearn is True, will check learn states as well as all the segments

  If checkStates is True, will check the various state arrays
  """

  # First check basic parameters. If we fail here, don't continue
  if sameTMParams(tm1, tm2) == False:
    print "Two TM's have different parameters"
    return False

  tm1Label = "<tm_1 (%s)>" % tm1.__class__.__name__
  tm2Label = "<tm_2 (%s)>" % tm2.__class__.__name__

  result = True
  if checkStates:
    # Compare states at t first, they usually diverge before the structure of the
    # cells starts diverging
    if (tm1.infActiveState['t'] != tm2.infActiveState['t']).any():
      print 'Active states diverged', numpy.where(tm1.infActiveState['t'] != tm2.infActiveState['t'])
      result = False

    if (tm1.infPredictedState['t'] - tm2.infPredictedState['t']).any():
      print 'Predicted states diverged', numpy.where(tm1.infPredictedState['t'] != tm2.infPredictedState['t'])
      result = False

    if checkLearn and (tm1.lrnActiveState['t'] - tm2.lrnActiveState['t']).any():
      print 'lrnActiveState[t] diverged', numpy.where(tm1.lrnActiveState['t'] != tm2.lrnActiveState['t'])
      result = False

    if checkLearn and (tm1.lrnPredictedState['t'] - tm2.lrnPredictedState['t']).any():
      print 'lrnPredictedState[t] diverged', numpy.where(tm1.lrnPredictedState['t'] != tm2.lrnPredictedState['t'])
      result = False

    if checkLearn and abs(tm1.getAvgLearnedSeqLength() - tm2.getAvgLearnedSeqLength()) > 0.01:
      print "Average learned sequence lengths differ: ",
      print tm1.getAvgLearnedSeqLength(), " vs ", tm2.getAvgLearnedSeqLength()
      result = False

  # TODO: check confidence at T (confT)

  # Now check some high level learned parameters.
  if tm1.getNumSegments() != tm2.getNumSegments():
    print "Number of segments are different", tm1.getNumSegments(), tm2.getNumSegments()
    result = False

  if tm1.getNumSynapses() != tm2.getNumSynapses():
    print "Number of synapses are different", tm1.getNumSynapses(), tm2.getNumSynapses()
    if verbosity >= 3:
      print "%s: " % tm1Label,
      tm1.printCells()
      print "\n%s : " % tm2Label,
      tm2.printCells()
    #result = False

  # Check that each cell has the same number of segments and synapses
  for c in xrange(tm1.numberOfCols):
    for i in xrange(tm2.cellsPerColumn):
      if tm1.getNumSegmentsInCell(c, i) != tm2.getNumSegmentsInCell(c, i):
        print "Num segments different in cell:",c,i,
        print tm1.getNumSegmentsInCell(c, i), tm2.getNumSegmentsInCell(c, i)
        result = False

  # If the above tests pass, then check each segment and report differences
  # Note that segments in tm1 can be in a different order than tm2. Here we
  # make sure that, for each segment in tm1, there is an identical segment
  # in tm2.
  if result == True and not relaxSegmentTests and checkLearn:
    for c in xrange(tm1.numberOfCols):
      for i in xrange(tm2.cellsPerColumn):
        nSegs = tm1.getNumSegmentsInCell(c, i)
        for segIdx in xrange(nSegs):
          tm1seg = tm1.getSegmentOnCell(c, i, segIdx)

          # Loop through all segments in tm2seg and see if any of them match tm1seg
          res = False
          for tm2segIdx in xrange(nSegs):
            tm2seg = tm2.getSegmentOnCell(c, i, tm2segIdx)
            if sameSegment(tm1seg, tm2seg) == True:
              res = True
              break
          if res == False:
            print "\nSegments are different for cell:",c,i
            result = False
            if verbosity >= 0:
              print "%s : " % tm1Label,
              tm1.printCell(c, i)
              print "\n%s : " % tm2Label,
              tm2.printCell(c, i)

  if result == True and (verbosity > 1):
    print "TM's match"

  return result
python
def tmDiff2(tm1, tm2, verbosity = 0, relaxSegmentTests =True,
            checkLearn = True, checkStates = True):
  """
  Given two TM instances, lists the differences between them and returns False
  if there is a difference. This function checks the major parameters. If this
  passes (and checkLearn is true) it checks the number of segments on each
  cell. If this passes, it checks each synapse on each segment.
  When comparing C++ and Py, the segments are usually in different orders in the
  cells. tmDiff ignores segment order when comparing TM's.

  If checkLearn is True, will check learn states as well as all the segments

  If checkStates is True, will check the various state arrays
  """

  # First check basic parameters. If we fail here, don't continue
  if sameTMParams(tm1, tm2) == False:
    print "Two TM's have different parameters"
    return False

  tm1Label = "<tm_1 (%s)>" % tm1.__class__.__name__
  tm2Label = "<tm_2 (%s)>" % tm2.__class__.__name__

  result = True
  if checkStates:
    # Compare states at t first, they usually diverge before the structure of the
    # cells starts diverging
    if (tm1.infActiveState['t'] != tm2.infActiveState['t']).any():
      print 'Active states diverged', numpy.where(tm1.infActiveState['t'] != tm2.infActiveState['t'])
      result = False

    if (tm1.infPredictedState['t'] - tm2.infPredictedState['t']).any():
      print 'Predicted states diverged', numpy.where(tm1.infPredictedState['t'] != tm2.infPredictedState['t'])
      result = False

    if checkLearn and (tm1.lrnActiveState['t'] - tm2.lrnActiveState['t']).any():
      print 'lrnActiveState[t] diverged', numpy.where(tm1.lrnActiveState['t'] != tm2.lrnActiveState['t'])
      result = False

    if checkLearn and (tm1.lrnPredictedState['t'] - tm2.lrnPredictedState['t']).any():
      print 'lrnPredictedState[t] diverged', numpy.where(tm1.lrnPredictedState['t'] != tm2.lrnPredictedState['t'])
      result = False

    if checkLearn and abs(tm1.getAvgLearnedSeqLength() - tm2.getAvgLearnedSeqLength()) > 0.01:
      print "Average learned sequence lengths differ: ",
      print tm1.getAvgLearnedSeqLength(), " vs ", tm2.getAvgLearnedSeqLength()
      result = False

  # TODO: check confidence at T (confT)

  # Now check some high level learned parameters.
  if tm1.getNumSegments() != tm2.getNumSegments():
    print "Number of segments are different", tm1.getNumSegments(), tm2.getNumSegments()
    result = False

  if tm1.getNumSynapses() != tm2.getNumSynapses():
    print "Number of synapses are different", tm1.getNumSynapses(), tm2.getNumSynapses()
    if verbosity >= 3:
      print "%s: " % tm1Label,
      tm1.printCells()
      print "\n%s : " % tm2Label,
      tm2.printCells()
    #result = False

  # Check that each cell has the same number of segments and synapses
  for c in xrange(tm1.numberOfCols):
    for i in xrange(tm2.cellsPerColumn):
      if tm1.getNumSegmentsInCell(c, i) != tm2.getNumSegmentsInCell(c, i):
        print "Num segments different in cell:",c,i,
        print tm1.getNumSegmentsInCell(c, i), tm2.getNumSegmentsInCell(c, i)
        result = False

  # If the above tests pass, then check each segment and report differences
  # Note that segments in tm1 can be in a different order than tm2. Here we
  # make sure that, for each segment in tm1, there is an identical segment
  # in tm2.
  if result == True and not relaxSegmentTests and checkLearn:
    for c in xrange(tm1.numberOfCols):
      for i in xrange(tm2.cellsPerColumn):
        nSegs = tm1.getNumSegmentsInCell(c, i)
        for segIdx in xrange(nSegs):
          tm1seg = tm1.getSegmentOnCell(c, i, segIdx)

          # Loop through all segments in tm2seg and see if any of them match tm1seg
          res = False
          for tm2segIdx in xrange(nSegs):
            tm2seg = tm2.getSegmentOnCell(c, i, tm2segIdx)
            if sameSegment(tm1seg, tm2seg) == True:
              res = True
              break
          if res == False:
            print "\nSegments are different for cell:",c,i
            result = False
            if verbosity >= 0:
              print "%s : " % tm1Label,
              tm1.printCell(c, i)
              print "\n%s : " % tm2Label,
              tm2.printCell(c, i)

  if result == True and (verbosity > 1):
    print "TM's match"

  return result
[ "def", "tmDiff2", "(", "tm1", ",", "tm2", ",", "verbosity", "=", "0", ",", "relaxSegmentTests", "=", "True", ",", "checkLearn", "=", "True", ",", "checkStates", "=", "True", ")", ":", "# First check basic parameters. If we fail here, don't continue", "if", "sameTMParams", "(", "tm1", ",", "tm2", ")", "==", "False", ":", "print", "\"Two TM's have different parameters\"", "return", "False", "tm1Label", "=", "\"<tm_1 (%s)>\"", "%", "tm1", ".", "__class__", ".", "__name__", "tm2Label", "=", "\"<tm_2 (%s)>\"", "%", "tm2", ".", "__class__", ".", "__name__", "result", "=", "True", "if", "checkStates", ":", "# Compare states at t first, they usually diverge before the structure of the", "# cells starts diverging", "if", "(", "tm1", ".", "infActiveState", "[", "'t'", "]", "!=", "tm2", ".", "infActiveState", "[", "'t'", "]", ")", ".", "any", "(", ")", ":", "print", "'Active states diverged'", ",", "numpy", ".", "where", "(", "tm1", ".", "infActiveState", "[", "'t'", "]", "!=", "tm2", ".", "infActiveState", "[", "'t'", "]", ")", "result", "=", "False", "if", "(", "tm1", ".", "infPredictedState", "[", "'t'", "]", "-", "tm2", ".", "infPredictedState", "[", "'t'", "]", ")", ".", "any", "(", ")", ":", "print", "'Predicted states diverged'", ",", "numpy", ".", "where", "(", "tm1", ".", "infPredictedState", "[", "'t'", "]", "!=", "tm2", ".", "infPredictedState", "[", "'t'", "]", ")", "result", "=", "False", "if", "checkLearn", "and", "(", "tm1", ".", "lrnActiveState", "[", "'t'", "]", "-", "tm2", ".", "lrnActiveState", "[", "'t'", "]", ")", ".", "any", "(", ")", ":", "print", "'lrnActiveState[t] diverged'", ",", "numpy", ".", "where", "(", "tm1", ".", "lrnActiveState", "[", "'t'", "]", "!=", "tm2", ".", "lrnActiveState", "[", "'t'", "]", ")", "result", "=", "False", "if", "checkLearn", "and", "(", "tm1", ".", "lrnPredictedState", "[", "'t'", "]", "-", "tm2", ".", "lrnPredictedState", "[", "'t'", "]", ")", ".", "any", "(", ")", ":", "print", "'lrnPredictedState[t] diverged'", ",", "numpy", ".", "where", "(", "tm1", ".", "lrnPredictedState", "[", "'t'", "]", "!=", "tm2", ".", "lrnPredictedState", "[", "'t'", "]", ")", "result", "=", "False", "if", "checkLearn", "and", "abs", "(", "tm1", ".", "getAvgLearnedSeqLength", "(", ")", "-", "tm2", ".", "getAvgLearnedSeqLength", "(", ")", ")", ">", "0.01", ":", "print", "\"Average learned sequence lengths differ: \"", ",", "print", "tm1", ".", "getAvgLearnedSeqLength", "(", ")", ",", "\" vs \"", ",", "tm2", ".", "getAvgLearnedSeqLength", "(", ")", "result", "=", "False", "# TODO: check confidence at T (confT)", "# Now check some high level learned parameters.", "if", "tm1", ".", "getNumSegments", "(", ")", "!=", "tm2", ".", "getNumSegments", "(", ")", ":", "print", "\"Number of segments are different\"", ",", "tm1", ".", "getNumSegments", "(", ")", ",", "tm2", ".", "getNumSegments", "(", ")", "result", "=", "False", "if", "tm1", ".", "getNumSynapses", "(", ")", "!=", "tm2", ".", "getNumSynapses", "(", ")", ":", "print", "\"Number of synapses are different\"", ",", "tm1", ".", "getNumSynapses", "(", ")", ",", "tm2", ".", "getNumSynapses", "(", ")", "if", "verbosity", ">=", "3", ":", "print", "\"%s: \"", "%", "tm1Label", ",", "tm1", ".", "printCells", "(", ")", "print", "\"\\n%s : \"", "%", "tm2Label", ",", "tm2", ".", "printCells", "(", ")", "#result = False", "# Check that each cell has the same number of segments and synapses", "for", "c", "in", "xrange", "(", "tm1", ".", "numberOfCols", ")", ":", "for", "i", "in", "xrange", "(", "tm2", ".", "cellsPerColumn", ")", 
":", "if", "tm1", ".", "getNumSegmentsInCell", "(", "c", ",", "i", ")", "!=", "tm2", ".", "getNumSegmentsInCell", "(", "c", ",", "i", ")", ":", "print", "\"Num segments different in cell:\"", ",", "c", ",", "i", ",", "print", "tm1", ".", "getNumSegmentsInCell", "(", "c", ",", "i", ")", ",", "tm2", ".", "getNumSegmentsInCell", "(", "c", ",", "i", ")", "result", "=", "False", "# If the above tests pass, then check each segment and report differences", "# Note that segments in tm1 can be in a different order than tm2. Here we", "# make sure that, for each segment in tm1, there is an identical segment", "# in tm2.", "if", "result", "==", "True", "and", "not", "relaxSegmentTests", "and", "checkLearn", ":", "for", "c", "in", "xrange", "(", "tm1", ".", "numberOfCols", ")", ":", "for", "i", "in", "xrange", "(", "tm2", ".", "cellsPerColumn", ")", ":", "nSegs", "=", "tm1", ".", "getNumSegmentsInCell", "(", "c", ",", "i", ")", "for", "segIdx", "in", "xrange", "(", "nSegs", ")", ":", "tm1seg", "=", "tm1", ".", "getSegmentOnCell", "(", "c", ",", "i", ",", "segIdx", ")", "# Loop through all segments in tm2seg and see if any of them match tm1seg", "res", "=", "False", "for", "tm2segIdx", "in", "xrange", "(", "nSegs", ")", ":", "tm2seg", "=", "tm2", ".", "getSegmentOnCell", "(", "c", ",", "i", ",", "tm2segIdx", ")", "if", "sameSegment", "(", "tm1seg", ",", "tm2seg", ")", "==", "True", ":", "res", "=", "True", "break", "if", "res", "==", "False", ":", "print", "\"\\nSegments are different for cell:\"", ",", "c", ",", "i", "result", "=", "False", "if", "verbosity", ">=", "0", ":", "print", "\"%s : \"", "%", "tm1Label", ",", "tm1", ".", "printCell", "(", "c", ",", "i", ")", "print", "\"\\n%s : \"", "%", "tm2Label", ",", "tm2", ".", "printCell", "(", "c", ",", "i", ")", "if", "result", "==", "True", "and", "(", "verbosity", ">", "1", ")", ":", "print", "\"TM's match\"", "return", "result" ]
Given two TM instances, lists the differences between them and returns False if there is a difference. This function checks the major parameters. If this passes (and checkLearn is true) it checks the number of segments on each cell. If this passes, it checks each synapse on each segment. When comparing C++ and Py, the segments are usually in different orders in the cells. tmDiff2 ignores segment order when comparing TM's. If checkLearn is True, learn states are checked as well as all the segments. If checkStates is True, the various state arrays are checked.
[ "Given", "two", "TM", "instances", "list", "the", "difference", "between", "them", "and", "returns", "False", "if", "there", "is", "a", "difference", ".", "This", "function", "checks", "the", "major", "parameters", ".", "If", "this", "passes", "(", "and", "checkLearn", "is", "true", ")", "it", "checks", "the", "number", "of", "segments", "on", "each", "cell", ".", "If", "this", "passes", "checks", "each", "synapse", "on", "each", "segment", ".", "When", "comparing", "C", "++", "and", "Py", "the", "segments", "are", "usually", "in", "different", "orders", "in", "the", "cells", ".", "tmDiff", "ignores", "segment", "order", "when", "comparing", "TM", "s", "." ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/fdrutilities.py#L581-L686
valid
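Note: a minimal standalone sketch (not nupic code) of the order-insensitive segment comparison tmDiff2 performs: for each segment on a cell in one TM, look for an equivalent segment on the same cell in the other TM, ignoring ordering. The toy segments and the equality predicate below are hypothetical stand-ins for nupic's segment objects and sameSegment().

def segmentsMatch(segsA, segsB, sameSegment):
    # Equal counts, and every segment in A has a counterpart in B.
    if len(segsA) != len(segsB):
        return False
    return all(any(sameSegment(a, b) for b in segsB) for a in segsA)

# Toy "segments": frozensets of (srcCol, srcCell, permanence) synapses.
same = lambda a, b: a == b
s1 = [frozenset([(0, 1, 0.5)]), frozenset([(2, 0, 0.3)])]
s2 = [frozenset([(2, 0, 0.3)]), frozenset([(0, 1, 0.5)])]  # same set, reordered
print(segmentsMatch(s1, s2, same))  # True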
numenta/nupic
src/nupic/algorithms/fdrutilities.py
spDiff
def spDiff(SP1,SP2): """ Function that compares two spatial pooler instances. Compares the static variables between the two poolers to make sure that they are equivalent. Parameters ----------------------------------------- SP1 first spatial pooler to be compared SP2 second spatial pooler to be compared To establish equality, this function does the following: 1.Compares the connected synapse matrices for each coincidence 2.Compare the potential synapse matrices for each coincidence 3.Compare the permanence matrices for each coincidence 4.Compare the firing boosts between the two poolers. 5.Compare the duty cycles before and after inhibition for both poolers """ if(len(SP1._masterConnectedM)!=len(SP2._masterConnectedM)): print "Connected synapse matrices are different sizes" return False if(len(SP1._masterPotentialM)!=len(SP2._masterPotentialM)): print "Potential synapse matrices are different sizes" return False if(len(SP1._masterPermanenceM)!=len(SP2._masterPermanenceM)): print "Permanence matrices are different sizes" return False #iterate over cells for i in range(0,len(SP1._masterConnectedM)): #grab the Coincidence Matrices and compare them connected1 = SP1._masterConnectedM[i] connected2 = SP2._masterConnectedM[i] if(connected1!=connected2): print "Connected Matrices for cell %d different" % (i) return False #grab permanence Matrices and compare them permanences1 = SP1._masterPermanenceM[i]; permanences2 = SP2._masterPermanenceM[i]; if(permanences1!=permanences2): print "Permanence Matrices for cell %d different" % (i) return False #grab the potential connection Matrices and compare them potential1 = SP1._masterPotentialM[i]; potential2 = SP2._masterPotentialM[i]; if(potential1!=potential2): print "Potential Matrices for cell %d different" % (i) return False #Check firing boosts if(not numpy.array_equal(SP1._firingBoostFactors,SP2._firingBoostFactors)): print "Firing boost factors are different between spatial poolers" return False #Check duty cycles after inhibiton if(not numpy.array_equal(SP1._dutyCycleAfterInh,SP2._dutyCycleAfterInh)): print "Duty cycles after inhibition are different between spatial poolers" return False #Check duty cycles before inhibition if(not numpy.array_equal(SP1._dutyCycleBeforeInh,SP2._dutyCycleBeforeInh)): print "Duty cycles before inhibition are different between spatial poolers" return False print("Spatial Poolers are equivalent") return True
python
def spDiff(SP1,SP2): """ Function that compares two spatial pooler instances. Compares the static variables between the two poolers to make sure that they are equivalent. Parameters ----------------------------------------- SP1 first spatial pooler to be compared SP2 second spatial pooler to be compared To establish equality, this function does the following: 1.Compares the connected synapse matrices for each coincidence 2.Compare the potential synapse matrices for each coincidence 3.Compare the permanence matrices for each coincidence 4.Compare the firing boosts between the two poolers. 5.Compare the duty cycles before and after inhibition for both poolers """ if(len(SP1._masterConnectedM)!=len(SP2._masterConnectedM)): print "Connected synapse matrices are different sizes" return False if(len(SP1._masterPotentialM)!=len(SP2._masterPotentialM)): print "Potential synapse matrices are different sizes" return False if(len(SP1._masterPermanenceM)!=len(SP2._masterPermanenceM)): print "Permanence matrices are different sizes" return False #iterate over cells for i in range(0,len(SP1._masterConnectedM)): #grab the Coincidence Matrices and compare them connected1 = SP1._masterConnectedM[i] connected2 = SP2._masterConnectedM[i] if(connected1!=connected2): print "Connected Matrices for cell %d different" % (i) return False #grab permanence Matrices and compare them permanences1 = SP1._masterPermanenceM[i]; permanences2 = SP2._masterPermanenceM[i]; if(permanences1!=permanences2): print "Permanence Matrices for cell %d different" % (i) return False #grab the potential connection Matrices and compare them potential1 = SP1._masterPotentialM[i]; potential2 = SP2._masterPotentialM[i]; if(potential1!=potential2): print "Potential Matrices for cell %d different" % (i) return False #Check firing boosts if(not numpy.array_equal(SP1._firingBoostFactors,SP2._firingBoostFactors)): print "Firing boost factors are different between spatial poolers" return False #Check duty cycles after inhibiton if(not numpy.array_equal(SP1._dutyCycleAfterInh,SP2._dutyCycleAfterInh)): print "Duty cycles after inhibition are different between spatial poolers" return False #Check duty cycles before inhibition if(not numpy.array_equal(SP1._dutyCycleBeforeInh,SP2._dutyCycleBeforeInh)): print "Duty cycles before inhibition are different between spatial poolers" return False print("Spatial Poolers are equivalent") return True
[ "def", "spDiff", "(", "SP1", ",", "SP2", ")", ":", "if", "(", "len", "(", "SP1", ".", "_masterConnectedM", ")", "!=", "len", "(", "SP2", ".", "_masterConnectedM", ")", ")", ":", "print", "\"Connected synapse matrices are different sizes\"", "return", "False", "if", "(", "len", "(", "SP1", ".", "_masterPotentialM", ")", "!=", "len", "(", "SP2", ".", "_masterPotentialM", ")", ")", ":", "print", "\"Potential synapse matrices are different sizes\"", "return", "False", "if", "(", "len", "(", "SP1", ".", "_masterPermanenceM", ")", "!=", "len", "(", "SP2", ".", "_masterPermanenceM", ")", ")", ":", "print", "\"Permanence matrices are different sizes\"", "return", "False", "#iterate over cells", "for", "i", "in", "range", "(", "0", ",", "len", "(", "SP1", ".", "_masterConnectedM", ")", ")", ":", "#grab the Coincidence Matrices and compare them", "connected1", "=", "SP1", ".", "_masterConnectedM", "[", "i", "]", "connected2", "=", "SP2", ".", "_masterConnectedM", "[", "i", "]", "if", "(", "connected1", "!=", "connected2", ")", ":", "print", "\"Connected Matrices for cell %d different\"", "%", "(", "i", ")", "return", "False", "#grab permanence Matrices and compare them", "permanences1", "=", "SP1", ".", "_masterPermanenceM", "[", "i", "]", "permanences2", "=", "SP2", ".", "_masterPermanenceM", "[", "i", "]", "if", "(", "permanences1", "!=", "permanences2", ")", ":", "print", "\"Permanence Matrices for cell %d different\"", "%", "(", "i", ")", "return", "False", "#grab the potential connection Matrices and compare them", "potential1", "=", "SP1", ".", "_masterPotentialM", "[", "i", "]", "potential2", "=", "SP2", ".", "_masterPotentialM", "[", "i", "]", "if", "(", "potential1", "!=", "potential2", ")", ":", "print", "\"Potential Matrices for cell %d different\"", "%", "(", "i", ")", "return", "False", "#Check firing boosts", "if", "(", "not", "numpy", ".", "array_equal", "(", "SP1", ".", "_firingBoostFactors", ",", "SP2", ".", "_firingBoostFactors", ")", ")", ":", "print", "\"Firing boost factors are different between spatial poolers\"", "return", "False", "#Check duty cycles after inhibiton", "if", "(", "not", "numpy", ".", "array_equal", "(", "SP1", ".", "_dutyCycleAfterInh", ",", "SP2", ".", "_dutyCycleAfterInh", ")", ")", ":", "print", "\"Duty cycles after inhibition are different between spatial poolers\"", "return", "False", "#Check duty cycles before inhibition", "if", "(", "not", "numpy", ".", "array_equal", "(", "SP1", ".", "_dutyCycleBeforeInh", ",", "SP2", ".", "_dutyCycleBeforeInh", ")", ")", ":", "print", "\"Duty cycles before inhibition are different between spatial poolers\"", "return", "False", "print", "(", "\"Spatial Poolers are equivalent\"", ")", "return", "True" ]
Function that compares two spatial pooler instances. Compares the static variables between the two poolers to make sure that they are equivalent. Parameters ----------------------------------------- SP1 first spatial pooler to be compared SP2 second spatial pooler to be compared To establish equality, this function does the following: 1. Compares the connected synapse matrices for each coincidence 2. Compares the potential synapse matrices for each coincidence 3. Compares the permanence matrices for each coincidence 4. Compares the firing boosts between the two poolers 5. Compares the duty cycles before and after inhibition for both poolers
[ "Function", "that", "compares", "two", "spatial", "pooler", "instances", ".", "Compares", "the", "static", "variables", "between", "the", "two", "poolers", "to", "make", "sure", "that", "they", "are", "equivalent", "." ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/fdrutilities.py#L690-L767
valid
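Note: spDiff's comparison pattern is a sequence of labelled equality checks with an early exit. A minimal standalone sketch of that pattern, using stand-in numpy arrays rather than real pooler state:

import numpy

def fieldsEquivalent(name, a, b):
    # Report and bail on the first field that differs.
    if not numpy.array_equal(a, b):
        print("%s are different" % name)
        return False
    return True

boosts1 = numpy.ones(16)
boosts2 = numpy.ones(16)
print(fieldsEquivalent("Firing boost factors", boosts1, boosts2))  # True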
numenta/nupic
src/nupic/algorithms/fdrutilities.py
removeSeqStarts
def removeSeqStarts(vectors, resets, numSteps=1): """ Convert a list of sequences of pattern indices, and a pattern lookup table into a an array of patterns Parameters: ----------------------------------------------- vectors: the data vectors. Row 0 contains the outputs from time step 0, row 1 from time step 1, etc. resets: the reset signal. This is a vector of booleans the same length as the number of rows in 'vectors'. It has a 1 where a sequence started and a 0 otherwise. The first 'numSteps' rows of 'vectors' of each sequence will not be included in the return result. numSteps Number of samples to remove from the start of each sequence retval: copy of vectors, with the first 'numSteps' samples at the start of each sequence removed. """ # Do nothing if numSteps is 0 if numSteps == 0: return vectors resetIndices = resets.nonzero()[0] removeRows = resetIndices for i in range(numSteps-1): removeRows = numpy.hstack((removeRows, resetIndices+i+1)) return numpy.delete(vectors, removeRows, axis=0)
python
def removeSeqStarts(vectors, resets, numSteps=1): """ Convert a list of sequences of pattern indices, and a pattern lookup table into a an array of patterns Parameters: ----------------------------------------------- vectors: the data vectors. Row 0 contains the outputs from time step 0, row 1 from time step 1, etc. resets: the reset signal. This is a vector of booleans the same length as the number of rows in 'vectors'. It has a 1 where a sequence started and a 0 otherwise. The first 'numSteps' rows of 'vectors' of each sequence will not be included in the return result. numSteps Number of samples to remove from the start of each sequence retval: copy of vectors, with the first 'numSteps' samples at the start of each sequence removed. """ # Do nothing if numSteps is 0 if numSteps == 0: return vectors resetIndices = resets.nonzero()[0] removeRows = resetIndices for i in range(numSteps-1): removeRows = numpy.hstack((removeRows, resetIndices+i+1)) return numpy.delete(vectors, removeRows, axis=0)
[ "def", "removeSeqStarts", "(", "vectors", ",", "resets", ",", "numSteps", "=", "1", ")", ":", "# Do nothing if numSteps is 0", "if", "numSteps", "==", "0", ":", "return", "vectors", "resetIndices", "=", "resets", ".", "nonzero", "(", ")", "[", "0", "]", "removeRows", "=", "resetIndices", "for", "i", "in", "range", "(", "numSteps", "-", "1", ")", ":", "removeRows", "=", "numpy", ".", "hstack", "(", "(", "removeRows", ",", "resetIndices", "+", "i", "+", "1", ")", ")", "return", "numpy", ".", "delete", "(", "vectors", ",", "removeRows", ",", "axis", "=", "0", ")" ]
Remove the first 'numSteps' rows at the start of each sequence in 'vectors'. Parameters: ----------------------------------------------- vectors: the data vectors. Row 0 contains the outputs from time step 0, row 1 from time step 1, etc. resets: the reset signal. This is a vector of booleans the same length as the number of rows in 'vectors'. It has a 1 where a sequence started and a 0 otherwise. The first 'numSteps' rows of 'vectors' of each sequence will not be included in the return result. numSteps: Number of samples to remove from the start of each sequence retval: copy of vectors, with the first 'numSteps' samples at the start of each sequence removed.
[ "Convert", "a", "list", "of", "sequences", "of", "pattern", "indices", "and", "a", "pattern", "lookup", "table", "into", "a", "an", "array", "of", "patterns" ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/fdrutilities.py#L771-L800
valid
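Note: a worked example of removeSeqStarts' documented row-removal logic, restated standalone (numpy only, numSteps fixed at 1) so the behavior can be checked directly:

import numpy

vectors = numpy.arange(12).reshape(6, 2)   # 6 time steps, 2 outputs
resets = numpy.array([1, 0, 0, 1, 0, 0])   # sequences start at rows 0 and 3

removeRows = resets.nonzero()[0]           # numSteps == 1: drop just the starts
print(numpy.delete(vectors, removeRows, axis=0))
# Rows 0 and 3 (the first sample of each sequence) are gone.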
numenta/nupic
src/nupic/algorithms/fdrutilities.py
_accumulateFrequencyCounts
def _accumulateFrequencyCounts(values, freqCounts=None): """ Accumulate a list of values 'values' into the frequency counts 'freqCounts', and return the updated frequency counts For example, if values contained the following: [1,1,3,5,1,3,5], and the initial freqCounts was None, then the return value would be: [0,3,0,2,0,2] which corresponds to how many of each value we saw in the input, i.e. there were 0 0's, 3 1's, 0 2's, 2 3's, 0 4's, and 2 5's. If freqCounts is not None, the values will be added to the existing counts and the length of the frequency Counts will be automatically extended as necessary Parameters: ----------------------------------------------- values: The values to accumulate into the frequency counts freqCounts: Accumulated frequency counts so far, or none """ # How big does our freqCounts vector need to be? values = numpy.array(values) numEntries = values.max() + 1 if freqCounts is not None: numEntries = max(numEntries, freqCounts.size) # Where do we accumulate the results? if freqCounts is not None: if freqCounts.size != numEntries: newCounts = numpy.zeros(numEntries, dtype='int32') newCounts[0:freqCounts.size] = freqCounts else: newCounts = freqCounts else: newCounts = numpy.zeros(numEntries, dtype='int32') # Accumulate the new values for v in values: newCounts[v] += 1 return newCounts
python
def _accumulateFrequencyCounts(values, freqCounts=None): """ Accumulate a list of values 'values' into the frequency counts 'freqCounts', and return the updated frequency counts For example, if values contained the following: [1,1,3,5,1,3,5], and the initial freqCounts was None, then the return value would be: [0,3,0,2,0,2] which corresponds to how many of each value we saw in the input, i.e. there were 0 0's, 3 1's, 0 2's, 2 3's, 0 4's, and 2 5's. If freqCounts is not None, the values will be added to the existing counts and the length of the frequency Counts will be automatically extended as necessary Parameters: ----------------------------------------------- values: The values to accumulate into the frequency counts freqCounts: Accumulated frequency counts so far, or none """ # How big does our freqCounts vector need to be? values = numpy.array(values) numEntries = values.max() + 1 if freqCounts is not None: numEntries = max(numEntries, freqCounts.size) # Where do we accumulate the results? if freqCounts is not None: if freqCounts.size != numEntries: newCounts = numpy.zeros(numEntries, dtype='int32') newCounts[0:freqCounts.size] = freqCounts else: newCounts = freqCounts else: newCounts = numpy.zeros(numEntries, dtype='int32') # Accumulate the new values for v in values: newCounts[v] += 1 return newCounts
[ "def", "_accumulateFrequencyCounts", "(", "values", ",", "freqCounts", "=", "None", ")", ":", "# How big does our freqCounts vector need to be?", "values", "=", "numpy", ".", "array", "(", "values", ")", "numEntries", "=", "values", ".", "max", "(", ")", "+", "1", "if", "freqCounts", "is", "not", "None", ":", "numEntries", "=", "max", "(", "numEntries", ",", "freqCounts", ".", "size", ")", "# Where do we accumulate the results?", "if", "freqCounts", "is", "not", "None", ":", "if", "freqCounts", ".", "size", "!=", "numEntries", ":", "newCounts", "=", "numpy", ".", "zeros", "(", "numEntries", ",", "dtype", "=", "'int32'", ")", "newCounts", "[", "0", ":", "freqCounts", ".", "size", "]", "=", "freqCounts", "else", ":", "newCounts", "=", "freqCounts", "else", ":", "newCounts", "=", "numpy", ".", "zeros", "(", "numEntries", ",", "dtype", "=", "'int32'", ")", "# Accumulate the new values", "for", "v", "in", "values", ":", "newCounts", "[", "v", "]", "+=", "1", "return", "newCounts" ]
Accumulate a list of values 'values' into the frequency counts 'freqCounts', and return the updated frequency counts For example, if values contained the following: [1,1,3,5,1,3,5], and the initial freqCounts was None, then the return value would be: [0,3,0,2,0,2] which corresponds to how many of each value we saw in the input, i.e. there were 0 0's, 3 1's, 0 2's, 2 3's, 0 4's, and 2 5's. If freqCounts is not None, the values will be added to the existing counts and the length of the frequency counts will be automatically extended as necessary Parameters: ----------------------------------------------- values: The values to accumulate into the frequency counts freqCounts: Accumulated frequency counts so far, or None
[ "Accumulate", "a", "list", "of", "values", "values", "into", "the", "frequency", "counts", "freqCounts", "and", "return", "the", "updated", "frequency", "counts" ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/fdrutilities.py#L804-L844
valid
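Note: the docstring's [1,1,3,5,1,3,5] example, reproduced as a runnable standalone check of the counting logic (no nupic needed):

import numpy

values = numpy.array([1, 1, 3, 5, 1, 3, 5])
counts = numpy.zeros(values.max() + 1, dtype='int32')
for v in values:
    counts[v] += 1          # one tally per occurrence of each value
print(counts)               # [0 3 0 2 0 2]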
numenta/nupic
src/nupic/algorithms/fdrutilities.py
_listOfOnTimesInVec
def _listOfOnTimesInVec(vector): """ Returns 3 things for a vector: * the total on time * the number of runs * a list of the durations of each run. Parameters: ----------------------------------------------- input stream: 11100000001100000000011111100000 return value: (11, 3, [3, 2, 6]) """ # init counters durations = [] numOnTimes = 0 totalOnTime = 0 # Find where the nonzeros are nonzeros = numpy.array(vector).nonzero()[0] # Nothing to do if vector is empty if len(nonzeros) == 0: return (0, 0, []) # Special case of only 1 on bit if len(nonzeros) == 1: return (1, 1, [1]) # Count the consecutive non-zeros prev = nonzeros[0] onTime = 1 endIdx = nonzeros[-1] for idx in nonzeros[1:]: if idx != prev+1: totalOnTime += onTime numOnTimes += 1 durations.append(onTime) onTime = 1 else: onTime += 1 prev = idx # Add in the last one totalOnTime += onTime numOnTimes += 1 durations.append(onTime) return (totalOnTime, numOnTimes, durations)
python
def _listOfOnTimesInVec(vector): """ Returns 3 things for a vector: * the total on time * the number of runs * a list of the durations of each run. Parameters: ----------------------------------------------- input stream: 11100000001100000000011111100000 return value: (11, 3, [3, 2, 6]) """ # init counters durations = [] numOnTimes = 0 totalOnTime = 0 # Find where the nonzeros are nonzeros = numpy.array(vector).nonzero()[0] # Nothing to do if vector is empty if len(nonzeros) == 0: return (0, 0, []) # Special case of only 1 on bit if len(nonzeros) == 1: return (1, 1, [1]) # Count the consecutive non-zeros prev = nonzeros[0] onTime = 1 endIdx = nonzeros[-1] for idx in nonzeros[1:]: if idx != prev+1: totalOnTime += onTime numOnTimes += 1 durations.append(onTime) onTime = 1 else: onTime += 1 prev = idx # Add in the last one totalOnTime += onTime numOnTimes += 1 durations.append(onTime) return (totalOnTime, numOnTimes, durations)
[ "def", "_listOfOnTimesInVec", "(", "vector", ")", ":", "# init counters", "durations", "=", "[", "]", "numOnTimes", "=", "0", "totalOnTime", "=", "0", "# Find where the nonzeros are", "nonzeros", "=", "numpy", ".", "array", "(", "vector", ")", ".", "nonzero", "(", ")", "[", "0", "]", "# Nothing to do if vector is empty", "if", "len", "(", "nonzeros", ")", "==", "0", ":", "return", "(", "0", ",", "0", ",", "[", "]", ")", "# Special case of only 1 on bit", "if", "len", "(", "nonzeros", ")", "==", "1", ":", "return", "(", "1", ",", "1", ",", "[", "1", "]", ")", "# Count the consecutive non-zeros", "prev", "=", "nonzeros", "[", "0", "]", "onTime", "=", "1", "endIdx", "=", "nonzeros", "[", "-", "1", "]", "for", "idx", "in", "nonzeros", "[", "1", ":", "]", ":", "if", "idx", "!=", "prev", "+", "1", ":", "totalOnTime", "+=", "onTime", "numOnTimes", "+=", "1", "durations", ".", "append", "(", "onTime", ")", "onTime", "=", "1", "else", ":", "onTime", "+=", "1", "prev", "=", "idx", "# Add in the last one", "totalOnTime", "+=", "onTime", "numOnTimes", "+=", "1", "durations", ".", "append", "(", "onTime", ")", "return", "(", "totalOnTime", ",", "numOnTimes", ",", "durations", ")" ]
Returns 3 things for a vector: * the total on time * the number of runs * a list of the durations of each run. Parameters: ----------------------------------------------- input stream: 11100000001100000000011111100000 return value: (11, 3, [3, 2, 6])
[ "Returns", "3", "things", "for", "a", "vector", ":", "*", "the", "total", "on", "time", "*", "the", "number", "of", "runs", "*", "a", "list", "of", "the", "durations", "of", "each", "run", "." ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/fdrutilities.py#L848-L896
valid
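Note: a standalone restatement of the run-length extraction on the docstring's example stream, confirming the documented (11, 3, [3, 2, 6]) result:

import numpy

# The docstring's input stream.
vec = numpy.array([int(c) for c in "11100000001100000000011111100000"])

nonzeros = vec.nonzero()[0]
durations, run = [], 1
for prev, idx in zip(nonzeros, nonzeros[1:]):
    if idx == prev + 1:
        run += 1            # still inside the current run
    else:
        durations.append(run)
        run = 1             # a new run starts
durations.append(run)
print((sum(durations), len(durations), durations))  # (11, 3, [3, 2, 6])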
numenta/nupic
src/nupic/algorithms/fdrutilities.py
_fillInOnTimes
def _fillInOnTimes(vector, durations): """ Helper function used by averageOnTimePerTimestep. 'durations' is a vector which must be the same len as vector. For each "on" in vector, it fills in the corresponding element of duration with the duration of that "on" signal up until that time Parameters: ----------------------------------------------- vector: vector of output values over time durations: vector same length as 'vector', initialized to 0's. This is filled in with the durations of each 'on" signal. Example: vector: 11100000001100000000011111100000 durations: 12300000001200000000012345600000 """ # Find where the nonzeros are nonzeros = numpy.array(vector).nonzero()[0] # Nothing to do if vector is empty if len(nonzeros) == 0: return # Special case of only 1 on bit if len(nonzeros) == 1: durations[nonzeros[0]] = 1 return # Count the consecutive non-zeros prev = nonzeros[0] onTime = 1 onStartIdx = prev endIdx = nonzeros[-1] for idx in nonzeros[1:]: if idx != prev+1: # Fill in the durations durations[onStartIdx:onStartIdx+onTime] = range(1,onTime+1) onTime = 1 onStartIdx = idx else: onTime += 1 prev = idx # Fill in the last one durations[onStartIdx:onStartIdx+onTime] = range(1,onTime+1)
python
def _fillInOnTimes(vector, durations): """ Helper function used by averageOnTimePerTimestep. 'durations' is a vector which must be the same len as vector. For each "on" in vector, it fills in the corresponding element of duration with the duration of that "on" signal up until that time Parameters: ----------------------------------------------- vector: vector of output values over time durations: vector same length as 'vector', initialized to 0's. This is filled in with the durations of each 'on" signal. Example: vector: 11100000001100000000011111100000 durations: 12300000001200000000012345600000 """ # Find where the nonzeros are nonzeros = numpy.array(vector).nonzero()[0] # Nothing to do if vector is empty if len(nonzeros) == 0: return # Special case of only 1 on bit if len(nonzeros) == 1: durations[nonzeros[0]] = 1 return # Count the consecutive non-zeros prev = nonzeros[0] onTime = 1 onStartIdx = prev endIdx = nonzeros[-1] for idx in nonzeros[1:]: if idx != prev+1: # Fill in the durations durations[onStartIdx:onStartIdx+onTime] = range(1,onTime+1) onTime = 1 onStartIdx = idx else: onTime += 1 prev = idx # Fill in the last one durations[onStartIdx:onStartIdx+onTime] = range(1,onTime+1)
[ "def", "_fillInOnTimes", "(", "vector", ",", "durations", ")", ":", "# Find where the nonzeros are", "nonzeros", "=", "numpy", ".", "array", "(", "vector", ")", ".", "nonzero", "(", ")", "[", "0", "]", "# Nothing to do if vector is empty", "if", "len", "(", "nonzeros", ")", "==", "0", ":", "return", "# Special case of only 1 on bit", "if", "len", "(", "nonzeros", ")", "==", "1", ":", "durations", "[", "nonzeros", "[", "0", "]", "]", "=", "1", "return", "# Count the consecutive non-zeros", "prev", "=", "nonzeros", "[", "0", "]", "onTime", "=", "1", "onStartIdx", "=", "prev", "endIdx", "=", "nonzeros", "[", "-", "1", "]", "for", "idx", "in", "nonzeros", "[", "1", ":", "]", ":", "if", "idx", "!=", "prev", "+", "1", ":", "# Fill in the durations", "durations", "[", "onStartIdx", ":", "onStartIdx", "+", "onTime", "]", "=", "range", "(", "1", ",", "onTime", "+", "1", ")", "onTime", "=", "1", "onStartIdx", "=", "idx", "else", ":", "onTime", "+=", "1", "prev", "=", "idx", "# Fill in the last one", "durations", "[", "onStartIdx", ":", "onStartIdx", "+", "onTime", "]", "=", "range", "(", "1", ",", "onTime", "+", "1", ")" ]
Helper function used by averageOnTimePerTimestep. 'durations' is a vector which must be the same length as 'vector'. For each "on" in vector, it fills in the corresponding element of 'durations' with the duration of that "on" signal up until that time Parameters: ----------------------------------------------- vector: vector of output values over time durations: vector same length as 'vector', initialized to 0's. This is filled in with the durations of each 'on' signal. Example: vector: 11100000001100000000011111100000 durations: 12300000001200000000012345600000
[ "Helper", "function", "used", "by", "averageOnTimePerTimestep", ".", "durations", "is", "a", "vector", "which", "must", "be", "the", "same", "len", "as", "vector", ".", "For", "each", "on", "in", "vector", "it", "fills", "in", "the", "corresponding", "element", "of", "duration", "with", "the", "duration", "of", "that", "on", "signal", "up", "until", "that", "time" ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/fdrutilities.py#L900-L946
valid
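Note: a standalone restatement of the fill logic on the docstring's example, showing how each "on" position gets its running duration:

import numpy

vec = numpy.array([int(c) for c in "11100000001100000000011111100000"])
durations = numpy.zeros(len(vec), dtype='int32')

run = 0
for i, v in enumerate(vec):
    run = run + 1 if v else 0   # running on-time, reset at every zero
    durations[i] = run
print("".join(str(d) for d in durations))
# -> 12300000001200000000012345600000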
numenta/nupic
src/nupic/algorithms/fdrutilities.py
averageOnTimePerTimestep
def averageOnTimePerTimestep(vectors, numSamples=None): """ Computes the average on-time of the outputs that are on at each time step, and then averages this over all time steps. This metric is resiliant to the number of outputs that are on at each time step. That is, if time step 0 has many more outputs on than time step 100, it won't skew the results. This is particularly useful when measuring the average on-time of things like the temporal memory output where you might have many columns bursting at the start of a sequence - you don't want those start of sequence bursts to over-influence the calculated average on-time. Parameters: ----------------------------------------------- vectors: the vectors for which the onTime is calculated. Row 0 contains the outputs from time step 0, row 1 from time step 1, etc. numSamples: the number of elements for which on-time is calculated. If not specified, then all elements are looked at. Returns (scalar average on-time over all time steps, list containing frequency counts of each encountered on-time) """ # Special case given a 1 dimensional vector: it represents a single column if vectors.ndim == 1: vectors.shape = (-1,1) numTimeSteps = len(vectors) numElements = len(vectors[0]) # How many samples will we look at? if numSamples is not None: import pdb; pdb.set_trace() # Test this.... countOn = numpy.random.randint(0, numElements, numSamples) vectors = vectors[:, countOn] # Fill in each non-zero of vectors with the on-time that that output was # on for. durations = numpy.zeros(vectors.shape, dtype='int32') for col in xrange(vectors.shape[1]): _fillInOnTimes(vectors[:,col], durations[:,col]) # Compute the average on time for each time step sums = vectors.sum(axis=1) sums.clip(min=1, max=numpy.inf, out=sums) avgDurations = durations.sum(axis=1, dtype='float64') / sums avgOnTime = avgDurations.sum() / (avgDurations > 0).sum() # Generate the frequency counts for each duration freqCounts = _accumulateFrequencyCounts(avgDurations) return (avgOnTime, freqCounts)
python
def averageOnTimePerTimestep(vectors, numSamples=None): """ Computes the average on-time of the outputs that are on at each time step, and then averages this over all time steps. This metric is resiliant to the number of outputs that are on at each time step. That is, if time step 0 has many more outputs on than time step 100, it won't skew the results. This is particularly useful when measuring the average on-time of things like the temporal memory output where you might have many columns bursting at the start of a sequence - you don't want those start of sequence bursts to over-influence the calculated average on-time. Parameters: ----------------------------------------------- vectors: the vectors for which the onTime is calculated. Row 0 contains the outputs from time step 0, row 1 from time step 1, etc. numSamples: the number of elements for which on-time is calculated. If not specified, then all elements are looked at. Returns (scalar average on-time over all time steps, list containing frequency counts of each encountered on-time) """ # Special case given a 1 dimensional vector: it represents a single column if vectors.ndim == 1: vectors.shape = (-1,1) numTimeSteps = len(vectors) numElements = len(vectors[0]) # How many samples will we look at? if numSamples is not None: import pdb; pdb.set_trace() # Test this.... countOn = numpy.random.randint(0, numElements, numSamples) vectors = vectors[:, countOn] # Fill in each non-zero of vectors with the on-time that that output was # on for. durations = numpy.zeros(vectors.shape, dtype='int32') for col in xrange(vectors.shape[1]): _fillInOnTimes(vectors[:,col], durations[:,col]) # Compute the average on time for each time step sums = vectors.sum(axis=1) sums.clip(min=1, max=numpy.inf, out=sums) avgDurations = durations.sum(axis=1, dtype='float64') / sums avgOnTime = avgDurations.sum() / (avgDurations > 0).sum() # Generate the frequency counts for each duration freqCounts = _accumulateFrequencyCounts(avgDurations) return (avgOnTime, freqCounts)
[ "def", "averageOnTimePerTimestep", "(", "vectors", ",", "numSamples", "=", "None", ")", ":", "# Special case given a 1 dimensional vector: it represents a single column", "if", "vectors", ".", "ndim", "==", "1", ":", "vectors", ".", "shape", "=", "(", "-", "1", ",", "1", ")", "numTimeSteps", "=", "len", "(", "vectors", ")", "numElements", "=", "len", "(", "vectors", "[", "0", "]", ")", "# How many samples will we look at?", "if", "numSamples", "is", "not", "None", ":", "import", "pdb", "pdb", ".", "set_trace", "(", ")", "# Test this....", "countOn", "=", "numpy", ".", "random", ".", "randint", "(", "0", ",", "numElements", ",", "numSamples", ")", "vectors", "=", "vectors", "[", ":", ",", "countOn", "]", "# Fill in each non-zero of vectors with the on-time that that output was", "# on for.", "durations", "=", "numpy", ".", "zeros", "(", "vectors", ".", "shape", ",", "dtype", "=", "'int32'", ")", "for", "col", "in", "xrange", "(", "vectors", ".", "shape", "[", "1", "]", ")", ":", "_fillInOnTimes", "(", "vectors", "[", ":", ",", "col", "]", ",", "durations", "[", ":", ",", "col", "]", ")", "# Compute the average on time for each time step", "sums", "=", "vectors", ".", "sum", "(", "axis", "=", "1", ")", "sums", ".", "clip", "(", "min", "=", "1", ",", "max", "=", "numpy", ".", "inf", ",", "out", "=", "sums", ")", "avgDurations", "=", "durations", ".", "sum", "(", "axis", "=", "1", ",", "dtype", "=", "'float64'", ")", "/", "sums", "avgOnTime", "=", "avgDurations", ".", "sum", "(", ")", "/", "(", "avgDurations", ">", "0", ")", ".", "sum", "(", ")", "# Generate the frequency counts for each duration", "freqCounts", "=", "_accumulateFrequencyCounts", "(", "avgDurations", ")", "return", "(", "avgOnTime", ",", "freqCounts", ")" ]
Computes the average on-time of the outputs that are on at each time step, and then averages this over all time steps. This metric is resilient to the number of outputs that are on at each time step. That is, if time step 0 has many more outputs on than time step 100, it won't skew the results. This is particularly useful when measuring the average on-time of things like the temporal memory output where you might have many columns bursting at the start of a sequence - you don't want those start of sequence bursts to over-influence the calculated average on-time. Parameters: ----------------------------------------------- vectors: the vectors for which the onTime is calculated. Row 0 contains the outputs from time step 0, row 1 from time step 1, etc. numSamples: the number of elements for which on-time is calculated. If not specified, then all elements are looked at. Returns (scalar average on-time over all time steps, list containing frequency counts of each encountered on-time)
[ "Computes", "the", "average", "on", "-", "time", "of", "the", "outputs", "that", "are", "on", "at", "each", "time", "step", "and", "then", "averages", "this", "over", "all", "time", "steps", "." ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/fdrutilities.py#L950-L1002
valid
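Note: a tiny worked example of the per-timestep averaging math (standalone numpy, with 'durations' given by hand as _fillInOnTimes would produce it):

import numpy

# Two outputs over four steps; durations holds each active output's
# running on-time.
vectors = numpy.array([[1, 0],
                       [1, 1],
                       [0, 1],
                       [0, 1]])
durations = numpy.array([[1, 0],
                         [2, 1],
                         [0, 2],
                         [0, 3]], dtype='float64')

sums = vectors.sum(axis=1).clip(min=1)         # avoid divide-by-zero on empty steps
avgDurations = durations.sum(axis=1) / sums    # [1.0, 1.5, 2.0, 3.0]
print(avgDurations.sum() / (avgDurations > 0).sum())  # 1.875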
numenta/nupic
src/nupic/algorithms/fdrutilities.py
averageOnTime
def averageOnTime(vectors, numSamples=None): """ Returns the average on-time, averaged over all on-time runs. Parameters: ----------------------------------------------- vectors: the vectors for which the onTime is calculated. Row 0 contains the outputs from time step 0, row 1 from time step 1, etc. numSamples: the number of elements for which on-time is calculated. If not specified, then all elements are looked at. Returns: (scalar average on-time of all outputs, list containing frequency counts of each encountered on-time) """ # Special case given a 1 dimensional vector: it represents a single column if vectors.ndim == 1: vectors.shape = (-1,1) numTimeSteps = len(vectors) numElements = len(vectors[0]) # How many samples will we look at? if numSamples is None: numSamples = numElements countOn = range(numElements) else: countOn = numpy.random.randint(0, numElements, numSamples) # Compute the on-times and accumulate the frequency counts of each on-time # encountered sumOfLengths = 0.0 onTimeFreqCounts = None n = 0 for i in countOn: (onTime, segments, durations) = _listOfOnTimesInVec(vectors[:,i]) if onTime != 0.0: sumOfLengths += onTime n += segments onTimeFreqCounts = _accumulateFrequencyCounts(durations, onTimeFreqCounts) # Return the average on time of each element that was on. if n > 0: return (sumOfLengths/n, onTimeFreqCounts) else: return (0.0, onTimeFreqCounts)
python
def averageOnTime(vectors, numSamples=None): """ Returns the average on-time, averaged over all on-time runs. Parameters: ----------------------------------------------- vectors: the vectors for which the onTime is calculated. Row 0 contains the outputs from time step 0, row 1 from time step 1, etc. numSamples: the number of elements for which on-time is calculated. If not specified, then all elements are looked at. Returns: (scalar average on-time of all outputs, list containing frequency counts of each encountered on-time) """ # Special case given a 1 dimensional vector: it represents a single column if vectors.ndim == 1: vectors.shape = (-1,1) numTimeSteps = len(vectors) numElements = len(vectors[0]) # How many samples will we look at? if numSamples is None: numSamples = numElements countOn = range(numElements) else: countOn = numpy.random.randint(0, numElements, numSamples) # Compute the on-times and accumulate the frequency counts of each on-time # encountered sumOfLengths = 0.0 onTimeFreqCounts = None n = 0 for i in countOn: (onTime, segments, durations) = _listOfOnTimesInVec(vectors[:,i]) if onTime != 0.0: sumOfLengths += onTime n += segments onTimeFreqCounts = _accumulateFrequencyCounts(durations, onTimeFreqCounts) # Return the average on time of each element that was on. if n > 0: return (sumOfLengths/n, onTimeFreqCounts) else: return (0.0, onTimeFreqCounts)
[ "def", "averageOnTime", "(", "vectors", ",", "numSamples", "=", "None", ")", ":", "# Special case given a 1 dimensional vector: it represents a single column", "if", "vectors", ".", "ndim", "==", "1", ":", "vectors", ".", "shape", "=", "(", "-", "1", ",", "1", ")", "numTimeSteps", "=", "len", "(", "vectors", ")", "numElements", "=", "len", "(", "vectors", "[", "0", "]", ")", "# How many samples will we look at?", "if", "numSamples", "is", "None", ":", "numSamples", "=", "numElements", "countOn", "=", "range", "(", "numElements", ")", "else", ":", "countOn", "=", "numpy", ".", "random", ".", "randint", "(", "0", ",", "numElements", ",", "numSamples", ")", "# Compute the on-times and accumulate the frequency counts of each on-time", "# encountered", "sumOfLengths", "=", "0.0", "onTimeFreqCounts", "=", "None", "n", "=", "0", "for", "i", "in", "countOn", ":", "(", "onTime", ",", "segments", ",", "durations", ")", "=", "_listOfOnTimesInVec", "(", "vectors", "[", ":", ",", "i", "]", ")", "if", "onTime", "!=", "0.0", ":", "sumOfLengths", "+=", "onTime", "n", "+=", "segments", "onTimeFreqCounts", "=", "_accumulateFrequencyCounts", "(", "durations", ",", "onTimeFreqCounts", ")", "# Return the average on time of each element that was on.", "if", "n", ">", "0", ":", "return", "(", "sumOfLengths", "/", "n", ",", "onTimeFreqCounts", ")", "else", ":", "return", "(", "0.0", ",", "onTimeFreqCounts", ")" ]
Returns the average on-time, averaged over all on-time runs. Parameters: ----------------------------------------------- vectors: the vectors for which the onTime is calculated. Row 0 contains the outputs from time step 0, row 1 from time step 1, etc. numSamples: the number of elements for which on-time is calculated. If not specified, then all elements are looked at. Returns: (scalar average on-time of all outputs, list containing frequency counts of each encountered on-time)
[ "Returns", "the", "average", "on", "-", "time", "averaged", "over", "all", "on", "-", "time", "runs", "." ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/fdrutilities.py#L1006-L1053
valid
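Note: averageOnTime averages run lengths across all runs in all columns. A standalone worked example (the run extraction is re-implemented here so the snippet runs without nupic):

import numpy

def runLengths(col):
    # Lengths of consecutive-1 runs in a 0/1 column.
    runs, run = [], 0
    for v in col:
        if v:
            run += 1
        elif run:
            runs.append(run)
            run = 0
    if run:
        runs.append(run)
    return runs

vectors = numpy.array([[1, 0],
                       [1, 1],
                       [0, 1],
                       [1, 1],
                       [0, 0]])
allRuns = [r for c in range(vectors.shape[1]) for r in runLengths(vectors[:, c])]
print(sum(allRuns) / float(len(allRuns)))  # (2 + 1 + 3) / 3 = 2.0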
numenta/nupic
src/nupic/algorithms/fdrutilities.py
plotOutputsOverTime
def plotOutputsOverTime(vectors, buVectors=None, title='On-times'): """ Generate a figure that shows each output over time. Time goes left to right, and each output is plotted on a different line, allowing you to see the overlap in the outputs, when they turn on/off, etc. Parameters: ------------------------------------------------------------ vectors: the vectors to plot buVectors: These are normally specified when plotting the pooling outputs of the temporal memory over time. The 'buVectors' are the sequence outputs and the 'vectors' are the pooling outputs. The buVector (sequence) outputs will be drawn in a darker color than the vector (pooling) outputs to distinguish where the cell is outputting due to pooling vs. sequence memory. title: title for the plot avgOnTime: The average on-time measurement. If not supplied, then it will be calculated from the passed in vectors. """ # Produce the plot import pylab pylab.ion() pylab.figure() imData = vectors.transpose() if buVectors is not None: assert(buVectors.shape == vectors.shape) imData = imData.copy() imData[buVectors.transpose().astype('bool')] = 2 pylab.imshow(imData, aspect='auto', cmap=pylab.cm.gray_r, interpolation='nearest') pylab.title(title)
python
def plotOutputsOverTime(vectors, buVectors=None, title='On-times'): """ Generate a figure that shows each output over time. Time goes left to right, and each output is plotted on a different line, allowing you to see the overlap in the outputs, when they turn on/off, etc. Parameters: ------------------------------------------------------------ vectors: the vectors to plot buVectors: These are normally specified when plotting the pooling outputs of the temporal memory over time. The 'buVectors' are the sequence outputs and the 'vectors' are the pooling outputs. The buVector (sequence) outputs will be drawn in a darker color than the vector (pooling) outputs to distinguish where the cell is outputting due to pooling vs. sequence memory. title: title for the plot avgOnTime: The average on-time measurement. If not supplied, then it will be calculated from the passed in vectors. """ # Produce the plot import pylab pylab.ion() pylab.figure() imData = vectors.transpose() if buVectors is not None: assert(buVectors.shape == vectors.shape) imData = imData.copy() imData[buVectors.transpose().astype('bool')] = 2 pylab.imshow(imData, aspect='auto', cmap=pylab.cm.gray_r, interpolation='nearest') pylab.title(title)
[ "def", "plotOutputsOverTime", "(", "vectors", ",", "buVectors", "=", "None", ",", "title", "=", "'On-times'", ")", ":", "# Produce the plot", "import", "pylab", "pylab", ".", "ion", "(", ")", "pylab", ".", "figure", "(", ")", "imData", "=", "vectors", ".", "transpose", "(", ")", "if", "buVectors", "is", "not", "None", ":", "assert", "(", "buVectors", ".", "shape", "==", "vectors", ".", "shape", ")", "imData", "=", "imData", ".", "copy", "(", ")", "imData", "[", "buVectors", ".", "transpose", "(", ")", ".", "astype", "(", "'bool'", ")", "]", "=", "2", "pylab", ".", "imshow", "(", "imData", ",", "aspect", "=", "'auto'", ",", "cmap", "=", "pylab", ".", "cm", ".", "gray_r", ",", "interpolation", "=", "'nearest'", ")", "pylab", ".", "title", "(", "title", ")" ]
Generate a figure that shows each output over time. Time goes left to right, and each output is plotted on a different line, allowing you to see the overlap in the outputs, when they turn on/off, etc. Parameters: ------------------------------------------------------------ vectors: the vectors to plot buVectors: These are normally specified when plotting the pooling outputs of the temporal memory over time. The 'buVectors' are the sequence outputs and the 'vectors' are the pooling outputs. The buVector (sequence) outputs will be drawn in a darker color than the vector (pooling) outputs to distinguish where the cell is outputting due to pooling vs. sequence memory. title: title for the plot
[ "Generate", "a", "figure", "that", "shows", "each", "output", "over", "time", ".", "Time", "goes", "left", "to", "right", "and", "each", "output", "is", "plotted", "on", "a", "different", "line", "allowing", "you", "to", "see", "the", "overlap", "in", "the", "outputs", "when", "they", "turn", "on", "/", "off", "etc", "." ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/fdrutilities.py#L1057-L1091
valid
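Note: a hedged usage sketch; it assumes a Python 2 environment with nupic installed and a working matplotlib backend, and the input arrays here are made up for illustration:

import numpy
from nupic.algorithms.fdrutilities import plotOutputsOverTime

pooling = (numpy.random.rand(100, 32) > 0.85).astype('int32')
sequence = pooling * (numpy.random.rand(100, 32) > 0.5)  # subset, same shape
plotOutputsOverTime(pooling, buVectors=sequence, title='Pooling vs sequence')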
numenta/nupic
src/nupic/algorithms/fdrutilities.py
plotHistogram
def plotHistogram(freqCounts, title='On-Times Histogram', xLabel='On-Time'): """ This is usually used to display a histogram of the on-times encountered in a particular output. The freqCounts is a vector containg the frequency counts of each on-time (starting at an on-time of 0 and going to an on-time = len(freqCounts)-1) The freqCounts are typically generated from the averageOnTimePerTimestep or averageOnTime methods of this module. Parameters: ----------------------------------------------- freqCounts: The frequency counts to plot title: Title of the plot """ import pylab pylab.ion() pylab.figure() pylab.bar(numpy.arange(len(freqCounts)) - 0.5, freqCounts) pylab.title(title) pylab.xlabel(xLabel)
python
def plotHistogram(freqCounts, title='On-Times Histogram', xLabel='On-Time'): """ This is usually used to display a histogram of the on-times encountered in a particular output. The freqCounts is a vector containg the frequency counts of each on-time (starting at an on-time of 0 and going to an on-time = len(freqCounts)-1) The freqCounts are typically generated from the averageOnTimePerTimestep or averageOnTime methods of this module. Parameters: ----------------------------------------------- freqCounts: The frequency counts to plot title: Title of the plot """ import pylab pylab.ion() pylab.figure() pylab.bar(numpy.arange(len(freqCounts)) - 0.5, freqCounts) pylab.title(title) pylab.xlabel(xLabel)
[ "def", "plotHistogram", "(", "freqCounts", ",", "title", "=", "'On-Times Histogram'", ",", "xLabel", "=", "'On-Time'", ")", ":", "import", "pylab", "pylab", ".", "ion", "(", ")", "pylab", ".", "figure", "(", ")", "pylab", ".", "bar", "(", "numpy", ".", "arange", "(", "len", "(", "freqCounts", ")", ")", "-", "0.5", ",", "freqCounts", ")", "pylab", ".", "title", "(", "title", ")", "pylab", ".", "xlabel", "(", "xLabel", ")" ]
This is usually used to display a histogram of the on-times encountered in a particular output. The freqCounts is a vector containing the frequency counts of each on-time (starting at an on-time of 0 and going to an on-time = len(freqCounts)-1). The freqCounts are typically generated from the averageOnTimePerTimestep or averageOnTime methods of this module. Parameters: ----------------------------------------------- freqCounts: The frequency counts to plot title: Title of the plot
[ "This", "is", "usually", "used", "to", "display", "a", "histogram", "of", "the", "on", "-", "times", "encountered", "in", "a", "particular", "output", "." ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/fdrutilities.py#L1095-L1119
valid
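Note: a hedged usage sketch pairing averageOnTime's frequency counts with plotHistogram, under the same environment assumptions as above (Python 2, nupic, matplotlib backend):

import numpy
from nupic.algorithms.fdrutilities import averageOnTime, plotHistogram

vectors = (numpy.random.rand(200, 64) > 0.9).astype('int32')
avgOn, freqCounts = averageOnTime(vectors)   # returns (average, counts)
plotHistogram(freqCounts, title='On-Times Histogram')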
numenta/nupic
src/nupic/algorithms/fdrutilities.py
populationStability
def populationStability(vectors, numSamples=None): """ Returns the stability for the population averaged over multiple time steps Parameters: ----------------------------------------------- vectors: the vectors for which the stability is calculated numSamples the number of time steps where stability is counted At each time step, count the fraction of the active elements which are stable from the previous step Average all the fraction """ # ---------------------------------------------------------------------- # Calculate the stability numVectors = len(vectors) if numSamples is None: numSamples = numVectors-1 countOn = range(numVectors-1) else: countOn = numpy.random.randint(0, numVectors-1, numSamples) sigmap = 0.0 for i in countOn: match = checkMatch(vectors[i], vectors[i+1], sparse=False) # Ignore reset vectors (all 0's) if match[1] != 0: sigmap += float(match[0])/match[1] return sigmap / numSamples
python
def populationStability(vectors, numSamples=None): """ Returns the stability for the population averaged over multiple time steps Parameters: ----------------------------------------------- vectors: the vectors for which the stability is calculated numSamples the number of time steps where stability is counted At each time step, count the fraction of the active elements which are stable from the previous step Average all the fraction """ # ---------------------------------------------------------------------- # Calculate the stability numVectors = len(vectors) if numSamples is None: numSamples = numVectors-1 countOn = range(numVectors-1) else: countOn = numpy.random.randint(0, numVectors-1, numSamples) sigmap = 0.0 for i in countOn: match = checkMatch(vectors[i], vectors[i+1], sparse=False) # Ignore reset vectors (all 0's) if match[1] != 0: sigmap += float(match[0])/match[1] return sigmap / numSamples
[ "def", "populationStability", "(", "vectors", ",", "numSamples", "=", "None", ")", ":", "# ----------------------------------------------------------------------", "# Calculate the stability", "numVectors", "=", "len", "(", "vectors", ")", "if", "numSamples", "is", "None", ":", "numSamples", "=", "numVectors", "-", "1", "countOn", "=", "range", "(", "numVectors", "-", "1", ")", "else", ":", "countOn", "=", "numpy", ".", "random", ".", "randint", "(", "0", ",", "numVectors", "-", "1", ",", "numSamples", ")", "sigmap", "=", "0.0", "for", "i", "in", "countOn", ":", "match", "=", "checkMatch", "(", "vectors", "[", "i", "]", ",", "vectors", "[", "i", "+", "1", "]", ",", "sparse", "=", "False", ")", "# Ignore reset vectors (all 0's)", "if", "match", "[", "1", "]", "!=", "0", ":", "sigmap", "+=", "float", "(", "match", "[", "0", "]", ")", "/", "match", "[", "1", "]", "return", "sigmap", "/", "numSamples" ]
Returns the stability for the population averaged over multiple time steps Parameters: ----------------------------------------------- vectors: the vectors for which the stability is calculated numSamples: the number of time steps where stability is counted At each time step, count the fraction of the active elements which are stable from the previous step. Average all the fractions
[ "Returns", "the", "stability", "for", "the", "population", "averaged", "over", "multiple", "time", "steps" ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/fdrutilities.py#L1123-L1156
valid
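Note: a standalone sketch of the stability measure under one plausible reading of checkMatch (overlap of consecutive activations divided by the current step's active count; checkMatch itself is defined elsewhere in fdrutilities, so this ratio is an assumption):

import numpy

vectors = numpy.array([[1, 1, 0, 0],
                       [1, 0, 1, 0],
                       [1, 0, 1, 1]])
fracs = []
for prev, cur in zip(vectors[:-1], vectors[1:]):
    active = cur.sum()
    if active:  # skip reset (all-zero) steps
        fracs.append(float(numpy.logical_and(prev, cur).sum()) / active)
print(sum(fracs) / len(fracs))  # (1/2 + 2/3) / 2 ~= 0.583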
numenta/nupic
src/nupic/algorithms/fdrutilities.py
percentOutputsStableOverNTimeSteps
def percentOutputsStableOverNTimeSteps(vectors, numSamples=None): """ Returns the percent of the outputs that remain completely stable over N time steps. Parameters: ----------------------------------------------- vectors: the vectors for which the stability is calculated numSamples: the number of time steps where stability is counted For each window of numSamples, count how many outputs are active during the entire window. """ # ---------------------------------------------------------------------- # Calculate the stability totalSamples = len(vectors) windowSize = numSamples # Process each window numWindows = 0 pctStable = 0 for wStart in range(0, totalSamples-windowSize+1): # Count how many elements are active for the entire time data = vectors[wStart:wStart+windowSize] outputSums = data.sum(axis=0) stableOutputs = (outputSums == windowSize).sum() # Accumulated samplePctStable = float(stableOutputs) / data[0].sum() print samplePctStable pctStable += samplePctStable numWindows += 1 # Return percent average over all possible windows return float(pctStable) / numWindows
python
def percentOutputsStableOverNTimeSteps(vectors, numSamples=None): """ Returns the percent of the outputs that remain completely stable over N time steps. Parameters: ----------------------------------------------- vectors: the vectors for which the stability is calculated numSamples: the number of time steps where stability is counted For each window of numSamples, count how many outputs are active during the entire window. """ # ---------------------------------------------------------------------- # Calculate the stability totalSamples = len(vectors) windowSize = numSamples # Process each window numWindows = 0 pctStable = 0 for wStart in range(0, totalSamples-windowSize+1): # Count how many elements are active for the entire time data = vectors[wStart:wStart+windowSize] outputSums = data.sum(axis=0) stableOutputs = (outputSums == windowSize).sum() # Accumulated samplePctStable = float(stableOutputs) / data[0].sum() print samplePctStable pctStable += samplePctStable numWindows += 1 # Return percent average over all possible windows return float(pctStable) / numWindows
[ "def", "percentOutputsStableOverNTimeSteps", "(", "vectors", ",", "numSamples", "=", "None", ")", ":", "# ----------------------------------------------------------------------", "# Calculate the stability", "totalSamples", "=", "len", "(", "vectors", ")", "windowSize", "=", "numSamples", "# Process each window", "numWindows", "=", "0", "pctStable", "=", "0", "for", "wStart", "in", "range", "(", "0", ",", "totalSamples", "-", "windowSize", "+", "1", ")", ":", "# Count how many elements are active for the entire time", "data", "=", "vectors", "[", "wStart", ":", "wStart", "+", "windowSize", "]", "outputSums", "=", "data", ".", "sum", "(", "axis", "=", "0", ")", "stableOutputs", "=", "(", "outputSums", "==", "windowSize", ")", ".", "sum", "(", ")", "# Accumulated", "samplePctStable", "=", "float", "(", "stableOutputs", ")", "/", "data", "[", "0", "]", ".", "sum", "(", ")", "print", "samplePctStable", "pctStable", "+=", "samplePctStable", "numWindows", "+=", "1", "# Return percent average over all possible windows", "return", "float", "(", "pctStable", ")", "/", "numWindows" ]
Returns the percent of the outputs that remain completely stable over N time steps. Parameters: ----------------------------------------------- vectors: the vectors for which the stability is calculated numSamples: the number of time steps where stability is counted For each window of numSamples, count how many outputs are active during the entire window.
[ "Returns", "the", "percent", "of", "the", "outputs", "that", "remain", "completely", "stable", "over", "N", "time", "steps", "." ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/fdrutilities.py#L1160-L1197
valid
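Note: a standalone worked example of the windowed stability computation, mirroring the source: for each window, the fraction of the window's first-step active outputs that stay on throughout:

import numpy

vectors = numpy.array([[1, 1, 0],
                       [1, 0, 0],
                       [1, 1, 1],
                       [1, 1, 0]])
windowSize = 2
pcts = []
for w in range(len(vectors) - windowSize + 1):
    window = vectors[w:w + windowSize]
    stable = (window.sum(axis=0) == windowSize).sum()  # on for every step
    pcts.append(float(stable) / window[0].sum())
print(sum(pcts) / len(pcts))  # (1/2 + 1/1 + 2/3) / 3 ~= 0.722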
numenta/nupic
src/nupic/algorithms/fdrutilities.py
computeSaturationLevels
def computeSaturationLevels(outputs, outputsShape, sparseForm=False): """ Compute the saturation for a continuous level. This breaks the level into multiple regions and computes the saturation level for each region. Parameters: -------------------------------------------- outputs: output of the level. If sparseForm is True, this is a list of the non-zeros. If sparseForm is False, it is the dense representation outputsShape: The shape of the outputs of the level (height, width) retval: (sat, innerSat): sat: list of the saturation levels of each non-empty region of the level (each 0 -> 1.0) innerSat: list of the saturation level of each non-empty region that is not near an edge (each 0 -> 1.0) """ # Get the outputs into a SparseBinaryMatrix if not sparseForm: outputs = outputs.reshape(outputsShape) spOut = SM32(outputs) else: if len(outputs) > 0: assert (outputs.max() < outputsShape[0] * outputsShape[1]) spOut = SM32(1, outputsShape[0] * outputsShape[1]) spOut.setRowFromSparse(0, outputs, [1]*len(outputs)) spOut.reshape(outputsShape[0], outputsShape[1]) # Get the activity in each local region using the nNonZerosPerBox method # This method takes a list of the end row indices and a list of the end # column indices. # We will use regions that are 15x15, which give us about a 1/225 (.4%) resolution # on saturation. regionSize = 15 rows = xrange(regionSize+1, outputsShape[0]+1, regionSize) cols = xrange(regionSize+1, outputsShape[1]+1, regionSize) regionSums = spOut.nNonZerosPerBox(rows, cols) # Get all the nonzeros out - those are our saturation sums (locations, values) = regionSums.tolist() values /= float(regionSize * regionSize) sat = list(values) # Now, to compute which are the inner regions, we will only take the ones that # are surrounded by activity above, below, left and right innerSat = [] locationSet = set(locations) for (location, value) in itertools.izip(locations, values): (row, col) = location if (row-1,col) in locationSet and (row, col-1) in locationSet \ and (row+1, col) in locationSet and (row, col+1) in locationSet: innerSat.append(value) return (sat, innerSat)
python
def computeSaturationLevels(outputs, outputsShape, sparseForm=False): """ Compute the saturation for a continuous level. This breaks the level into multiple regions and computes the saturation level for each region. Parameters: -------------------------------------------- outputs: output of the level. If sparseForm is True, this is a list of the non-zeros. If sparseForm is False, it is the dense representation outputsShape: The shape of the outputs of the level (height, width) retval: (sat, innerSat): sat: list of the saturation levels of each non-empty region of the level (each 0 -> 1.0) innerSat: list of the saturation level of each non-empty region that is not near an edge (each 0 -> 1.0) """ # Get the outputs into a SparseBinaryMatrix if not sparseForm: outputs = outputs.reshape(outputsShape) spOut = SM32(outputs) else: if len(outputs) > 0: assert (outputs.max() < outputsShape[0] * outputsShape[1]) spOut = SM32(1, outputsShape[0] * outputsShape[1]) spOut.setRowFromSparse(0, outputs, [1]*len(outputs)) spOut.reshape(outputsShape[0], outputsShape[1]) # Get the activity in each local region using the nNonZerosPerBox method # This method takes a list of the end row indices and a list of the end # column indices. # We will use regions that are 15x15, which give us about a 1/225 (.4%) resolution # on saturation. regionSize = 15 rows = xrange(regionSize+1, outputsShape[0]+1, regionSize) cols = xrange(regionSize+1, outputsShape[1]+1, regionSize) regionSums = spOut.nNonZerosPerBox(rows, cols) # Get all the nonzeros out - those are our saturation sums (locations, values) = regionSums.tolist() values /= float(regionSize * regionSize) sat = list(values) # Now, to compute which are the inner regions, we will only take the ones that # are surrounded by activity above, below, left and right innerSat = [] locationSet = set(locations) for (location, value) in itertools.izip(locations, values): (row, col) = location if (row-1,col) in locationSet and (row, col-1) in locationSet \ and (row+1, col) in locationSet and (row, col+1) in locationSet: innerSat.append(value) return (sat, innerSat)
[ "def", "computeSaturationLevels", "(", "outputs", ",", "outputsShape", ",", "sparseForm", "=", "False", ")", ":", "# Get the outputs into a SparseBinaryMatrix", "if", "not", "sparseForm", ":", "outputs", "=", "outputs", ".", "reshape", "(", "outputsShape", ")", "spOut", "=", "SM32", "(", "outputs", ")", "else", ":", "if", "len", "(", "outputs", ")", ">", "0", ":", "assert", "(", "outputs", ".", "max", "(", ")", "<", "outputsShape", "[", "0", "]", "*", "outputsShape", "[", "1", "]", ")", "spOut", "=", "SM32", "(", "1", ",", "outputsShape", "[", "0", "]", "*", "outputsShape", "[", "1", "]", ")", "spOut", ".", "setRowFromSparse", "(", "0", ",", "outputs", ",", "[", "1", "]", "*", "len", "(", "outputs", ")", ")", "spOut", ".", "reshape", "(", "outputsShape", "[", "0", "]", ",", "outputsShape", "[", "1", "]", ")", "# Get the activity in each local region using the nNonZerosPerBox method", "# This method takes a list of the end row indices and a list of the end", "# column indices.", "# We will use regions that are 15x15, which give us about a 1/225 (.4%) resolution", "# on saturation.", "regionSize", "=", "15", "rows", "=", "xrange", "(", "regionSize", "+", "1", ",", "outputsShape", "[", "0", "]", "+", "1", ",", "regionSize", ")", "cols", "=", "xrange", "(", "regionSize", "+", "1", ",", "outputsShape", "[", "1", "]", "+", "1", ",", "regionSize", ")", "regionSums", "=", "spOut", ".", "nNonZerosPerBox", "(", "rows", ",", "cols", ")", "# Get all the nonzeros out - those are our saturation sums", "(", "locations", ",", "values", ")", "=", "regionSums", ".", "tolist", "(", ")", "values", "/=", "float", "(", "regionSize", "*", "regionSize", ")", "sat", "=", "list", "(", "values", ")", "# Now, to compute which are the inner regions, we will only take the ones that", "# are surrounded by activity above, below, left and right", "innerSat", "=", "[", "]", "locationSet", "=", "set", "(", "locations", ")", "for", "(", "location", ",", "value", ")", "in", "itertools", ".", "izip", "(", "locations", ",", "values", ")", ":", "(", "row", ",", "col", ")", "=", "location", "if", "(", "row", "-", "1", ",", "col", ")", "in", "locationSet", "and", "(", "row", ",", "col", "-", "1", ")", "in", "locationSet", "and", "(", "row", "+", "1", ",", "col", ")", "in", "locationSet", "and", "(", "row", ",", "col", "+", "1", ")", "in", "locationSet", ":", "innerSat", ".", "append", "(", "value", ")", "return", "(", "sat", ",", "innerSat", ")" ]
Compute the saturation for a continuous level. This breaks the level into multiple regions and computes the saturation level for each region. Parameters: -------------------------------------------- outputs: output of the level. If sparseForm is True, this is a list of the non-zeros. If sparseForm is False, it is the dense representation outputsShape: The shape of the outputs of the level (height, width) retval: (sat, innerSat): sat: list of the saturation levels of each non-empty region of the level (each 0 -> 1.0) innerSat: list of the saturation level of each non-empty region that is not near an edge (each 0 -> 1.0)
[ "Compute", "the", "saturation", "for", "a", "continuous", "level", ".", "This", "breaks", "the", "level", "into", "multiple", "regions", "and", "computes", "the", "saturation", "level", "for", "each", "region", "." ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/fdrutilities.py#L1201-L1257
valid
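A hedged sketch of the dense path of computeSaturationLevels. It assumes the SM32 sparse-matrix binding the function relies on (from nupic.bindings.math) is importable, and the 45x45 output plane below is hypothetical:

import numpy

numpy.random.seed(42)
# Dense binary output plane, roughly 30% active; the function measures
# saturation over fixed 15x15 regions internally.
dense = (numpy.random.rand(45 * 45) < 0.3).astype('float32')
sat, innerSat = computeSaturationLevels(dense, outputsShape=(45, 45))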
numenta/nupic
src/nupic/algorithms/fdrutilities.py
checkMatch
def checkMatch(input, prediction, sparse=True, verbosity=0): """ Compares the actual input with the predicted input and returns results Parameters: ----------------------------------------------- input: The actual input prediction: the predicted input verbosity: If > 0, print debugging messages sparse: If true, they are in sparse form (list of active indices) retval (foundInInput, totalActiveInInput, missingFromInput, totalActiveInPrediction) foundInInput: The number of predicted active elements that were found in the actual input totalActiveInInput: The total number of active elements in the input. missingFromInput: The number of predicted active elements that were not found in the actual input totalActiveInPrediction: The total number of active elements in the prediction """ if sparse: activeElementsInInput = set(input) activeElementsInPrediction = set(prediction) else: activeElementsInInput = set(input.nonzero()[0]) activeElementsInPrediction = set(prediction.nonzero()[0]) totalActiveInPrediction = len(activeElementsInPrediction) totalActiveInInput = len(activeElementsInInput) foundInInput = len(activeElementsInPrediction.intersection(activeElementsInInput)) missingFromInput = len(activeElementsInPrediction.difference(activeElementsInInput)) missingFromPrediction = len(activeElementsInInput.difference(activeElementsInPrediction)) if verbosity >= 1: print "preds. found in input:", foundInInput, "out of", totalActiveInPrediction, print "; preds. missing from input:", missingFromInput, "out of", \ totalActiveInPrediction, print "; unexpected active in input:", missingFromPrediction, "out of", \ totalActiveInInput return (foundInInput, totalActiveInInput, missingFromInput, totalActiveInPrediction)
python
def checkMatch(input, prediction, sparse=True, verbosity=0): """ Compares the actual input with the predicted input and returns results Parameters: ----------------------------------------------- input: The actual input prediction: the predicted input verbosity: If > 0, print debugging messages sparse: If true, they are in sparse form (list of active indices) retval (foundInInput, totalActiveInInput, missingFromInput, totalActiveInPrediction) foundInInput: The number of predicted active elements that were found in the actual input totalActiveInInput: The total number of active elements in the input. missingFromInput: The number of predicted active elements that were not found in the actual input totalActiveInPrediction: The total number of active elements in the prediction """ if sparse: activeElementsInInput = set(input) activeElementsInPrediction = set(prediction) else: activeElementsInInput = set(input.nonzero()[0]) activeElementsInPrediction = set(prediction.nonzero()[0]) totalActiveInPrediction = len(activeElementsInPrediction) totalActiveInInput = len(activeElementsInInput) foundInInput = len(activeElementsInPrediction.intersection(activeElementsInInput)) missingFromInput = len(activeElementsInPrediction.difference(activeElementsInInput)) missingFromPrediction = len(activeElementsInInput.difference(activeElementsInPrediction)) if verbosity >= 1: print "preds. found in input:", foundInInput, "out of", totalActiveInPrediction, print "; preds. missing from input:", missingFromInput, "out of", \ totalActiveInPrediction, print "; unexpected active in input:", missingFromPrediction, "out of", \ totalActiveInInput return (foundInInput, totalActiveInInput, missingFromInput, totalActiveInPrediction)
[ "def", "checkMatch", "(", "input", ",", "prediction", ",", "sparse", "=", "True", ",", "verbosity", "=", "0", ")", ":", "if", "sparse", ":", "activeElementsInInput", "=", "set", "(", "input", ")", "activeElementsInPrediction", "=", "set", "(", "prediction", ")", "else", ":", "activeElementsInInput", "=", "set", "(", "input", ".", "nonzero", "(", ")", "[", "0", "]", ")", "activeElementsInPrediction", "=", "set", "(", "prediction", ".", "nonzero", "(", ")", "[", "0", "]", ")", "totalActiveInPrediction", "=", "len", "(", "activeElementsInPrediction", ")", "totalActiveInInput", "=", "len", "(", "activeElementsInInput", ")", "foundInInput", "=", "len", "(", "activeElementsInPrediction", ".", "intersection", "(", "activeElementsInInput", ")", ")", "missingFromInput", "=", "len", "(", "activeElementsInPrediction", ".", "difference", "(", "activeElementsInInput", ")", ")", "missingFromPrediction", "=", "len", "(", "activeElementsInInput", ".", "difference", "(", "activeElementsInPrediction", ")", ")", "if", "verbosity", ">=", "1", ":", "print", "\"preds. found in input:\"", ",", "foundInInput", ",", "\"out of\"", ",", "totalActiveInPrediction", ",", "print", "\"; preds. missing from input:\"", ",", "missingFromInput", ",", "\"out of\"", ",", "totalActiveInPrediction", ",", "print", "\"; unexpected active in input:\"", ",", "missingFromPrediction", ",", "\"out of\"", ",", "totalActiveInInput", "return", "(", "foundInInput", ",", "totalActiveInInput", ",", "missingFromInput", ",", "totalActiveInPrediction", ")" ]
Compares the actual input with the predicted input and returns results Parameters: ----------------------------------------------- input: The actual input prediction: the predicted input verbosity: If > 0, print debugging messages sparse: If true, they are in sparse form (list of active indices) retval (foundInInput, totalActiveInInput, missingFromInput, totalActiveInPrediction) foundInInput: The number of predicted active elements that were found in the actual input totalActiveInInput: The total number of active elements in the input. missingFromInput: The number of predicted active elements that were not found in the actual input totalActiveInPrediction: The total number of active elements in the prediction
[ "Compares", "the", "actual", "input", "with", "the", "predicted", "input", "and", "returns", "results" ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/fdrutilities.py#L1261-L1307
valid
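Since checkMatch in sparse mode only needs lists of active indices, a self-contained call with hypothetical data is enough to show the return tuple:

actual = [0, 2, 5, 7]
predicted = [0, 2, 6]
# -> foundInInput=2, totalActiveInInput=4, missingFromInput=1,
#    totalActiveInPrediction=3; verbosity=1 also prints the breakdown.
result = checkMatch(actual, predicted, sparse=True, verbosity=1)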
numenta/nupic
src/nupic/algorithms/fdrutilities.py
predictionExtent
def predictionExtent(inputs, resets, outputs, minOverlapPct=100.0): """ Computes the predictive ability of a temporal memory (TM). This routine returns a value which is the average number of time steps of prediction provided by the TM. It accepts as input the inputs, outputs, and resets provided to the TM as well as a 'minOverlapPct' used to evalulate whether or not a prediction is a good enough match to the actual input. The 'outputs' are the pooling outputs of the TM. This routine treats each output as a "manifold" that includes the active columns that should be present in the next N inputs. It then looks at each successive input and sees if it's active columns are within the manifold. For each output sample, it computes how many time steps it can go forward on the input before the input overlap with the manifold is less then 'minOverlapPct'. It returns the average number of time steps calculated for each output. Parameters: ----------------------------------------------- inputs: The inputs to the TM. Row 0 contains the inputs from time step 0, row 1 from time step 1, etc. resets: The reset input to the TM. Element 0 contains the reset from time step 0, element 1 from time step 1, etc. outputs: The pooling outputs from the TM. Row 0 contains the outputs from time step 0, row 1 from time step 1, etc. minOverlapPct: How much each input's columns must overlap with the pooling output's columns to be considered a valid prediction. retval: (Average number of time steps of prediction over all output samples, Average number of time steps of prediction when we aren't cut short by the end of the sequence, List containing frequency counts of each encountered prediction time) """ # List of how many times we encountered each prediction amount. Element 0 # is how many times we successfully predicted 0 steps in advance, element 1 # is how many times we predicted 1 step in advance, etc. predCounts = None # Total steps of prediction over all samples predTotal = 0 # Total number of samples nSamples = len(outputs) # Total steps of prediction for samples at the start of the sequence, or # for samples whose prediction runs aren't cut short by the end of the # sequence. predTotalNotLimited = 0 nSamplesNotLimited = 0 # Compute how many cells/column we have nCols = len(inputs[0]) nCellsPerCol = len(outputs[0]) // nCols # Evalulate prediction for each output sample for idx in xrange(nSamples): # What are the active columns for this output? activeCols = outputs[idx].reshape(nCols, nCellsPerCol).max(axis=1) # How many steps of prediction do we have? steps = 0 while (idx+steps+1 < nSamples) and (resets[idx+steps+1] == 0): overlap = numpy.logical_and(inputs[idx+steps+1], activeCols) overlapPct = 100.0 * float(overlap.sum()) / inputs[idx+steps+1].sum() if overlapPct >= minOverlapPct: steps += 1 else: break # print "idx:", idx, "steps:", steps # Accumulate into our total predCounts = _accumulateFrequencyCounts([steps], predCounts) predTotal += steps # If this sample was not cut short by the end of the sequence, include # it into the "NotLimited" runs if resets[idx] or \ ((idx+steps+1 < nSamples) and (not resets[idx+steps+1])): predTotalNotLimited += steps nSamplesNotLimited += 1 # Return results return (float(predTotal) / nSamples, float(predTotalNotLimited) / nSamplesNotLimited, predCounts)
python
def predictionExtent(inputs, resets, outputs, minOverlapPct=100.0): """ Computes the predictive ability of a temporal memory (TM). This routine returns a value which is the average number of time steps of prediction provided by the TM. It accepts as input the inputs, outputs, and resets provided to the TM as well as a 'minOverlapPct' used to evalulate whether or not a prediction is a good enough match to the actual input. The 'outputs' are the pooling outputs of the TM. This routine treats each output as a "manifold" that includes the active columns that should be present in the next N inputs. It then looks at each successive input and sees if it's active columns are within the manifold. For each output sample, it computes how many time steps it can go forward on the input before the input overlap with the manifold is less then 'minOverlapPct'. It returns the average number of time steps calculated for each output. Parameters: ----------------------------------------------- inputs: The inputs to the TM. Row 0 contains the inputs from time step 0, row 1 from time step 1, etc. resets: The reset input to the TM. Element 0 contains the reset from time step 0, element 1 from time step 1, etc. outputs: The pooling outputs from the TM. Row 0 contains the outputs from time step 0, row 1 from time step 1, etc. minOverlapPct: How much each input's columns must overlap with the pooling output's columns to be considered a valid prediction. retval: (Average number of time steps of prediction over all output samples, Average number of time steps of prediction when we aren't cut short by the end of the sequence, List containing frequency counts of each encountered prediction time) """ # List of how many times we encountered each prediction amount. Element 0 # is how many times we successfully predicted 0 steps in advance, element 1 # is how many times we predicted 1 step in advance, etc. predCounts = None # Total steps of prediction over all samples predTotal = 0 # Total number of samples nSamples = len(outputs) # Total steps of prediction for samples at the start of the sequence, or # for samples whose prediction runs aren't cut short by the end of the # sequence. predTotalNotLimited = 0 nSamplesNotLimited = 0 # Compute how many cells/column we have nCols = len(inputs[0]) nCellsPerCol = len(outputs[0]) // nCols # Evalulate prediction for each output sample for idx in xrange(nSamples): # What are the active columns for this output? activeCols = outputs[idx].reshape(nCols, nCellsPerCol).max(axis=1) # How many steps of prediction do we have? steps = 0 while (idx+steps+1 < nSamples) and (resets[idx+steps+1] == 0): overlap = numpy.logical_and(inputs[idx+steps+1], activeCols) overlapPct = 100.0 * float(overlap.sum()) / inputs[idx+steps+1].sum() if overlapPct >= minOverlapPct: steps += 1 else: break # print "idx:", idx, "steps:", steps # Accumulate into our total predCounts = _accumulateFrequencyCounts([steps], predCounts) predTotal += steps # If this sample was not cut short by the end of the sequence, include # it into the "NotLimited" runs if resets[idx] or \ ((idx+steps+1 < nSamples) and (not resets[idx+steps+1])): predTotalNotLimited += steps nSamplesNotLimited += 1 # Return results return (float(predTotal) / nSamples, float(predTotalNotLimited) / nSamplesNotLimited, predCounts)
[ "def", "predictionExtent", "(", "inputs", ",", "resets", ",", "outputs", ",", "minOverlapPct", "=", "100.0", ")", ":", "# List of how many times we encountered each prediction amount. Element 0", "# is how many times we successfully predicted 0 steps in advance, element 1", "# is how many times we predicted 1 step in advance, etc.", "predCounts", "=", "None", "# Total steps of prediction over all samples", "predTotal", "=", "0", "# Total number of samples", "nSamples", "=", "len", "(", "outputs", ")", "# Total steps of prediction for samples at the start of the sequence, or", "# for samples whose prediction runs aren't cut short by the end of the", "# sequence.", "predTotalNotLimited", "=", "0", "nSamplesNotLimited", "=", "0", "# Compute how many cells/column we have", "nCols", "=", "len", "(", "inputs", "[", "0", "]", ")", "nCellsPerCol", "=", "len", "(", "outputs", "[", "0", "]", ")", "//", "nCols", "# Evalulate prediction for each output sample", "for", "idx", "in", "xrange", "(", "nSamples", ")", ":", "# What are the active columns for this output?", "activeCols", "=", "outputs", "[", "idx", "]", ".", "reshape", "(", "nCols", ",", "nCellsPerCol", ")", ".", "max", "(", "axis", "=", "1", ")", "# How many steps of prediction do we have?", "steps", "=", "0", "while", "(", "idx", "+", "steps", "+", "1", "<", "nSamples", ")", "and", "(", "resets", "[", "idx", "+", "steps", "+", "1", "]", "==", "0", ")", ":", "overlap", "=", "numpy", ".", "logical_and", "(", "inputs", "[", "idx", "+", "steps", "+", "1", "]", ",", "activeCols", ")", "overlapPct", "=", "100.0", "*", "float", "(", "overlap", ".", "sum", "(", ")", ")", "/", "inputs", "[", "idx", "+", "steps", "+", "1", "]", ".", "sum", "(", ")", "if", "overlapPct", ">=", "minOverlapPct", ":", "steps", "+=", "1", "else", ":", "break", "# print \"idx:\", idx, \"steps:\", steps", "# Accumulate into our total", "predCounts", "=", "_accumulateFrequencyCounts", "(", "[", "steps", "]", ",", "predCounts", ")", "predTotal", "+=", "steps", "# If this sample was not cut short by the end of the sequence, include", "# it into the \"NotLimited\" runs", "if", "resets", "[", "idx", "]", "or", "(", "(", "idx", "+", "steps", "+", "1", "<", "nSamples", ")", "and", "(", "not", "resets", "[", "idx", "+", "steps", "+", "1", "]", ")", ")", ":", "predTotalNotLimited", "+=", "steps", "nSamplesNotLimited", "+=", "1", "# Return results", "return", "(", "float", "(", "predTotal", ")", "/", "nSamples", ",", "float", "(", "predTotalNotLimited", ")", "/", "nSamplesNotLimited", ",", "predCounts", ")" ]
Computes the predictive ability of a temporal memory (TM). This routine returns a value which is the average number of time steps of prediction provided by the TM. It accepts as input the inputs, outputs, and resets provided to the TM as well as a 'minOverlapPct' used to evaluate whether or not a prediction is a good enough match to the actual input. The 'outputs' are the pooling outputs of the TM. This routine treats each output as a "manifold" that includes the active columns that should be present in the next N inputs. It then looks at each successive input and sees if its active columns are within the manifold. For each output sample, it computes how many time steps it can go forward on the input before the input overlap with the manifold is less than 'minOverlapPct'. It returns the average number of time steps calculated for each output. Parameters: ----------------------------------------------- inputs: The inputs to the TM. Row 0 contains the inputs from time step 0, row 1 from time step 1, etc. resets: The reset input to the TM. Element 0 contains the reset from time step 0, element 1 from time step 1, etc. outputs: The pooling outputs from the TM. Row 0 contains the outputs from time step 0, row 1 from time step 1, etc. minOverlapPct: How much each input's columns must overlap with the pooling output's columns to be considered a valid prediction. retval: (Average number of time steps of prediction over all output samples, Average number of time steps of prediction when we aren't cut short by the end of the sequence, List containing frequency counts of each encountered prediction time)
[ "Computes", "the", "predictive", "ability", "of", "a", "temporal", "memory", "(", "TM", ")", ".", "This", "routine", "returns", "a", "value", "which", "is", "the", "average", "number", "of", "time", "steps", "of", "prediction", "provided", "by", "the", "TM", ".", "It", "accepts", "as", "input", "the", "inputs", "outputs", "and", "resets", "provided", "to", "the", "TM", "as", "well", "as", "a", "minOverlapPct", "used", "to", "evaluate", "whether", "or", "not", "a", "prediction", "is", "a", "good", "enough", "match", "to", "the", "actual", "input", "." ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/fdrutilities.py#L1311-L1399
valid
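A small hedged sketch of predictionExtent with hypothetical data: 4 columns with 1 cell each over 3 time steps, where the reset on the first sample marks a sequence start. Worked by hand, sample 0 predicts exactly 1 step ahead (its manifold fully covers input 1 but only half of input 2 at minOverlapPct=100):

import numpy

inputs = numpy.array([[1, 0, 1, 0],
                      [0, 1, 1, 0],
                      [1, 0, 0, 1]])
resets = numpy.array([1, 0, 0])
outputs = numpy.array([[1, 1, 1, 0],
                       [1, 1, 1, 1],
                       [1, 0, 0, 1]])
avgAll, avgNotLimited, predCounts = predictionExtent(inputs, resets, outputs,
                                                     minOverlapPct=100.0)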
numenta/nupic
src/nupic/algorithms/fdrutilities.py
getCentreAndSpreadOffsets
def getCentreAndSpreadOffsets(spaceShape, spreadShape, stepSize=1): """ Generates centre offsets and spread offsets for block-mode based training regimes - star, cross, block. Parameters: ----------------------------------------------- spaceShape: The (height, width) of the 2-D space to explore. This sets the number of center-points. spreadShape: The shape (height, width) of the area around each center-point to explore. stepSize: The step size. How big each step is, in pixels. This controls *both* the spacing of the center-points within the block and the points we explore around each center-point retval: (centreOffsets, spreadOffsets) """ from nupic.math.cross import cross # ===================================================================== # Init data structures # What is the range on the X and Y offsets of the center points? shape = spaceShape # If the shape is (1,1), special case of just 1 center point if shape[0] == 1 and shape[1] == 1: centerOffsets = [(0,0)] else: xMin = -1 * (shape[1] // 2) xMax = xMin + shape[1] - 1 xPositions = range(stepSize * xMin, stepSize * xMax + 1, stepSize) yMin = -1 * (shape[0] // 2) yMax = yMin + shape[0] - 1 yPositions = range(stepSize * yMin, stepSize * yMax + 1, stepSize) centerOffsets = list(cross(yPositions, xPositions)) numCenterOffsets = len(centerOffsets) print "centerOffsets:", centerOffsets # What is the range on the X and Y offsets of the spread points? shape = spreadShape # If the shape is (1,1), special case of no spreading around each center # point if shape[0] == 1 and shape[1] == 1: spreadOffsets = [(0,0)] else: xMin = -1 * (shape[1] // 2) xMax = xMin + shape[1] - 1 xPositions = range(stepSize * xMin, stepSize * xMax + 1, stepSize) yMin = -1 * (shape[0] // 2) yMax = yMin + shape[0] - 1 yPositions = range(stepSize * yMin, stepSize * yMax + 1, stepSize) spreadOffsets = list(cross(yPositions, xPositions)) # Put the (0,0) entry first spreadOffsets.remove((0,0)) spreadOffsets.insert(0, (0,0)) numSpreadOffsets = len(spreadOffsets) print "spreadOffsets:", spreadOffsets return centerOffsets, spreadOffsets
python
def getCentreAndSpreadOffsets(spaceShape, spreadShape, stepSize=1): """ Generates centre offsets and spread offsets for block-mode based training regimes - star, cross, block. Parameters: ----------------------------------------------- spaceShape: The (height, width) of the 2-D space to explore. This sets the number of center-points. spreadShape: The shape (height, width) of the area around each center-point to explore. stepSize: The step size. How big each step is, in pixels. This controls *both* the spacing of the center-points within the block and the points we explore around each center-point retval: (centreOffsets, spreadOffsets) """ from nupic.math.cross import cross # ===================================================================== # Init data structures # What is the range on the X and Y offsets of the center points? shape = spaceShape # If the shape is (1,1), special case of just 1 center point if shape[0] == 1 and shape[1] == 1: centerOffsets = [(0,0)] else: xMin = -1 * (shape[1] // 2) xMax = xMin + shape[1] - 1 xPositions = range(stepSize * xMin, stepSize * xMax + 1, stepSize) yMin = -1 * (shape[0] // 2) yMax = yMin + shape[0] - 1 yPositions = range(stepSize * yMin, stepSize * yMax + 1, stepSize) centerOffsets = list(cross(yPositions, xPositions)) numCenterOffsets = len(centerOffsets) print "centerOffsets:", centerOffsets # What is the range on the X and Y offsets of the spread points? shape = spreadShape # If the shape is (1,1), special case of no spreading around each center # point if shape[0] == 1 and shape[1] == 1: spreadOffsets = [(0,0)] else: xMin = -1 * (shape[1] // 2) xMax = xMin + shape[1] - 1 xPositions = range(stepSize * xMin, stepSize * xMax + 1, stepSize) yMin = -1 * (shape[0] // 2) yMax = yMin + shape[0] - 1 yPositions = range(stepSize * yMin, stepSize * yMax + 1, stepSize) spreadOffsets = list(cross(yPositions, xPositions)) # Put the (0,0) entry first spreadOffsets.remove((0,0)) spreadOffsets.insert(0, (0,0)) numSpreadOffsets = len(spreadOffsets) print "spreadOffsets:", spreadOffsets return centerOffsets, spreadOffsets
[ "def", "getCentreAndSpreadOffsets", "(", "spaceShape", ",", "spreadShape", ",", "stepSize", "=", "1", ")", ":", "from", "nupic", ".", "math", ".", "cross", "import", "cross", "# =====================================================================", "# Init data structures", "# What is the range on the X and Y offsets of the center points?", "shape", "=", "spaceShape", "# If the shape is (1,1), special case of just 1 center point", "if", "shape", "[", "0", "]", "==", "1", "and", "shape", "[", "1", "]", "==", "1", ":", "centerOffsets", "=", "[", "(", "0", ",", "0", ")", "]", "else", ":", "xMin", "=", "-", "1", "*", "(", "shape", "[", "1", "]", "//", "2", ")", "xMax", "=", "xMin", "+", "shape", "[", "1", "]", "-", "1", "xPositions", "=", "range", "(", "stepSize", "*", "xMin", ",", "stepSize", "*", "xMax", "+", "1", ",", "stepSize", ")", "yMin", "=", "-", "1", "*", "(", "shape", "[", "0", "]", "//", "2", ")", "yMax", "=", "yMin", "+", "shape", "[", "0", "]", "-", "1", "yPositions", "=", "range", "(", "stepSize", "*", "yMin", ",", "stepSize", "*", "yMax", "+", "1", ",", "stepSize", ")", "centerOffsets", "=", "list", "(", "cross", "(", "yPositions", ",", "xPositions", ")", ")", "numCenterOffsets", "=", "len", "(", "centerOffsets", ")", "print", "\"centerOffsets:\"", ",", "centerOffsets", "# What is the range on the X and Y offsets of the spread points?", "shape", "=", "spreadShape", "# If the shape is (1,1), special case of no spreading around each center", "# point", "if", "shape", "[", "0", "]", "==", "1", "and", "shape", "[", "1", "]", "==", "1", ":", "spreadOffsets", "=", "[", "(", "0", ",", "0", ")", "]", "else", ":", "xMin", "=", "-", "1", "*", "(", "shape", "[", "1", "]", "//", "2", ")", "xMax", "=", "xMin", "+", "shape", "[", "1", "]", "-", "1", "xPositions", "=", "range", "(", "stepSize", "*", "xMin", ",", "stepSize", "*", "xMax", "+", "1", ",", "stepSize", ")", "yMin", "=", "-", "1", "*", "(", "shape", "[", "0", "]", "//", "2", ")", "yMax", "=", "yMin", "+", "shape", "[", "0", "]", "-", "1", "yPositions", "=", "range", "(", "stepSize", "*", "yMin", ",", "stepSize", "*", "yMax", "+", "1", ",", "stepSize", ")", "spreadOffsets", "=", "list", "(", "cross", "(", "yPositions", ",", "xPositions", ")", ")", "# Put the (0,0) entry first", "spreadOffsets", ".", "remove", "(", "(", "0", ",", "0", ")", ")", "spreadOffsets", ".", "insert", "(", "0", ",", "(", "0", ",", "0", ")", ")", "numSpreadOffsets", "=", "len", "(", "spreadOffsets", ")", "print", "\"spreadOffsets:\"", ",", "spreadOffsets", "return", "centerOffsets", ",", "spreadOffsets" ]
Generates centre offsets and spread offsets for block-mode based training regimes - star, cross, block. Parameters: ----------------------------------------------- spaceShape: The (height, width) of the 2-D space to explore. This sets the number of center-points. spreadShape: The shape (height, width) of the area around each center-point to explore. stepSize: The step size. How big each step is, in pixels. This controls *both* the spacing of the center-points within the block and the points we explore around each center-point retval: (centreOffsets, spreadOffsets)
[ "Generates", "centre", "offsets", "and", "spread", "offsets", "for", "block", "-", "mode", "based", "training", "regimes", "-", "star", "cross", "block", "." ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/fdrutilities.py#L1403-L1469
valid
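A usage sketch; getCentreAndSpreadOffsets imports nupic.math.cross at call time and prints both offset lists. The shapes below are hypothetical: a 3x3 grid of centre points, each with a 3x3 spread, stepped 2 pixels apart:

# centerOffsets covers y,x in {-2, 0, 2}; spreadOffsets is the same grid
# with the (0,0) entry moved to the front.
centerOffsets, spreadOffsets = getCentreAndSpreadOffsets(
    spaceShape=(3, 3), spreadShape=(3, 3), stepSize=2)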
numenta/nupic
src/nupic/algorithms/fdrutilities.py
makeCloneMap
def makeCloneMap(columnsShape, outputCloningWidth, outputCloningHeight=-1): """Make a two-dimensional clone map mapping columns to clone master. This makes a map that is (numColumnsHigh, numColumnsWide) big that can be used to figure out which clone master to use for each column. Here are a few sample calls >>> makeCloneMap(columnsShape=(10, 6), outputCloningWidth=4) (array([[ 0, 1, 2, 3, 0, 1], [ 4, 5, 6, 7, 4, 5], [ 8, 9, 10, 11, 8, 9], [12, 13, 14, 15, 12, 13], [ 0, 1, 2, 3, 0, 1], [ 4, 5, 6, 7, 4, 5], [ 8, 9, 10, 11, 8, 9], [12, 13, 14, 15, 12, 13], [ 0, 1, 2, 3, 0, 1], [ 4, 5, 6, 7, 4, 5]], dtype=uint32), 16) >>> makeCloneMap(columnsShape=(7, 8), outputCloningWidth=3) (array([[0, 1, 2, 0, 1, 2, 0, 1], [3, 4, 5, 3, 4, 5, 3, 4], [6, 7, 8, 6, 7, 8, 6, 7], [0, 1, 2, 0, 1, 2, 0, 1], [3, 4, 5, 3, 4, 5, 3, 4], [6, 7, 8, 6, 7, 8, 6, 7], [0, 1, 2, 0, 1, 2, 0, 1]], dtype=uint32), 9) >>> makeCloneMap(columnsShape=(7, 11), outputCloningWidth=5) (array([[ 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0], [ 5, 6, 7, 8, 9, 5, 6, 7, 8, 9, 5], [10, 11, 12, 13, 14, 10, 11, 12, 13, 14, 10], [15, 16, 17, 18, 19, 15, 16, 17, 18, 19, 15], [20, 21, 22, 23, 24, 20, 21, 22, 23, 24, 20], [ 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0], [ 5, 6, 7, 8, 9, 5, 6, 7, 8, 9, 5]], dtype=uint32), 25) >>> makeCloneMap(columnsShape=(7, 8), outputCloningWidth=3, outputCloningHeight=4) (array([[ 0, 1, 2, 0, 1, 2, 0, 1], [ 3, 4, 5, 3, 4, 5, 3, 4], [ 6, 7, 8, 6, 7, 8, 6, 7], [ 9, 10, 11, 9, 10, 11, 9, 10], [ 0, 1, 2, 0, 1, 2, 0, 1], [ 3, 4, 5, 3, 4, 5, 3, 4], [ 6, 7, 8, 6, 7, 8, 6, 7]], dtype=uint32), 12) The basic idea with this map is that, if you imagine things stretching off to infinity, every instance of a given clone master is seeing the exact same thing in all directions. That includes: - All neighbors must be the same - The "meaning" of the input to each of the instances of the same clone master must be the same. If input is pixels and we have translation invariance--this is easy. At higher levels where input is the output of lower levels, this can be much harder. - The "meaning" of the inputs to neighbors of a clone master must be the same for each instance of the same clone master. The best way to think of this might be in terms of 'inputCloningWidth' and 'outputCloningWidth'. - The 'outputCloningWidth' is the number of columns you'd have to move horizontally (or vertically) before you get back to the same clone that you started with. MUST BE INTEGRAL! - The 'inputCloningWidth' is the 'outputCloningWidth' of the node below us. If we're getting input from a sensor where every element just represents a shift of every other element, this is 1. At a conceptual level, it means that if two different inputs are shown to the node and the only difference between them is that one is shifted horizontally (or vertically) by this many pixels, it means we are looking at the exact same real world input, but shifted by some number of pixels (doesn't have to be 1). MUST BE INTEGRAL! At level 1, I think you could have this: * inputCloningWidth = 1 * sqrt(coincToInputRatio^2) = 2.5 * outputCloningWidth = 5 ...in this case, you'd end up with 25 masters. Let's think about this case: input: - - - 0 1 2 3 4 5 - - - - - columns: 0 1 2 3 4 0 1 2 3 4 0 1 2 3 4 0 1 2 3 4 ...in other words, input 0 is fed to both column 0 and column 1. Input 1 is fed to columns 2, 3, and 4, etc. Hopefully, you can see that you'll get the exact same output (except shifted) with: input: - - - - - 0 1 2 3 4 5 - - - columns: 0 1 2 3 4 0 1 2 3 4 0 1 2 3 4 0 1 2 3 4 ...in other words, we've shifted the input 2 spaces and the output shifted 5 spaces. *** The outputCloningWidth MUST ALWAYS be an integral multiple of the *** *** inputCloningWidth in order for all of our rules to apply. *** *** NOTE: inputCloningWidth isn't passed here, so it's the caller's *** *** responsibility to ensure that this is true. *** *** The outputCloningWidth MUST ALWAYS be an integral multiple of *** *** sqrt(coincToInputRatio^2), too. *** @param columnsShape The shape (height, width) of the columns. @param outputCloningWidth See docstring above. @param outputCloningHeight If non-negative, can be used to make rectangular (instead of square) cloning fields. @return cloneMap An array (numColumnsHigh, numColumnsWide) that contains the clone index to use for each column. @return numDistinctClones The number of distinct clones in the map. This is just outputCloningWidth*outputCloningHeight. """ if outputCloningHeight < 0: outputCloningHeight = outputCloningWidth columnsHeight, columnsWidth = columnsShape numDistinctMasters = outputCloningWidth * outputCloningHeight a = numpy.empty((columnsHeight, columnsWidth), 'uint32') for row in xrange(columnsHeight): for col in xrange(columnsWidth): a[row, col] = (col % outputCloningWidth) + \ (row % outputCloningHeight) * outputCloningWidth return a, numDistinctMasters
python
def makeCloneMap(columnsShape, outputCloningWidth, outputCloningHeight=-1): """Make a two-dimensional clone map mapping columns to clone master. This makes a map that is (numColumnsHigh, numColumnsWide) big that can be used to figure out which clone master to use for each column. Here are a few sample calls >>> makeCloneMap(columnsShape=(10, 6), outputCloningWidth=4) (array([[ 0, 1, 2, 3, 0, 1], [ 4, 5, 6, 7, 4, 5], [ 8, 9, 10, 11, 8, 9], [12, 13, 14, 15, 12, 13], [ 0, 1, 2, 3, 0, 1], [ 4, 5, 6, 7, 4, 5], [ 8, 9, 10, 11, 8, 9], [12, 13, 14, 15, 12, 13], [ 0, 1, 2, 3, 0, 1], [ 4, 5, 6, 7, 4, 5]], dtype=uint32), 16) >>> makeCloneMap(columnsShape=(7, 8), outputCloningWidth=3) (array([[0, 1, 2, 0, 1, 2, 0, 1], [3, 4, 5, 3, 4, 5, 3, 4], [6, 7, 8, 6, 7, 8, 6, 7], [0, 1, 2, 0, 1, 2, 0, 1], [3, 4, 5, 3, 4, 5, 3, 4], [6, 7, 8, 6, 7, 8, 6, 7], [0, 1, 2, 0, 1, 2, 0, 1]], dtype=uint32), 9) >>> makeCloneMap(columnsShape=(7, 11), outputCloningWidth=5) (array([[ 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0], [ 5, 6, 7, 8, 9, 5, 6, 7, 8, 9, 5], [10, 11, 12, 13, 14, 10, 11, 12, 13, 14, 10], [15, 16, 17, 18, 19, 15, 16, 17, 18, 19, 15], [20, 21, 22, 23, 24, 20, 21, 22, 23, 24, 20], [ 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0], [ 5, 6, 7, 8, 9, 5, 6, 7, 8, 9, 5]], dtype=uint32), 25) >>> makeCloneMap(columnsShape=(7, 8), outputCloningWidth=3, outputCloningHeight=4) (array([[ 0, 1, 2, 0, 1, 2, 0, 1], [ 3, 4, 5, 3, 4, 5, 3, 4], [ 6, 7, 8, 6, 7, 8, 6, 7], [ 9, 10, 11, 9, 10, 11, 9, 10], [ 0, 1, 2, 0, 1, 2, 0, 1], [ 3, 4, 5, 3, 4, 5, 3, 4], [ 6, 7, 8, 6, 7, 8, 6, 7]], dtype=uint32), 12) The basic idea with this map is that, if you imagine things stretching off to infinity, every instance of a given clone master is seeing the exact same thing in all directions. That includes: - All neighbors must be the same - The "meaning" of the input to each of the instances of the same clone master must be the same. If input is pixels and we have translation invariance--this is easy. At higher levels where input is the output of lower levels, this can be much harder. - The "meaning" of the inputs to neighbors of a clone master must be the same for each instance of the same clone master. The best way to think of this might be in terms of 'inputCloningWidth' and 'outputCloningWidth'. - The 'outputCloningWidth' is the number of columns you'd have to move horizontally (or vertically) before you get back to the same clone that you started with. MUST BE INTEGRAL! - The 'inputCloningWidth' is the 'outputCloningWidth' of the node below us. If we're getting input from a sensor where every element just represents a shift of every other element, this is 1. At a conceptual level, it means that if two different inputs are shown to the node and the only difference between them is that one is shifted horizontally (or vertically) by this many pixels, it means we are looking at the exact same real world input, but shifted by some number of pixels (doesn't have to be 1). MUST BE INTEGRAL! At level 1, I think you could have this: * inputCloningWidth = 1 * sqrt(coincToInputRatio^2) = 2.5 * outputCloningWidth = 5 ...in this case, you'd end up with 25 masters. Let's think about this case: input: - - - 0 1 2 3 4 5 - - - - - columns: 0 1 2 3 4 0 1 2 3 4 0 1 2 3 4 0 1 2 3 4 ...in other words, input 0 is fed to both column 0 and column 1. Input 1 is fed to columns 2, 3, and 4, etc. Hopefully, you can see that you'll get the exact same output (except shifted) with: input: - - - - - 0 1 2 3 4 5 - - - columns: 0 1 2 3 4 0 1 2 3 4 0 1 2 3 4 0 1 2 3 4 ...in other words, we've shifted the input 2 spaces and the output shifted 5 spaces. *** The outputCloningWidth MUST ALWAYS be an integral multiple of the *** *** inputCloningWidth in order for all of our rules to apply. *** *** NOTE: inputCloningWidth isn't passed here, so it's the caller's *** *** responsibility to ensure that this is true. *** *** The outputCloningWidth MUST ALWAYS be an integral multiple of *** *** sqrt(coincToInputRatio^2), too. *** @param columnsShape The shape (height, width) of the columns. @param outputCloningWidth See docstring above. @param outputCloningHeight If non-negative, can be used to make rectangular (instead of square) cloning fields. @return cloneMap An array (numColumnsHigh, numColumnsWide) that contains the clone index to use for each column. @return numDistinctClones The number of distinct clones in the map. This is just outputCloningWidth*outputCloningHeight. """ if outputCloningHeight < 0: outputCloningHeight = outputCloningWidth columnsHeight, columnsWidth = columnsShape numDistinctMasters = outputCloningWidth * outputCloningHeight a = numpy.empty((columnsHeight, columnsWidth), 'uint32') for row in xrange(columnsHeight): for col in xrange(columnsWidth): a[row, col] = (col % outputCloningWidth) + \ (row % outputCloningHeight) * outputCloningWidth return a, numDistinctMasters
[ "def", "makeCloneMap", "(", "columnsShape", ",", "outputCloningWidth", ",", "outputCloningHeight", "=", "-", "1", ")", ":", "if", "outputCloningHeight", "<", "0", ":", "outputCloningHeight", "=", "outputCloningWidth", "columnsHeight", ",", "columnsWidth", "=", "columnsShape", "numDistinctMasters", "=", "outputCloningWidth", "*", "outputCloningHeight", "a", "=", "numpy", ".", "empty", "(", "(", "columnsHeight", ",", "columnsWidth", ")", ",", "'uint32'", ")", "for", "row", "in", "xrange", "(", "columnsHeight", ")", ":", "for", "col", "in", "xrange", "(", "columnsWidth", ")", ":", "a", "[", "row", ",", "col", "]", "=", "(", "col", "%", "outputCloningWidth", ")", "+", "(", "row", "%", "outputCloningHeight", ")", "*", "outputCloningWidth", "return", "a", ",", "numDistinctMasters" ]
Make a two-dimensional clone map mapping columns to clone master. This makes a map that is (numColumnsHigh, numColumnsWide) big that can be used to figure out which clone master to use for each column. Here are a few sample calls >>> makeCloneMap(columnsShape=(10, 6), outputCloningWidth=4) (array([[ 0, 1, 2, 3, 0, 1], [ 4, 5, 6, 7, 4, 5], [ 8, 9, 10, 11, 8, 9], [12, 13, 14, 15, 12, 13], [ 0, 1, 2, 3, 0, 1], [ 4, 5, 6, 7, 4, 5], [ 8, 9, 10, 11, 8, 9], [12, 13, 14, 15, 12, 13], [ 0, 1, 2, 3, 0, 1], [ 4, 5, 6, 7, 4, 5]], dtype=uint32), 16) >>> makeCloneMap(columnsShape=(7, 8), outputCloningWidth=3) (array([[0, 1, 2, 0, 1, 2, 0, 1], [3, 4, 5, 3, 4, 5, 3, 4], [6, 7, 8, 6, 7, 8, 6, 7], [0, 1, 2, 0, 1, 2, 0, 1], [3, 4, 5, 3, 4, 5, 3, 4], [6, 7, 8, 6, 7, 8, 6, 7], [0, 1, 2, 0, 1, 2, 0, 1]], dtype=uint32), 9) >>> makeCloneMap(columnsShape=(7, 11), outputCloningWidth=5) (array([[ 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0], [ 5, 6, 7, 8, 9, 5, 6, 7, 8, 9, 5], [10, 11, 12, 13, 14, 10, 11, 12, 13, 14, 10], [15, 16, 17, 18, 19, 15, 16, 17, 18, 19, 15], [20, 21, 22, 23, 24, 20, 21, 22, 23, 24, 20], [ 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0], [ 5, 6, 7, 8, 9, 5, 6, 7, 8, 9, 5]], dtype=uint32), 25) >>> makeCloneMap(columnsShape=(7, 8), outputCloningWidth=3, outputCloningHeight=4) (array([[ 0, 1, 2, 0, 1, 2, 0, 1], [ 3, 4, 5, 3, 4, 5, 3, 4], [ 6, 7, 8, 6, 7, 8, 6, 7], [ 9, 10, 11, 9, 10, 11, 9, 10], [ 0, 1, 2, 0, 1, 2, 0, 1], [ 3, 4, 5, 3, 4, 5, 3, 4], [ 6, 7, 8, 6, 7, 8, 6, 7]], dtype=uint32), 12) The basic idea with this map is that, if you imagine things stretching off to infinity, every instance of a given clone master is seeing the exact same thing in all directions. That includes: - All neighbors must be the same - The "meaning" of the input to each of the instances of the same clone master must be the same. If input is pixels and we have translation invariance--this is easy. At higher levels where input is the output of lower levels, this can be much harder. - The "meaning" of the inputs to neighbors of a clone master must be the same for each instance of the same clone master. The best way to think of this might be in terms of 'inputCloningWidth' and 'outputCloningWidth'. - The 'outputCloningWidth' is the number of columns you'd have to move horizontally (or vertically) before you get back to the same clone that you started with. MUST BE INTEGRAL! - The 'inputCloningWidth' is the 'outputCloningWidth' of the node below us. If we're getting input from a sensor where every element just represents a shift of every other element, this is 1. At a conceptual level, it means that if two different inputs are shown to the node and the only difference between them is that one is shifted horizontally (or vertically) by this many pixels, it means we are looking at the exact same real world input, but shifted by some number of pixels (doesn't have to be 1). MUST BE INTEGRAL! At level 1, I think you could have this: * inputCloningWidth = 1 * sqrt(coincToInputRatio^2) = 2.5 * outputCloningWidth = 5 ...in this case, you'd end up with 25 masters. Let's think about this case: input: - - - 0 1 2 3 4 5 - - - - - columns: 0 1 2 3 4 0 1 2 3 4 0 1 2 3 4 0 1 2 3 4 ...in other words, input 0 is fed to both column 0 and column 1. Input 1 is fed to columns 2, 3, and 4, etc. Hopefully, you can see that you'll get the exact same output (except shifted) with: input: - - - - - 0 1 2 3 4 5 - - - columns: 0 1 2 3 4 0 1 2 3 4 0 1 2 3 4 0 1 2 3 4 ...in other words, we've shifted the input 2 spaces and the output shifted 5 spaces. *** The outputCloningWidth MUST ALWAYS be an integral multiple of the *** *** inputCloningWidth in order for all of our rules to apply. *** *** NOTE: inputCloningWidth isn't passed here, so it's the caller's *** *** responsibility to ensure that this is true. *** *** The outputCloningWidth MUST ALWAYS be an integral multiple of *** *** sqrt(coincToInputRatio^2), too. *** @param columnsShape The shape (height, width) of the columns. @param outputCloningWidth See docstring above. @param outputCloningHeight If non-negative, can be used to make rectangular (instead of square) cloning fields. @return cloneMap An array (numColumnsHigh, numColumnsWide) that contains the clone index to use for each column. @return numDistinctClones The number of distinct clones in the map. This is just outputCloningWidth*outputCloningHeight.
[ "Make", "a", "two", "-", "dimensional", "clone", "map", "mapping", "columns", "to", "clone", "master", "." ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/fdrutilities.py#L1473-L1597
valid
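The doctests in this record's docstring already pin down the expected output, so a usage line only needs to restate the first one:

cloneMap, numMasters = makeCloneMap(columnsShape=(10, 6), outputCloningWidth=4)
# cloneMap[row, col] == (col % 4) + (row % 4) * 4, and numMasters == 16;
# requires numpy (and xrange, i.e. Python 2) as in the source module.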
numenta/nupic
src/nupic/algorithms/fdrutilities.py
numpyStr
def numpyStr(array, format='%f', includeIndices=False, includeZeros=True): """ Pretty print a numpy matrix using the given format string for each value. Return the string representation Parameters: ------------------------------------------------------------ array: The numpy array to print. This can be either a 1D vector or 2D matrix format: The format string to use for each value includeIndices: If true, include [row,col] label for each value includeZeros: Can only be set to False if includeIndices is on. If True, include 0 values in the print-out If False, exclude 0 values from the print-out. """ shape = array.shape assert (len(shape) <= 2) items = ['['] if len(shape) == 1: if includeIndices: format = '%d:' + format if includeZeros: rowItems = [format % (c,x) for (c,x) in enumerate(array)] else: rowItems = [format % (c,x) for (c,x) in enumerate(array) if x != 0] else: rowItems = [format % (x) for x in array] items.extend(rowItems) else: (rows, cols) = shape if includeIndices: format = '%d,%d:' + format for r in xrange(rows): if includeIndices: rowItems = [format % (r,c,x) for c,x in enumerate(array[r])] else: rowItems = [format % (x) for x in array[r]] if r > 0: items.append('') items.append('[') items.extend(rowItems) if r < rows-1: items.append(']\n') else: items.append(']') items.append(']') return ' '.join(items)
python
def numpyStr(array, format='%f', includeIndices=False, includeZeros=True): """ Pretty print a numpy matrix using the given format string for each value. Return the string representation Parameters: ------------------------------------------------------------ array: The numpy array to print. This can be either a 1D vector or 2D matrix format: The format string to use for each value includeIndices: If true, include [row,col] label for each value includeZeros: Can only be set to False if includeIndices is on. If True, include 0 values in the print-out If False, exclude 0 values from the print-out. """ shape = array.shape assert (len(shape) <= 2) items = ['['] if len(shape) == 1: if includeIndices: format = '%d:' + format if includeZeros: rowItems = [format % (c,x) for (c,x) in enumerate(array)] else: rowItems = [format % (c,x) for (c,x) in enumerate(array) if x != 0] else: rowItems = [format % (x) for x in array] items.extend(rowItems) else: (rows, cols) = shape if includeIndices: format = '%d,%d:' + format for r in xrange(rows): if includeIndices: rowItems = [format % (r,c,x) for c,x in enumerate(array[r])] else: rowItems = [format % (x) for x in array[r]] if r > 0: items.append('') items.append('[') items.extend(rowItems) if r < rows-1: items.append(']\n') else: items.append(']') items.append(']') return ' '.join(items)
[ "def", "numpyStr", "(", "array", ",", "format", "=", "'%f'", ",", "includeIndices", "=", "False", ",", "includeZeros", "=", "True", ")", ":", "shape", "=", "array", ".", "shape", "assert", "(", "len", "(", "shape", ")", "<=", "2", ")", "items", "=", "[", "'['", "]", "if", "len", "(", "shape", ")", "==", "1", ":", "if", "includeIndices", ":", "format", "=", "'%d:'", "+", "format", "if", "includeZeros", ":", "rowItems", "=", "[", "format", "%", "(", "c", ",", "x", ")", "for", "(", "c", ",", "x", ")", "in", "enumerate", "(", "array", ")", "]", "else", ":", "rowItems", "=", "[", "format", "%", "(", "c", ",", "x", ")", "for", "(", "c", ",", "x", ")", "in", "enumerate", "(", "array", ")", "if", "x", "!=", "0", "]", "else", ":", "rowItems", "=", "[", "format", "%", "(", "x", ")", "for", "x", "in", "array", "]", "items", ".", "extend", "(", "rowItems", ")", "else", ":", "(", "rows", ",", "cols", ")", "=", "shape", "if", "includeIndices", ":", "format", "=", "'%d,%d:'", "+", "format", "for", "r", "in", "xrange", "(", "rows", ")", ":", "if", "includeIndices", ":", "rowItems", "=", "[", "format", "%", "(", "r", ",", "c", ",", "x", ")", "for", "c", ",", "x", "in", "enumerate", "(", "array", "[", "r", "]", ")", "]", "else", ":", "rowItems", "=", "[", "format", "%", "(", "x", ")", "for", "x", "in", "array", "[", "r", "]", "]", "if", "r", ">", "0", ":", "items", ".", "append", "(", "''", ")", "items", ".", "append", "(", "'['", ")", "items", ".", "extend", "(", "rowItems", ")", "if", "r", "<", "rows", "-", "1", ":", "items", ".", "append", "(", "']\\n'", ")", "else", ":", "items", ".", "append", "(", "']'", ")", "items", ".", "append", "(", "']'", ")", "return", "' '", ".", "join", "(", "items", ")" ]
Pretty print a numpy matrix using the given format string for each value. Return the string representation Parameters: ------------------------------------------------------------ array: The numpy array to print. This can be either a 1D vector or 2D matrix format: The format string to use for each value includeIndices: If true, include [row,col] label for each value includeZeros: Can only be set to False if includeIndices is on. If True, include 0 values in the print-out If False, exclude 0 values from the print-out.
[ "Pretty", "print", "a", "numpy", "matrix", "using", "the", "given", "format", "string", "for", "each", "value", ".", "Return", "the", "string", "representation" ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/fdrutilities.py#L1601-L1653
valid
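Two quick calls against a hypothetical 1-D vector show the plain form and the indexed, non-zero-only form (print() with a single argument works in the module's Python 2 as well):

import numpy

v = numpy.array([0.0, 1.5, 0.0, 2.0])
print(numpyStr(v, format='%.1f'))              # [ 0.0 1.5 0.0 2.0 ]
print(numpyStr(v, format='%.1f',
               includeIndices=True,
               includeZeros=False))            # [ 1:1.5 3:2.0 ]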
numenta/nupic
src/nupic/math/dist.py
DiscreteDistribution.sample
def sample(self, rgen): """Generates a random sample from the discrete probability distribution and returns its value and the log of the probability of sampling that value. """ rf = rgen.uniform(0, self.sum) index = bisect.bisect(self.cdf, rf) return self.keys[index], numpy.log(self.pmf[index])
python
def sample(self, rgen): """Generates a random sample from the discrete probability distribution and returns its value and the log of the probability of sampling that value. """ rf = rgen.uniform(0, self.sum) index = bisect.bisect(self.cdf, rf) return self.keys[index], numpy.log(self.pmf[index])
[ "def", "sample", "(", "self", ",", "rgen", ")", ":", "rf", "=", "rgen", ".", "uniform", "(", "0", ",", "self", ".", "sum", ")", "index", "=", "bisect", ".", "bisect", "(", "self", ".", "cdf", ",", "rf", ")", "return", "self", ".", "keys", "[", "index", "]", ",", "numpy", ".", "log", "(", "self", ".", "pmf", "[", "index", "]", ")" ]
Generates a random sample from the discrete probability distribution and returns its value and the log of the probability of sampling that value.
[ "Generates", "a", "random", "sample", "from", "the", "discrete", "probability", "distribution", "and", "returns", "its", "value", "and", "the", "log", "of", "the", "probability", "of", "sampling", "that", "value", "." ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/math/dist.py#L73-L79
valid
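A call-shape sketch only: the DiscreteDistribution constructor is not part of this record, so 'distn' below is a placeholder for an already-built instance (one whose keys, pmf, cdf, and sum attributes are populated):

import numpy

rgen = numpy.random.RandomState(42)
# 'distn' is hypothetical; sample() returns (value, log probability).
value, logProb = distn.sample(rgen)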
numenta/nupic
src/nupic/math/dist.py
MultinomialDistribution.logProbability
def logProbability(self, distn): """Form of distribution must be an array of counts in order of self.keys.""" x = numpy.asarray(distn) n = x.sum() return (logFactorial(n) - numpy.sum([logFactorial(k) for k in x]) + numpy.sum(x * numpy.log(self.dist.pmf)))
python
def logProbability(self, distn): """Form of distribution must be an array of counts in order of self.keys.""" x = numpy.asarray(distn) n = x.sum() return (logFactorial(n) - numpy.sum([logFactorial(k) for k in x]) + numpy.sum(x * numpy.log(self.dist.pmf)))
[ "def", "logProbability", "(", "self", ",", "distn", ")", ":", "x", "=", "numpy", ".", "asarray", "(", "distn", ")", "n", "=", "x", ".", "sum", "(", ")", "return", "(", "logFactorial", "(", "n", ")", "-", "numpy", ".", "sum", "(", "[", "logFactorial", "(", "k", ")", "for", "k", "in", "x", "]", ")", "+", "numpy", ".", "sum", "(", "x", "*", "numpy", ".", "log", "(", "self", ".", "dist", ".", "pmf", ")", ")", ")" ]
Form of distribution must be an array of counts in order of self.keys.
[ "Form", "of", "distribution", "must", "be", "an", "array", "of", "counts", "in", "order", "of", "self", ".", "keys", "." ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/math/dist.py#L106-L111
valid
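Again only the call shape, since the constructor is not shown here; 'multinomial' is a placeholder instance, and the counts must line up with the keys of its underlying discrete distribution:

# Hypothetical observed counts for a 4-key distribution (n = 6 trials).
counts = [3, 1, 0, 2]
logP = multinomial.logProbability(counts)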
numenta/nupic
src/nupic/math/dist.py
PoissonDistribution.sample
def sample(self, rgen): """Generates a random sample from the Poisson probability distribution and returns its value and the log of the probability of sampling that value. """ x = rgen.poisson(self.lambdaParameter) return x, self.logDensity(x)
python
def sample(self, rgen): """Generates a random sample from the Poisson probability distribution and returns its value and the log of the probability of sampling that value. """ x = rgen.poisson(self.lambdaParameter) return x, self.logDensity(x)
[ "def", "sample", "(", "self", ",", "rgen", ")", ":", "x", "=", "rgen", ".", "poisson", "(", "self", ".", "lambdaParameter", ")", "return", "x", ",", "self", ".", "logDensity", "(", "x", ")" ]
Generates a random sample from the Poisson probability distribution and returns its value and the log of the probability of sampling that value.
[ "Generates", "a", "random", "sample", "from", "the", "Poisson", "probability", "distribution", "and", "returns", "its", "value", "and", "the", "log", "of", "the", "probability", "of", "sampling", "that", "value", "." ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/math/dist.py#L132-L137
valid
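Same caveat: 'poisson' stands in for a constructed PoissonDistribution, since the record only shows that sampling delegates to numpy's poisson draw and scores the result with logDensity:

import numpy

rgen = numpy.random.RandomState(0)
# 'poisson' is hypothetical; returns the drawn count and its log density.
x, logDensity = poisson.sample(rgen)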
numenta/nupic
docs/examples/network/complete-network-example.py
createDataOutLink
def createDataOutLink(network, sensorRegionName, regionName): """Link sensor region to other region so that it can pass it data.""" network.link(sensorRegionName, regionName, "UniformLink", "", srcOutput="dataOut", destInput="bottomUpIn")
python
def createDataOutLink(network, sensorRegionName, regionName): """Link sensor region to other region so that it can pass it data.""" network.link(sensorRegionName, regionName, "UniformLink", "", srcOutput="dataOut", destInput="bottomUpIn")
[ "def", "createDataOutLink", "(", "network", ",", "sensorRegionName", ",", "regionName", ")", ":", "network", ".", "link", "(", "sensorRegionName", ",", "regionName", ",", "\"UniformLink\"", ",", "\"\"", ",", "srcOutput", "=", "\"dataOut\"", ",", "destInput", "=", "\"bottomUpIn\"", ")" ]
Link sensor region to other region so that it can pass it data.
[ "Link", "sensor", "region", "to", "other", "region", "so", "that", "it", "can", "pass", "it", "data", "." ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/docs/examples/network/complete-network-example.py#L16-L19
valid
numenta/nupic
docs/examples/network/complete-network-example.py
createFeedForwardLink
def createFeedForwardLink(network, regionName1, regionName2):
  """Create a feed-forward link between 2 regions: regionName1 -> regionName2"""
  network.link(regionName1, regionName2, "UniformLink", "",
               srcOutput="bottomUpOut", destInput="bottomUpIn")
python
def createFeedForwardLink(network, regionName1, regionName2):
  """Create a feed-forward link between 2 regions: regionName1 -> regionName2"""
  network.link(regionName1, regionName2, "UniformLink", "",
               srcOutput="bottomUpOut", destInput="bottomUpIn")
[ "def", "createFeedForwardLink", "(", "network", ",", "regionName1", ",", "regionName2", ")", ":", "network", ".", "link", "(", "regionName1", ",", "regionName2", ",", "\"UniformLink\"", ",", "\"\"", ",", "srcOutput", "=", "\"bottomUpOut\"", ",", "destInput", "=", "\"bottomUpIn\"", ")" ]
Create a feed-forward link between 2 regions: regionName1 -> regionName2
[ "Create", "a", "feed", "-", "forward", "link", "between", "2", "regions", ":", "regionName1", "-", ">", "regionName2" ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/docs/examples/network/complete-network-example.py#L22-L25
valid
numenta/nupic
docs/examples/network/complete-network-example.py
createResetLink
def createResetLink(network, sensorRegionName, regionName):
  """Create a reset link from a sensor region: sensorRegionName -> regionName"""
  network.link(sensorRegionName, regionName, "UniformLink", "",
               srcOutput="resetOut", destInput="resetIn")
python
def createResetLink(network, sensorRegionName, regionName):
  """Create a reset link from a sensor region: sensorRegionName -> regionName"""
  network.link(sensorRegionName, regionName, "UniformLink", "",
               srcOutput="resetOut", destInput="resetIn")
[ "def", "createResetLink", "(", "network", ",", "sensorRegionName", ",", "regionName", ")", ":", "network", ".", "link", "(", "sensorRegionName", ",", "regionName", ",", "\"UniformLink\"", ",", "\"\"", ",", "srcOutput", "=", "\"resetOut\"", ",", "destInput", "=", "\"resetIn\"", ")" ]
Create a reset link from a sensor region: sensorRegionName -> regionName
[ "Create", "a", "reset", "link", "from", "a", "sensor", "region", ":", "sensorRegionName", "-", ">", "regionName" ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/docs/examples/network/complete-network-example.py#L28-L31
valid
numenta/nupic
docs/examples/network/complete-network-example.py
createSensorToClassifierLinks
def createSensorToClassifierLinks(network, sensorRegionName,
                                  classifierRegionName):
  """Create required links from a sensor region to a classifier region."""
  network.link(sensorRegionName, classifierRegionName, "UniformLink", "",
               srcOutput="bucketIdxOut", destInput="bucketIdxIn")
  network.link(sensorRegionName, classifierRegionName, "UniformLink", "",
               srcOutput="actValueOut", destInput="actValueIn")
  network.link(sensorRegionName, classifierRegionName, "UniformLink", "",
               srcOutput="categoryOut", destInput="categoryIn")
python
def createSensorToClassifierLinks(network, sensorRegionName,
                                  classifierRegionName):
  """Create required links from a sensor region to a classifier region."""
  network.link(sensorRegionName, classifierRegionName, "UniformLink", "",
               srcOutput="bucketIdxOut", destInput="bucketIdxIn")
  network.link(sensorRegionName, classifierRegionName, "UniformLink", "",
               srcOutput="actValueOut", destInput="actValueIn")
  network.link(sensorRegionName, classifierRegionName, "UniformLink", "",
               srcOutput="categoryOut", destInput="categoryIn")
[ "def", "createSensorToClassifierLinks", "(", "network", ",", "sensorRegionName", ",", "classifierRegionName", ")", ":", "network", ".", "link", "(", "sensorRegionName", ",", "classifierRegionName", ",", "\"UniformLink\"", ",", "\"\"", ",", "srcOutput", "=", "\"bucketIdxOut\"", ",", "destInput", "=", "\"bucketIdxIn\"", ")", "network", ".", "link", "(", "sensorRegionName", ",", "classifierRegionName", ",", "\"UniformLink\"", ",", "\"\"", ",", "srcOutput", "=", "\"actValueOut\"", ",", "destInput", "=", "\"actValueIn\"", ")", "network", ".", "link", "(", "sensorRegionName", ",", "classifierRegionName", ",", "\"UniformLink\"", ",", "\"\"", ",", "srcOutput", "=", "\"categoryOut\"", ",", "destInput", "=", "\"categoryIn\"", ")" ]
Create required links from a sensor region to a classifier region.
[ "Create", "required", "links", "from", "a", "sensor", "region", "to", "a", "classifier", "region", "." ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/docs/examples/network/complete-network-example.py#L34-L42
valid
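The link helpers above differ only in the output/input pair they wire. A possible factoring, offered as a sketch rather than anything present in the example itself:

def makeLinker(srcOutput, destInput):
  # Build a helper with the same shape as createDataOutLink() and friends.
  def linker(network, srcName, destName):
    network.link(srcName, destName, "UniformLink", "",
                 srcOutput=srcOutput, destInput=destInput)
  return linker

createDataOutLink = makeLinker("dataOut", "bottomUpIn")
createFeedForwardLink = makeLinker("bottomUpOut", "bottomUpIn")
createResetLink = makeLinker("resetOut", "resetIn")

createSensorToClassifierLinks would chain three such linkers, one each for the bucketIdx, actValue, and category pairs.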
numenta/nupic
docs/examples/network/complete-network-example.py
createNetwork
def createNetwork(dataSource):
  """Create and initialize a network."""
  with open(_PARAMS_PATH, "r") as f:
    modelParams = yaml.safe_load(f)["modelParams"]

  # Create a network that will hold the regions.
  network = Network()

  # Add a sensor region.
  network.addRegion("sensor", "py.RecordSensor", '{}')

  # Set the encoder and data source of the sensor region.
  sensorRegion = network.regions["sensor"].getSelf()
  sensorRegion.encoder = createEncoder(modelParams["sensorParams"]["encoders"])
  sensorRegion.dataSource = dataSource

  # Make sure the SP input width matches the sensor region output width.
  modelParams["spParams"]["inputWidth"] = sensorRegion.encoder.getWidth()

  # Add SP and TM regions.
  network.addRegion("SP", "py.SPRegion", json.dumps(modelParams["spParams"]))
  network.addRegion("TM", "py.TMRegion", json.dumps(modelParams["tmParams"]))

  # Add a classifier region.
  clName = "py.%s" % modelParams["clParams"].pop("regionName")
  network.addRegion("classifier", clName, json.dumps(modelParams["clParams"]))

  # Add all links
  createSensorToClassifierLinks(network, "sensor", "classifier")
  createDataOutLink(network, "sensor", "SP")
  createFeedForwardLink(network, "SP", "TM")
  createFeedForwardLink(network, "TM", "classifier")
  # Reset links are optional, since the sensor region does not send resets.
  createResetLink(network, "sensor", "SP")
  createResetLink(network, "sensor", "TM")

  # Make sure all objects are initialized.
  network.initialize()

  return network
python
def createNetwork(dataSource):
  """Create and initialize a network."""
  with open(_PARAMS_PATH, "r") as f:
    modelParams = yaml.safe_load(f)["modelParams"]

  # Create a network that will hold the regions.
  network = Network()

  # Add a sensor region.
  network.addRegion("sensor", "py.RecordSensor", '{}')

  # Set the encoder and data source of the sensor region.
  sensorRegion = network.regions["sensor"].getSelf()
  sensorRegion.encoder = createEncoder(modelParams["sensorParams"]["encoders"])
  sensorRegion.dataSource = dataSource

  # Make sure the SP input width matches the sensor region output width.
  modelParams["spParams"]["inputWidth"] = sensorRegion.encoder.getWidth()

  # Add SP and TM regions.
  network.addRegion("SP", "py.SPRegion", json.dumps(modelParams["spParams"]))
  network.addRegion("TM", "py.TMRegion", json.dumps(modelParams["tmParams"]))

  # Add a classifier region.
  clName = "py.%s" % modelParams["clParams"].pop("regionName")
  network.addRegion("classifier", clName, json.dumps(modelParams["clParams"]))

  # Add all links
  createSensorToClassifierLinks(network, "sensor", "classifier")
  createDataOutLink(network, "sensor", "SP")
  createFeedForwardLink(network, "SP", "TM")
  createFeedForwardLink(network, "TM", "classifier")
  # Reset links are optional, since the sensor region does not send resets.
  createResetLink(network, "sensor", "SP")
  createResetLink(network, "sensor", "TM")

  # Make sure all objects are initialized.
  network.initialize()

  return network
[ "def", "createNetwork", "(", "dataSource", ")", ":", "with", "open", "(", "_PARAMS_PATH", ",", "\"r\"", ")", "as", "f", ":", "modelParams", "=", "yaml", ".", "safe_load", "(", "f", ")", "[", "\"modelParams\"", "]", "# Create a network that will hold the regions.", "network", "=", "Network", "(", ")", "# Add a sensor region.", "network", ".", "addRegion", "(", "\"sensor\"", ",", "\"py.RecordSensor\"", ",", "'{}'", ")", "# Set the encoder and data source of the sensor region.", "sensorRegion", "=", "network", ".", "regions", "[", "\"sensor\"", "]", ".", "getSelf", "(", ")", "sensorRegion", ".", "encoder", "=", "createEncoder", "(", "modelParams", "[", "\"sensorParams\"", "]", "[", "\"encoders\"", "]", ")", "sensorRegion", ".", "dataSource", "=", "dataSource", "# Make sure the SP input width matches the sensor region output width.", "modelParams", "[", "\"spParams\"", "]", "[", "\"inputWidth\"", "]", "=", "sensorRegion", ".", "encoder", ".", "getWidth", "(", ")", "# Add SP and TM regions.", "network", ".", "addRegion", "(", "\"SP\"", ",", "\"py.SPRegion\"", ",", "json", ".", "dumps", "(", "modelParams", "[", "\"spParams\"", "]", ")", ")", "network", ".", "addRegion", "(", "\"TM\"", ",", "\"py.TMRegion\"", ",", "json", ".", "dumps", "(", "modelParams", "[", "\"tmParams\"", "]", ")", ")", "# Add a classifier region.", "clName", "=", "\"py.%s\"", "%", "modelParams", "[", "\"clParams\"", "]", ".", "pop", "(", "\"regionName\"", ")", "network", ".", "addRegion", "(", "\"classifier\"", ",", "clName", ",", "json", ".", "dumps", "(", "modelParams", "[", "\"clParams\"", "]", ")", ")", "# Add all links", "createSensorToClassifierLinks", "(", "network", ",", "\"sensor\"", ",", "\"classifier\"", ")", "createDataOutLink", "(", "network", ",", "\"sensor\"", ",", "\"SP\"", ")", "createFeedForwardLink", "(", "network", ",", "\"SP\"", ",", "\"TM\"", ")", "createFeedForwardLink", "(", "network", ",", "\"TM\"", ",", "\"classifier\"", ")", "# Reset links are optional, since the sensor region does not send resets.", "createResetLink", "(", "network", ",", "\"sensor\"", ",", "\"SP\"", ")", "createResetLink", "(", "network", ",", "\"sensor\"", ",", "\"TM\"", ")", "# Make sure all objects are initialized.", "network", ".", "initialize", "(", ")", "return", "network" ]
Create and initialize a network.
[ "Create", "and", "initialize", "a", "network", "." ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/docs/examples/network/complete-network-example.py#L52-L91
valid
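The parameter hand-off inside createNetwork(), YAML text to dict, patched in place, then serialized to the JSON string addRegion() expects, shown in isolation with a made-up minimal params snippet (requires PyYAML):

import json
import yaml

raw = """
modelParams:
  spParams:
    inputWidth: 0
    columnCount: 2048
"""
modelParams = yaml.safe_load(raw)["modelParams"]
modelParams["spParams"]["inputWidth"] = 946   # e.g. sensorRegion.encoder.getWidth()
print(json.dumps(modelParams["spParams"]))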
numenta/nupic
docs/examples/network/complete-network-example.py
getPredictionResults
def getPredictionResults(network, clRegionName):
  """Get prediction results for all prediction steps."""
  classifierRegion = network.regions[clRegionName]
  actualValues = classifierRegion.getOutputData("actualValues")
  probabilities = classifierRegion.getOutputData("probabilities")
  steps = classifierRegion.getSelf().stepsList
  N = classifierRegion.getSelf().maxCategoryCount
  results = {step: {} for step in steps}
  for i in range(len(steps)):
    # stepProbabilities are probabilities for this prediction step only.
    stepProbabilities = probabilities[i * N:(i + 1) * N - 1]
    mostLikelyCategoryIdx = stepProbabilities.argmax()
    predictedValue = actualValues[mostLikelyCategoryIdx]
    predictionConfidence = stepProbabilities[mostLikelyCategoryIdx]
    results[steps[i]]["predictedValue"] = predictedValue
    results[steps[i]]["predictionConfidence"] = predictionConfidence
  return results
python
def getPredictionResults(network, clRegionName):
  """Get prediction results for all prediction steps."""
  classifierRegion = network.regions[clRegionName]
  actualValues = classifierRegion.getOutputData("actualValues")
  probabilities = classifierRegion.getOutputData("probabilities")
  steps = classifierRegion.getSelf().stepsList
  N = classifierRegion.getSelf().maxCategoryCount
  results = {step: {} for step in steps}
  for i in range(len(steps)):
    # stepProbabilities are probabilities for this prediction step only.
    stepProbabilities = probabilities[i * N:(i + 1) * N - 1]
    mostLikelyCategoryIdx = stepProbabilities.argmax()
    predictedValue = actualValues[mostLikelyCategoryIdx]
    predictionConfidence = stepProbabilities[mostLikelyCategoryIdx]
    results[steps[i]]["predictedValue"] = predictedValue
    results[steps[i]]["predictionConfidence"] = predictionConfidence
  return results
[ "def", "getPredictionResults", "(", "network", ",", "clRegionName", ")", ":", "classifierRegion", "=", "network", ".", "regions", "[", "clRegionName", "]", "actualValues", "=", "classifierRegion", ".", "getOutputData", "(", "\"actualValues\"", ")", "probabilities", "=", "classifierRegion", ".", "getOutputData", "(", "\"probabilities\"", ")", "steps", "=", "classifierRegion", ".", "getSelf", "(", ")", ".", "stepsList", "N", "=", "classifierRegion", ".", "getSelf", "(", ")", ".", "maxCategoryCount", "results", "=", "{", "step", ":", "{", "}", "for", "step", "in", "steps", "}", "for", "i", "in", "range", "(", "len", "(", "steps", ")", ")", ":", "# stepProbabilities are probabilities for this prediction step only.", "stepProbabilities", "=", "probabilities", "[", "i", "*", "N", ":", "(", "i", "+", "1", ")", "*", "N", "-", "1", "]", "mostLikelyCategoryIdx", "=", "stepProbabilities", ".", "argmax", "(", ")", "predictedValue", "=", "actualValues", "[", "mostLikelyCategoryIdx", "]", "predictionConfidence", "=", "stepProbabilities", "[", "mostLikelyCategoryIdx", "]", "results", "[", "steps", "[", "i", "]", "]", "[", "\"predictedValue\"", "]", "=", "predictedValue", "results", "[", "steps", "[", "i", "]", "]", "[", "\"predictionConfidence\"", "]", "=", "predictionConfidence", "return", "results" ]
Get prediction results for all prediction steps.
[ "Get", "prediction", "results", "for", "all", "prediction", "steps", "." ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/docs/examples/network/complete-network-example.py#L94-L110
valid
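How the flat probabilities buffer decomposes per prediction step, with invented numbers: two steps and N = 3 categories, each step's block laid out back to back. This sketch takes the full N-wide block for each step:

import numpy

steps = [1, 5]
N = 3
actualValues = numpy.array([20.5, 31.0, 44.2])
probabilities = numpy.array([0.1, 0.7, 0.2,    # block for step 1
                             0.3, 0.3, 0.4])   # block for step 5
for i, step in enumerate(steps):
  block = probabilities[i * N:(i + 1) * N]     # all N entries for this step
  idx = block.argmax()
  print((step, actualValues[idx], block[idx]))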
numenta/nupic
docs/examples/network/complete-network-example.py
runHotgym
def runHotgym(numRecords):
  """Run the Hot Gym example."""

  # Create a data source for the network.
  dataSource = FileRecordStream(streamID=_INPUT_FILE_PATH)
  numRecords = min(numRecords, dataSource.getDataRowCount())
  network = createNetwork(dataSource)

  # Set predicted field
  network.regions["sensor"].setParameter("predictedField", "consumption")

  # Enable learning for all regions.
  network.regions["SP"].setParameter("learningMode", 1)
  network.regions["TM"].setParameter("learningMode", 1)
  network.regions["classifier"].setParameter("learningMode", 1)

  # Enable inference for all regions.
  network.regions["SP"].setParameter("inferenceMode", 1)
  network.regions["TM"].setParameter("inferenceMode", 1)
  network.regions["classifier"].setParameter("inferenceMode", 1)

  results = []
  N = 1  # Run the network, N iterations at a time.
  for iteration in range(0, numRecords, N):
    network.run(N)

    predictionResults = getPredictionResults(network, "classifier")
    oneStep = predictionResults[1]["predictedValue"]
    oneStepConfidence = predictionResults[1]["predictionConfidence"]
    fiveStep = predictionResults[5]["predictedValue"]
    fiveStepConfidence = predictionResults[5]["predictionConfidence"]

    result = (oneStep, oneStepConfidence * 100,
              fiveStep, fiveStepConfidence * 100)
    print "1-step: {:16} ({:4.4}%)\t 5-step: {:16} ({:4.4}%)".format(*result)
    results.append(result)

  return results
python
def runHotgym(numRecords):
  """Run the Hot Gym example."""

  # Create a data source for the network.
  dataSource = FileRecordStream(streamID=_INPUT_FILE_PATH)
  numRecords = min(numRecords, dataSource.getDataRowCount())
  network = createNetwork(dataSource)

  # Set predicted field
  network.regions["sensor"].setParameter("predictedField", "consumption")

  # Enable learning for all regions.
  network.regions["SP"].setParameter("learningMode", 1)
  network.regions["TM"].setParameter("learningMode", 1)
  network.regions["classifier"].setParameter("learningMode", 1)

  # Enable inference for all regions.
  network.regions["SP"].setParameter("inferenceMode", 1)
  network.regions["TM"].setParameter("inferenceMode", 1)
  network.regions["classifier"].setParameter("inferenceMode", 1)

  results = []
  N = 1  # Run the network, N iterations at a time.
  for iteration in range(0, numRecords, N):
    network.run(N)

    predictionResults = getPredictionResults(network, "classifier")
    oneStep = predictionResults[1]["predictedValue"]
    oneStepConfidence = predictionResults[1]["predictionConfidence"]
    fiveStep = predictionResults[5]["predictedValue"]
    fiveStepConfidence = predictionResults[5]["predictionConfidence"]

    result = (oneStep, oneStepConfidence * 100,
              fiveStep, fiveStepConfidence * 100)
    print "1-step: {:16} ({:4.4}%)\t 5-step: {:16} ({:4.4}%)".format(*result)
    results.append(result)

  return results
[ "def", "runHotgym", "(", "numRecords", ")", ":", "# Create a data source for the network.", "dataSource", "=", "FileRecordStream", "(", "streamID", "=", "_INPUT_FILE_PATH", ")", "numRecords", "=", "min", "(", "numRecords", ",", "dataSource", ".", "getDataRowCount", "(", ")", ")", "network", "=", "createNetwork", "(", "dataSource", ")", "# Set predicted field", "network", ".", "regions", "[", "\"sensor\"", "]", ".", "setParameter", "(", "\"predictedField\"", ",", "\"consumption\"", ")", "# Enable learning for all regions.", "network", ".", "regions", "[", "\"SP\"", "]", ".", "setParameter", "(", "\"learningMode\"", ",", "1", ")", "network", ".", "regions", "[", "\"TM\"", "]", ".", "setParameter", "(", "\"learningMode\"", ",", "1", ")", "network", ".", "regions", "[", "\"classifier\"", "]", ".", "setParameter", "(", "\"learningMode\"", ",", "1", ")", "# Enable inference for all regions.", "network", ".", "regions", "[", "\"SP\"", "]", ".", "setParameter", "(", "\"inferenceMode\"", ",", "1", ")", "network", ".", "regions", "[", "\"TM\"", "]", ".", "setParameter", "(", "\"inferenceMode\"", ",", "1", ")", "network", ".", "regions", "[", "\"classifier\"", "]", ".", "setParameter", "(", "\"inferenceMode\"", ",", "1", ")", "results", "=", "[", "]", "N", "=", "1", "# Run the network, N iterations at a time.", "for", "iteration", "in", "range", "(", "0", ",", "numRecords", ",", "N", ")", ":", "network", ".", "run", "(", "N", ")", "predictionResults", "=", "getPredictionResults", "(", "network", ",", "\"classifier\"", ")", "oneStep", "=", "predictionResults", "[", "1", "]", "[", "\"predictedValue\"", "]", "oneStepConfidence", "=", "predictionResults", "[", "1", "]", "[", "\"predictionConfidence\"", "]", "fiveStep", "=", "predictionResults", "[", "5", "]", "[", "\"predictedValue\"", "]", "fiveStepConfidence", "=", "predictionResults", "[", "5", "]", "[", "\"predictionConfidence\"", "]", "result", "=", "(", "oneStep", ",", "oneStepConfidence", "*", "100", ",", "fiveStep", ",", "fiveStepConfidence", "*", "100", ")", "print", "\"1-step: {:16} ({:4.4}%)\\t 5-step: {:16} ({:4.4}%)\"", ".", "format", "(", "*", "result", ")", "results", ".", "append", "(", "result", ")", "return", "results" ]
Run the Hot Gym example.
[ "Run", "the", "Hot", "Gym", "example", "." ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/docs/examples/network/complete-network-example.py#L113-L150
valid
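A hypothetical driver for the function above, assuming the surrounding module is importable; the record count is arbitrary:

if __name__ == "__main__":
  results = runHotgym(100)   # caps itself at the CSV's row count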
numenta/nupic
src/nupic/swarming/dummy_model_runner.py
OPFDummyModelRunner._loadDummyModelParameters
def _loadDummyModelParameters(self, params):
  """ Loads all the parameters for this dummy model. For any parameters
  specified as lists, read the appropriate value for this model using the
  model index
  """
  for key, value in params.iteritems():
    if type(value) == list:
      index = self.modelIndex % len(params[key])
      self._params[key] = params[key][index]
    else:
      self._params[key] = params[key]
python
def _loadDummyModelParameters(self, params):
  """ Loads all the parameters for this dummy model. For any parameters
  specified as lists, read the appropriate value for this model using the
  model index
  """
  for key, value in params.iteritems():
    if type(value) == list:
      index = self.modelIndex % len(params[key])
      self._params[key] = params[key][index]
    else:
      self._params[key] = params[key]
[ "def", "_loadDummyModelParameters", "(", "self", ",", "params", ")", ":", "for", "key", ",", "value", "in", "params", ".", "iteritems", "(", ")", ":", "if", "type", "(", "value", ")", "==", "list", ":", "index", "=", "self", ".", "modelIndex", "%", "len", "(", "params", "[", "key", "]", ")", "self", ".", "_params", "[", "key", "]", "=", "params", "[", "key", "]", "[", "index", "]", "else", ":", "self", ".", "_params", "[", "key", "]", "=", "params", "[", "key", "]" ]
Loads all the parameters for this dummy model. For any parameters specified as lists, read the appropriate value for this model using the model index
[ "Loads", "all", "the", "parameters", "for", "this", "dummy", "model", ".", "For", "any", "paramters", "specified", "as", "lists", "read", "the", "appropriate", "value", "for", "this", "model", "using", "the", "model", "index" ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/dummy_model_runner.py#L339-L349
valid
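The list-cycling rule above in isolation: a model at index i takes element i mod len(list), while scalars pass through unchanged. All values are invented:

params = {"delay": [0.0, 1.5, 3.0], "metricValue": 0.42}
modelIndex = 4
resolved = {}
for key, value in params.items():
  if isinstance(value, list):
    resolved[key] = value[modelIndex % len(value)]   # 4 % 3 -> index 1
  else:
    resolved[key] = value
print(resolved)   # {'delay': 1.5, 'metricValue': 0.42}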
numenta/nupic
src/nupic/swarming/dummy_model_runner.py
OPFDummyModelRunner._computModelDelay
def _computModelDelay(self):
  """ Computes the amount of time (if any) to delay the run of this model.
  This can be determined by two mutually exclusive parameters:
  delay and sleepModelRange.

  'delay' specifies the number of seconds a model should be delayed. If a list
  is specified, the appropriate amount of delay is determined by using the
  model's modelIndex property.

  However, this doesn't work when testing orphaned models, because the
  modelIndex will be the same for every recovery attempt. Therefore, every
  recovery attempt will also be delayed and potentially orphaned.

  'sleepModelRange' doesn't use the modelIndex property for a model, but
  rather sees which order the model is in the database, and uses that to
  determine whether or not a model should be delayed.
  """
  # 'delay' and 'sleepModelRange' are mutually exclusive
  if self._params['delay'] is not None \
        and self._params['sleepModelRange'] is not None:
    raise RuntimeError("Only one of 'delay' or "
                       "'sleepModelRange' may be specified")

  # Get the sleepModel range
  if self._sleepModelRange is not None:
    range, delay = self._sleepModelRange.split(':')
    delay = float(delay)
    range = map(int, range.split(','))
    modelIDs = self._jobsDAO.jobGetModelIDs(self._jobID)
    modelIDs.sort()

    range[1] = min(range[1], len(modelIDs))

    # If the model is in range, add the delay
    if self._modelID in modelIDs[range[0]:range[1]]:
      self._delay = delay
  else:
    self._delay = self._params['delay']
python
def _computModelDelay(self):
  """ Computes the amount of time (if any) to delay the run of this model.
  This can be determined by two mutually exclusive parameters:
  delay and sleepModelRange.

  'delay' specifies the number of seconds a model should be delayed. If a list
  is specified, the appropriate amount of delay is determined by using the
  model's modelIndex property.

  However, this doesn't work when testing orphaned models, because the
  modelIndex will be the same for every recovery attempt. Therefore, every
  recovery attempt will also be delayed and potentially orphaned.

  'sleepModelRange' doesn't use the modelIndex property for a model, but
  rather sees which order the model is in the database, and uses that to
  determine whether or not a model should be delayed.
  """
  # 'delay' and 'sleepModelRange' are mutually exclusive
  if self._params['delay'] is not None \
        and self._params['sleepModelRange'] is not None:
    raise RuntimeError("Only one of 'delay' or "
                       "'sleepModelRange' may be specified")

  # Get the sleepModel range
  if self._sleepModelRange is not None:
    range, delay = self._sleepModelRange.split(':')
    delay = float(delay)
    range = map(int, range.split(','))
    modelIDs = self._jobsDAO.jobGetModelIDs(self._jobID)
    modelIDs.sort()

    range[1] = min(range[1], len(modelIDs))

    # If the model is in range, add the delay
    if self._modelID in modelIDs[range[0]:range[1]]:
      self._delay = delay
  else:
    self._delay = self._params['delay']
[ "def", "_computModelDelay", "(", "self", ")", ":", "# 'delay' and 'sleepModelRange' are mutually exclusive", "if", "self", ".", "_params", "[", "'delay'", "]", "is", "not", "None", "and", "self", ".", "_params", "[", "'sleepModelRange'", "]", "is", "not", "None", ":", "raise", "RuntimeError", "(", "\"Only one of 'delay' or \"", "\"'sleepModelRange' may be specified\"", ")", "# Get the sleepModel range", "if", "self", ".", "_sleepModelRange", "is", "not", "None", ":", "range", ",", "delay", "=", "self", ".", "_sleepModelRange", ".", "split", "(", "':'", ")", "delay", "=", "float", "(", "delay", ")", "range", "=", "map", "(", "int", ",", "range", ".", "split", "(", "','", ")", ")", "modelIDs", "=", "self", ".", "_jobsDAO", ".", "jobGetModelIDs", "(", "self", ".", "_jobID", ")", "modelIDs", ".", "sort", "(", ")", "range", "[", "1", "]", "=", "min", "(", "range", "[", "1", "]", ",", "len", "(", "modelIDs", ")", ")", "# If the model is in range, add the delay", "if", "self", ".", "_modelID", "in", "modelIDs", "[", "range", "[", "0", "]", ":", "range", "[", "1", "]", "]", ":", "self", ".", "_delay", "=", "delay", "else", ":", "self", ".", "_delay", "=", "self", ".", "_params", "[", "'delay'", "]" ]
Computes the amount of time (if any) to delay the run of this model.
This can be determined by two mutually exclusive parameters:
delay and sleepModelRange.

'delay' specifies the number of seconds a model should be delayed. If a list
is specified, the appropriate amount of delay is determined by using the
model's modelIndex property.

However, this doesn't work when testing orphaned models, because the
modelIndex will be the same for every recovery attempt. Therefore, every
recovery attempt will also be delayed and potentially orphaned.

'sleepModelRange' doesn't use the modelIndex property for a model, but rather
sees which order the model is in the database, and uses that to determine
whether or not a model should be delayed.
[ "Computes", "the", "amount", "of", "time", "(", "if", "any", ")", "to", "delay", "the", "run", "of", "this", "model", ".", "This", "can", "be", "determined", "by", "two", "mutually", "exclusive", "parameters", ":", "delay", "and", "sleepModelRange", "." ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/dummy_model_runner.py#L352-L390
valid
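Parsing the 'sleepModelRange' string format used above ('beg,end:delay'), with invented model IDs:

sleepModelRange = "0,3:2.5"            # hypothetical value
rangePart, delay = sleepModelRange.split(':')
delay = float(delay)
beg, end = [int(v) for v in rangePart.split(',')]
modelIDs = sorted([107, 101, 105, 103])
end = min(end, len(modelIDs))
print((modelIDs[beg:end], delay))      # ([101, 103, 105], 2.5)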
numenta/nupic
src/nupic/swarming/dummy_model_runner.py
OPFDummyModelRunner._getMetrics
def _getMetrics(self):
  """ Protected function that can be overridden by subclasses. Its main
  purpose is to allow the OPFDummyModelRunner to override this with
  deterministic values

  Returns: All the metrics being computed for this model
  """
  metric = None
  if self.metrics is not None:
    metric = self.metrics(self._currentRecordIndex+1)
  elif self.metricValue is not None:
    metric = self.metricValue
  else:
    raise RuntimeError('No metrics or metric value specified for dummy model')

  return {self._optimizeKeyPattern:metric}
python
def _getMetrics(self):
  """ Protected function that can be overridden by subclasses. Its main
  purpose is to allow the OPFDummyModelRunner to override this with
  deterministic values

  Returns: All the metrics being computed for this model
  """
  metric = None
  if self.metrics is not None:
    metric = self.metrics(self._currentRecordIndex+1)
  elif self.metricValue is not None:
    metric = self.metricValue
  else:
    raise RuntimeError('No metrics or metric value specified for dummy model')

  return {self._optimizeKeyPattern:metric}
[ "def", "_getMetrics", "(", "self", ")", ":", "metric", "=", "None", "if", "self", ".", "metrics", "is", "not", "None", ":", "metric", "=", "self", ".", "metrics", "(", "self", ".", "_currentRecordIndex", "+", "1", ")", "elif", "self", ".", "metricValue", "is", "not", "None", ":", "metric", "=", "self", ".", "metricValue", "else", ":", "raise", "RuntimeError", "(", "'No metrics or metric value specified for dummy model'", ")", "return", "{", "self", ".", "_optimizeKeyPattern", ":", "metric", "}" ]
Protected function that can be overridden by subclasses. Its main purpose
is to allow the OPFDummyModelRunner to override this with deterministic
values

Returns: All the metrics being computed for this model
[ "Protected", "function", "that", "can", "be", "overridden", "by", "subclasses", ".", "Its", "main", "purpose", "is", "to", "allow", "the", "the", "OPFDummyModelRunner", "to", "override", "this", "with", "deterministic", "values" ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/dummy_model_runner.py#L393-L408
valid
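The precedence above, reduced to its core: a metrics callable wins over a fixed metricValue, and the callable sees the 1-based record count. Names and values are invented:

metrics = lambda n: 1.0 / n        # hypothetical deterministic metric
metricValue = 0.5
currentRecordIndex = 9
if metrics is not None:
  metric = metrics(currentRecordIndex + 1)
else:
  metric = metricValue
print({"dummyOptimizeKey": metric})   # {'dummyOptimizeKey': 0.1}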
numenta/nupic
src/nupic/swarming/dummy_model_runner.py
OPFDummyModelRunner.run
def run(self):
  """ Runs the given OPF task against the given Model instance """

  self._logger.debug("Starting Dummy Model: modelID=%s;" % (self._modelID))

  # =========================================================================
  # Initialize periodic activities (e.g., for model result updates)
  # =========================================================================
  periodic = self._initPeriodicActivities()

  self._optimizedMetricLabel = self._optimizeKeyPattern
  self._reportMetricLabels = [self._optimizeKeyPattern]

  # =========================================================================
  # Create our top-level loop-control iterator
  # =========================================================================
  if self._iterations >= 0:
    iterTracker = iter(xrange(self._iterations))
  else:
    iterTracker = iter(itertools.count())

  # =========================================================================
  # This gets set in the unit tests. It tells the worker to sys exit
  # the first N models. This is how we generate orphaned models
  doSysExit = False
  if self._sysExitModelRange is not None:
    modelAndCounters = self._jobsDAO.modelsGetUpdateCounters(self._jobID)
    modelIDs = [x[0] for x in modelAndCounters]
    modelIDs.sort()
    (beg,end) = self._sysExitModelRange
    if self._modelID in modelIDs[int(beg):int(end)]:
      doSysExit = True

  if self._delayModelRange is not None:
    modelAndCounters = self._jobsDAO.modelsGetUpdateCounters(self._jobID)
    modelIDs = [x[0] for x in modelAndCounters]
    modelIDs.sort()
    (beg,end) = self._delayModelRange
    if self._modelID in modelIDs[int(beg):int(end)]:
      time.sleep(10)

    # DEBUG!!!! infinite wait if we have 50 models
    #if len(modelIDs) >= 50:
    #  jobCancel = self._jobsDAO.jobGetFields(self._jobID, ['cancel'])[0]
    #  while not jobCancel:
    #    time.sleep(1)
    #    jobCancel = self._jobsDAO.jobGetFields(self._jobID, ['cancel'])[0]

  if self._errModelRange is not None:
    modelAndCounters = self._jobsDAO.modelsGetUpdateCounters(self._jobID)
    modelIDs = [x[0] for x in modelAndCounters]
    modelIDs.sort()
    (beg,end) = self._errModelRange
    if self._modelID in modelIDs[int(beg):int(end)]:
      raise RuntimeError("Exiting with error due to errModelRange parameter")

  # =========================================================================
  # Delay, if necessary
  if self._delay is not None:
    time.sleep(self._delay)

  # =========================================================================
  # Run it!
  # =========================================================================
  self._currentRecordIndex = 0
  while True:

    # =======================================================================
    # Check if the model should be stopped
    # =======================================================================

    # If killed by a terminator, stop running
    if self._isKilled:
      break

    # If job stops or hypersearch ends, stop running
    if self._isCanceled:
      break

    # If model is mature, stop running ONLY IF we are not the best model
    # for the job. Otherwise, keep running so we can keep returning
    # predictions to the user
    if self._isMature:
      if not self._isBestModel:
        self._cmpReason = self._jobsDAO.CMPL_REASON_STOPPED
        break
      else:
        self._cmpReason = self._jobsDAO.CMPL_REASON_EOF

    # =======================================================================
    # Get the next record, and "write it"
    # =======================================================================
    try:
      self._currentRecordIndex = next(iterTracker)
    except StopIteration:
      break

    # "Write" a dummy output value. This is used to test that the batched
    # writing works properly
    self._writePrediction(ModelResult(None, None, None, None))

    periodic.tick()

    # =======================================================================
    # Compute wait times. See if model should exit
    # =======================================================================

    if self.__shouldSysExit(self._currentRecordIndex):
      sys.exit(1)

    # Simulate computation time
    if self._busyWaitTime is not None:
      time.sleep(self._busyWaitTime)
      self.__computeWaitTime()

    # Asked to abort after so many iterations?
    if doSysExit:
      sys.exit(1)

    # Asked to raise a jobFailException?
    if self._jobFailErr:
      raise utils.JobFailException("E10000",
                                   "dummyModel's jobFailErr was True.")

  # =========================================================================
  # Handle final operations
  # =========================================================================
  if self._doFinalize:
    if not self._makeCheckpoint:
      self._model = None

    # Delay finalization operation
    if self._finalDelay is not None:
      time.sleep(self._finalDelay)

    self._finalize()

  self._logger.info("Finished: modelID=%r "% (self._modelID))

  return (self._cmpReason, None)
python
def run(self):
  """ Runs the given OPF task against the given Model instance """

  self._logger.debug("Starting Dummy Model: modelID=%s;" % (self._modelID))

  # =========================================================================
  # Initialize periodic activities (e.g., for model result updates)
  # =========================================================================
  periodic = self._initPeriodicActivities()

  self._optimizedMetricLabel = self._optimizeKeyPattern
  self._reportMetricLabels = [self._optimizeKeyPattern]

  # =========================================================================
  # Create our top-level loop-control iterator
  # =========================================================================
  if self._iterations >= 0:
    iterTracker = iter(xrange(self._iterations))
  else:
    iterTracker = iter(itertools.count())

  # =========================================================================
  # This gets set in the unit tests. It tells the worker to sys exit
  # the first N models. This is how we generate orphaned models
  doSysExit = False
  if self._sysExitModelRange is not None:
    modelAndCounters = self._jobsDAO.modelsGetUpdateCounters(self._jobID)
    modelIDs = [x[0] for x in modelAndCounters]
    modelIDs.sort()
    (beg,end) = self._sysExitModelRange
    if self._modelID in modelIDs[int(beg):int(end)]:
      doSysExit = True

  if self._delayModelRange is not None:
    modelAndCounters = self._jobsDAO.modelsGetUpdateCounters(self._jobID)
    modelIDs = [x[0] for x in modelAndCounters]
    modelIDs.sort()
    (beg,end) = self._delayModelRange
    if self._modelID in modelIDs[int(beg):int(end)]:
      time.sleep(10)

    # DEBUG!!!! infinite wait if we have 50 models
    #if len(modelIDs) >= 50:
    #  jobCancel = self._jobsDAO.jobGetFields(self._jobID, ['cancel'])[0]
    #  while not jobCancel:
    #    time.sleep(1)
    #    jobCancel = self._jobsDAO.jobGetFields(self._jobID, ['cancel'])[0]

  if self._errModelRange is not None:
    modelAndCounters = self._jobsDAO.modelsGetUpdateCounters(self._jobID)
    modelIDs = [x[0] for x in modelAndCounters]
    modelIDs.sort()
    (beg,end) = self._errModelRange
    if self._modelID in modelIDs[int(beg):int(end)]:
      raise RuntimeError("Exiting with error due to errModelRange parameter")

  # =========================================================================
  # Delay, if necessary
  if self._delay is not None:
    time.sleep(self._delay)

  # =========================================================================
  # Run it!
  # =========================================================================
  self._currentRecordIndex = 0
  while True:

    # =======================================================================
    # Check if the model should be stopped
    # =======================================================================

    # If killed by a terminator, stop running
    if self._isKilled:
      break

    # If job stops or hypersearch ends, stop running
    if self._isCanceled:
      break

    # If model is mature, stop running ONLY IF we are not the best model
    # for the job. Otherwise, keep running so we can keep returning
    # predictions to the user
    if self._isMature:
      if not self._isBestModel:
        self._cmpReason = self._jobsDAO.CMPL_REASON_STOPPED
        break
      else:
        self._cmpReason = self._jobsDAO.CMPL_REASON_EOF

    # =======================================================================
    # Get the next record, and "write it"
    # =======================================================================
    try:
      self._currentRecordIndex = next(iterTracker)
    except StopIteration:
      break

    # "Write" a dummy output value. This is used to test that the batched
    # writing works properly
    self._writePrediction(ModelResult(None, None, None, None))

    periodic.tick()

    # =======================================================================
    # Compute wait times. See if model should exit
    # =======================================================================

    if self.__shouldSysExit(self._currentRecordIndex):
      sys.exit(1)

    # Simulate computation time
    if self._busyWaitTime is not None:
      time.sleep(self._busyWaitTime)
      self.__computeWaitTime()

    # Asked to abort after so many iterations?
    if doSysExit:
      sys.exit(1)

    # Asked to raise a jobFailException?
    if self._jobFailErr:
      raise utils.JobFailException("E10000",
                                   "dummyModel's jobFailErr was True.")

  # =========================================================================
  # Handle final operations
  # =========================================================================
  if self._doFinalize:
    if not self._makeCheckpoint:
      self._model = None

    # Delay finalization operation
    if self._finalDelay is not None:
      time.sleep(self._finalDelay)

    self._finalize()

  self._logger.info("Finished: modelID=%r "% (self._modelID))

  return (self._cmpReason, None)
[ "def", "run", "(", "self", ")", ":", "self", ".", "_logger", ".", "debug", "(", "\"Starting Dummy Model: modelID=%s;\"", "%", "(", "self", ".", "_modelID", ")", ")", "# =========================================================================", "# Initialize periodic activities (e.g., for model result updates)", "# =========================================================================", "periodic", "=", "self", ".", "_initPeriodicActivities", "(", ")", "self", ".", "_optimizedMetricLabel", "=", "self", ".", "_optimizeKeyPattern", "self", ".", "_reportMetricLabels", "=", "[", "self", ".", "_optimizeKeyPattern", "]", "# =========================================================================", "# Create our top-level loop-control iterator", "# =========================================================================", "if", "self", ".", "_iterations", ">=", "0", ":", "iterTracker", "=", "iter", "(", "xrange", "(", "self", ".", "_iterations", ")", ")", "else", ":", "iterTracker", "=", "iter", "(", "itertools", ".", "count", "(", ")", ")", "# =========================================================================", "# This gets set in the unit tests. It tells the worker to sys exit", "# the first N models. This is how we generate orphaned models", "doSysExit", "=", "False", "if", "self", ".", "_sysExitModelRange", "is", "not", "None", ":", "modelAndCounters", "=", "self", ".", "_jobsDAO", ".", "modelsGetUpdateCounters", "(", "self", ".", "_jobID", ")", "modelIDs", "=", "[", "x", "[", "0", "]", "for", "x", "in", "modelAndCounters", "]", "modelIDs", ".", "sort", "(", ")", "(", "beg", ",", "end", ")", "=", "self", ".", "_sysExitModelRange", "if", "self", ".", "_modelID", "in", "modelIDs", "[", "int", "(", "beg", ")", ":", "int", "(", "end", ")", "]", ":", "doSysExit", "=", "True", "if", "self", ".", "_delayModelRange", "is", "not", "None", ":", "modelAndCounters", "=", "self", ".", "_jobsDAO", ".", "modelsGetUpdateCounters", "(", "self", ".", "_jobID", ")", "modelIDs", "=", "[", "x", "[", "0", "]", "for", "x", "in", "modelAndCounters", "]", "modelIDs", ".", "sort", "(", ")", "(", "beg", ",", "end", ")", "=", "self", ".", "_delayModelRange", "if", "self", ".", "_modelID", "in", "modelIDs", "[", "int", "(", "beg", ")", ":", "int", "(", "end", ")", "]", ":", "time", ".", "sleep", "(", "10", ")", "# DEBUG!!!! 
infinite wait if we have 50 models", "#if len(modelIDs) >= 50:", "# jobCancel = self._jobsDAO.jobGetFields(self._jobID, ['cancel'])[0]", "# while not jobCancel:", "# time.sleep(1)", "# jobCancel = self._jobsDAO.jobGetFields(self._jobID, ['cancel'])[0]", "if", "self", ".", "_errModelRange", "is", "not", "None", ":", "modelAndCounters", "=", "self", ".", "_jobsDAO", ".", "modelsGetUpdateCounters", "(", "self", ".", "_jobID", ")", "modelIDs", "=", "[", "x", "[", "0", "]", "for", "x", "in", "modelAndCounters", "]", "modelIDs", ".", "sort", "(", ")", "(", "beg", ",", "end", ")", "=", "self", ".", "_errModelRange", "if", "self", ".", "_modelID", "in", "modelIDs", "[", "int", "(", "beg", ")", ":", "int", "(", "end", ")", "]", ":", "raise", "RuntimeError", "(", "\"Exiting with error due to errModelRange parameter\"", ")", "# =========================================================================", "# Delay, if necessary", "if", "self", ".", "_delay", "is", "not", "None", ":", "time", ".", "sleep", "(", "self", ".", "_delay", ")", "# =========================================================================", "# Run it!", "# =========================================================================", "self", ".", "_currentRecordIndex", "=", "0", "while", "True", ":", "# =========================================================================", "# Check if the model should be stopped", "# =========================================================================", "# If killed by a terminator, stop running", "if", "self", ".", "_isKilled", ":", "break", "# If job stops or hypersearch ends, stop running", "if", "self", ".", "_isCanceled", ":", "break", "# If model is mature, stop running ONLY IF we are not the best model", "# for the job. Otherwise, keep running so we can keep returning", "# predictions to the user", "if", "self", ".", "_isMature", ":", "if", "not", "self", ".", "_isBestModel", ":", "self", ".", "_cmpReason", "=", "self", ".", "_jobsDAO", ".", "CMPL_REASON_STOPPED", "break", "else", ":", "self", ".", "_cmpReason", "=", "self", ".", "_jobsDAO", ".", "CMPL_REASON_EOF", "# =========================================================================", "# Get the the next record, and \"write it\"", "# =========================================================================", "try", ":", "self", ".", "_currentRecordIndex", "=", "next", "(", "iterTracker", ")", "except", "StopIteration", ":", "break", "# \"Write\" a dummy output value. This is used to test that the batched", "# writing works properly", "self", ".", "_writePrediction", "(", "ModelResult", "(", "None", ",", "None", ",", "None", ",", "None", ")", ")", "periodic", ".", "tick", "(", ")", "# =========================================================================", "# Compute wait times. 
See if model should exit", "# =========================================================================", "if", "self", ".", "__shouldSysExit", "(", "self", ".", "_currentRecordIndex", ")", ":", "sys", ".", "exit", "(", "1", ")", "# Simulate computation time", "if", "self", ".", "_busyWaitTime", "is", "not", "None", ":", "time", ".", "sleep", "(", "self", ".", "_busyWaitTime", ")", "self", ".", "__computeWaitTime", "(", ")", "# Asked to abort after so many iterations?", "if", "doSysExit", ":", "sys", ".", "exit", "(", "1", ")", "# Asked to raise a jobFailException?", "if", "self", ".", "_jobFailErr", ":", "raise", "utils", ".", "JobFailException", "(", "\"E10000\"", ",", "\"dummyModel's jobFailErr was True.\"", ")", "# =========================================================================", "# Handle final operations", "# =========================================================================", "if", "self", ".", "_doFinalize", ":", "if", "not", "self", ".", "_makeCheckpoint", ":", "self", ".", "_model", "=", "None", "# Delay finalization operation", "if", "self", ".", "_finalDelay", "is", "not", "None", ":", "time", ".", "sleep", "(", "self", ".", "_finalDelay", ")", "self", ".", "_finalize", "(", ")", "self", ".", "_logger", ".", "info", "(", "\"Finished: modelID=%r \"", "%", "(", "self", ".", "_modelID", ")", ")", "return", "(", "self", ".", "_cmpReason", ",", "None", ")" ]
Runs the given OPF task against the given Model instance
[ "Runs", "the", "given", "OPF", "task", "against", "the", "given", "Model", "instance" ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/dummy_model_runner.py#L411-L551
valid
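The loop-control idiom at the top of run(), a bounded iterator when iterations >= 0 and an unbounded one otherwise, in standalone form (range stands in for the Python 2 xrange):

import itertools

iterations = 3   # try -1 for the unbounded case
if iterations >= 0:
  iterTracker = iter(range(iterations))
else:
  iterTracker = iter(itertools.count())
while True:
  try:
    i = next(iterTracker)
  except StopIteration:
    break
  print(i)   # 0, 1, 2 -- StopIteration is what ends the while-loop in run()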
numenta/nupic
src/nupic/swarming/dummy_model_runner.py
OPFDummyModelRunner._createPredictionLogger
def _createPredictionLogger(self):
  """
  Creates the model's PredictionLogger object, which is an interface to write
  model results to a permanent storage location
  """

  class DummyLogger:
    def writeRecord(self, record): pass
    def writeRecords(self, records, progressCB): pass
    def close(self): pass

  self._predictionLogger = DummyLogger()
python
def _createPredictionLogger(self):
  """
  Creates the model's PredictionLogger object, which is an interface to write
  model results to a permanent storage location
  """

  class DummyLogger:
    def writeRecord(self, record): pass
    def writeRecords(self, records, progressCB): pass
    def close(self): pass

  self._predictionLogger = DummyLogger()
[ "def", "_createPredictionLogger", "(", "self", ")", ":", "class", "DummyLogger", ":", "def", "writeRecord", "(", "self", ",", "record", ")", ":", "pass", "def", "writeRecords", "(", "self", ",", "records", ",", "progressCB", ")", ":", "pass", "def", "close", "(", "self", ")", ":", "pass", "self", ".", "_predictionLogger", "=", "DummyLogger", "(", ")" ]
Creates the model's PredictionLogger object, which is an interface to write model results to a permanent storage location
[ "Creates", "the", "model", "s", "PredictionLogger", "object", "which", "is", "an", "interface", "to", "write", "model", "results", "to", "a", "permanent", "storage", "location" ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/dummy_model_runner.py#L575-L586
valid
numenta/nupic
src/nupic/swarming/dummy_model_runner.py
OPFDummyModelRunner.__shouldSysExit
def __shouldSysExit(self, iteration):
  """
  Checks to see if the model should exit based on the exitAfter dummy
  parameter
  """

  if self._exitAfter is None \
        or iteration < self._exitAfter:
    return False

  results = self._jobsDAO.modelsGetFieldsForJob(self._jobID, ['params'])

  modelIDs = [e[0] for e in results]
  modelNums = [json.loads(e[1][0])['structuredParams']['__model_num']
               for e in results]

  sameModelNumbers = filter(lambda x: x[1] == self.modelIndex,
                            zip(modelIDs, modelNums))

  firstModelID = min(zip(*sameModelNumbers)[0])

  return firstModelID == self._modelID
python
def __shouldSysExit(self, iteration):
  """
  Checks to see if the model should exit based on the exitAfter dummy
  parameter
  """

  if self._exitAfter is None \
        or iteration < self._exitAfter:
    return False

  results = self._jobsDAO.modelsGetFieldsForJob(self._jobID, ['params'])

  modelIDs = [e[0] for e in results]
  modelNums = [json.loads(e[1][0])['structuredParams']['__model_num']
               for e in results]

  sameModelNumbers = filter(lambda x: x[1] == self.modelIndex,
                            zip(modelIDs, modelNums))

  firstModelID = min(zip(*sameModelNumbers)[0])

  return firstModelID == self._modelID
[ "def", "__shouldSysExit", "(", "self", ",", "iteration", ")", ":", "if", "self", ".", "_exitAfter", "is", "None", "or", "iteration", "<", "self", ".", "_exitAfter", ":", "return", "False", "results", "=", "self", ".", "_jobsDAO", ".", "modelsGetFieldsForJob", "(", "self", ".", "_jobID", ",", "[", "'params'", "]", ")", "modelIDs", "=", "[", "e", "[", "0", "]", "for", "e", "in", "results", "]", "modelNums", "=", "[", "json", ".", "loads", "(", "e", "[", "1", "]", "[", "0", "]", ")", "[", "'structuredParams'", "]", "[", "'__model_num'", "]", "for", "e", "in", "results", "]", "sameModelNumbers", "=", "filter", "(", "lambda", "x", ":", "x", "[", "1", "]", "==", "self", ".", "modelIndex", ",", "zip", "(", "modelIDs", ",", "modelNums", ")", ")", "firstModelID", "=", "min", "(", "zip", "(", "*", "sameModelNumbers", ")", "[", "0", "]", ")", "return", "firstModelID", "==", "self", ".", "_modelID" ]
Checks to see if the model should exit based on the exitAfter dummy parameter
[ "Checks", "to", "see", "if", "the", "model", "should", "exit", "based", "on", "the", "exitAfter", "dummy", "parameter" ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/dummy_model_runner.py#L589-L609
valid
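The tie-breaking rule above, with invented rows: among all models sharing this model number, only the one with the smallest model ID reports that it should exit:

modelIDs = [11, 12, 13]
modelNums = [0, 1, 0]        # __model_num pulled from each model's params
modelIndex = 0
sameModelNumbers = [p for p in zip(modelIDs, modelNums) if p[1] == modelIndex]
firstModelID = min(mid for mid, _ in sameModelNumbers)
print(firstModelID == 11)    # True: only model 11 sys-exits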
numenta/nupic
src/nupic/data/generators/data_generator.py
DataGenerator.getDescription
def getDescription(self):
  """Returns a description of the dataset"""

  description = {'name':self.name, 'fields':[f.name for f in self.fields], \
    'numRecords by field':[f.numRecords for f in self.fields]}

  return description
python
def getDescription(self):
  """Returns a description of the dataset"""

  description = {'name':self.name, 'fields':[f.name for f in self.fields], \
    'numRecords by field':[f.numRecords for f in self.fields]}

  return description
[ "def", "getDescription", "(", "self", ")", ":", "description", "=", "{", "'name'", ":", "self", ".", "name", ",", "'fields'", ":", "[", "f", ".", "name", "for", "f", "in", "self", ".", "fields", "]", ",", "'numRecords by field'", ":", "[", "f", ".", "numRecords", "for", "f", "in", "self", ".", "fields", "]", "}", "return", "description" ]
Returns a description of the dataset
[ "Returns", "a", "description", "of", "the", "dataset" ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/generators/data_generator.py#L50-L56
valid
numenta/nupic
src/nupic/data/generators/data_generator.py
DataGenerator.setSeed
def setSeed(self, seed):
  """Set the random seed and the numpy seed
  Parameters:
  --------------------------------------------------------------------
  seed:             random seed
  """

  rand.seed(seed)
  np.random.seed(seed)
python
def setSeed(self, seed):
  """Set the random seed and the numpy seed
  Parameters:
  --------------------------------------------------------------------
  seed:             random seed
  """

  rand.seed(seed)
  np.random.seed(seed)
[ "def", "setSeed", "(", "self", ",", "seed", ")", ":", "rand", ".", "seed", "(", "seed", ")", "np", ".", "random", ".", "seed", "(", "seed", ")" ]
Set the random seed and the numpy seed
Parameters:
--------------------------------------------------------------------
seed:             random seed
[ "Set", "the", "random", "seed", "and", "the", "numpy", "seed", "Parameters", ":", "--------------------------------------------------------------------", "seed", ":", "random", "seed" ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/generators/data_generator.py#L59-L67
valid
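Why setSeed() seeds both generators: the data generators draw from both the stdlib and numpy RNGs, so reproducibility needs both seeded. A small check with an invented seed:

import random as rand
import numpy as np

rand.seed(7)
np.random.seed(7)
a = (rand.random(), np.random.rand())
rand.seed(7)
np.random.seed(7)
b = (rand.random(), np.random.rand())
print(a == b)   # True: both generators replay identically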
numenta/nupic
src/nupic/data/generators/data_generator.py
DataGenerator.addField
def addField(self, name, fieldParams, encoderParams):
  """Add a single field to the dataset.
  Parameters:
  -------------------------------------------------------------------
  name:             The user-specified name of the field
  fieldParams:      A dictionary of parameters to be used for dataClass
                    initialization. Must contain the key 'type' that
                    specifies a distribution for the values in this field
  encoderParams:    Parameters for the field encoder
  """

  assert fieldParams is not None and 'type' in fieldParams

  dataClassName = fieldParams.pop('type')
  try:
    dataClass = eval(dataClassName)(fieldParams)
  except TypeError, e:
    print ("#### Error in constructing %s class object. Possibly missing "
           "some required constructor parameters. Parameters "
           "that were provided are: %s" % (dataClassName, fieldParams))
    raise

  encoderParams['dataClass'] = dataClass
  encoderParams['dataClassName'] = dataClassName

  fieldIndex = self.defineField(name, encoderParams)
python
def addField(self, name, fieldParams, encoderParams):
  """Add a single field to the dataset.
  Parameters:
  -------------------------------------------------------------------
  name:             The user-specified name of the field
  fieldParams:      A dictionary of parameters to be used for dataClass
                    initialization. Must contain the key 'type' that
                    specifies a distribution for the values in this field
  encoderParams:    Parameters for the field encoder
  """

  assert fieldParams is not None and 'type' in fieldParams

  dataClassName = fieldParams.pop('type')
  try:
    dataClass = eval(dataClassName)(fieldParams)
  except TypeError, e:
    print ("#### Error in constructing %s class object. Possibly missing "
           "some required constructor parameters. Parameters "
           "that were provided are: %s" % (dataClassName, fieldParams))
    raise

  encoderParams['dataClass'] = dataClass
  encoderParams['dataClassName'] = dataClassName

  fieldIndex = self.defineField(name, encoderParams)
[ "def", "addField", "(", "self", ",", "name", ",", "fieldParams", ",", "encoderParams", ")", ":", "assert", "fieldParams", "is", "not", "None", "and", "'type'", "in", "fieldParams", "dataClassName", "=", "fieldParams", ".", "pop", "(", "'type'", ")", "try", ":", "dataClass", "=", "eval", "(", "dataClassName", ")", "(", "fieldParams", ")", "except", "TypeError", ",", "e", ":", "print", "(", "\"#### Error in constructing %s class object. Possibly missing \"", "\"some required constructor parameters. Parameters \"", "\"that were provided are: %s\"", "%", "(", "dataClass", ",", "fieldParams", ")", ")", "raise", "encoderParams", "[", "'dataClass'", "]", "=", "dataClass", "encoderParams", "[", "'dataClassName'", "]", "=", "dataClassName", "fieldIndex", "=", "self", ".", "defineField", "(", "name", ",", "encoderParams", ")" ]
Add a single field to the dataset.
Parameters:
-------------------------------------------------------------------
name:             The user-specified name of the field
fieldParams:      A dictionary of parameters to be used for dataClass
                  initialization. Must contain the key 'type' that specifies
                  a distribution for the values in this field
encoderParams:    Parameters for the field encoder
[ "Add", "a", "single", "field", "to", "the", "dataset", ".", "Parameters", ":", "-------------------------------------------------------------------", "name", ":", "The", "user", "-", "specified", "name", "of", "the", "field", "fieldSpec", ":", "A", "list", "of", "one", "or", "more", "dictionaries", "specifying", "parameters", "to", "be", "used", "for", "dataClass", "initialization", ".", "Each", "dict", "must", "contain", "the", "key", "type", "that", "specifies", "a", "distribution", "for", "the", "values", "in", "this", "field", "encoderParams", ":", "Parameters", "for", "the", "field", "encoder" ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/generators/data_generator.py#L70-L97
valid
numenta/nupic
src/nupic/data/generators/data_generator.py
DataGenerator.addMultipleFields
def addMultipleFields(self, fieldsInfo):
  """Add multiple fields to the dataset.
  Parameters:
  -------------------------------------------------------------------
  fieldsInfo:       A list of dictionaries, containing a field name, specs
                    for the data classes and encoder params for the
                    corresponding field.
  """
  assert all(x in field for x in ['name', 'fieldSpec', 'encoderParams'] for field \
             in fieldsInfo)

  for spec in fieldsInfo:
    self.addField(spec.pop('name'), spec.pop('fieldSpec'),
                  spec.pop('encoderParams'))
python
def addMultipleFields(self, fieldsInfo):
  """Add multiple fields to the dataset.
  Parameters:
  -------------------------------------------------------------------
  fieldsInfo:       A list of dictionaries, containing a field name, specs
                    for the data classes and encoder params for the
                    corresponding field.
  """
  assert all(x in field for x in ['name', 'fieldSpec', 'encoderParams'] for field \
             in fieldsInfo)

  for spec in fieldsInfo:
    self.addField(spec.pop('name'), spec.pop('fieldSpec'),
                  spec.pop('encoderParams'))
[ "def", "addMultipleFields", "(", "self", ",", "fieldsInfo", ")", ":", "assert", "all", "(", "x", "in", "field", "for", "x", "in", "[", "'name'", ",", "'fieldSpec'", ",", "'encoderParams'", "]", "for", "field", "in", "fieldsInfo", ")", "for", "spec", "in", "fieldsInfo", ":", "self", ".", "addField", "(", "spec", ".", "pop", "(", "'name'", ")", ",", "spec", ".", "pop", "(", "'fieldSpec'", ")", ",", "spec", ".", "pop", "(", "'encoderParams'", ")", ")" ]
Add multiple fields to the dataset.
Parameters:
-------------------------------------------------------------------
fieldsInfo:       A list of dictionaries, containing a field name, specs for
                  the data classes and encoder params for the corresponding
                  field.
[ "Add", "multiple", "fields", "to", "the", "dataset", ".", "Parameters", ":", "-------------------------------------------------------------------", "fieldsInfo", ":", "A", "list", "of", "dictionaries", "containing", "a", "field", "name", "specs", "for", "the", "data", "classes", "and", "encoder", "params", "for", "the", "corresponding", "field", "." ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/generators/data_generator.py#L100-L112
valid
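A minimal usage sketch for addMultipleFields, assuming a DataGenerator instance named dg; the 'type' values and distribution parameters are hypothetical placeholders for whatever dataClass names this module actually defines, and the encoder keys follow the ones consumed by _field (encoderType, w, n, minval, maxval):

# Hypothetical field specs; 'GaussianDistribution' and 'CategoryDistribution'
# stand in for real dataClass names defined in data_generator.py.
fieldsInfo = [
    {'name': 'consumption',
     'fieldSpec': {'type': 'GaussianDistribution', 'mean': 50, 'std': 10},
     'encoderParams': {'encoderType': 'adaptiveScalar', 'w': 21, 'n': 100,
                       'minval': 0, 'maxval': 100}},
    {'name': 'dayType',
     'fieldSpec': {'type': 'CategoryDistribution',
                   'values': ['weekday', 'weekend']},
     'encoderParams': {'encoderType': 'category', 'w': 21, 'n': 100}},
]
dg.addMultipleFields(fieldsInfo)   # each spec dict is consumed via pop()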
numenta/nupic
src/nupic/data/generators/data_generator.py
DataGenerator.defineField
def defineField(self, name, encoderParams=None):
    """Initialize field using relevant encoder parameters.
    Parameters:
    -------------------------------------------------------------------
    name:             Field name
    encoderParams:    Parameters for the encoder.

    Returns the index of the field
    """
    self.fields.append(_field(name, encoderParams))

    return len(self.fields) - 1
python
def defineField(self, name, encoderParams=None):
    """Initialize field using relevant encoder parameters.
    Parameters:
    -------------------------------------------------------------------
    name:             Field name
    encoderParams:    Parameters for the encoder.

    Returns the index of the field
    """
    self.fields.append(_field(name, encoderParams))

    return len(self.fields) - 1
[ "def", "defineField", "(", "self", ",", "name", ",", "encoderParams", "=", "None", ")", ":", "self", ".", "fields", ".", "append", "(", "_field", "(", "name", ",", "encoderParams", ")", ")", "return", "len", "(", "self", ".", "fields", ")", "-", "1" ]
Initialize field using relevant encoder parameters.
Parameters:
-------------------------------------------------------------------
name:             Field name
encoderParams:    Parameters for the encoder.

Returns the index of the field
[ "Initialize", "field", "using", "relevant", "encoder", "parameters", ".", "Parameters", ":", "-------------------------------------------------------------------", "name", ":", "Field", "name", "encoderParams", ":", "Parameters", "for", "the", "encoder", "." ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/generators/data_generator.py#L115-L126
valid
numenta/nupic
src/nupic/data/generators/data_generator.py
DataGenerator.setFlag
def setFlag(self, index, flag):
    """Set flag for field at index. Flags are special characters such as 'S'
    for sequence or 'T' for timestamp.
    Parameters:
    --------------------------------------------------------------------
    index:            index of field whose flag is being set
    flag:             special character
    """
    assert len(self.fields) > index
    self.fields[index].flag = flag
python
def setFlag(self, index, flag):
    """Set flag for field at index. Flags are special characters such as 'S'
    for sequence or 'T' for timestamp.
    Parameters:
    --------------------------------------------------------------------
    index:            index of field whose flag is being set
    flag:             special character
    """
    assert len(self.fields) > index
    self.fields[index].flag = flag
[ "def", "setFlag", "(", "self", ",", "index", ",", "flag", ")", ":", "assert", "len", "(", "self", ".", "fields", ")", ">", "index", "self", ".", "fields", "[", "index", "]", ".", "flag", "=", "flag" ]
Set flag for field at index. Flags are special characters such as 'S' for
sequence or 'T' for timestamp.
Parameters:
--------------------------------------------------------------------
index:            index of field whose flag is being set
flag:             special character
[ "Set", "flag", "for", "field", "at", "index", ".", "Flags", "are", "special", "characters", "such", "as", "S", "for", "sequence", "or", "T", "for", "timestamp", ".", "Parameters", ":", "--------------------------------------------------------------------", "index", ":", "index", "of", "field", "whose", "flag", "is", "being", "set", "flag", ":", "special", "character" ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/generators/data_generator.py#L129-L138
valid
numenta/nupic
src/nupic/data/generators/data_generator.py
DataGenerator.generateRecord
def generateRecord(self, record):
    """Generate a record. Each value is stored in its respective field.
    Parameters:
    --------------------------------------------------------------------
    record:           A 1-D array containing as many values as the number of
                      fields, or None to draw the next value of every field
                      from its dataClass.

    Assertion:
    --------------------------------------------------------------------
    len(record)==len(self.fields): A value for each field must be specified.
    Missing values of any type should be replaced by
    SENTINEL_VALUE_FOR_MISSING_DATA.

    This method supports external classes but not combination of classes.
    """
    if record is not None:
      # The length check must live inside this branch; len(None) would raise
      # a TypeError before the None case could ever be reached.
      assert len(record) == len(self.fields)
      for x in range(len(self.fields)):
        self.fields[x].addValue(record[x])
    else:
      for field in self.fields:
        field.addValue(field.dataClass.getNext())
python
def generateRecord(self, record):
    """Generate a record. Each value is stored in its respective field.
    Parameters:
    --------------------------------------------------------------------
    record:           A 1-D array containing as many values as the number of
                      fields, or None to draw the next value of every field
                      from its dataClass.

    Assertion:
    --------------------------------------------------------------------
    len(record)==len(self.fields): A value for each field must be specified.
    Missing values of any type should be replaced by
    SENTINEL_VALUE_FOR_MISSING_DATA.

    This method supports external classes but not combination of classes.
    """
    if record is not None:
      # The length check must live inside this branch; len(None) would raise
      # a TypeError before the None case could ever be reached.
      assert len(record) == len(self.fields)
      for x in range(len(self.fields)):
        self.fields[x].addValue(record[x])
    else:
      for field in self.fields:
        field.addValue(field.dataClass.getNext())
[ "def", "generateRecord", "(", "self", ",", "record", ")", ":", "assert", "(", "len", "(", "record", ")", "==", "len", "(", "self", ".", "fields", ")", ")", "if", "record", "is", "not", "None", ":", "for", "x", "in", "range", "(", "len", "(", "self", ".", "fields", ")", ")", ":", "self", ".", "fields", "[", "x", "]", ".", "addValue", "(", "record", "[", "x", "]", ")", "else", ":", "for", "field", "in", "self", ".", "fields", ":", "field", ".", "addValue", "(", "field", ".", "dataClass", ".", "getNext", "(", ")", ")" ]
Generate a record. Each value is stored in its respective field.
Parameters:
--------------------------------------------------------------------
record:           A 1-D array containing as many values as the number of
                  fields, or None to draw the next value of every field from
                  its dataClass.

Assertion:
--------------------------------------------------------------------
len(record)==len(self.fields): A value for each field must be specified.
Missing values of any type should be replaced by
SENTINEL_VALUE_FOR_MISSING_DATA.

This method supports external classes but not combination of classes.
[ "Generate", "a", "record", ".", "Each", "value", "is", "stored", "in", "its", "respective", "field", ".", "Parameters", ":", "--------------------------------------------------------------------", "record", ":", "A", "1", "-", "D", "array", "containing", "as", "many", "values", "as", "the", "number", "of", "fields", "fields", ":", "An", "object", "of", "the", "class", "field", "that", "specifies", "the", "characteristics", "of", "each", "value", "in", "the", "record", "Assertion", ":", "--------------------------------------------------------------------", "len", "(", "record", ")", "==", "len", "(", "fields", ")", ":", "A", "value", "for", "each", "field", "must", "be", "specified", ".", "Replace", "missing", "values", "of", "any", "type", "by", "SENTINEL_VALUE_FOR_MISSING_DATA" ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/generators/data_generator.py#L141-L163
valid
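A short sketch of generateRecord, assuming the hypothetical dg above has three fields defined in this order (scalar, category, datetime); the values are made up:

import datetime

dg.generateRecord([12.5, 'weekend', datetime.datetime(2014, 1, 1)])
dg.generateRecord(None)   # draws one value per field from its dataClass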
numenta/nupic
src/nupic/data/generators/data_generator.py
DataGenerator.generateRecords
def generateRecords(self, records):
    """Generate multiple records. Refer to definition for generateRecord"""
    if self.verbosity > 0:
      print 'Generating', len(records), 'records...'
    for record in records:
      self.generateRecord(record)
python
def generateRecords(self, records):
    """Generate multiple records. Refer to definition for generateRecord"""
    if self.verbosity > 0:
      print 'Generating', len(records), 'records...'
    for record in records:
      self.generateRecord(record)
[ "def", "generateRecords", "(", "self", ",", "records", ")", ":", "if", "self", ".", "verbosity", ">", "0", ":", "print", "'Generating'", ",", "len", "(", "records", ")", ",", "'records...'", "for", "record", "in", "records", ":", "self", ".", "generateRecord", "(", "record", ")" ]
Generate multiple records. Refer to definition for generateRecord
[ "Generate", "multiple", "records", ".", "Refer", "to", "definition", "for", "generateRecord" ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/generators/data_generator.py#L166-L171
valid
numenta/nupic
src/nupic/data/generators/data_generator.py
DataGenerator.getRecord
def getRecord(self, n=None):
    """Returns the nth record"""
    if n is None:
      assert len(self.fields) > 0
      n = self.fields[0].numRecords - 1

    assert all(field.numRecords > n for field in self.fields)

    record = [field.values[n] for field in self.fields]

    return record
python
def getRecord(self, n=None):
    """Returns the nth record"""
    if n is None:
      assert len(self.fields) > 0
      n = self.fields[0].numRecords - 1

    assert all(field.numRecords > n for field in self.fields)

    record = [field.values[n] for field in self.fields]

    return record
[ "def", "getRecord", "(", "self", ",", "n", "=", "None", ")", ":", "if", "n", "is", "None", ":", "assert", "len", "(", "self", ".", "fields", ")", ">", "0", "n", "=", "self", ".", "fields", "[", "0", "]", ".", "numRecords", "-", "1", "assert", "(", "all", "(", "field", ".", "numRecords", ">", "n", "for", "field", "in", "self", ".", "fields", ")", ")", "record", "=", "[", "field", ".", "values", "[", "n", "]", "for", "field", "in", "self", ".", "fields", "]", "return", "record" ]
Returns the nth record
[ "Returns", "the", "nth", "record" ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/generators/data_generator.py#L174-L185
valid
numenta/nupic
src/nupic/data/generators/data_generator.py
DataGenerator.getAllRecords
def getAllRecords(self):
    """Returns all the records"""
    values = []
    numRecords = self.fields[0].numRecords
    assert all(field.numRecords == numRecords for field in self.fields)
    for x in range(numRecords):
      values.append(self.getRecord(x))

    return values
python
def getAllRecords(self):
    """Returns all the records"""
    values = []
    numRecords = self.fields[0].numRecords
    assert all(field.numRecords == numRecords for field in self.fields)
    for x in range(numRecords):
      values.append(self.getRecord(x))

    return values
[ "def", "getAllRecords", "(", "self", ")", ":", "values", "=", "[", "]", "numRecords", "=", "self", ".", "fields", "[", "0", "]", ".", "numRecords", "assert", "(", "all", "(", "field", ".", "numRecords", "==", "numRecords", "for", "field", "in", "self", ".", "fields", ")", ")", "for", "x", "in", "range", "(", "numRecords", ")", ":", "values", ".", "append", "(", "self", ".", "getRecord", "(", "x", ")", ")", "return", "values" ]
Returns all the records
[ "Returns", "all", "the", "records" ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/generators/data_generator.py#L188-L197
valid
numenta/nupic
src/nupic/data/generators/data_generator.py
DataGenerator.encodeRecord
def encodeRecord(self, record, toBeAdded=True):
    """Encode a record as a sparse distributed representation
    Parameters:
    --------------------------------------------------------------------
    record:           Record to be encoded
    toBeAdded:        Whether the encodings corresponding to the record are
                      added to the corresponding fields
    """
    encoding = [self.fields[i].encodeValue(record[i], toBeAdded)
                for i in xrange(len(self.fields))]

    return encoding
python
def encodeRecord(self, record, toBeAdded=True):
    """Encode a record as a sparse distributed representation
    Parameters:
    --------------------------------------------------------------------
    record:           Record to be encoded
    toBeAdded:        Whether the encodings corresponding to the record are
                      added to the corresponding fields
    """
    encoding = [self.fields[i].encodeValue(record[i], toBeAdded)
                for i in xrange(len(self.fields))]

    return encoding
[ "def", "encodeRecord", "(", "self", ",", "record", ",", "toBeAdded", "=", "True", ")", ":", "encoding", "=", "[", "self", ".", "fields", "[", "i", "]", ".", "encodeValue", "(", "record", "[", "i", "]", ",", "toBeAdded", ")", "for", "i", "in", "xrange", "(", "len", "(", "self", ".", "fields", ")", ")", "]", "return", "encoding" ]
Encode a record as a sparse distributed representation
Parameters:
--------------------------------------------------------------------
record:           Record to be encoded
toBeAdded:        Whether the encodings corresponding to the record are added
                  to the corresponding fields
[ "Encode", "a", "record", "as", "a", "sparse", "distributed", "representation", "Parameters", ":", "--------------------------------------------------------------------", "record", ":", "Record", "to", "be", "encoded", "toBeAdded", ":", "Whether", "the", "encodings", "corresponding", "to", "the", "record", "are", "added", "to", "the", "corresponding", "fields" ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/generators/data_generator.py#L200-L211
valid
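A sketch of encodeRecord, reusing the hypothetical dg from earlier; toBeAdded=False encodes without appending the SDRs to the fields:

record = dg.getRecord()                         # most recently stored record
sdrs = dg.encodeRecord(record, toBeAdded=False)
# sdrs is a list with one numpy SDR per field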
numenta/nupic
src/nupic/data/generators/data_generator.py
DataGenerator.encodeAllRecords
def encodeAllRecords(self, records=None, toBeAdded=True):
    """Encodes a list of records.
    Parameters:
    --------------------------------------------------------------------
    records:          One or more records. (i,j)th element of this 2D array
                      specifies the value at field j of record i.
                      If unspecified, records previously generated and stored
                      are used.
    toBeAdded:        Whether the encodings corresponding to the records are
                      added to the corresponding fields
    """
    if records is None:
      records = self.getAllRecords()
    if self.verbosity > 0:
      print 'Encoding', len(records), 'records.'
    encodings = [self.encodeRecord(record, toBeAdded) for record in records]

    return encodings
python
def encodeAllRecords(self, records=None, toBeAdded=True):
    """Encodes a list of records.
    Parameters:
    --------------------------------------------------------------------
    records:          One or more records. (i,j)th element of this 2D array
                      specifies the value at field j of record i.
                      If unspecified, records previously generated and stored
                      are used.
    toBeAdded:        Whether the encodings corresponding to the records are
                      added to the corresponding fields
    """
    if records is None:
      records = self.getAllRecords()
    if self.verbosity > 0:
      print 'Encoding', len(records), 'records.'
    encodings = [self.encodeRecord(record, toBeAdded) for record in records]

    return encodings
[ "def", "encodeAllRecords", "(", "self", ",", "records", "=", "None", ",", "toBeAdded", "=", "True", ")", ":", "if", "records", "is", "None", ":", "records", "=", "self", ".", "getAllRecords", "(", ")", "if", "self", ".", "verbosity", ">", "0", ":", "print", "'Encoding'", ",", "len", "(", "records", ")", ",", "'records.'", "encodings", "=", "[", "self", ".", "encodeRecord", "(", "record", ",", "toBeAdded", ")", "for", "record", "in", "records", "]", "return", "encodings" ]
Encodes a list of records.
Parameters:
--------------------------------------------------------------------
records:          One or more records. (i,j)th element of this 2D array
                  specifies the value at field j of record i.
                  If unspecified, records previously generated and stored are
                  used.
toBeAdded:        Whether the encodings corresponding to the records are added
                  to the corresponding fields
[ "Encodes", "a", "list", "of", "records", ".", "Parameters", ":", "--------------------------------------------------------------------", "records", ":", "One", "or", "more", "records", ".", "(", "i", "j", ")", "th", "element", "of", "this", "2D", "array", "specifies", "the", "value", "at", "field", "j", "of", "record", "i", ".", "If", "unspecified", "records", "previously", "generated", "and", "stored", "are", "used", ".", "toBeAdded", ":", "Whether", "the", "encodings", "corresponding", "to", "the", "record", "are", "added", "to", "the", "corresponding", "fields" ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/generators/data_generator.py#L214-L230
valid
numenta/nupic
src/nupic/data/generators/data_generator.py
DataGenerator.addValueToField
def addValueToField(self, i, value=None):
    """Add 'value' to the field i.
    Parameters:
    --------------------------------------------------------------------
    value:            value to be added. If None, the next value is drawn
                      from the field's dataClass.
    i:                value is added to field i

    Returns the value that was stored.
    """
    assert len(self.fields) > i

    if value is None:
      value = self.fields[i].dataClass.getNext()
    self.fields[i].addValue(value)

    return value
python
def addValueToField(self, i, value=None):
    """Add 'value' to the field i.
    Parameters:
    --------------------------------------------------------------------
    value:            value to be added. If None, the next value is drawn
                      from the field's dataClass.
    i:                value is added to field i

    Returns the value that was stored.
    """
    assert len(self.fields) > i

    if value is None:
      value = self.fields[i].dataClass.getNext()
    self.fields[i].addValue(value)

    return value
[ "def", "addValueToField", "(", "self", ",", "i", ",", "value", "=", "None", ")", ":", "assert", "(", "len", "(", "self", ".", "fields", ")", ">", "i", ")", "if", "value", "is", "None", ":", "value", "=", "self", ".", "fields", "[", "i", "]", ".", "dataClass", ".", "getNext", "(", ")", "self", ".", "fields", "[", "i", "]", ".", "addValue", "(", "value", ")", "return", "value", "else", ":", "self", ".", "fields", "[", "i", "]", ".", "addValue", "(", "value", ")" ]
Add 'value' to the field i.
Parameters:
--------------------------------------------------------------------
value:            value to be added. If None, the next value is drawn from
                  the field's dataClass.
i:                value is added to field i

Returns the value that was stored.
[ "Add", "value", "to", "the", "field", "i", ".", "Parameters", ":", "--------------------------------------------------------------------", "value", ":", "value", "to", "be", "added", "i", ":", "value", "is", "added", "to", "field", "i" ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/generators/data_generator.py#L233-L247
valid
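A sketch of addValueToField against the hypothetical dg; with the value argument omitted, the field's dataClass supplies it:

drawn = dg.addValueToField(0)              # next value from field 0's dataClass
stored = dg.addValueToField(1, 'weekend')  # explicit (hypothetical) value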
numenta/nupic
src/nupic/data/generators/data_generator.py
DataGenerator.addValuesToField
def addValuesToField(self, i, numValues):
    """Add values to the field i."""
    assert len(self.fields) > i
    values = [self.addValueToField(i) for n in range(numValues)]

    return values
python
def addValuesToField(self, i, numValues):
    """Add values to the field i."""
    assert len(self.fields) > i
    values = [self.addValueToField(i) for n in range(numValues)]

    return values
[ "def", "addValuesToField", "(", "self", ",", "i", ",", "numValues", ")", ":", "assert", "(", "len", "(", "self", ".", "fields", ")", ">", "i", ")", "values", "=", "[", "self", ".", "addValueToField", "(", "i", ")", "for", "n", "in", "range", "(", "numValues", ")", "]", "return", "values" ]
Add values to the field i.
[ "Add", "values", "to", "the", "field", "i", "." ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/generators/data_generator.py#L250-L255
valid
numenta/nupic
src/nupic/data/generators/data_generator.py
DataGenerator.getSDRforValue
def getSDRforValue(self, i, j):
    """Returns the sdr for jth value at column i"""
    assert len(self.fields) > i
    assert self.fields[i].numRecords > j
    encoding = self.fields[i].encodings[j]

    return encoding
python
def getSDRforValue(self, i, j):
    """Returns the sdr for jth value at column i"""
    assert len(self.fields) > i
    assert self.fields[i].numRecords > j
    encoding = self.fields[i].encodings[j]

    return encoding
[ "def", "getSDRforValue", "(", "self", ",", "i", ",", "j", ")", ":", "assert", "len", "(", "self", ".", "fields", ")", ">", "i", "assert", "self", ".", "fields", "[", "i", "]", ".", "numRecords", ">", "j", "encoding", "=", "self", ".", "fields", "[", "i", "]", ".", "encodings", "[", "j", "]", "return", "encoding" ]
Returns the sdr for jth value at column i
[ "Returns", "the", "sdr", "for", "jth", "value", "at", "column", "i" ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/generators/data_generator.py#L258-L264
valid
numenta/nupic
src/nupic/data/generators/data_generator.py
DataGenerator.getZeroedOutEncoding
def getZeroedOutEncoding(self, n):
    """Returns the nth encoding with the predictedField zeroed out"""
    assert all(field.numRecords > n for field in self.fields)

    encoding = np.concatenate(
        [field.encoder.encode(SENTINEL_VALUE_FOR_MISSING_DATA)
         if field.isPredictedField else field.encodings[n]
         for field in self.fields])

    return encoding
python
def getZeroedOutEncoding(self, n):
    """Returns the nth encoding with the predictedField zeroed out"""
    assert all(field.numRecords > n for field in self.fields)

    encoding = np.concatenate(
        [field.encoder.encode(SENTINEL_VALUE_FOR_MISSING_DATA)
         if field.isPredictedField else field.encodings[n]
         for field in self.fields])

    return encoding
[ "def", "getZeroedOutEncoding", "(", "self", ",", "n", ")", ":", "assert", "all", "(", "field", ".", "numRecords", ">", "n", "for", "field", "in", "self", ".", "fields", ")", "encoding", "=", "np", ".", "concatenate", "(", "[", "field", ".", "encoder", ".", "encode", "(", "SENTINEL_VALUE_FOR_MISSING_DATA", ")", "if", "field", ".", "isPredictedField", "else", "field", ".", "encodings", "[", "n", "]", "for", "field", "in", "self", ".", "fields", "]", ")", "return", "encoding" ]
Returns the nth encoding with the predictedField zeroed out
[ "Returns", "the", "nth", "encoding", "with", "the", "predictedField", "zeroed", "out" ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/generators/data_generator.py#L267-L275
valid
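A sketch of getZeroedOutEncoding; it assumes exactly one field was marked as the predicted field when the hypothetical dg was configured:

masked = dg.getZeroedOutEncoding(0)
# concatenated SDR of record 0, with the predicted field's bits replaced
# by the encoding of SENTINEL_VALUE_FOR_MISSING_DATA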
numenta/nupic
src/nupic/data/generators/data_generator.py
DataGenerator.getTotaln
def getTotaln(self):
    """Returns the cumulative n for all the fields in the dataset"""
    n = sum([field.n for field in self.fields])

    return n
python
def getTotaln(self):
    """Returns the cumulative n for all the fields in the dataset"""
    n = sum([field.n for field in self.fields])

    return n
[ "def", "getTotaln", "(", "self", ")", ":", "n", "=", "sum", "(", "[", "field", ".", "n", "for", "field", "in", "self", ".", "fields", "]", ")", "return", "n" ]
Returns the cumulative n for all the fields in the dataset
[ "Returns", "the", "cumulative", "n", "for", "all", "the", "fields", "in", "the", "dataset" ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/generators/data_generator.py#L278-L282
valid
numenta/nupic
src/nupic/data/generators/data_generator.py
DataGenerator.getTotalw
def getTotalw(self):
    """Returns the cumulative w for all the fields in the dataset"""
    w = sum([field.w for field in self.fields])

    return w
python
def getTotalw(self):
    """Returns the cumulative w for all the fields in the dataset"""
    w = sum([field.w for field in self.fields])

    return w
[ "def", "getTotalw", "(", "self", ")", ":", "w", "=", "sum", "(", "[", "field", ".", "w", "for", "field", "in", "self", ".", "fields", "]", ")", "return", "w" ]
Returns the cumulative w for all the fields in the dataset
[ "Returns", "the", "cumulative", "w", "for", "all", "the", "fields", "in", "the", "dataset" ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/generators/data_generator.py#L285-L289
valid
numenta/nupic
src/nupic/data/generators/data_generator.py
DataGenerator.getEncoding
def getEncoding(self, n):
    """Returns the nth encoding"""
    assert all(field.numEncodings > n for field in self.fields)
    encoding = np.concatenate([field.encodings[n] for field in self.fields])

    return encoding
python
def getEncoding(self, n):
    """Returns the nth encoding"""
    assert all(field.numEncodings > n for field in self.fields)
    encoding = np.concatenate([field.encodings[n] for field in self.fields])

    return encoding
[ "def", "getEncoding", "(", "self", ",", "n", ")", ":", "assert", "(", "all", "(", "field", ".", "numEncodings", ">", "n", "for", "field", "in", "self", ".", "fields", ")", ")", "encoding", "=", "np", ".", "concatenate", "(", "[", "field", ".", "encodings", "[", "n", "]", "for", "field", "in", "self", ".", "fields", "]", ")", "return", "encoding" ]
Returns the nth encoding
[ "Returns", "the", "nth", "encoding" ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/generators/data_generator.py#L292-L298
valid
numenta/nupic
src/nupic/data/generators/data_generator.py
DataGenerator.getAllEncodings
def getAllEncodings(self):
    """Returns encodings for all the records"""
    numEncodings = self.fields[0].numEncodings
    assert all(field.numEncodings == numEncodings for field in self.fields)
    encodings = [self.getEncoding(index) for index in range(numEncodings)]

    return encodings
python
def getAllEncodings(self):
    """Returns encodings for all the records"""
    numEncodings = self.fields[0].numEncodings
    assert all(field.numEncodings == numEncodings for field in self.fields)
    encodings = [self.getEncoding(index) for index in range(numEncodings)]

    return encodings
[ "def", "getAllEncodings", "(", "self", ")", ":", "numEncodings", "=", "self", ".", "fields", "[", "0", "]", ".", "numEncodings", "assert", "(", "all", "(", "field", ".", "numEncodings", "==", "numEncodings", "for", "field", "in", "self", ".", "fields", ")", ")", "encodings", "=", "[", "self", ".", "getEncoding", "(", "index", ")", "for", "index", "in", "range", "(", "numEncodings", ")", "]", "return", "encodings" ]
Returns encodings for all the records
[ "Returns", "encodings", "for", "all", "the", "records" ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/generators/data_generator.py#L301-L308
valid
numenta/nupic
src/nupic/data/generators/data_generator.py
DataGenerator.saveRecords
def saveRecords(self, path='myOutput'):
    """Export all the records into a csv file in numenta format.

    Example header format:
    fieldName1    fieldName2    fieldName3
    date          string        float
    T             S

    Parameters:
    --------------------------------------------------------------------
    path:      Relative path of the file to which the records are to be
               exported
    """
    numRecords = self.fields[0].numRecords
    assert all(field.numRecords == numRecords for field in self.fields)

    import csv
    with open(path + '.csv', 'wb') as f:
      writer = csv.writer(f)
      writer.writerow(self.getAllFieldNames())
      writer.writerow(self.getAllDataTypes())
      writer.writerow(self.getAllFlags())
      writer.writerows(self.getAllRecords())
    if self.verbosity > 0:
      print '******', numRecords, 'records exported in numenta format to file:', \
            path, '******\n'
python
def saveRecords(self, path='myOutput'):
    """Export all the records into a csv file in numenta format.

    Example header format:
    fieldName1    fieldName2    fieldName3
    date          string        float
    T             S

    Parameters:
    --------------------------------------------------------------------
    path:      Relative path of the file to which the records are to be
               exported
    """
    numRecords = self.fields[0].numRecords
    assert all(field.numRecords == numRecords for field in self.fields)

    import csv
    with open(path + '.csv', 'wb') as f:
      writer = csv.writer(f)
      writer.writerow(self.getAllFieldNames())
      writer.writerow(self.getAllDataTypes())
      writer.writerow(self.getAllFlags())
      writer.writerows(self.getAllRecords())
    if self.verbosity > 0:
      print '******', numRecords, 'records exported in numenta format to file:', \
            path, '******\n'
[ "def", "saveRecords", "(", "self", ",", "path", "=", "'myOutput'", ")", ":", "numRecords", "=", "self", ".", "fields", "[", "0", "]", ".", "numRecords", "assert", "(", "all", "(", "field", ".", "numRecords", "==", "numRecords", "for", "field", "in", "self", ".", "fields", ")", ")", "import", "csv", "with", "open", "(", "path", "+", "'.csv'", ",", "'wb'", ")", "as", "f", ":", "writer", "=", "csv", ".", "writer", "(", "f", ")", "writer", ".", "writerow", "(", "self", ".", "getAllFieldNames", "(", ")", ")", "writer", ".", "writerow", "(", "self", ".", "getAllDataTypes", "(", ")", ")", "writer", ".", "writerow", "(", "self", ".", "getAllFlags", "(", ")", ")", "writer", ".", "writerows", "(", "self", ".", "getAllRecords", "(", ")", ")", "if", "self", ".", "verbosity", ">", "0", ":", "print", "'******'", ",", "numRecords", ",", "'records exported in numenta format to file:'", ",", "path", ",", "'******\\n'" ]
Export all the records into a csv file in numenta format.

Example header format:
fieldName1    fieldName2    fieldName3
date          string        float
T             S

Parameters:
--------------------------------------------------------------------
path:      Relative path of the file to which the records are to be exported
[ "Export", "all", "the", "records", "into", "a", "csv", "file", "in", "numenta", "format", "." ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/generators/data_generator.py#L339-L363
valid
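A sketch of saveRecords; the path is hypothetical and '.csv' is appended by the method itself:

dg.saveRecords(path='energy')
# writes ./energy.csv: field names, data types, and flags as three header
# rows, followed by one row per stored record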
numenta/nupic
src/nupic/data/generators/data_generator.py
DataGenerator.removeAllRecords
def removeAllRecords(self):
    """Deletes all the values in the dataset"""
    for field in self.fields:
      field.encodings, field.values = [], []
      field.numRecords, field.numEncodings = (0, 0)
python
def removeAllRecords(self):
    """Deletes all the values in the dataset"""
    for field in self.fields:
      field.encodings, field.values = [], []
      field.numRecords, field.numEncodings = (0, 0)
[ "def", "removeAllRecords", "(", "self", ")", ":", "for", "field", "in", "self", ".", "fields", ":", "field", ".", "encodings", ",", "field", ".", "values", "=", "[", "]", ",", "[", "]", "field", ".", "numRecords", ",", "field", ".", "numEncodings", "=", "(", "0", ",", "0", ")" ]
Deletes all the values in the dataset
[ "Deletes", "all", "the", "values", "in", "the", "dataset" ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/generators/data_generator.py#L366-L371
valid
numenta/nupic
src/nupic/data/generators/data_generator.py
_field.encodeValue
def encodeValue(self, value, toBeAdded=True):
    """Value is encoded as a sdr using the encoding parameters of the Field"""
    encodedValue = np.array(self.encoder.encode(value), dtype=realDType)

    if toBeAdded:
      self.encodings.append(encodedValue)
      self.numEncodings += 1

    return encodedValue
python
def encodeValue(self, value, toBeAdded=True):
    """Value is encoded as a sdr using the encoding parameters of the Field"""
    encodedValue = np.array(self.encoder.encode(value), dtype=realDType)

    if toBeAdded:
      self.encodings.append(encodedValue)
      self.numEncodings += 1

    return encodedValue
[ "def", "encodeValue", "(", "self", ",", "value", ",", "toBeAdded", "=", "True", ")", ":", "encodedValue", "=", "np", ".", "array", "(", "self", ".", "encoder", ".", "encode", "(", "value", ")", ",", "dtype", "=", "realDType", ")", "if", "toBeAdded", ":", "self", ".", "encodings", ".", "append", "(", "encodedValue", ")", "self", ".", "numEncodings", "+=", "1", "return", "encodedValue" ]
Value is encoded as a sdr using the encoding parameters of the Field
[ "Value", "is", "encoded", "as", "a", "sdr", "using", "the", "encoding", "parameters", "of", "the", "Field" ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/generators/data_generator.py#L444-L453
valid
numenta/nupic
src/nupic/data/generators/data_generator.py
_field._setTypes
def _setTypes(self, encoderSpec):
    """Set up the dataType and encoderType, inferring whichever of the two is
    unspecified from the other"""
    if self.encoderType is None:
      if self.dataType in ['int', 'float']:
        self.encoderType = 'adaptiveScalar'
      elif self.dataType == 'string':
        self.encoderType = 'category'
      elif self.dataType in ['date', 'datetime']:
        self.encoderType = 'date'

    if self.dataType is None:
      if self.encoderType in ['scalar', 'adaptiveScalar']:
        self.dataType = 'float'
      elif self.encoderType in ['category', 'enumeration']:
        self.dataType = 'string'
      elif self.encoderType in ['date', 'datetime']:
        self.dataType = 'datetime'
python
def _setTypes(self, encoderSpec):
    """Set up the dataType and encoderType, inferring whichever of the two is
    unspecified from the other"""
    if self.encoderType is None:
      if self.dataType in ['int', 'float']:
        self.encoderType = 'adaptiveScalar'
      elif self.dataType == 'string':
        self.encoderType = 'category'
      elif self.dataType in ['date', 'datetime']:
        self.encoderType = 'date'

    if self.dataType is None:
      if self.encoderType in ['scalar', 'adaptiveScalar']:
        self.dataType = 'float'
      elif self.encoderType in ['category', 'enumeration']:
        self.dataType = 'string'
      elif self.encoderType in ['date', 'datetime']:
        self.dataType = 'datetime'
[ "def", "_setTypes", "(", "self", ",", "encoderSpec", ")", ":", "if", "self", ".", "encoderType", "is", "None", ":", "if", "self", ".", "dataType", "in", "[", "'int'", ",", "'float'", "]", ":", "self", ".", "encoderType", "=", "'adaptiveScalar'", "elif", "self", ".", "dataType", "==", "'string'", ":", "self", ".", "encoderType", "=", "'category'", "elif", "self", ".", "dataType", "in", "[", "'date'", ",", "'datetime'", "]", ":", "self", ".", "encoderType", "=", "'date'", "if", "self", ".", "dataType", "is", "None", ":", "if", "self", ".", "encoderType", "in", "[", "'scalar'", ",", "'adaptiveScalar'", "]", ":", "self", ".", "dataType", "=", "'float'", "elif", "self", ".", "encoderType", "in", "[", "'category'", ",", "'enumeration'", "]", ":", "self", ".", "dataType", "=", "'string'", "elif", "self", ".", "encoderType", "in", "[", "'date'", ",", "'datetime'", "]", ":", "self", ".", "dataType", "=", "'datetime'" ]
Set up the dataType and encoderType, inferring whichever of the two is
unspecified from the other
[ "Set", "up", "the", "dataTypes", "and", "initialize", "encoders" ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/generators/data_generator.py#L456-L473
valid
numenta/nupic
src/nupic/data/generators/data_generator.py
_field._initializeEncoders
def _initializeEncoders(self, encoderSpec):
    """ Initialize the encoders"""

    # Initializing scalar encoder
    if self.encoderType in ['adaptiveScalar', 'scalar']:
      if 'minval' in encoderSpec:
        self.minval = encoderSpec.pop('minval')
      else:
        self.minval = None
      if 'maxval' in encoderSpec:
        self.maxval = encoderSpec.pop('maxval')
      else:
        self.maxval = None
      self.encoder = adaptive_scalar.AdaptiveScalarEncoder(
          name='AdaptiveScalarEncoder', w=self.w, n=self.n,
          minval=self.minval, maxval=self.maxval, periodic=False, forced=True)

    # Initializing category encoder
    elif self.encoderType == 'category':
      self.encoder = sdr_category.SDRCategoryEncoder(name='categoryEncoder',
                                                     w=self.w, n=self.n)

    # Initializing date encoder
    elif self.encoderType in ['date', 'datetime']:
      self.encoder = date.DateEncoder(name='dateEncoder')

    else:
      raise RuntimeError('Error in constructing class object. Either encoder '
                         'type or dataType must be specified')
python
def _initializeEncoders(self, encoderSpec):
    """ Initialize the encoders"""

    # Initializing scalar encoder
    if self.encoderType in ['adaptiveScalar', 'scalar']:
      if 'minval' in encoderSpec:
        self.minval = encoderSpec.pop('minval')
      else:
        self.minval = None
      if 'maxval' in encoderSpec:
        self.maxval = encoderSpec.pop('maxval')
      else:
        self.maxval = None
      self.encoder = adaptive_scalar.AdaptiveScalarEncoder(
          name='AdaptiveScalarEncoder', w=self.w, n=self.n,
          minval=self.minval, maxval=self.maxval, periodic=False, forced=True)

    # Initializing category encoder
    elif self.encoderType == 'category':
      self.encoder = sdr_category.SDRCategoryEncoder(name='categoryEncoder',
                                                     w=self.w, n=self.n)

    # Initializing date encoder
    elif self.encoderType in ['date', 'datetime']:
      self.encoder = date.DateEncoder(name='dateEncoder')

    else:
      raise RuntimeError('Error in constructing class object. Either encoder '
                         'type or dataType must be specified')
[ "def", "_initializeEncoders", "(", "self", ",", "encoderSpec", ")", ":", "#Initializing scalar encoder", "if", "self", ".", "encoderType", "in", "[", "'adaptiveScalar'", ",", "'scalar'", "]", ":", "if", "'minval'", "in", "encoderSpec", ":", "self", ".", "minval", "=", "encoderSpec", ".", "pop", "(", "'minval'", ")", "else", ":", "self", ".", "minval", "=", "None", "if", "'maxval'", "in", "encoderSpec", ":", "self", ".", "maxval", "=", "encoderSpec", ".", "pop", "(", "'maxval'", ")", "else", ":", "self", ".", "maxval", "=", "None", "self", ".", "encoder", "=", "adaptive_scalar", ".", "AdaptiveScalarEncoder", "(", "name", "=", "'AdaptiveScalarEncoder'", ",", "w", "=", "self", ".", "w", ",", "n", "=", "self", ".", "n", ",", "minval", "=", "self", ".", "minval", ",", "maxval", "=", "self", ".", "maxval", ",", "periodic", "=", "False", ",", "forced", "=", "True", ")", "#Initializing category encoder", "elif", "self", ".", "encoderType", "==", "'category'", ":", "self", ".", "encoder", "=", "sdr_category", ".", "SDRCategoryEncoder", "(", "name", "=", "'categoryEncoder'", ",", "w", "=", "self", ".", "w", ",", "n", "=", "self", ".", "n", ")", "#Initializing date encoder", "elif", "self", ".", "encoderType", "in", "[", "'date'", ",", "'datetime'", "]", ":", "self", ".", "encoder", "=", "date", ".", "DateEncoder", "(", "name", "=", "'dateEncoder'", ")", "else", ":", "raise", "RuntimeError", "(", "'Error in constructing class object. Either encoder type'", "'or dataType must be specified'", ")" ]
Initialize the encoders
[ "Initialize", "the", "encoders" ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/generators/data_generator.py#L476-L500
valid
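A sketch of the encoderSpec consumed above, on a hypothetical _field instance; only 'minval' and 'maxval' are popped here, while w, n, and encoderType are assumed to have been set during field construction:

spec = {'minval': 0.0, 'maxval': 100.0}
field.encoderType = 'adaptiveScalar'   # normally set by _setTypes
field._initializeEncoders(spec)        # builds an AdaptiveScalarEncoder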
numenta/nupic
src/nupic/encoders/category.py
CategoryEncoder.getScalars
def getScalars(self, input):
    """ See method description in base.py """
    if input == SENTINEL_VALUE_FOR_MISSING_DATA:
      return numpy.array([None])
    else:
      return numpy.array([self.categoryToIndex.get(input, 0)])
python
def getScalars(self, input):
    """ See method description in base.py """
    if input == SENTINEL_VALUE_FOR_MISSING_DATA:
      return numpy.array([None])
    else:
      return numpy.array([self.categoryToIndex.get(input, 0)])
[ "def", "getScalars", "(", "self", ",", "input", ")", ":", "if", "input", "==", "SENTINEL_VALUE_FOR_MISSING_DATA", ":", "return", "numpy", ".", "array", "(", "[", "None", "]", ")", "else", ":", "return", "numpy", ".", "array", "(", "[", "self", ".", "categoryToIndex", ".", "get", "(", "input", ",", "0", ")", "]", ")" ]
See method description in base.py
[ "See", "method", "description", "in", "base", ".", "py" ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/encoders/category.py#L105-L110
valid
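A small sketch against CategoryEncoder, assuming its usual constructor signature (w, categoryList, forced); SENTINEL_VALUE_FOR_MISSING_DATA is the same constant imported at the top of category.py. Unlisted inputs fall back to index 0, which the encoder reserves for unknown categories:

from nupic.encoders.category import CategoryEncoder

enc = CategoryEncoder(w=3, categoryList=['cat', 'dog'], forced=True)
enc.getScalars('dog')                            # index assigned to 'dog'
enc.getScalars('zebra')                          # array([0]): unknown category
enc.getScalars(SENTINEL_VALUE_FOR_MISSING_DATA)  # array([None])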
numenta/nupic
src/nupic/encoders/category.py
CategoryEncoder.getBucketIndices
def getBucketIndices(self, input):
    """ See method description in base.py """

    # Get the bucket index from the underlying scalar encoder
    if input == SENTINEL_VALUE_FOR_MISSING_DATA:
      return [None]
    else:
      return self.encoder.getBucketIndices(self.categoryToIndex.get(input, 0))
python
def getBucketIndices(self, input):
    """ See method description in base.py """

    # Get the bucket index from the underlying scalar encoder
    if input == SENTINEL_VALUE_FOR_MISSING_DATA:
      return [None]
    else:
      return self.encoder.getBucketIndices(self.categoryToIndex.get(input, 0))
[ "def", "getBucketIndices", "(", "self", ",", "input", ")", ":", "# Get the bucket index from the underlying scalar encoder", "if", "input", "==", "SENTINEL_VALUE_FOR_MISSING_DATA", ":", "return", "[", "None", "]", "else", ":", "return", "self", ".", "encoder", ".", "getBucketIndices", "(", "self", ".", "categoryToIndex", ".", "get", "(", "input", ",", "0", ")", ")" ]
See method description in base.py
[ "See", "method", "description", "in", "base", ".", "py" ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/encoders/category.py#L113-L120
valid
numenta/nupic
src/nupic/encoders/category.py
CategoryEncoder.decode
def decode(self, encoded, parentFieldName=''):
    """ See the function description in base.py
    """

    # Get the scalar values from the underlying scalar encoder
    (fieldsDict, fieldNames) = self.encoder.decode(encoded)
    if len(fieldsDict) == 0:
      return (fieldsDict, fieldNames)

    # Expect only 1 field
    assert(len(fieldsDict) == 1)

    # Get the list of categories the scalar values correspond to and
    # generate the description from the category name(s).
    (inRanges, inDesc) = fieldsDict.values()[0]
    outRanges = []
    desc = ""
    for (minV, maxV) in inRanges:
      minV = int(round(minV))
      maxV = int(round(maxV))
      outRanges.append((minV, maxV))
      while minV <= maxV:
        if len(desc) > 0:
          desc += ", "
        desc += self.indexToCategory[minV]
        minV += 1

    # Return result
    if parentFieldName != '':
      fieldName = "%s.%s" % (parentFieldName, self.name)
    else:
      fieldName = self.name
    return ({fieldName: (outRanges, desc)}, [fieldName])
python
def decode(self, encoded, parentFieldName=''):
    """ See the function description in base.py
    """

    # Get the scalar values from the underlying scalar encoder
    (fieldsDict, fieldNames) = self.encoder.decode(encoded)
    if len(fieldsDict) == 0:
      return (fieldsDict, fieldNames)

    # Expect only 1 field
    assert(len(fieldsDict) == 1)

    # Get the list of categories the scalar values correspond to and
    # generate the description from the category name(s).
    (inRanges, inDesc) = fieldsDict.values()[0]
    outRanges = []
    desc = ""
    for (minV, maxV) in inRanges:
      minV = int(round(minV))
      maxV = int(round(maxV))
      outRanges.append((minV, maxV))
      while minV <= maxV:
        if len(desc) > 0:
          desc += ", "
        desc += self.indexToCategory[minV]
        minV += 1

    # Return result
    if parentFieldName != '':
      fieldName = "%s.%s" % (parentFieldName, self.name)
    else:
      fieldName = self.name
    return ({fieldName: (outRanges, desc)}, [fieldName])
[ "def", "decode", "(", "self", ",", "encoded", ",", "parentFieldName", "=", "''", ")", ":", "# Get the scalar values from the underlying scalar encoder", "(", "fieldsDict", ",", "fieldNames", ")", "=", "self", ".", "encoder", ".", "decode", "(", "encoded", ")", "if", "len", "(", "fieldsDict", ")", "==", "0", ":", "return", "(", "fieldsDict", ",", "fieldNames", ")", "# Expect only 1 field", "assert", "(", "len", "(", "fieldsDict", ")", "==", "1", ")", "# Get the list of categories the scalar values correspond to and", "# generate the description from the category name(s).", "(", "inRanges", ",", "inDesc", ")", "=", "fieldsDict", ".", "values", "(", ")", "[", "0", "]", "outRanges", "=", "[", "]", "desc", "=", "\"\"", "for", "(", "minV", ",", "maxV", ")", "in", "inRanges", ":", "minV", "=", "int", "(", "round", "(", "minV", ")", ")", "maxV", "=", "int", "(", "round", "(", "maxV", ")", ")", "outRanges", ".", "append", "(", "(", "minV", ",", "maxV", ")", ")", "while", "minV", "<=", "maxV", ":", "if", "len", "(", "desc", ")", ">", "0", ":", "desc", "+=", "\", \"", "desc", "+=", "self", ".", "indexToCategory", "[", "minV", "]", "minV", "+=", "1", "# Return result", "if", "parentFieldName", "!=", "''", ":", "fieldName", "=", "\"%s.%s\"", "%", "(", "parentFieldName", ",", "self", ".", "name", ")", "else", ":", "fieldName", "=", "self", ".", "name", "return", "(", "{", "fieldName", ":", "(", "outRanges", ",", "desc", ")", "}", ",", "[", "fieldName", "]", ")" ]
See the function description in base.py
[ "See", "the", "function", "description", "in", "base", ".", "py" ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/encoders/category.py#L137-L169
valid
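A decode round trip with the enc instance sketched earlier; the exact ranges depend on the underlying scalar encoder:

bits = enc.encode('dog')
fieldsDict, fieldNames = enc.decode(bits)
ranges, desc = fieldsDict[enc.name]
# desc names the matched category/categories, e.g. 'dog'; ranges holds
# the corresponding (min, max) category-index pairs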
numenta/nupic
src/nupic/encoders/category.py
CategoryEncoder.closenessScores
def closenessScores(self, expValues, actValues, fractional=True):
    """ See the function description in base.py

    The "fractional" keyword is honored by this encoder: when True (the
    default) the result is a similarity score (1.0 on an exact match, else
    0.0); when False the score is inverted into an error-style value.
    """

    expValue = expValues[0]
    actValue = actValues[0]

    if expValue == actValue:
      closeness = 1.0
    else:
      closeness = 0.0

    if not fractional:
      closeness = 1.0 - closeness

    return numpy.array([closeness])
python
def closenessScores(self, expValues, actValues, fractional=True):
    """ See the function description in base.py

    The "fractional" keyword is honored by this encoder: when True (the
    default) the result is a similarity score (1.0 on an exact match, else
    0.0); when False the score is inverted into an error-style value.
    """

    expValue = expValues[0]
    actValue = actValues[0]

    if expValue == actValue:
      closeness = 1.0
    else:
      closeness = 0.0

    if not fractional:
      closeness = 1.0 - closeness

    return numpy.array([closeness])
[ "def", "closenessScores", "(", "self", ",", "expValues", ",", "actValues", ",", "fractional", "=", "True", ",", ")", ":", "expValue", "=", "expValues", "[", "0", "]", "actValue", "=", "actValues", "[", "0", "]", "if", "expValue", "==", "actValue", ":", "closeness", "=", "1.0", "else", ":", "closeness", "=", "0.0", "if", "not", "fractional", ":", "closeness", "=", "1.0", "-", "closeness", "return", "numpy", ".", "array", "(", "[", "closeness", "]", ")" ]
See the function description in base.py

The "fractional" keyword is honored by this encoder: when True (the default)
the result is a similarity score (1.0 on an exact match, else 0.0); when
False the score is inverted into an error-style value.
[ "See", "the", "function", "description", "in", "base", ".", "py" ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/encoders/category.py#L172-L189
valid
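The scoring rule above, spelled out with the enc instance sketched earlier:

enc.closenessScores(['dog'], ['dog'])                    # array([ 1.])
enc.closenessScores(['dog'], ['cat'])                    # array([ 0.])
enc.closenessScores(['dog'], ['cat'], fractional=False)  # array([ 1.])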
numenta/nupic
src/nupic/encoders/category.py
CategoryEncoder.getBucketValues
def getBucketValues(self):
    """ See the function description in base.py """

    if self._bucketValues is None:
      numBuckets = len(self.encoder.getBucketValues())
      self._bucketValues = []
      for bucketIndex in range(numBuckets):
        self._bucketValues.append(self.getBucketInfo([bucketIndex])[0].value)

    return self._bucketValues
python
def getBucketValues(self):
    """ See the function description in base.py """

    if self._bucketValues is None:
      numBuckets = len(self.encoder.getBucketValues())
      self._bucketValues = []
      for bucketIndex in range(numBuckets):
        self._bucketValues.append(self.getBucketInfo([bucketIndex])[0].value)

    return self._bucketValues
[ "def", "getBucketValues", "(", "self", ")", ":", "if", "self", ".", "_bucketValues", "is", "None", ":", "numBuckets", "=", "len", "(", "self", ".", "encoder", ".", "getBucketValues", "(", ")", ")", "self", ".", "_bucketValues", "=", "[", "]", "for", "bucketIndex", "in", "range", "(", "numBuckets", ")", ":", "self", ".", "_bucketValues", ".", "append", "(", "self", ".", "getBucketInfo", "(", "[", "bucketIndex", "]", ")", "[", "0", "]", ".", "value", ")", "return", "self", ".", "_bucketValues" ]
See the function description in base.py
[ "See", "the", "function", "description", "in", "base", ".", "py" ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/encoders/category.py#L192-L201
valid
numenta/nupic
src/nupic/encoders/category.py
CategoryEncoder.getBucketInfo
def getBucketInfo(self, buckets):
    """ See the function description in base.py
    """

    # For the category encoder, the bucket index is the category index
    bucketInfo = self.encoder.getBucketInfo(buckets)[0]

    categoryIndex = int(round(bucketInfo.value))
    category = self.indexToCategory[categoryIndex]

    return [EncoderResult(value=category, scalar=categoryIndex,
                          encoding=bucketInfo.encoding)]
python
def getBucketInfo(self, buckets):
    """ See the function description in base.py
    """

    # For the category encoder, the bucket index is the category index
    bucketInfo = self.encoder.getBucketInfo(buckets)[0]

    categoryIndex = int(round(bucketInfo.value))
    category = self.indexToCategory[categoryIndex]

    return [EncoderResult(value=category, scalar=categoryIndex,
                          encoding=bucketInfo.encoding)]
[ "def", "getBucketInfo", "(", "self", ",", "buckets", ")", ":", "# For the category encoder, the bucket index is the category index", "bucketInfo", "=", "self", ".", "encoder", ".", "getBucketInfo", "(", "buckets", ")", "[", "0", "]", "categoryIndex", "=", "int", "(", "round", "(", "bucketInfo", ".", "value", ")", ")", "category", "=", "self", ".", "indexToCategory", "[", "categoryIndex", "]", "return", "[", "EncoderResult", "(", "value", "=", "category", ",", "scalar", "=", "categoryIndex", ",", "encoding", "=", "bucketInfo", ".", "encoding", ")", "]" ]
See the function description in base.py
[ "See", "the", "function", "description", "in", "base", ".", "py" ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/encoders/category.py#L204-L215
valid
numenta/nupic
src/nupic/encoders/category.py
CategoryEncoder.topDownCompute
def topDownCompute(self, encoded):
    """ See the function description in base.py
    """

    encoderResult = self.encoder.topDownCompute(encoded)[0]
    value = encoderResult.value
    categoryIndex = int(round(value))
    category = self.indexToCategory[categoryIndex]

    return EncoderResult(value=category, scalar=categoryIndex,
                         encoding=encoderResult.encoding)
python
def topDownCompute(self, encoded):
    """ See the function description in base.py
    """

    encoderResult = self.encoder.topDownCompute(encoded)[0]
    value = encoderResult.value
    categoryIndex = int(round(value))
    category = self.indexToCategory[categoryIndex]

    return EncoderResult(value=category, scalar=categoryIndex,
                         encoding=encoderResult.encoding)
[ "def", "topDownCompute", "(", "self", ",", "encoded", ")", ":", "encoderResult", "=", "self", ".", "encoder", ".", "topDownCompute", "(", "encoded", ")", "[", "0", "]", "value", "=", "encoderResult", ".", "value", "categoryIndex", "=", "int", "(", "round", "(", "value", ")", ")", "category", "=", "self", ".", "indexToCategory", "[", "categoryIndex", "]", "return", "EncoderResult", "(", "value", "=", "category", ",", "scalar", "=", "categoryIndex", ",", "encoding", "=", "encoderResult", ".", "encoding", ")" ]
See the function description in base.py
[ "See", "the", "function", "description", "in", "base", ".", "py" ]
5922fafffdccc8812e72b3324965ad2f7d4bbdad
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/encoders/category.py#L218-L228
valid
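A top-down round trip with the enc instance sketched earlier:

result = enc.topDownCompute(enc.encode('cat'))
# result.value == 'cat'; result.scalar is its category index;
# result.encoding is the canonical SDR for that bucket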