repo | path | func_name | code | language | sha | url | partition
---|---|---|---|---|---|---|---
numenta/nupic | src/nupic/frameworks/opf/helpers.py | loadExperiment | def loadExperiment(path):
"""Loads the experiment description file from the path.
:param path: (string) The path to a directory containing a description.py file
or the file itself.
:returns: (config, control)
"""
if not os.path.isdir(path):
path = os.path.dirname(path)
descriptionPyModule = loadExperimentDescriptionScriptFromDir(path)
expIface = getExperimentDescriptionInterfaceFromModule(descriptionPyModule)
return expIface.getModelDescription(), expIface.getModelControl() | python | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/frameworks/opf/helpers.py#L37-L48 | valid |
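A minimal usage sketch for `loadExperiment`, assuming an experiment directory that contains a `description.py` (the directory path below is hypothetical, not part of the repository):

```python
from nupic.frameworks.opf.helpers import loadExperiment

# Hypothetical experiment directory containing a description.py file.
config, control = loadExperiment("experiments/hotgym")

# Per the docstring, the call returns the experiment's model
# description and model control objects as a (config, control) pair.
print(type(config), type(control))
```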
numenta/nupic | src/nupic/frameworks/opf/helpers.py | loadExperimentDescriptionScriptFromDir | def loadExperimentDescriptionScriptFromDir(experimentDir):
""" Loads the experiment description python script from the given experiment
directory.
:param experimentDir: (string) experiment directory path
:returns: module of the loaded experiment description scripts
"""
descriptionScriptPath = os.path.join(experimentDir, "description.py")
module = _loadDescriptionFile(descriptionScriptPath)
return module | python | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/frameworks/opf/helpers.py#L51-L61 | valid |
numenta/nupic | src/nupic/frameworks/opf/helpers.py | getExperimentDescriptionInterfaceFromModule | def getExperimentDescriptionInterfaceFromModule(module):
"""
:param module: imported description.py module
:returns: (:class:`nupic.frameworks.opf.exp_description_api.DescriptionIface`)
represents the experiment description
"""
result = module.descriptionInterface
assert isinstance(result, exp_description_api.DescriptionIface), \
"expected DescriptionIface-based instance, but got %s" % type(result)
return result | python | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/frameworks/opf/helpers.py#L64-L75 | valid |
numenta/nupic | src/nupic/frameworks/opf/helpers.py | _loadDescriptionFile | def _loadDescriptionFile(descriptionPyPath):
"""Loads a description file and returns it as a module.
descriptionPyPath: path of description.py file to load
"""
global g_descriptionImportCount
if not os.path.isfile(descriptionPyPath):
raise RuntimeError(("Experiment description file %s does not exist or " + \
"is not a file") % (descriptionPyPath,))
mod = imp.load_source("pf_description%d" % g_descriptionImportCount,
descriptionPyPath)
g_descriptionImportCount += 1
if not hasattr(mod, "descriptionInterface"):
raise RuntimeError("Experiment description file %s does not define %s" % \
(descriptionPyPath, "descriptionInterface"))
if not isinstance(mod.descriptionInterface, exp_description_api.DescriptionIface):
raise RuntimeError(("Experiment description file %s defines %s but it " + \
"is not DescriptionIface-based") % \
(descriptionPyPath, "descriptionInterface"))
return mod | python | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/frameworks/opf/helpers.py#L80-L104 | valid |
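`_loadDescriptionFile` depends on `imp.load_source`, a Python 2-era API (`imp` is deprecated on Python 3 and removed in 3.12). As a hedged sketch of how the same load-a-module-from-a-path pattern looks with `importlib`, where `load_source_py3` is a hypothetical helper rather than nupic code:

```python
import importlib.util

def load_source_py3(module_name, path):
    # Build a module spec from the file path, create the module object,
    # and execute the file in its namespace -- the importlib equivalent
    # of imp.load_source(module_name, path).
    spec = importlib.util.spec_from_file_location(module_name, path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module
```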
numenta/nupic | src/nupic/swarming/hypersearch_v2.py | ResultsDB.update | def update(self, modelID, modelParams, modelParamsHash, metricResult,
completed, completionReason, matured, numRecords):
""" Insert a new entry or update an existing one. If this is an update
of an existing entry, then modelParams will be None
Parameters:
--------------------------------------------------------------------
modelID: globally unique modelID of this model
modelParams: params dict for this model, or None if this is just an update
of a model that it already previously reported on.
See the comments for the createModels() method for
a description of this dict.
modelParamsHash: hash of the modelParams dict, generated by the worker
that put it into the model database.
metricResult: value on the optimizeMetric for this model.
May be None if we have no results yet.
completed: True if the model has completed evaluation, False if it
is still running (and these are online results)
completionReason: One of the ClientJobsDAO.CMPL_REASON_XXX equates
matured: True if this model has matured
numRecords: Number of records that have been processed so far by this
model.
retval: Canonicalized result on the optimize metric
"""
# The modelParamsHash must always be provided - it can change after a
# model is inserted into the models table if it got detected as an
# orphan
assert (modelParamsHash is not None)
# We consider a model metricResult as "final" if it has completed or
# matured. By default, assume anything that has completed has matured
if completed:
matured = True
# Get the canonicalized optimize metric results. For this metric, lower
# is always better
if metricResult is not None and matured and \
completionReason in [ClientJobsDAO.CMPL_REASON_EOF,
ClientJobsDAO.CMPL_REASON_STOPPED]:
# Canonicalize the error score so that lower is better
if self._hsObj._maximize:
errScore = -1 * metricResult
else:
errScore = metricResult
if errScore < self._bestResult:
self._bestResult = errScore
self._bestModelID = modelID
self._hsObj.logger.info("New best model after %d evaluations: errScore "
"%g on model %s" % (len(self._allResults), self._bestResult,
self._bestModelID))
else:
errScore = numpy.inf
# If this model completed with an unacceptable completion reason, set the
# errScore to infinite and essentially make this model invisible to
# further queries
if completed and completionReason in [ClientJobsDAO.CMPL_REASON_ORPHAN]:
errScore = numpy.inf
hidden = True
else:
hidden = False
# Update our set of erred models and completed models. These are used
# to determine if we should abort the search because of too many errors
if completed:
self._completedModels.add(modelID)
self._numCompletedModels = len(self._completedModels)
if completionReason == ClientJobsDAO.CMPL_REASON_ERROR:
self._errModels.add(modelID)
self._numErrModels = len(self._errModels)
# Are we creating a new entry?
wasHidden = False
if modelID not in self._modelIDToIdx:
assert (modelParams is not None)
entry = dict(modelID=modelID, modelParams=modelParams,
modelParamsHash=modelParamsHash,
errScore=errScore, completed=completed,
matured=matured, numRecords=numRecords, hidden=hidden)
self._allResults.append(entry)
entryIdx = len(self._allResults) - 1
self._modelIDToIdx[modelID] = entryIdx
self._paramsHashToIndexes[modelParamsHash] = entryIdx
swarmId = modelParams['particleState']['swarmId']
if not hidden:
# Update the list of particles in each swarm
if swarmId in self._swarmIdToIndexes:
self._swarmIdToIndexes[swarmId].append(entryIdx)
else:
self._swarmIdToIndexes[swarmId] = [entryIdx]
# Update number of particles at each generation in this swarm
genIdx = modelParams['particleState']['genIdx']
numPsEntry = self._swarmNumParticlesPerGeneration.get(swarmId, [0])
while genIdx >= len(numPsEntry):
numPsEntry.append(0)
numPsEntry[genIdx] += 1
self._swarmNumParticlesPerGeneration[swarmId] = numPsEntry
# Replacing an existing one
else:
entryIdx = self._modelIDToIdx.get(modelID, None)
assert (entryIdx is not None)
entry = self._allResults[entryIdx]
wasHidden = entry['hidden']
# If the paramsHash changed, note that. This can happen for orphaned
# models
if entry['modelParamsHash'] != modelParamsHash:
self._paramsHashToIndexes.pop(entry['modelParamsHash'])
self._paramsHashToIndexes[modelParamsHash] = entryIdx
entry['modelParamsHash'] = modelParamsHash
# Get the model params, swarmId, and genIdx
modelParams = entry['modelParams']
swarmId = modelParams['particleState']['swarmId']
genIdx = modelParams['particleState']['genIdx']
# If this particle just became hidden, remove it from our swarm counts
if hidden and not wasHidden:
assert (entryIdx in self._swarmIdToIndexes[swarmId])
self._swarmIdToIndexes[swarmId].remove(entryIdx)
self._swarmNumParticlesPerGeneration[swarmId][genIdx] -= 1
# Update the entry for the latest info
entry['errScore'] = errScore
entry['completed'] = completed
entry['matured'] = matured
entry['numRecords'] = numRecords
entry['hidden'] = hidden
# Update the particle best errScore
particleId = modelParams['particleState']['id']
genIdx = modelParams['particleState']['genIdx']
if matured and not hidden:
(oldResult, pos) = self._particleBest.get(particleId, (numpy.inf, None))
if errScore < oldResult:
pos = Particle.getPositionFromState(modelParams['particleState'])
self._particleBest[particleId] = (errScore, pos)
# Update the particle latest generation index
prevGenIdx = self._particleLatestGenIdx.get(particleId, -1)
if not hidden and genIdx > prevGenIdx:
self._particleLatestGenIdx[particleId] = genIdx
elif hidden and not wasHidden and genIdx == prevGenIdx:
self._particleLatestGenIdx[particleId] = genIdx-1
# Update the swarm best score
if not hidden:
swarmId = modelParams['particleState']['swarmId']
if not swarmId in self._swarmBestOverall:
self._swarmBestOverall[swarmId] = []
bestScores = self._swarmBestOverall[swarmId]
while genIdx >= len(bestScores):
bestScores.append((None, numpy.inf))
if errScore < bestScores[genIdx][1]:
bestScores[genIdx] = (modelID, errScore)
# Update the self._modifiedSwarmGens flags to support the
# getMaturedSwarmGenerations() call.
if not hidden:
key = (swarmId, genIdx)
if not key in self._maturedSwarmGens:
self._modifiedSwarmGens.add(key)
return errScore | python | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/hypersearch_v2.py#L134-L308 | valid |
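`ResultsDB.update` canonicalizes every metric result so that lower is always better, negating the value when the search maximizes. A standalone sketch of that convention (the function name is illustrative, not nupic code):

```python
def canonicalize(metricResult, maximize):
    # Scores are stored so that lower is always better: a maximized
    # metric is negated on the way in.
    return -metricResult if maximize else metricResult

# A higher accuracy (0.9) maps to a lower, i.e. better, canonical score.
assert canonicalize(0.9, maximize=True) < canonicalize(0.8, maximize=True)
assert canonicalize(0.2, maximize=False) < canonicalize(0.3, maximize=False)
```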
numenta/nupic | src/nupic/swarming/hypersearch_v2.py | ResultsDB.getModelIDFromParamsHash | def getModelIDFromParamsHash(self, paramsHash):
""" Return the modelID of the model with the given paramsHash, or
None if not found.
Parameters:
---------------------------------------------------------------------
paramsHash: paramsHash to look for
retval: modelId, or None if not found
"""
entryIdx = self._paramsHashToIndexes.get(paramsHash, None)
if entryIdx is not None:
return self._allResults[entryIdx]['modelID']
else:
return None | python | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/hypersearch_v2.py#L337-L350 | valid |
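The params hash is what lets the database recognize when two workers propose the same model. A sketch of the idea; the hashing scheme here is illustrative, not the one nupic's workers actually use:

```python
import hashlib
import json

def paramsHash(modelParams):
    # Hash a canonical JSON rendering of the params dict so that
    # identical parameter combinations always map to the same key.
    blob = json.dumps(modelParams, sort_keys=True)
    return hashlib.md5(blob.encode("utf-8")).hexdigest()

h = paramsHash({"particleState": {"swarmId": "__gym_encoder", "genIdx": 0}})
# getModelIDFromParamsHash(h) would then return the existing modelID,
# or None if no model with these params has been registered.
```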
numenta/nupic | src/nupic/swarming/hypersearch_v2.py | ResultsDB.numModels | def numModels(self, swarmId=None, includeHidden=False):
"""Return the total # of models we have in our database (if swarmId is
None) or in a specific swarm.
Parameters:
---------------------------------------------------------------------
swarmId: A string representation of the sorted list of encoders
in this swarm. For example '__address_encoder.__gym_encoder'
includeHidden: If False, this will only return the number of models
that are not hidden (i.e. orphaned, etc.)
retval: numModels
"""
# Count all models
if includeHidden:
if swarmId is None:
return len(self._allResults)
else:
return len(self._swarmIdToIndexes.get(swarmId, []))
# Only count non-hidden models
else:
if swarmId is None:
entries = self._allResults
else:
entries = [self._allResults[entryIdx]
for entryIdx in self._swarmIdToIndexes.get(swarmId,[])]
return len([entry for entry in entries if not entry['hidden']]) | python | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/hypersearch_v2.py#L352-L379 | valid |
numenta/nupic | src/nupic/swarming/hypersearch_v2.py | ResultsDB.bestModelIdAndErrScore | def bestModelIdAndErrScore(self, swarmId=None, genIdx=None):
"""Return the model ID of the model with the best result so far and
it's score on the optimize metric. If swarm is None, then it returns
the global best, otherwise it returns the best for the given swarm
for all generatons up to and including genIdx.
Parameters:
---------------------------------------------------------------------
swarmId: A string representation of the sorted list of encoders in this
swarm. For example '__address_encoder.__gym_encoder'
genIdx: consider the best in all generations up to and including this
generation if not None.
retval: (modelID, result)
"""
if swarmId is None:
return (self._bestModelID, self._bestResult)
else:
if swarmId not in self._swarmBestOverall:
return (None, numpy.inf)
# Get the best score, considering the appropriate generations
genScores = self._swarmBestOverall[swarmId]
bestModelId = None
bestScore = numpy.inf
for (i, (modelId, errScore)) in enumerate(genScores):
if genIdx is not None and i > genIdx:
break
if errScore < bestScore:
bestScore = errScore
bestModelId = modelId
return (bestModelId, bestScore) | python | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/hypersearch_v2.py#L381-L415 | valid |
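An illustrative standalone version of the generation-capped scan above, where `_swarmBestOverall` holds one `(modelId, errScore)` pair per generation and `genIdx` bounds how far the scan looks (the scores are made up):

```python
import numpy

def bestUpTo(genScores, genIdx=None):
    # genScores[i] is the (modelId, errScore) winner of generation i.
    bestModelId, bestScore = None, numpy.inf
    for i, (modelId, errScore) in enumerate(genScores):
        if genIdx is not None and i > genIdx:
            break
        if errScore < bestScore:
            bestModelId, bestScore = modelId, errScore
    return (bestModelId, bestScore)

genScores = [(101, 0.50), (102, 0.42), (103, 0.45)]
assert bestUpTo(genScores, genIdx=0) == (101, 0.50)  # generation 0 only
assert bestUpTo(genScores) == (102, 0.42)            # all generations
```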
numenta/nupic | src/nupic/swarming/hypersearch_v2.py | ResultsDB.getParticleInfo | def getParticleInfo(self, modelId):
"""Return particle info for a specific modelId.
Parameters:
---------------------------------------------------------------------
modelId: which model Id
retval: (particleState, modelId, errScore, completed, matured)
"""
entry = self._allResults[self._modelIDToIdx[modelId]]
return (entry['modelParams']['particleState'], modelId, entry['errScore'],
entry['completed'], entry['matured']) | python | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/hypersearch_v2.py#L417-L428 | valid |
numenta/nupic | src/nupic/swarming/hypersearch_v2.py | ResultsDB.getParticleInfos | def getParticleInfos(self, swarmId=None, genIdx=None, completed=None,
matured=None, lastDescendent=False):
"""Return a list of particleStates for all particles we know about in
the given swarm, their model Ids, and metric results.
Parameters:
---------------------------------------------------------------------
swarmId: A string representation of the sorted list of encoders in this
swarm. For example '__address_encoder.__gym_encoder'
genIdx: If not None, only return particles at this specific generation
index.
completed: If not None, only return particles of the given state (either
completed if 'completed' is True, or running if 'completed'
is False).
matured: If not None, only return particles of the given state (either
matured if 'matured' is True, or not matured if 'matured'
is False). Note that any model which has completed is also
considered matured.
lastDescendent: If True, only return particles that are the last descendent,
that is, the highest generation index for a given particle Id
retval: (particleStates, modelIds, errScores, completed, matured)
particleStates: list of particleStates
modelIds: list of modelIds
errScores: list of errScores, numpy.inf is plugged in
if we don't have a result yet
completed: list of completed booleans
matured: list of matured booleans
"""
# The indexes of all the models in this swarm. This list excludes hidden
# (orphaned) models.
if swarmId is not None:
entryIdxs = self._swarmIdToIndexes.get(swarmId, [])
else:
entryIdxs = range(len(self._allResults))
if len(entryIdxs) == 0:
return ([], [], [], [], [])
# Get the particles of interest
particleStates = []
modelIds = []
errScores = []
completedFlags = []
maturedFlags = []
for idx in entryIdxs:
entry = self._allResults[idx]
# If this entry is hidden (i.e. it was an orphaned model), it should
# not be in this list
if swarmId is not None:
assert (not entry['hidden'])
# Get info on this model
modelParams = entry['modelParams']
isCompleted = entry['completed']
isMatured = entry['matured']
particleState = modelParams['particleState']
particleGenIdx = particleState['genIdx']
particleId = particleState['id']
if genIdx is not None and particleGenIdx != genIdx:
continue
if completed is not None and (completed != isCompleted):
continue
if matured is not None and (matured != isMatured):
continue
if lastDescendent \
and (self._particleLatestGenIdx[particleId] != particleGenIdx):
continue
# Incorporate into return values
particleStates.append(particleState)
modelIds.append(entry['modelID'])
errScores.append(entry['errScore'])
completedFlags.append(isCompleted)
maturedFlags.append(isMatured)
return (particleStates, modelIds, errScores, completedFlags, maturedFlags) | python | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/hypersearch_v2.py#L431-L516 | valid |
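Hedged call patterns for `getParticleInfos`, assuming `resultsDB` is a populated `ResultsDB` instance and using the swarm id format from the docstring:

```python
swarmId = "__address_encoder.__gym_encoder"

# All matured particles in generation 0 of one swarm.
(states, ids, errs, done, matured) = resultsDB.getParticleInfos(
    swarmId=swarmId, genIdx=0, matured=True)

# Only each particle's most recent position, across all generations.
(states, ids, errs, done, matured) = resultsDB.getParticleInfos(
    swarmId=swarmId, lastDescendent=True)
```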
numenta/nupic | src/nupic/swarming/hypersearch_v2.py | ResultsDB.getOrphanParticleInfos | def getOrphanParticleInfos(self, swarmId, genIdx):
"""Return a list of particleStates for all particles in the given
swarm generation that have been orphaned.
Parameters:
---------------------------------------------------------------------
swarmId: A string representation of the sorted list of encoders in this
swarm. For example '__address_encoder.__gym_encoder'
genIdx: If not None, only return particles at this specific generation
index.
retval: (particleStates, modelIds, errScores, completed, matured)
particleStates: list of particleStates
modelIds: list of modelIds
errScores: list of errScores, numpy.inf is plugged in
if we don't have a result yet
completed: list of completed booleans
matured: list of matured booleans
"""
entryIdxs = range(len(self._allResults))
if len(entryIdxs) == 0:
return ([], [], [], [], [])
# Get the particles of interest
particleStates = []
modelIds = []
errScores = []
completedFlags = []
maturedFlags = []
for idx in entryIdxs:
# Get info on this model
entry = self._allResults[idx]
if not entry['hidden']:
continue
modelParams = entry['modelParams']
if modelParams['particleState']['swarmId'] != swarmId:
continue
isCompleted = entry['completed']
isMatured = entry['matured']
particleState = modelParams['particleState']
particleGenIdx = particleState['genIdx']
particleId = particleState['id']
if genIdx is not None and particleGenIdx != genIdx:
continue
# Incorporate into return values
particleStates.append(particleState)
modelIds.append(entry['modelID'])
errScores.append(entry['errScore'])
completedFlags.append(isCompleted)
maturedFlags.append(isMatured)
return (particleStates, modelIds, errScores, completedFlags, maturedFlags) | python | def getOrphanParticleInfos(self, swarmId, genIdx):
"""Return a list of particleStates for all particles in the given
swarm generation that have been orphaned.
Parameters:
---------------------------------------------------------------------
swarmId: A string representation of the sorted list of encoders in this
swarm. For example '__address_encoder.__gym_encoder'
genIdx: If not None, only return particles at this specific generation
index.
retval: (particleStates, modelIds, errScores, completed, matured)
particleStates: list of particleStates
modelIds: list of modelIds
errScores: list of errScores, numpy.inf is plugged in
if we don't have a result yet
completed: list of completed booleans
matured: list of matured booleans
"""
entryIdxs = range(len(self._allResults))
if len(entryIdxs) == 0:
return ([], [], [], [], [])
# Get the particles of interest
particleStates = []
modelIds = []
errScores = []
completedFlags = []
maturedFlags = []
for idx in entryIdxs:
# Get info on this model
entry = self._allResults[idx]
if not entry['hidden']:
continue
modelParams = entry['modelParams']
if modelParams['particleState']['swarmId'] != swarmId:
continue
isCompleted = entry['completed']
isMatured = entry['matured']
particleState = modelParams['particleState']
particleGenIdx = particleState['genIdx']
particleId = particleState['id']
if genIdx is not None and particleGenIdx != genIdx:
continue
# Incorporate into return values
particleStates.append(particleState)
modelIds.append(entry['modelID'])
errScores.append(entry['errScore'])
completedFlags.append(isCompleted)
maturedFlags.append(isMatured)
return (particleStates, modelIds, errScores, completedFlags, maturedFlags) | [
"def",
"getOrphanParticleInfos",
"(",
"self",
",",
"swarmId",
",",
"genIdx",
")",
":",
"entryIdxs",
"=",
"range",
"(",
"len",
"(",
"self",
".",
"_allResults",
")",
")",
"if",
"len",
"(",
"entryIdxs",
")",
"==",
"0",
":",
"return",
"(",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
")",
"# Get the particles of interest",
"particleStates",
"=",
"[",
"]",
"modelIds",
"=",
"[",
"]",
"errScores",
"=",
"[",
"]",
"completedFlags",
"=",
"[",
"]",
"maturedFlags",
"=",
"[",
"]",
"for",
"idx",
"in",
"entryIdxs",
":",
"# Get info on this model",
"entry",
"=",
"self",
".",
"_allResults",
"[",
"idx",
"]",
"if",
"not",
"entry",
"[",
"'hidden'",
"]",
":",
"continue",
"modelParams",
"=",
"entry",
"[",
"'modelParams'",
"]",
"if",
"modelParams",
"[",
"'particleState'",
"]",
"[",
"'swarmId'",
"]",
"!=",
"swarmId",
":",
"continue",
"isCompleted",
"=",
"entry",
"[",
"'completed'",
"]",
"isMatured",
"=",
"entry",
"[",
"'matured'",
"]",
"particleState",
"=",
"modelParams",
"[",
"'particleState'",
"]",
"particleGenIdx",
"=",
"particleState",
"[",
"'genIdx'",
"]",
"particleId",
"=",
"particleState",
"[",
"'id'",
"]",
"if",
"genIdx",
"is",
"not",
"None",
"and",
"particleGenIdx",
"!=",
"genIdx",
":",
"continue",
"# Incorporate into return values",
"particleStates",
".",
"append",
"(",
"particleState",
")",
"modelIds",
".",
"append",
"(",
"entry",
"[",
"'modelID'",
"]",
")",
"errScores",
".",
"append",
"(",
"entry",
"[",
"'errScore'",
"]",
")",
"completedFlags",
".",
"append",
"(",
"isCompleted",
")",
"maturedFlags",
".",
"append",
"(",
"isMatured",
")",
"return",
"(",
"particleStates",
",",
"modelIds",
",",
"errScores",
",",
"completedFlags",
",",
"maturedFlags",
")"
] | Return a list of particleStates for all particles in the given
swarm generation that have been orphaned.
Parameters:
---------------------------------------------------------------------
swarmId: A string representation of the sorted list of encoders in this
swarm. For example '__address_encoder.__gym_encoder'
genIdx: If not None, only return particles at this specific generation
index.
retval: (particleStates, modelIds, errScores, completed, matured)
particleStates: list of particleStates
modelIds: list of modelIds
errScores: list of errScores, numpy.inf is plugged in
if we don't have a result yet
completed: list of completed booleans
matured: list of matured booleans | [
"Return",
"a",
"list",
"of",
"particleStates",
"for",
"all",
"particles",
"in",
"the",
"given",
"swarm",
"generation",
"that",
"have",
"been",
"orphaned",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/hypersearch_v2.py#L520-L578 | valid |
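Aside: a minimal, self-contained sketch of the parallel-list filtering pattern that getOrphanParticleInfos uses above. The name collect_orphans and the demo record are invented for illustration; only the hidden/swarmId/genIdx filtering logic mirrors the original.

import numpy

def collect_orphans(all_results, swarm_id, gen_idx=None):
    # Keep only hidden (orphaned) entries for the requested swarm and,
    # optionally, a specific generation; build the parallel return lists.
    states, model_ids, err_scores = [], [], []
    for entry in all_results:
        if not entry['hidden']:
            continue
        state = entry['modelParams']['particleState']
        if state['swarmId'] != swarm_id:
            continue
        if gen_idx is not None and state['genIdx'] != gen_idx:
            continue
        states.append(state)
        model_ids.append(entry['modelID'])
        # numpy.inf stands in for models that have produced no result yet.
        err_scores.append(entry.get('errScore', numpy.inf))
    return states, model_ids, err_scores

demo = [{'hidden': True, 'modelID': 7, 'errScore': 0.3,
         'modelParams': {'particleState': {'swarmId': 'a.b', 'genIdx': 0}}}]
print(collect_orphans(demo, 'a.b'))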
numenta/nupic | src/nupic/swarming/hypersearch_v2.py | ResultsDB.getMaturedSwarmGenerations | def getMaturedSwarmGenerations(self):
"""Return a list of swarm generations that have completed and the
best (minimal) errScore seen for each of them.
Parameters:
---------------------------------------------------------------------
retval: list of tuples. Each tuple is of the form:
(swarmId, genIdx, bestErrScore)
"""
# Results to return are collected in this list
result = []
# For each of the swarm generations which have had model result updates
# since the last time we were called, see which have completed.
modifiedSwarmGens = sorted(self._modifiedSwarmGens)
# Walk through them in order from lowest to highest generation index
for key in modifiedSwarmGens:
(swarmId, genIdx) = key
# Skip it if we've already reported on it. This should happen rarely, if
# ever. It means that some worker has started and completed a model in
# this generation after we've determined that the generation has ended.
if key in self._maturedSwarmGens:
self._modifiedSwarmGens.remove(key)
continue
# If the previous generation for this swarm is not complete yet, don't
# bother evaluating this one.
if (genIdx >= 1) and not (swarmId, genIdx-1) in self._maturedSwarmGens:
continue
# We found a swarm generation that had some results reported since last
# time, see if it's complete or not
(_, _, errScores, completedFlags, maturedFlags) = \
self.getParticleInfos(swarmId, genIdx)
maturedFlags = numpy.array(maturedFlags)
numMatured = maturedFlags.sum()
if numMatured >= self._hsObj._minParticlesPerSwarm \
and numMatured == len(maturedFlags):
errScores = numpy.array(errScores)
bestScore = errScores.min()
self._maturedSwarmGens.add(key)
self._modifiedSwarmGens.remove(key)
result.append((swarmId, genIdx, bestScore))
# Return results
return result | python | def getMaturedSwarmGenerations(self):
"""Return a list of swarm generations that have completed and the
best (minimal) errScore seen for each of them.
Parameters:
---------------------------------------------------------------------
retval: list of tuples. Each tuple is of the form:
(swarmId, genIdx, bestErrScore)
"""
# Results to return are collected in this list
result = []
# For each of the swarm generations which have had model result updates
# since the last time we were called, see which have completed.
modifiedSwarmGens = sorted(self._modifiedSwarmGens)
# Walk through them in order from lowest to highest generation index
for key in modifiedSwarmGens:
(swarmId, genIdx) = key
# Skip it if we've already reported on it. This should happen rarely, if
# ever. It means that some worker has started and completed a model in
# this generation after we've determined that the generation has ended.
if key in self._maturedSwarmGens:
self._modifiedSwarmGens.remove(key)
continue
# If the previous generation for this swarm is not complete yet, don't
# bother evaluating this one.
if (genIdx >= 1) and not (swarmId, genIdx-1) in self._maturedSwarmGens:
continue
# We found a swarm generation that had some results reported since last
# time, see if it's complete or not
(_, _, errScores, completedFlags, maturedFlags) = \
self.getParticleInfos(swarmId, genIdx)
maturedFlags = numpy.array(maturedFlags)
numMatured = maturedFlags.sum()
if numMatured >= self._hsObj._minParticlesPerSwarm \
and numMatured == len(maturedFlags):
errScores = numpy.array(errScores)
bestScore = errScores.min()
self._maturedSwarmGens.add(key)
self._modifiedSwarmGens.remove(key)
result.append((swarmId, genIdx, bestScore))
# Return results
return result | [
"def",
"getMaturedSwarmGenerations",
"(",
"self",
")",
":",
"# Return results go in this list",
"result",
"=",
"[",
"]",
"# For each of the swarm generations which have had model result updates",
"# since the last time we were called, see which have completed.",
"modifiedSwarmGens",
"=",
"sorted",
"(",
"self",
".",
"_modifiedSwarmGens",
")",
"# Walk through them in order from lowest to highest generation index",
"for",
"key",
"in",
"modifiedSwarmGens",
":",
"(",
"swarmId",
",",
"genIdx",
")",
"=",
"key",
"# Skip it if we've already reported on it. This should happen rarely, if",
"# ever. It means that some worker has started and completed a model in",
"# this generation after we've determined that the generation has ended.",
"if",
"key",
"in",
"self",
".",
"_maturedSwarmGens",
":",
"self",
".",
"_modifiedSwarmGens",
".",
"remove",
"(",
"key",
")",
"continue",
"# If the previous generation for this swarm is not complete yet, don't",
"# bother evaluating this one.",
"if",
"(",
"genIdx",
">=",
"1",
")",
"and",
"not",
"(",
"swarmId",
",",
"genIdx",
"-",
"1",
")",
"in",
"self",
".",
"_maturedSwarmGens",
":",
"continue",
"# We found a swarm generation that had some results reported since last",
"# time, see if it's complete or not",
"(",
"_",
",",
"_",
",",
"errScores",
",",
"completedFlags",
",",
"maturedFlags",
")",
"=",
"self",
".",
"getParticleInfos",
"(",
"swarmId",
",",
"genIdx",
")",
"maturedFlags",
"=",
"numpy",
".",
"array",
"(",
"maturedFlags",
")",
"numMatured",
"=",
"maturedFlags",
".",
"sum",
"(",
")",
"if",
"numMatured",
">=",
"self",
".",
"_hsObj",
".",
"_minParticlesPerSwarm",
"and",
"numMatured",
"==",
"len",
"(",
"maturedFlags",
")",
":",
"errScores",
"=",
"numpy",
".",
"array",
"(",
"errScores",
")",
"bestScore",
"=",
"errScores",
".",
"min",
"(",
")",
"self",
".",
"_maturedSwarmGens",
".",
"add",
"(",
"key",
")",
"self",
".",
"_modifiedSwarmGens",
".",
"remove",
"(",
"key",
")",
"result",
".",
"append",
"(",
"(",
"swarmId",
",",
"genIdx",
",",
"bestScore",
")",
")",
"# Return results",
"return",
"result"
] | Return a list of swarm generations that have completed and the
best (minimal) errScore seen for each of them.
Parameters:
---------------------------------------------------------------------
retval: list of tuples. Each tuple is of the form:
(swarmId, genIdx, bestErrScore) | [
"Return",
"a",
"list",
"of",
"swarm",
"generations",
"that",
"have",
"completed",
"and",
"the",
"best",
"(",
"minimal",
")",
"errScore",
"seen",
"for",
"each",
"of",
"them",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/hypersearch_v2.py#L581-L630 | valid |
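Aside: a hypothetical, standalone version of the maturity test that getMaturedSwarmGenerations applies to each (swarmId, genIdx) pair above: a generation counts as done only when every particle has matured and it holds at least the minimum particle count. generation_best_score and the sample inputs are invented names and data.

import numpy

def generation_best_score(err_scores, matured_flags, min_particles):
    matured = numpy.array(matured_flags)
    # Mirrors the original check: enough particles, and all of them matured.
    if matured.sum() >= min_particles and matured.all():
        return float(numpy.array(err_scores).min())
    return None  # generation not complete yet

print(generation_best_score([0.4, 0.2, 0.9], [True, True, True], 3))  # 0.2
print(generation_best_score([0.4, 0.2], [True, False], 2))            # None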
numenta/nupic | src/nupic/swarming/hypersearch_v2.py | ResultsDB.firstNonFullGeneration | def firstNonFullGeneration(self, swarmId, minNumParticles):
""" Return the generation index of the first generation in the given
swarm that does not have numParticles particles in it, either still in the
running state or completed. This does not include orphaned particles.
Parameters:
---------------------------------------------------------------------
swarmId: A string representation of the sorted list of encoders in this
swarm. For example '__address_encoder.__gym_encoder'
minNumParticles: minimum number of particles required for a full
generation.
retval: generation index, or None if no particles at all.
"""
if not swarmId in self._swarmNumParticlesPerGeneration:
return None
numPsPerGen = self._swarmNumParticlesPerGeneration[swarmId]
numPsPerGen = numpy.array(numPsPerGen)
firstNonFull = numpy.where(numPsPerGen < minNumParticles)[0]
if len(firstNonFull) == 0:
return len(numPsPerGen)
else:
return firstNonFull[0] | python | def firstNonFullGeneration(self, swarmId, minNumParticles):
""" Return the generation index of the first generation in the given
swarm that does not have numParticles particles in it, either still in the
running state or completed. This does not include orphaned particles.
Parameters:
---------------------------------------------------------------------
swarmId: A string representation of the sorted list of encoders in this
swarm. For example '__address_encoder.__gym_encoder'
minNumParticles: minimum number of particles required for a full
generation.
retval: generation index, or None if no particles at all.
"""
if not swarmId in self._swarmNumParticlesPerGeneration:
return None
numPsPerGen = self._swarmNumParticlesPerGeneration[swarmId]
numPsPerGen = numpy.array(numPsPerGen)
firstNonFull = numpy.where(numPsPerGen < minNumParticles)[0]
if len(firstNonFull) == 0:
return len(numPsPerGen)
else:
return firstNonFull[0] | [
"def",
"firstNonFullGeneration",
"(",
"self",
",",
"swarmId",
",",
"minNumParticles",
")",
":",
"if",
"not",
"swarmId",
"in",
"self",
".",
"_swarmNumParticlesPerGeneration",
":",
"return",
"None",
"numPsPerGen",
"=",
"self",
".",
"_swarmNumParticlesPerGeneration",
"[",
"swarmId",
"]",
"numPsPerGen",
"=",
"numpy",
".",
"array",
"(",
"numPsPerGen",
")",
"firstNonFull",
"=",
"numpy",
".",
"where",
"(",
"numPsPerGen",
"<",
"minNumParticles",
")",
"[",
"0",
"]",
"if",
"len",
"(",
"firstNonFull",
")",
"==",
"0",
":",
"return",
"len",
"(",
"numPsPerGen",
")",
"else",
":",
"return",
"firstNonFull",
"[",
"0",
"]"
] | Return the generation index of the first generation in the given
swarm that does not have numParticles particles in it, either still in the
running state or completed. This does not include orphaned particles.
Parameters:
---------------------------------------------------------------------
swarmId: A string representation of the sorted list of encoders in this
swarm. For example '__address_encoder.__gym_encoder'
minNumParticles: minimum number of particles required for a full
generation.
retval: generation index, or None if no particles at all. | [
"Return",
"the",
"generation",
"index",
"of",
"the",
"first",
"generation",
"in",
"the",
"given",
"swarm",
"that",
"does",
"not",
"have",
"numParticles",
"particles",
"in",
"it",
"either",
"still",
"in",
"the",
"running",
"state",
"or",
"completed",
".",
"This",
"does",
"not",
"include",
"orphaned",
"particles",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/hypersearch_v2.py#L632-L657 | valid |
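Aside: the numpy.where idiom in firstNonFullGeneration, reduced to a standalone sketch with made-up particle counts. first_non_full is an invented name, not part of the nupic API.

import numpy

def first_non_full(counts_per_gen, min_particles):
    counts = numpy.array(counts_per_gen)
    short = numpy.where(counts < min_particles)[0]
    # All generations full: the next (brand new) generation index is
    # len(counts); otherwise return the first under-filled generation.
    return len(counts) if len(short) == 0 else int(short[0])

print(first_non_full([5, 5, 3], min_particles=5))  # 2
print(first_non_full([5, 5], min_particles=5))     # 2 (a new generation)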
numenta/nupic | src/nupic/swarming/hypersearch_v2.py | ResultsDB.getResultsPerChoice | def getResultsPerChoice(self, swarmId, maxGenIdx, varName):
""" Return a dict of the errors obtained on models that were run with
each value from a PermuteChoice variable.
For example, if a PermuteChoice variable has the following choices:
['a', 'b', 'c']
The dict will have 3 elements. The keys are the stringified choiceVars,
and each value is tuple containing (choiceVar, errors) where choiceVar is
the original form of the choiceVar (before stringification) and errors is
the list of errors received from models that used the specific choice:
retval:
{'a': ('a', [0.1, 0.2, 0.3]), 'b': ('b', [0.5, 0.1, 0.6]), 'c': ('c', [])}
Parameters:
---------------------------------------------------------------------
swarmId: swarm Id of the swarm to retrieve info from
maxGenIdx: max generation index to consider from other models, ignored
if None
varName: which variable to retrieve
retval: dict of the errors obtained from each choice.
"""
results = dict()
# Get all the completed particles in this swarm
(allParticles, _, resultErrs, _, _) = self.getParticleInfos(swarmId,
genIdx=None, matured=True)
for particleState, resultErr in itertools.izip(allParticles, resultErrs):
# Consider this generation?
if maxGenIdx is not None:
if particleState['genIdx'] > maxGenIdx:
continue
# Ignore unless this model completed successfully
if resultErr == numpy.inf:
continue
position = Particle.getPositionFromState(particleState)
varPosition = position[varName]
varPositionStr = str(varPosition)
if varPositionStr in results:
results[varPositionStr][1].append(resultErr)
else:
results[varPositionStr] = (varPosition, [resultErr])
return results | python | def getResultsPerChoice(self, swarmId, maxGenIdx, varName):
""" Return a dict of the errors obtained on models that were run with
each value from a PermuteChoice variable.
For example, if a PermuteChoice variable has the following choices:
['a', 'b', 'c']
The dict will have 3 elements. The keys are the stringified choiceVars,
and each value is tuple containing (choiceVar, errors) where choiceVar is
the original form of the choiceVar (before stringification) and errors is
the list of errors received from models that used the specific choice:
retval:
{'a': ('a', [0.1, 0.2, 0.3]), 'b': ('b', [0.5, 0.1, 0.6]), 'c': ('c', [])}
Parameters:
---------------------------------------------------------------------
swarmId: swarm Id of the swarm to retrieve info from
maxGenIdx: max generation index to consider from other models, ignored
if None
varName: which variable to retrieve
retval: dict of the errors obtained from each choice.
"""
results = dict()
# Get all the completed particles in this swarm
(allParticles, _, resultErrs, _, _) = self.getParticleInfos(swarmId,
genIdx=None, matured=True)
for particleState, resultErr in itertools.izip(allParticles, resultErrs):
# Consider this generation?
if maxGenIdx is not None:
if particleState['genIdx'] > maxGenIdx:
continue
# Ignore unless this model completed successfully
if resultErr == numpy.inf:
continue
position = Particle.getPositionFromState(particleState)
varPosition = position[varName]
varPositionStr = str(varPosition)
if varPositionStr in results:
results[varPositionStr][1].append(resultErr)
else:
results[varPositionStr] = (varPosition, [resultErr])
return results | [
"def",
"getResultsPerChoice",
"(",
"self",
",",
"swarmId",
",",
"maxGenIdx",
",",
"varName",
")",
":",
"results",
"=",
"dict",
"(",
")",
"# Get all the completed particles in this swarm",
"(",
"allParticles",
",",
"_",
",",
"resultErrs",
",",
"_",
",",
"_",
")",
"=",
"self",
".",
"getParticleInfos",
"(",
"swarmId",
",",
"genIdx",
"=",
"None",
",",
"matured",
"=",
"True",
")",
"for",
"particleState",
",",
"resultErr",
"in",
"itertools",
".",
"izip",
"(",
"allParticles",
",",
"resultErrs",
")",
":",
"# Consider this generation?",
"if",
"maxGenIdx",
"is",
"not",
"None",
":",
"if",
"particleState",
"[",
"'genIdx'",
"]",
">",
"maxGenIdx",
":",
"continue",
"# Ignore unless this model completed successfully",
"if",
"resultErr",
"==",
"numpy",
".",
"inf",
":",
"continue",
"position",
"=",
"Particle",
".",
"getPositionFromState",
"(",
"particleState",
")",
"varPosition",
"=",
"position",
"[",
"varName",
"]",
"varPositionStr",
"=",
"str",
"(",
"varPosition",
")",
"if",
"varPositionStr",
"in",
"results",
":",
"results",
"[",
"varPositionStr",
"]",
"[",
"1",
"]",
".",
"append",
"(",
"resultErr",
")",
"else",
":",
"results",
"[",
"varPositionStr",
"]",
"=",
"(",
"varPosition",
",",
"[",
"resultErr",
"]",
")",
"return",
"results"
] | Return a dict of the errors obtained on models that were run with
each value from a PermuteChoice variable.
For example, if a PermuteChoice variable has the following choices:
['a', 'b', 'c']
The dict will have 3 elements. The keys are the stringified choiceVars,
and each value is tuple containing (choiceVar, errors) where choiceVar is
the original form of the choiceVar (before stringification) and errors is
the list of errors received from models that used the specific choice:
retval:
{'a': ('a', [0.1, 0.2, 0.3]), 'b': ('b', [0.5, 0.1, 0.6]), 'c': ('c', [])}
Parameters:
---------------------------------------------------------------------
swarmId: swarm Id of the swarm to retrieve info from
maxGenIdx: max generation index to consider from other models, ignored
if None
varName: which variable to retrieve
retval: dict of the errors obtained from each choice. | [
"Return",
"a",
"dict",
"of",
"the",
"errors",
"obtained",
"on",
"models",
"that",
"were",
"run",
"with",
"each",
"value",
"from",
"a",
"PermuteChoice",
"variable",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/hypersearch_v2.py#L683-L730 | valid |
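Aside: a minimal sketch of the per-choice accumulation pattern used by getResultsPerChoice; the (choice, error) pairs below are invented for illustration.

def errors_per_choice(pairs):
    results = {}
    for choice, err in pairs:
        key = str(choice)  # the stringified choice is the dict key
        if key in results:
            results[key][1].append(err)
        else:
            results[key] = (choice, [err])
    return results

print(errors_per_choice([('a', 0.1), ('b', 0.5), ('a', 0.2)]))
# {'a': ('a', [0.1, 0.2]), 'b': ('b', [0.5])}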
numenta/nupic | src/nupic/swarming/hypersearch_v2.py | HypersearchV2._getStreamDef | def _getStreamDef(self, modelDescription):
"""
Generate stream definition based on the given model description.
"""
#--------------------------------------------------------------------------
# Generate the string containing the aggregation settings.
aggregationPeriod = {
'days': 0,
'hours': 0,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0,
}
# Honor any overrides provided in the stream definition
aggFunctionsDict = {}
if 'aggregation' in modelDescription['streamDef']:
for key in aggregationPeriod.keys():
if key in modelDescription['streamDef']['aggregation']:
aggregationPeriod[key] = modelDescription['streamDef']['aggregation'][key]
if 'fields' in modelDescription['streamDef']['aggregation']:
for (fieldName, func) in modelDescription['streamDef']['aggregation']['fields']:
aggFunctionsDict[fieldName] = str(func)
# Do we have any aggregation at all?
hasAggregation = False
for v in aggregationPeriod.values():
if v != 0:
hasAggregation = True
break
# Convert the aggFunctionsDict to a list
aggFunctionList = aggFunctionsDict.items()
aggregationInfo = dict(aggregationPeriod)
aggregationInfo['fields'] = aggFunctionList
streamDef = copy.deepcopy(modelDescription['streamDef'])
streamDef['aggregation'] = copy.deepcopy(aggregationInfo)
return streamDef | python | def _getStreamDef(self, modelDescription):
"""
Generate stream definition based on the given model description.
"""
#--------------------------------------------------------------------------
# Generate the string containing the aggregation settings.
aggregationPeriod = {
'days': 0,
'hours': 0,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0,
}
# Honor any overrides provided in the stream definition
aggFunctionsDict = {}
if 'aggregation' in modelDescription['streamDef']:
for key in aggregationPeriod.keys():
if key in modelDescription['streamDef']['aggregation']:
aggregationPeriod[key] = modelDescription['streamDef']['aggregation'][key]
if 'fields' in modelDescription['streamDef']['aggregation']:
for (fieldName, func) in modelDescription['streamDef']['aggregation']['fields']:
aggFunctionsDict[fieldName] = str(func)
# Do we have any aggregation at all?
hasAggregation = False
for v in aggregationPeriod.values():
if v != 0:
hasAggregation = True
break
# Convert the aggFunctionsDict to a list
aggFunctionList = aggFunctionsDict.items()
aggregationInfo = dict(aggregationPeriod)
aggregationInfo['fields'] = aggFunctionList
streamDef = copy.deepcopy(modelDescription['streamDef'])
streamDef['aggregation'] = copy.deepcopy(aggregationInfo)
return streamDef | [
"def",
"_getStreamDef",
"(",
"self",
",",
"modelDescription",
")",
":",
"#--------------------------------------------------------------------------",
"# Generate the string containing the aggregation settings.",
"aggregationPeriod",
"=",
"{",
"'days'",
":",
"0",
",",
"'hours'",
":",
"0",
",",
"'microseconds'",
":",
"0",
",",
"'milliseconds'",
":",
"0",
",",
"'minutes'",
":",
"0",
",",
"'months'",
":",
"0",
",",
"'seconds'",
":",
"0",
",",
"'weeks'",
":",
"0",
",",
"'years'",
":",
"0",
",",
"}",
"# Honor any overrides provided in the stream definition",
"aggFunctionsDict",
"=",
"{",
"}",
"if",
"'aggregation'",
"in",
"modelDescription",
"[",
"'streamDef'",
"]",
":",
"for",
"key",
"in",
"aggregationPeriod",
".",
"keys",
"(",
")",
":",
"if",
"key",
"in",
"modelDescription",
"[",
"'streamDef'",
"]",
"[",
"'aggregation'",
"]",
":",
"aggregationPeriod",
"[",
"key",
"]",
"=",
"modelDescription",
"[",
"'streamDef'",
"]",
"[",
"'aggregation'",
"]",
"[",
"key",
"]",
"if",
"'fields'",
"in",
"modelDescription",
"[",
"'streamDef'",
"]",
"[",
"'aggregation'",
"]",
":",
"for",
"(",
"fieldName",
",",
"func",
")",
"in",
"modelDescription",
"[",
"'streamDef'",
"]",
"[",
"'aggregation'",
"]",
"[",
"'fields'",
"]",
":",
"aggFunctionsDict",
"[",
"fieldName",
"]",
"=",
"str",
"(",
"func",
")",
"# Do we have any aggregation at all?",
"hasAggregation",
"=",
"False",
"for",
"v",
"in",
"aggregationPeriod",
".",
"values",
"(",
")",
":",
"if",
"v",
"!=",
"0",
":",
"hasAggregation",
"=",
"True",
"break",
"# Convert the aggFunctionsDict to a list",
"aggFunctionList",
"=",
"aggFunctionsDict",
".",
"items",
"(",
")",
"aggregationInfo",
"=",
"dict",
"(",
"aggregationPeriod",
")",
"aggregationInfo",
"[",
"'fields'",
"]",
"=",
"aggFunctionList",
"streamDef",
"=",
"copy",
".",
"deepcopy",
"(",
"modelDescription",
"[",
"'streamDef'",
"]",
")",
"streamDef",
"[",
"'aggregation'",
"]",
"=",
"copy",
".",
"deepcopy",
"(",
"aggregationInfo",
")",
"return",
"streamDef"
] | Generate stream definition based on the given model description. | [
"Generate",
"stream",
"definition",
"based",
"on"
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/hypersearch_v2.py#L1101-L1143 | valid |
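Aside: a standalone sketch of the override merge performed by _getStreamDef: every aggregation unit defaults to zero, and any unit present in the stream definition's 'aggregation' dict wins. merge_aggregation and the sample stream definition are invented for illustration.

import copy

DEFAULTS = {'days': 0, 'hours': 0, 'microseconds': 0, 'milliseconds': 0,
            'minutes': 0, 'months': 0, 'seconds': 0, 'weeks': 0, 'years': 0}

def merge_aggregation(stream_def):
    period = dict(DEFAULTS)
    overrides = stream_def.get('aggregation', {})
    for key in period:
        if key in overrides:
            period[key] = overrides[key]
    # Aggregation functions become a list of (fieldName, funcName) tuples.
    period['fields'] = [(name, str(func))
                        for name, func in overrides.get('fields', [])]
    out = copy.deepcopy(stream_def)
    out['aggregation'] = period
    return out

print(merge_aggregation({'aggregation': {'minutes': 15,
                                         'fields': [('consumption', 'mean')]}}))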
numenta/nupic | src/nupic/swarming/hypersearch_v2.py | HypersearchV2.close | def close(self):
"""Deletes temporary system objects/files. """
if self._tempDir is not None and os.path.isdir(self._tempDir):
self.logger.debug("Removing temporary directory %r", self._tempDir)
shutil.rmtree(self._tempDir)
self._tempDir = None
return | python | def close(self):
"""Deletes temporary system objects/files. """
if self._tempDir is not None and os.path.isdir(self._tempDir):
self.logger.debug("Removing temporary directory %r", self._tempDir)
shutil.rmtree(self._tempDir)
self._tempDir = None
return | [
"def",
"close",
"(",
"self",
")",
":",
"if",
"self",
".",
"_tempDir",
"is",
"not",
"None",
"and",
"os",
".",
"path",
".",
"isdir",
"(",
"self",
".",
"_tempDir",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Removing temporary directory %r\"",
",",
"self",
".",
"_tempDir",
")",
"shutil",
".",
"rmtree",
"(",
"self",
".",
"_tempDir",
")",
"self",
".",
"_tempDir",
"=",
"None",
"return"
] | Deletes temporary system objects/files. | [
"Deletes",
"temporary",
"system",
"objects",
"/",
"files",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/hypersearch_v2.py#L1153-L1160 | valid |
numenta/nupic | src/nupic/swarming/hypersearch_v2.py | HypersearchV2._readPermutationsFile | def _readPermutationsFile(self, filename, modelDescription):
"""
Read the permutations file and initialize the following member variables:
_predictedField: field name of the field we are trying to
predict
_permutations: Dict containing the full permutations dictionary.
_flattenedPermutations: Dict containing the flattened version of
_permutations. The keys leading to the value in the dict are joined
with a period to create the new key and permute variables within
encoders are pulled out of the encoder.
_encoderNames: keys from self._permutations of only the encoder
variables.
_reportKeys: The 'report' list from the permutations file.
This is a list of the items from each experiment's pickled
results file that should be included in the final report. The
format of each item is a string of key names separated by colons,
each key being one level deeper into the experiment results
dict. For example, 'key1:key2'.
_filterFunc: a user-supplied function that can be used to
filter out specific permutation combinations.
_optimizeKey: which report key to optimize for
_maximize: True if we should try and maximize the optimizeKey
metric. False if we should minimize it.
_dummyModelParamsFunc: a user-supplied function that can be used to
artificially generate HTMPredictionModel results. When supplied,
the model is not actually run through the OPF, but instead is run
through a "Dummy Model" (nupic.swarming.ModelRunner.
OPFDummyModelRunner). This function returns the params dict used
to control various options in the dummy model (the returned metric,
the execution time, etc.). This is used for hypersearch algorithm
development.
Parameters:
---------------------------------------------------------
filename: Name of permutations file
retval: None
"""
# Open and execute the permutations file
vars = {}
permFile = execfile(filename, globals(), vars)
# Read in misc info.
self._reportKeys = vars.get('report', [])
self._filterFunc = vars.get('permutationFilter', None)
self._dummyModelParamsFunc = vars.get('dummyModelParams', None)
self._predictedField = None # default
self._predictedFieldEncoder = None # default
self._fixedFields = None # default
# The fastSwarm variable, if present, contains the params from a best
# model from a previous swarm. If present, use info from that to seed
# a fast swarm
self._fastSwarmModelParams = vars.get('fastSwarmModelParams', None)
if self._fastSwarmModelParams is not None:
encoders = self._fastSwarmModelParams['structuredParams']['modelParams']\
['sensorParams']['encoders']
self._fixedFields = []
for fieldName in encoders:
if encoders[fieldName] is not None:
self._fixedFields.append(fieldName)
if 'fixedFields' in vars:
self._fixedFields = vars['fixedFields']
# Get min number of particles per swarm from either permutations file or
# config.
self._minParticlesPerSwarm = vars.get('minParticlesPerSwarm')
if self._minParticlesPerSwarm == None:
self._minParticlesPerSwarm = Configuration.get(
'nupic.hypersearch.minParticlesPerSwarm')
self._minParticlesPerSwarm = int(self._minParticlesPerSwarm)
# Enable logic to kill off speculative swarms when an earlier sprint
# has found that it contains poorly performing field combination?
self._killUselessSwarms = vars.get('killUselessSwarms', True)
# The caller can request that the predicted field ALWAYS be included ("yes")
# or optionally include ("auto"). The setting of "no" is N/A and ignored
# because in that case the encoder for the predicted field will not even
# be present in the permutations file.
# When set to "yes", this will force the first sprint to try the predicted
# field only (the legacy mode of swarming).
# When set to "auto", the first sprint tries all possible fields (one at a
# time) in the first sprint.
self._inputPredictedField = vars.get("inputPredictedField", "yes")
# Try all possible 3-field combinations? Normally, we start with the best
# 2-field combination as a base. When this flag is set though, we try
# all possible 3-field combinations which takes longer but can find a
# better model.
self._tryAll3FieldCombinations = vars.get('tryAll3FieldCombinations', False)
# Always include timestamp fields in the 3-field swarms?
# This is a less compute intensive version of tryAll3FieldCombinations.
# Instead of trying ALL possible 3 field combinations, it just ensures
# that the timestamp fields (dayOfWeek, timeOfDay, weekend) are never left
# out when generating the 3-field swarms.
self._tryAll3FieldCombinationsWTimestamps = vars.get(
'tryAll3FieldCombinationsWTimestamps', False)
# Allow the permutations file to override minFieldContribution. This would
# be set to a negative number for large swarms so that you don't disqualify
# a field in an early sprint just because it did poorly there. Sometimes,
# a field that did poorly in an early sprint could help accuracy when
# added in a later sprint
minFieldContribution = vars.get('minFieldContribution', None)
if minFieldContribution is not None:
self._minFieldContribution = minFieldContribution
# Allow the permutations file to override maxBranching.
maxBranching = vars.get('maxFieldBranching', None)
if maxBranching is not None:
self._maxBranching = maxBranching
# Read in the optimization info.
if 'maximize' in vars:
self._optimizeKey = vars['maximize']
self._maximize = True
elif 'minimize' in vars:
self._optimizeKey = vars['minimize']
self._maximize = False
else:
raise RuntimeError("Permutations file '%s' does not include a maximize"
" or minimize metric.")
# The permutations file is the new location for maxModels. The old location,
# in the jobParams is deprecated.
maxModels = vars.get('maxModels')
if maxModels is not None:
if self._maxModels is None:
self._maxModels = maxModels
else:
raise RuntimeError('It is an error to specify maxModels both in the job'
' params AND in the permutations file.')
# Figure out what kind of search this is:
#
# If it's a temporal prediction search:
# the first sprint has 1 swarm, with just the predicted field
# elif it's a spatial prediction search:
# the first sprint has N swarms, each with predicted field + one
# other field.
# elif it's a classification search:
# the first sprint has N swarms, each with 1 field
inferenceType = modelDescription['modelParams']['inferenceType']
if not InferenceType.validate(inferenceType):
raise ValueError("Invalid inference type %s" %inferenceType)
if inferenceType in [InferenceType.TemporalMultiStep,
InferenceType.NontemporalMultiStep]:
# If it does not have a separate encoder for the predicted field that
# goes to the classifier, it is a legacy multi-step network
classifierOnlyEncoder = None
for encoder in modelDescription["modelParams"]["sensorParams"]\
["encoders"].values():
if encoder.get("classifierOnly", False) \
and encoder["fieldname"] == vars.get('predictedField', None):
classifierOnlyEncoder = encoder
break
if classifierOnlyEncoder is None or self._inputPredictedField=="yes":
# If we don't have a separate encoder for the classifier (legacy
# MultiStep) or the caller explicitly wants to include the predicted
# field, then use the legacy temporal search methodology.
self._searchType = HsSearchType.legacyTemporal
else:
self._searchType = HsSearchType.temporal
elif inferenceType in [InferenceType.TemporalNextStep,
InferenceType.TemporalAnomaly]:
self._searchType = HsSearchType.legacyTemporal
elif inferenceType in (InferenceType.TemporalClassification,
InferenceType.NontemporalClassification):
self._searchType = HsSearchType.classification
else:
raise RuntimeError("Unsupported inference type: %s" % inferenceType)
# Get the predicted field. Note that even classification experiments
# have a "predicted" field - which is the field that contains the
# classification value.
self._predictedField = vars.get('predictedField', None)
if self._predictedField is None:
raise RuntimeError("Permutations file '%s' does not have the required"
" 'predictedField' variable" % filename)
# Read in and validate the permutations dict
if 'permutations' not in vars:
raise RuntimeError("Permutations file '%s' does not define permutations" % filename)
if not isinstance(vars['permutations'], dict):
raise RuntimeError("Permutations file '%s' defines a permutations variable "
"but it is not a dict")
self._encoderNames = []
self._permutations = vars['permutations']
self._flattenedPermutations = dict()
def _flattenPermutations(value, keys):
if ':' in keys[-1]:
raise RuntimeError("The permutation variable '%s' contains a ':' "
"character, which is not allowed.")
flatKey = _flattenKeys(keys)
if isinstance(value, PermuteEncoder):
self._encoderNames.append(flatKey)
# If this is the encoder for the predicted field, save its name.
if value.fieldName == self._predictedField:
self._predictedFieldEncoder = flatKey
# Store the flattened representations of the variables within the
# encoder.
for encKey, encValue in value.kwArgs.iteritems():
if isinstance(encValue, PermuteVariable):
self._flattenedPermutations['%s:%s' % (flatKey, encKey)] = encValue
elif isinstance(value, PermuteVariable):
self._flattenedPermutations[flatKey] = value
else:
if isinstance(value, PermuteVariable):
self._flattenedPermutations[flatKey] = value
rApply(self._permutations, _flattenPermutations) | python | def _readPermutationsFile(self, filename, modelDescription):
"""
Read the permutations file and initialize the following member variables:
_predictedField: field name of the field we are trying to
predict
_permutations: Dict containing the full permutations dictionary.
_flattenedPermutations: Dict containing the flattened version of
_permutations. The keys leading to the value in the dict are joined
with a period to create the new key and permute variables within
encoders are pulled out of the encoder.
_encoderNames: keys from self._permutations of only the encoder
variables.
_reportKeys: The 'report' list from the permutations file.
This is a list of the items from each experiment's pickled
results file that should be included in the final report. The
format of each item is a string of key names separated by colons,
each key being one level deeper into the experiment results
dict. For example, 'key1:key2'.
_filterFunc: a user-supplied function that can be used to
filter out specific permutation combinations.
_optimizeKey: which report key to optimize for
_maximize: True if we should try and maximize the optimizeKey
metric. False if we should minimize it.
_dummyModelParamsFunc: a user-supplied function that can be used to
artificially generate HTMPredictionModel results. When supplied,
the model is not actually run through the OPF, but instead is run
through a "Dummy Model" (nupic.swarming.ModelRunner.
OPFDummyModelRunner). This function returns the params dict used
to control various options in the dummy model (the returned metric,
the execution time, etc.). This is used for hypersearch algorithm
development.
Parameters:
---------------------------------------------------------
filename: Name of permutations file
retval: None
"""
# Open and execute the permutations file
vars = {}
permFile = execfile(filename, globals(), vars)
# Read in misc info.
self._reportKeys = vars.get('report', [])
self._filterFunc = vars.get('permutationFilter', None)
self._dummyModelParamsFunc = vars.get('dummyModelParams', None)
self._predictedField = None # default
self._predictedFieldEncoder = None # default
self._fixedFields = None # default
# The fastSwarm variable, if present, contains the params from a best
# model from a previous swarm. If present, use info from that to seed
# a fast swarm
self._fastSwarmModelParams = vars.get('fastSwarmModelParams', None)
if self._fastSwarmModelParams is not None:
encoders = self._fastSwarmModelParams['structuredParams']['modelParams']\
['sensorParams']['encoders']
self._fixedFields = []
for fieldName in encoders:
if encoders[fieldName] is not None:
self._fixedFields.append(fieldName)
if 'fixedFields' in vars:
self._fixedFields = vars['fixedFields']
# Get min number of particles per swarm from either permutations file or
# config.
self._minParticlesPerSwarm = vars.get('minParticlesPerSwarm')
if self._minParticlesPerSwarm == None:
self._minParticlesPerSwarm = Configuration.get(
'nupic.hypersearch.minParticlesPerSwarm')
self._minParticlesPerSwarm = int(self._minParticlesPerSwarm)
# Enable logic to kill off speculative swarms when an earlier sprint
# has found that it contains poorly performing field combination?
self._killUselessSwarms = vars.get('killUselessSwarms', True)
# The caller can request that the predicted field ALWAYS be included ("yes")
# or optionally include ("auto"). The setting of "no" is N/A and ignored
# because in that case the encoder for the predicted field will not even
# be present in the permutations file.
# When set to "yes", this will force the first sprint to try the predicted
# field only (the legacy mode of swarming).
# When set to "auto", the first sprint tries all possible fields (one at a
# time) in the first sprint.
self._inputPredictedField = vars.get("inputPredictedField", "yes")
# Try all possible 3-field combinations? Normally, we start with the best
# 2-field combination as a base. When this flag is set though, we try
# all possible 3-field combinations which takes longer but can find a
# better model.
self._tryAll3FieldCombinations = vars.get('tryAll3FieldCombinations', False)
# Always include timestamp fields in the 3-field swarms?
# This is a less compute intensive version of tryAll3FieldCombinations.
# Instead of trying ALL possible 3 field combinations, it just ensures
# that the timestamp fields (dayOfWeek, timeOfDay, weekend) are never left
# out when generating the 3-field swarms.
self._tryAll3FieldCombinationsWTimestamps = vars.get(
'tryAll3FieldCombinationsWTimestamps', False)
# Allow the permutations file to override minFieldContribution. This would
# be set to a negative number for large swarms so that you don't disqualify
# a field in an early sprint just because it did poorly there. Sometimes,
# a field that did poorly in an early sprint could help accuracy when
# added in a later sprint
minFieldContribution = vars.get('minFieldContribution', None)
if minFieldContribution is not None:
self._minFieldContribution = minFieldContribution
# Allow the permutations file to override maxBranching.
maxBranching = vars.get('maxFieldBranching', None)
if maxBranching is not None:
self._maxBranching = maxBranching
# Read in the optimization info.
if 'maximize' in vars:
self._optimizeKey = vars['maximize']
self._maximize = True
elif 'minimize' in vars:
self._optimizeKey = vars['minimize']
self._maximize = False
else:
raise RuntimeError("Permutations file '%s' does not include a maximize"
" or minimize metric.")
# The permutations file is the new location for maxModels. The old location,
# in the jobParams is deprecated.
maxModels = vars.get('maxModels')
if maxModels is not None:
if self._maxModels is None:
self._maxModels = maxModels
else:
raise RuntimeError('It is an error to specify maxModels both in the job'
' params AND in the permutations file.')
# Figure out what kind of search this is:
#
# If it's a temporal prediction search:
# the first sprint has 1 swarm, with just the predicted field
# elif it's a spatial prediction search:
# the first sprint has N swarms, each with predicted field + one
# other field.
# elif it's a classification search:
# the first sprint has N swarms, each with 1 field
inferenceType = modelDescription['modelParams']['inferenceType']
if not InferenceType.validate(inferenceType):
raise ValueError("Invalid inference type %s" %inferenceType)
if inferenceType in [InferenceType.TemporalMultiStep,
InferenceType.NontemporalMultiStep]:
# If it does not have a separate encoder for the predicted field that
# goes to the classifier, it is a legacy multi-step network
classifierOnlyEncoder = None
for encoder in modelDescription["modelParams"]["sensorParams"]\
["encoders"].values():
if encoder.get("classifierOnly", False) \
and encoder["fieldname"] == vars.get('predictedField', None):
classifierOnlyEncoder = encoder
break
if classifierOnlyEncoder is None or self._inputPredictedField=="yes":
# If we don't have a separate encoder for the classifier (legacy
# MultiStep) or the caller explicitly wants to include the predicted
# field, then use the legacy temporal search methodology.
self._searchType = HsSearchType.legacyTemporal
else:
self._searchType = HsSearchType.temporal
elif inferenceType in [InferenceType.TemporalNextStep,
InferenceType.TemporalAnomaly]:
self._searchType = HsSearchType.legacyTemporal
elif inferenceType in (InferenceType.TemporalClassification,
InferenceType.NontemporalClassification):
self._searchType = HsSearchType.classification
else:
raise RuntimeError("Unsupported inference type: %s" % inferenceType)
# Get the predicted field. Note that even classification experiments
# have a "predicted" field - which is the field that contains the
# classification value.
self._predictedField = vars.get('predictedField', None)
if self._predictedField is None:
raise RuntimeError("Permutations file '%s' does not have the required"
" 'predictedField' variable" % filename)
# Read in and validate the permutations dict
if 'permutations' not in vars:
raise RuntimeError("Permutations file '%s' does not define permutations" % filename)
if not isinstance(vars['permutations'], dict):
raise RuntimeError("Permutations file '%s' defines a permutations variable "
"but it is not a dict")
self._encoderNames = []
self._permutations = vars['permutations']
self._flattenedPermutations = dict()
def _flattenPermutations(value, keys):
if ':' in keys[-1]:
raise RuntimeError("The permutation variable '%s' contains a ':' "
"character, which is not allowed.")
flatKey = _flattenKeys(keys)
if isinstance(value, PermuteEncoder):
self._encoderNames.append(flatKey)
# If this is the encoder for the predicted field, save its name.
if value.fieldName == self._predictedField:
self._predictedFieldEncoder = flatKey
# Store the flattened representations of the variables within the
# encoder.
for encKey, encValue in value.kwArgs.iteritems():
if isinstance(encValue, PermuteVariable):
self._flattenedPermutations['%s:%s' % (flatKey, encKey)] = encValue
elif isinstance(value, PermuteVariable):
self._flattenedPermutations[flatKey] = value
else:
if isinstance(value, PermuteVariable):
self._flattenedPermutations[flatKey] = value
rApply(self._permutations, _flattenPermutations) | [
"def",
"_readPermutationsFile",
"(",
"self",
",",
"filename",
",",
"modelDescription",
")",
":",
"# Open and execute the permutations file",
"vars",
"=",
"{",
"}",
"permFile",
"=",
"execfile",
"(",
"filename",
",",
"globals",
"(",
")",
",",
"vars",
")",
"# Read in misc info.",
"self",
".",
"_reportKeys",
"=",
"vars",
".",
"get",
"(",
"'report'",
",",
"[",
"]",
")",
"self",
".",
"_filterFunc",
"=",
"vars",
".",
"get",
"(",
"'permutationFilter'",
",",
"None",
")",
"self",
".",
"_dummyModelParamsFunc",
"=",
"vars",
".",
"get",
"(",
"'dummyModelParams'",
",",
"None",
")",
"self",
".",
"_predictedField",
"=",
"None",
"# default",
"self",
".",
"_predictedFieldEncoder",
"=",
"None",
"# default",
"self",
".",
"_fixedFields",
"=",
"None",
"# default",
"# The fastSwarm variable, if present, contains the params from a best",
"# model from a previous swarm. If present, use info from that to seed",
"# a fast swarm",
"self",
".",
"_fastSwarmModelParams",
"=",
"vars",
".",
"get",
"(",
"'fastSwarmModelParams'",
",",
"None",
")",
"if",
"self",
".",
"_fastSwarmModelParams",
"is",
"not",
"None",
":",
"encoders",
"=",
"self",
".",
"_fastSwarmModelParams",
"[",
"'structuredParams'",
"]",
"[",
"'modelParams'",
"]",
"[",
"'sensorParams'",
"]",
"[",
"'encoders'",
"]",
"self",
".",
"_fixedFields",
"=",
"[",
"]",
"for",
"fieldName",
"in",
"encoders",
":",
"if",
"encoders",
"[",
"fieldName",
"]",
"is",
"not",
"None",
":",
"self",
".",
"_fixedFields",
".",
"append",
"(",
"fieldName",
")",
"if",
"'fixedFields'",
"in",
"vars",
":",
"self",
".",
"_fixedFields",
"=",
"vars",
"[",
"'fixedFields'",
"]",
"# Get min number of particles per swarm from either permutations file or",
"# config.",
"self",
".",
"_minParticlesPerSwarm",
"=",
"vars",
".",
"get",
"(",
"'minParticlesPerSwarm'",
")",
"if",
"self",
".",
"_minParticlesPerSwarm",
"==",
"None",
":",
"self",
".",
"_minParticlesPerSwarm",
"=",
"Configuration",
".",
"get",
"(",
"'nupic.hypersearch.minParticlesPerSwarm'",
")",
"self",
".",
"_minParticlesPerSwarm",
"=",
"int",
"(",
"self",
".",
"_minParticlesPerSwarm",
")",
"# Enable logic to kill off speculative swarms when an earlier sprint",
"# has found that it contains poorly performing field combination?",
"self",
".",
"_killUselessSwarms",
"=",
"vars",
".",
"get",
"(",
"'killUselessSwarms'",
",",
"True",
")",
"# The caller can request that the predicted field ALWAYS be included (\"yes\")",
"# or optionally include (\"auto\"). The setting of \"no\" is N/A and ignored",
"# because in that case the encoder for the predicted field will not even",
"# be present in the permutations file.",
"# When set to \"yes\", this will force the first sprint to try the predicted",
"# field only (the legacy mode of swarming).",
"# When set to \"auto\", the first sprint tries all possible fields (one at a",
"# time) in the first sprint.",
"self",
".",
"_inputPredictedField",
"=",
"vars",
".",
"get",
"(",
"\"inputPredictedField\"",
",",
"\"yes\"",
")",
"# Try all possible 3-field combinations? Normally, we start with the best",
"# 2-field combination as a base. When this flag is set though, we try",
"# all possible 3-field combinations which takes longer but can find a",
"# better model.",
"self",
".",
"_tryAll3FieldCombinations",
"=",
"vars",
".",
"get",
"(",
"'tryAll3FieldCombinations'",
",",
"False",
")",
"# Always include timestamp fields in the 3-field swarms?",
"# This is a less compute intensive version of tryAll3FieldCombinations.",
"# Instead of trying ALL possible 3 field combinations, it just insures",
"# that the timestamp fields (dayOfWeek, timeOfDay, weekend) are never left",
"# out when generating the 3-field swarms.",
"self",
".",
"_tryAll3FieldCombinationsWTimestamps",
"=",
"vars",
".",
"get",
"(",
"'tryAll3FieldCombinationsWTimestamps'",
",",
"False",
")",
"# Allow the permutations file to override minFieldContribution. This would",
"# be set to a negative number for large swarms so that you don't disqualify",
"# a field in an early sprint just because it did poorly there. Sometimes,",
"# a field that did poorly in an early sprint could help accuracy when",
"# added in a later sprint",
"minFieldContribution",
"=",
"vars",
".",
"get",
"(",
"'minFieldContribution'",
",",
"None",
")",
"if",
"minFieldContribution",
"is",
"not",
"None",
":",
"self",
".",
"_minFieldContribution",
"=",
"minFieldContribution",
"# Allow the permutations file to override maxBranching.",
"maxBranching",
"=",
"vars",
".",
"get",
"(",
"'maxFieldBranching'",
",",
"None",
")",
"if",
"maxBranching",
"is",
"not",
"None",
":",
"self",
".",
"_maxBranching",
"=",
"maxBranching",
"# Read in the optimization info.",
"if",
"'maximize'",
"in",
"vars",
":",
"self",
".",
"_optimizeKey",
"=",
"vars",
"[",
"'maximize'",
"]",
"self",
".",
"_maximize",
"=",
"True",
"elif",
"'minimize'",
"in",
"vars",
":",
"self",
".",
"_optimizeKey",
"=",
"vars",
"[",
"'minimize'",
"]",
"self",
".",
"_maximize",
"=",
"False",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"Permutations file '%s' does not include a maximize\"",
"\" or minimize metric.\"",
")",
"# The permutations file is the new location for maxModels. The old location,",
"# in the jobParams is deprecated.",
"maxModels",
"=",
"vars",
".",
"get",
"(",
"'maxModels'",
")",
"if",
"maxModels",
"is",
"not",
"None",
":",
"if",
"self",
".",
"_maxModels",
"is",
"None",
":",
"self",
".",
"_maxModels",
"=",
"maxModels",
"else",
":",
"raise",
"RuntimeError",
"(",
"'It is an error to specify maxModels both in the job'",
"' params AND in the permutations file.'",
")",
"# Figure out if what kind of search this is:",
"#",
"# If it's a temporal prediction search:",
"# the first sprint has 1 swarm, with just the predicted field",
"# elif it's a spatial prediction search:",
"# the first sprint has N swarms, each with predicted field + one",
"# other field.",
"# elif it's a classification search:",
"# the first sprint has N swarms, each with 1 field",
"inferenceType",
"=",
"modelDescription",
"[",
"'modelParams'",
"]",
"[",
"'inferenceType'",
"]",
"if",
"not",
"InferenceType",
".",
"validate",
"(",
"inferenceType",
")",
":",
"raise",
"ValueError",
"(",
"\"Invalid inference type %s\"",
"%",
"inferenceType",
")",
"if",
"inferenceType",
"in",
"[",
"InferenceType",
".",
"TemporalMultiStep",
",",
"InferenceType",
".",
"NontemporalMultiStep",
"]",
":",
"# If it does not have a separate encoder for the predicted field that",
"# goes to the classifier, it is a legacy multi-step network",
"classifierOnlyEncoder",
"=",
"None",
"for",
"encoder",
"in",
"modelDescription",
"[",
"\"modelParams\"",
"]",
"[",
"\"sensorParams\"",
"]",
"[",
"\"encoders\"",
"]",
".",
"values",
"(",
")",
":",
"if",
"encoder",
".",
"get",
"(",
"\"classifierOnly\"",
",",
"False",
")",
"and",
"encoder",
"[",
"\"fieldname\"",
"]",
"==",
"vars",
".",
"get",
"(",
"'predictedField'",
",",
"None",
")",
":",
"classifierOnlyEncoder",
"=",
"encoder",
"break",
"if",
"classifierOnlyEncoder",
"is",
"None",
"or",
"self",
".",
"_inputPredictedField",
"==",
"\"yes\"",
":",
"# If we don't have a separate encoder for the classifier (legacy",
"# MultiStep) or the caller explicitly wants to include the predicted",
"# field, then use the legacy temporal search methodology.",
"self",
".",
"_searchType",
"=",
"HsSearchType",
".",
"legacyTemporal",
"else",
":",
"self",
".",
"_searchType",
"=",
"HsSearchType",
".",
"temporal",
"elif",
"inferenceType",
"in",
"[",
"InferenceType",
".",
"TemporalNextStep",
",",
"InferenceType",
".",
"TemporalAnomaly",
"]",
":",
"self",
".",
"_searchType",
"=",
"HsSearchType",
".",
"legacyTemporal",
"elif",
"inferenceType",
"in",
"(",
"InferenceType",
".",
"TemporalClassification",
",",
"InferenceType",
".",
"NontemporalClassification",
")",
":",
"self",
".",
"_searchType",
"=",
"HsSearchType",
".",
"classification",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"Unsupported inference type: %s\"",
"%",
"inferenceType",
")",
"# Get the predicted field. Note that even classification experiments",
"# have a \"predicted\" field - which is the field that contains the",
"# classification value.",
"self",
".",
"_predictedField",
"=",
"vars",
".",
"get",
"(",
"'predictedField'",
",",
"None",
")",
"if",
"self",
".",
"_predictedField",
"is",
"None",
":",
"raise",
"RuntimeError",
"(",
"\"Permutations file '%s' does not have the required\"",
"\" 'predictedField' variable\"",
"%",
"filename",
")",
"# Read in and validate the permutations dict",
"if",
"'permutations'",
"not",
"in",
"vars",
":",
"raise",
"RuntimeError",
"(",
"\"Permutations file '%s' does not define permutations\"",
"%",
"filename",
")",
"if",
"not",
"isinstance",
"(",
"vars",
"[",
"'permutations'",
"]",
",",
"dict",
")",
":",
"raise",
"RuntimeError",
"(",
"\"Permutations file '%s' defines a permutations variable \"",
"\"but it is not a dict\"",
")",
"self",
".",
"_encoderNames",
"=",
"[",
"]",
"self",
".",
"_permutations",
"=",
"vars",
"[",
"'permutations'",
"]",
"self",
".",
"_flattenedPermutations",
"=",
"dict",
"(",
")",
"def",
"_flattenPermutations",
"(",
"value",
",",
"keys",
")",
":",
"if",
"':'",
"in",
"keys",
"[",
"-",
"1",
"]",
":",
"raise",
"RuntimeError",
"(",
"\"The permutation variable '%s' contains a ':' \"",
"\"character, which is not allowed.\"",
")",
"flatKey",
"=",
"_flattenKeys",
"(",
"keys",
")",
"if",
"isinstance",
"(",
"value",
",",
"PermuteEncoder",
")",
":",
"self",
".",
"_encoderNames",
".",
"append",
"(",
"flatKey",
")",
"# If this is the encoder for the predicted field, save its name.",
"if",
"value",
".",
"fieldName",
"==",
"self",
".",
"_predictedField",
":",
"self",
".",
"_predictedFieldEncoder",
"=",
"flatKey",
"# Store the flattened representations of the variables within the",
"# encoder.",
"for",
"encKey",
",",
"encValue",
"in",
"value",
".",
"kwArgs",
".",
"iteritems",
"(",
")",
":",
"if",
"isinstance",
"(",
"encValue",
",",
"PermuteVariable",
")",
":",
"self",
".",
"_flattenedPermutations",
"[",
"'%s:%s'",
"%",
"(",
"flatKey",
",",
"encKey",
")",
"]",
"=",
"encValue",
"elif",
"isinstance",
"(",
"value",
",",
"PermuteVariable",
")",
":",
"self",
".",
"_flattenedPermutations",
"[",
"flatKey",
"]",
"=",
"value",
"else",
":",
"if",
"isinstance",
"(",
"value",
",",
"PermuteVariable",
")",
":",
"self",
".",
"_flattenedPermutations",
"[",
"key",
"]",
"=",
"value",
"rApply",
"(",
"self",
".",
"_permutations",
",",
"_flattenPermutations",
")"
] | Read the permutations file and initialize the following member variables:
_predictedField: field name of the field we are trying to
predict
_permutations: Dict containing the full permutations dictionary.
_flattenedPermutations: Dict containing the flattened version of
_permutations. The keys leading to the value in the dict are joined
with a period to create the new key and permute variables within
encoders are pulled out of the encoder.
_encoderNames: keys from self._permutations of only the encoder
variables.
_reportKeys: The 'report' list from the permutations file.
This is a list of the items from each experiment's pickled
results file that should be included in the final report. The
format of each item is a string of key names separated by colons,
each key being one level deeper into the experiment results
dict. For example, 'key1:key2'.
_filterFunc: a user-supplied function that can be used to
filter out specific permutation combinations.
_optimizeKey: which report key to optimize for
_maximize: True if we should try and maximize the optimizeKey
metric. False if we should minimize it.
_dummyModelParamsFunc: a user-supplied function that can be used to
artificially generate HTMPredictionModel results. When supplied,
the model is not actually run through the OPF, but instead is run
through a "Dummy Model" (nupic.swarming.ModelRunner.
OPFDummyModelRunner). This function returns the params dict used
to control various options in the dummy model (the returned metric,
the execution time, etc.). This is used for hypersearch algorithm
development.
Parameters:
---------------------------------------------------------
filename: Name of permutations file
retval: None | [
"Read",
"the",
"permutations",
"file",
"and",
"initialize",
"the",
"following",
"member",
"variables",
":",
"_predictedField",
":",
"field",
"name",
"of",
"the",
"field",
"we",
"are",
"trying",
"to",
"predict",
"_permutations",
":",
"Dict",
"containing",
"the",
"full",
"permutations",
"dictionary",
".",
"_flattenedPermutations",
":",
"Dict",
"containing",
"the",
"flattened",
"version",
"of",
"_permutations",
".",
"The",
"keys",
"leading",
"to",
"the",
"value",
"in",
"the",
"dict",
"are",
"joined",
"with",
"a",
"period",
"to",
"create",
"the",
"new",
"key",
"and",
"permute",
"variables",
"within",
"encoders",
"are",
"pulled",
"out",
"of",
"the",
"encoder",
".",
"_encoderNames",
":",
"keys",
"from",
"self",
".",
"_permutations",
"of",
"only",
"the",
"encoder",
"variables",
".",
"_reportKeys",
":",
"The",
"report",
"list",
"from",
"the",
"permutations",
"file",
".",
"This",
"is",
"a",
"list",
"of",
"the",
"items",
"from",
"each",
"experiment",
"s",
"pickled",
"results",
"file",
"that",
"should",
"be",
"included",
"in",
"the",
"final",
"report",
".",
"The",
"format",
"of",
"each",
"item",
"is",
"a",
"string",
"of",
"key",
"names",
"separated",
"by",
"colons",
"each",
"key",
"being",
"one",
"level",
"deeper",
"into",
"the",
"experiment",
"results",
"dict",
".",
"For",
"example",
"key1",
":",
"key2",
".",
"_filterFunc",
":",
"a",
"user",
"-",
"supplied",
"function",
"that",
"can",
"be",
"used",
"to",
"filter",
"out",
"specific",
"permutation",
"combinations",
".",
"_optimizeKey",
":",
"which",
"report",
"key",
"to",
"optimize",
"for",
"_maximize",
":",
"True",
"if",
"we",
"should",
"try",
"and",
"maximize",
"the",
"optimizeKey",
"metric",
".",
"False",
"if",
"we",
"should",
"minimize",
"it",
".",
"_dummyModelParamsFunc",
":",
"a",
"user",
"-",
"supplied",
"function",
"that",
"can",
"be",
"used",
"to",
"artificially",
"generate",
"HTMPredictionModel",
"results",
".",
"When",
"supplied",
"the",
"model",
"is",
"not",
"actually",
"run",
"through",
"the",
"OPF",
"but",
"instead",
"is",
"run",
"through",
"a",
"Dummy",
"Model",
"(",
"nupic",
".",
"swarming",
".",
"ModelRunner",
".",
"OPFDummyModelRunner",
")",
".",
"This",
"function",
"returns",
"the",
"params",
"dict",
"used",
"to",
"control",
"various",
"options",
"in",
"the",
"dummy",
"model",
"(",
"the",
"returned",
"metric",
"the",
"execution",
"time",
"etc",
".",
")",
".",
"This",
"is",
"used",
"for",
"hypersearch",
"algorithm",
"development",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/hypersearch_v2.py#L1162-L1388 | valid |
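Aside: a toy, self-contained analogue of the recursive flattening that _readPermutationsFile delegates to rApply/_flattenKeys, where nested dictionary keys are joined with a period. Plain strings stand in for PermuteVariable objects, and flatten is an invented helper, not nupic's implementation.

def flatten(perms, prefix=()):
    flat = {}
    for key, value in perms.items():
        keys = prefix + (key,)
        if isinstance(value, dict):
            flat.update(flatten(value, keys))  # recurse into nested dicts
        else:
            flat['.'.join(keys)] = value
    return flat

print(flatten({'modelParams':
               {'tpParams': {'activationThreshold': 'PermuteInt(12, 16)'}}}))
# {'modelParams.tpParams.activationThreshold': 'PermuteInt(12, 16)'}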
numenta/nupic | src/nupic/swarming/hypersearch_v2.py | HypersearchV2._checkForOrphanedModels | def _checkForOrphanedModels (self):
"""If there are any models that haven't been updated in a while, consider
them dead, and mark them as hidden in our resultsDB. We also change the
paramsHash and particleHash of orphaned models so that we can
re-generate that particle and/or model again if we desire.
Parameters:
----------------------------------------------------------------------
retval:
"""
self.logger.debug("Checking for orphaned models older than %s" % \
(self._modelOrphanIntervalSecs))
while True:
orphanedModelId = self._cjDAO.modelAdoptNextOrphan(self._jobID,
self._modelOrphanIntervalSecs)
if orphanedModelId is None:
return
self.logger.info("Removing orphaned model: %d" % (orphanedModelId))
# Change the model hash and params hash as stored in the models table so
# that we can insert a new model with the same paramsHash
for attempt in range(100):
paramsHash = hashlib.md5("OrphanParams.%d.%d" % (orphanedModelId,
attempt)).digest()
particleHash = hashlib.md5("OrphanParticle.%d.%d" % (orphanedModelId,
attempt)).digest()
try:
self._cjDAO.modelSetFields(orphanedModelId,
dict(engParamsHash=paramsHash,
engParticleHash=particleHash))
success = True
except:
success = False
if success:
break
if not success:
raise RuntimeError("Unexpected failure to change paramsHash and "
"particleHash of orphaned model")
# Mark this model as complete, with reason "orphaned"
self._cjDAO.modelSetCompleted(modelID=orphanedModelId,
completionReason=ClientJobsDAO.CMPL_REASON_ORPHAN,
completionMsg="Orphaned")
# Update our results DB immediately, rather than wait for the worker
# to inform us. This ensures that the getParticleInfos() calls we make
# below don't include this particle. Setting the metricResult to None
# sets it to worst case
self._resultsDB.update(modelID=orphanedModelId,
modelParams=None,
modelParamsHash=paramsHash,
metricResult=None,
completed = True,
completionReason = ClientJobsDAO.CMPL_REASON_ORPHAN,
matured = True,
numRecords = 0) | python | def _checkForOrphanedModels (self):
"""If there are any models that haven't been updated in a while, consider
them dead, and mark them as hidden in our resultsDB. We also change the
paramsHash and particleHash of orphaned models so that we can
re-generate that particle and/or model again if we desire.
Parameters:
----------------------------------------------------------------------
retval:
"""
self.logger.debug("Checking for orphaned models older than %s" % \
(self._modelOrphanIntervalSecs))
while True:
orphanedModelId = self._cjDAO.modelAdoptNextOrphan(self._jobID,
self._modelOrphanIntervalSecs)
if orphanedModelId is None:
return
self.logger.info("Removing orphaned model: %d" % (orphanedModelId))
# Change the model hash and params hash as stored in the models table so
# that we can insert a new model with the same paramsHash
for attempt in range(100):
paramsHash = hashlib.md5("OrphanParams.%d.%d" % (orphanedModelId,
attempt)).digest()
particleHash = hashlib.md5("OrphanParticle.%d.%d" % (orphanedModelId,
attempt)).digest()
try:
self._cjDAO.modelSetFields(orphanedModelId,
dict(engParamsHash=paramsHash,
engParticleHash=particleHash))
success = True
except:
success = False
if success:
break
if not success:
raise RuntimeError("Unexpected failure to change paramsHash and "
"particleHash of orphaned model")
# Mark this model as complete, with reason "orphaned"
self._cjDAO.modelSetCompleted(modelID=orphanedModelId,
completionReason=ClientJobsDAO.CMPL_REASON_ORPHAN,
completionMsg="Orphaned")
# Update our results DB immediately, rather than wait for the worker
# to inform us. This ensures that the getParticleInfos() calls we make
# below don't include this particle. Setting the metricResult to None
# sets it to worst case
self._resultsDB.update(modelID=orphanedModelId,
modelParams=None,
modelParamsHash=paramsHash,
metricResult=None,
completed = True,
completionReason = ClientJobsDAO.CMPL_REASON_ORPHAN,
matured = True,
numRecords = 0) | [
"def",
"_checkForOrphanedModels",
"(",
"self",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Checking for orphaned models older than %s\"",
"%",
"(",
"self",
".",
"_modelOrphanIntervalSecs",
")",
")",
"while",
"True",
":",
"orphanedModelId",
"=",
"self",
".",
"_cjDAO",
".",
"modelAdoptNextOrphan",
"(",
"self",
".",
"_jobID",
",",
"self",
".",
"_modelOrphanIntervalSecs",
")",
"if",
"orphanedModelId",
"is",
"None",
":",
"return",
"self",
".",
"logger",
".",
"info",
"(",
"\"Removing orphaned model: %d\"",
"%",
"(",
"orphanedModelId",
")",
")",
"# Change the model hash and params hash as stored in the models table so",
"# that we can insert a new model with the same paramsHash",
"for",
"attempt",
"in",
"range",
"(",
"100",
")",
":",
"paramsHash",
"=",
"hashlib",
".",
"md5",
"(",
"\"OrphanParams.%d.%d\"",
"%",
"(",
"orphanedModelId",
",",
"attempt",
")",
")",
".",
"digest",
"(",
")",
"particleHash",
"=",
"hashlib",
".",
"md5",
"(",
"\"OrphanParticle.%d.%d\"",
"%",
"(",
"orphanedModelId",
",",
"attempt",
")",
")",
".",
"digest",
"(",
")",
"try",
":",
"self",
".",
"_cjDAO",
".",
"modelSetFields",
"(",
"orphanedModelId",
",",
"dict",
"(",
"engParamsHash",
"=",
"paramsHash",
",",
"engParticleHash",
"=",
"particleHash",
")",
")",
"success",
"=",
"True",
"except",
":",
"success",
"=",
"False",
"if",
"success",
":",
"break",
"if",
"not",
"success",
":",
"raise",
"RuntimeError",
"(",
"\"Unexpected failure to change paramsHash and \"",
"\"particleHash of orphaned model\"",
")",
"# Mark this model as complete, with reason \"orphaned\"",
"self",
".",
"_cjDAO",
".",
"modelSetCompleted",
"(",
"modelID",
"=",
"orphanedModelId",
",",
"completionReason",
"=",
"ClientJobsDAO",
".",
"CMPL_REASON_ORPHAN",
",",
"completionMsg",
"=",
"\"Orphaned\"",
")",
"# Update our results DB immediately, rather than wait for the worker",
"# to inform us. This insures that the getParticleInfos() calls we make",
"# below don't include this particle. Setting the metricResult to None",
"# sets it to worst case",
"self",
".",
"_resultsDB",
".",
"update",
"(",
"modelID",
"=",
"orphanedModelId",
",",
"modelParams",
"=",
"None",
",",
"modelParamsHash",
"=",
"paramsHash",
",",
"metricResult",
"=",
"None",
",",
"completed",
"=",
"True",
",",
"completionReason",
"=",
"ClientJobsDAO",
".",
"CMPL_REASON_ORPHAN",
",",
"matured",
"=",
"True",
",",
"numRecords",
"=",
"0",
")"
] | If there are any models that haven't been updated in a while, consider
them dead, and mark them as hidden in our resultsDB. We also change the
paramsHash and particleHash of orphaned models so that we can
re-generate that particle and/or model again if we desire.
Parameters:
----------------------------------------------------------------------
retval: | [
"If",
"there",
"are",
"any",
"models",
"that",
"haven",
"t",
"been",
"updated",
"in",
"a",
"while",
"consider",
"them",
"dead",
"and",
"mark",
"them",
"as",
"hidden",
"in",
"our",
"resultsDB",
".",
"We",
"also",
"change",
"the",
"paramsHash",
"and",
"particleHash",
"of",
"orphaned",
"models",
"so",
"that",
"we",
"can",
"re",
"-",
"generate",
"that",
"particle",
"and",
"/",
"or",
"model",
"again",
"if",
"we",
"desire",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/hypersearch_v2.py#L1463-L1522 | valid |
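The row above centers on a retry pattern: an orphaned model's engParamsHash and engParticleHash are overwritten with fresh MD5 digests, retrying with a new suffix on each attempt until the uniquely keyed models table accepts them. A distilled sketch of that pattern follows, assuming a DAO whose modelSetFields raises on a uniqueness violation as in the code above (written for Python 3, hence the .encode() calls).

import hashlib

def reassignOrphanHashes(dao, modelId, maxAttempts=100):
  # Give an orphaned model new, unique hashes so an equivalent model can
  # be inserted again later; retry with a different suffix on collision.
  for attempt in range(maxAttempts):
    paramsHash = hashlib.md5(
        ("OrphanParams.%d.%d" % (modelId, attempt)).encode()).digest()
    particleHash = hashlib.md5(
        ("OrphanParticle.%d.%d" % (modelId, attempt)).encode()).digest()
    try:
      dao.modelSetFields(modelId, dict(engParamsHash=paramsHash,
                                       engParticleHash=particleHash))
      return  # success: the new hashes were accepted
    except Exception:
      continue  # hash already taken or transient DB error; try a new suffix
  raise RuntimeError("could not assign unique hashes to orphaned model")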
numenta/nupic | src/nupic/swarming/hypersearch_v2.py | HypersearchV2._hsStatePeriodicUpdate | def _hsStatePeriodicUpdate(self, exhaustedSwarmId=None):
"""
Periodically, check to see if we should remove a certain field combination
from evaluation (because it is doing so poorly) or move on to the next
sprint (add in more fields).
This method is called from _getCandidateParticleAndSwarm(), which is called
right before we try and create a new model to run.
Parameters:
-----------------------------------------------------------------------
exhaustedSwarmId: If not None, force a change to the current set of active
swarms by removing this swarm. This is used in situations
where we can't find any new unique models to create in
this swarm. In these situations, we update the hypersearch
state regardless of the timestamp of the last time another
worker updated it.
"""
if self._hsState is None:
self._hsState = HsState(self)
# Read in current state from the DB
self._hsState.readStateFromDB()
# This will hold the list of completed swarms that we find
completedSwarms = set()
# Mark the exhausted swarm as completing/completed, if any
if exhaustedSwarmId is not None:
self.logger.info("Removing swarm %s from the active set "
"because we can't find any new unique particle "
"positions" % (exhaustedSwarmId))
# Is it completing or completed?
(particles, _, _, _, _) = self._resultsDB.getParticleInfos(
swarmId=exhaustedSwarmId, matured=False)
if len(particles) > 0:
exhaustedSwarmStatus = 'completing'
else:
exhaustedSwarmStatus = 'completed'
# Kill all swarms that don't need to be explored based on the most recent
# information.
if self._killUselessSwarms:
self._hsState.killUselessSwarms()
# For all swarms that were in the 'completing' state, see if they have
# completed yet.
#
# Note that we are not quite sure why this doesn't automatically get handled
# when we receive notification that a model finally completed in a swarm.
# But, we ARE running into a situation, when speculativeParticles is off,
# where we have one or more swarms in the 'completing' state even though all
# models have since finished. This logic will serve as a failsafe against
# this situation.
completingSwarms = self._hsState.getCompletingSwarms()
for swarmId in completingSwarms:
# Is it completed?
(particles, _, _, _, _) = self._resultsDB.getParticleInfos(
swarmId=swarmId, matured=False)
if len(particles) == 0:
completedSwarms.add(swarmId)
# Are there any swarms we can remove (because they have matured)?
completedSwarmGens = self._resultsDB.getMaturedSwarmGenerations()
priorCompletedSwarms = self._hsState.getCompletedSwarms()
for (swarmId, genIdx, errScore) in completedSwarmGens:
# Don't need to report it if the swarm already completed
if swarmId in priorCompletedSwarms:
continue
completedList = self._swarmTerminator.recordDataPoint(
swarmId=swarmId, generation=genIdx, errScore=errScore)
# Update status message
statusMsg = "Completed generation #%d of swarm '%s' with a best" \
" errScore of %g" % (genIdx, swarmId, errScore)
if len(completedList) > 0:
statusMsg = "%s. Matured swarm(s): %s" % (statusMsg, completedList)
self.logger.info(statusMsg)
self._cjDAO.jobSetFields (jobID=self._jobID,
fields=dict(engStatus=statusMsg),
useConnectionID=False,
ignoreUnchanged=True)
# Special test mode to check which swarms have terminated
if 'NTA_TEST_recordSwarmTerminations' in os.environ:
while True:
resultsStr = self._cjDAO.jobGetFields(self._jobID, ['results'])[0]
if resultsStr is None:
results = {}
else:
results = json.loads(resultsStr)
if not 'terminatedSwarms' in results:
results['terminatedSwarms'] = {}
for swarm in completedList:
if swarm not in results['terminatedSwarms']:
results['terminatedSwarms'][swarm] = (genIdx,
self._swarmTerminator.swarmScores[swarm])
newResultsStr = json.dumps(results)
if newResultsStr == resultsStr:
break
updated = self._cjDAO.jobSetFieldIfEqual(jobID=self._jobID,
fieldName='results',
curValue=resultsStr,
newValue = json.dumps(results))
if updated:
break
if len(completedList) > 0:
for name in completedList:
self.logger.info("Swarm matured: %s. Score at generation %d: "
"%s" % (name, genIdx, errScore))
completedSwarms = completedSwarms.union(completedList)
if len(completedSwarms)==0 and (exhaustedSwarmId is None):
return
# We need to mark one or more swarms as completed, keep trying until
# successful, or until some other worker does it for us.
while True:
if exhaustedSwarmId is not None:
self._hsState.setSwarmState(exhaustedSwarmId, exhaustedSwarmStatus)
# Mark the completed swarms as completed
for swarmId in completedSwarms:
self._hsState.setSwarmState(swarmId, 'completed')
# If nothing changed, we're done
if not self._hsState.isDirty():
return
# Update the shared Hypersearch state now
# This will do nothing and return False if some other worker beat us to it
success = self._hsState.writeStateToDB()
if success:
# Go through and cancel all models that are still running, except for
# the best model. Once the best model changes, the one that used to be
# best (and has matured) will notice that and stop itself at that point.
jobResultsStr = self._cjDAO.jobGetFields(self._jobID, ['results'])[0]
if jobResultsStr is not None:
jobResults = json.loads(jobResultsStr)
bestModelId = jobResults.get('bestModel', None)
else:
bestModelId = None
for swarmId in list(completedSwarms):
(_, modelIds, _, _, _) = self._resultsDB.getParticleInfos(
swarmId=swarmId, completed=False)
if bestModelId in modelIds:
modelIds.remove(bestModelId)
if len(modelIds) == 0:
continue
self.logger.info("Killing the following models in swarm '%s' because"
"the swarm is being terminated: %s" % (swarmId,
str(modelIds)))
for modelId in modelIds:
self._cjDAO.modelSetFields(modelId,
dict(engStop=ClientJobsDAO.STOP_REASON_KILLED),
ignoreUnchanged = True)
return
# We were not able to change the state because some other worker beat us
# to it.
# Get the new state, and try again to apply our changes.
self._hsState.readStateFromDB()
self.logger.debug("New hsState has been set by some other worker to: "
" \n%s" % (pprint.pformat(self._hsState._state, indent=4))) | python | def _hsStatePeriodicUpdate(self, exhaustedSwarmId=None):
"""
Periodically, check to see if we should remove a certain field combination
from evaluation (because it is doing so poorly) or move on to the next
sprint (add in more fields).
This method is called from _getCandidateParticleAndSwarm(), which is called
right before we try and create a new model to run.
Parameters:
-----------------------------------------------------------------------
exhaustedSwarmId: If not None, force a change to the current set of active
swarms by removing this swarm. This is used in situations
where we can't find any new unique models to create in
this swarm. In these situations, we update the hypersearch
state regardless of the timestamp of the last time another
worker updated it.
"""
if self._hsState is None:
self._hsState = HsState(self)
# Read in current state from the DB
self._hsState.readStateFromDB()
# This will hold the list of completed swarms that we find
completedSwarms = set()
# Mark the exhausted swarm as completing/completed, if any
if exhaustedSwarmId is not None:
self.logger.info("Removing swarm %s from the active set "
"because we can't find any new unique particle "
"positions" % (exhaustedSwarmId))
# Is it completing or completed?
(particles, _, _, _, _) = self._resultsDB.getParticleInfos(
swarmId=exhaustedSwarmId, matured=False)
if len(particles) > 0:
exhaustedSwarmStatus = 'completing'
else:
exhaustedSwarmStatus = 'completed'
# Kill all swarms that don't need to be explored based on the most recent
# information.
if self._killUselessSwarms:
self._hsState.killUselessSwarms()
# For all swarms that were in the 'completing' state, see if they have
# completed yet.
#
# Note that we are not quite sure why this doesn't automatically get handled
# when we receive notification that a model finally completed in a swarm.
# But, we ARE running into a situation, when speculativeParticles is off,
# where we have one or more swarms in the 'completing' state even though all
# models have since finished. This logic will serve as a failsafe against
# this situation.
completingSwarms = self._hsState.getCompletingSwarms()
for swarmId in completingSwarms:
# Is it completed?
(particles, _, _, _, _) = self._resultsDB.getParticleInfos(
swarmId=swarmId, matured=False)
if len(particles) == 0:
completedSwarms.add(swarmId)
# Are there any swarms we can remove (because they have matured)?
completedSwarmGens = self._resultsDB.getMaturedSwarmGenerations()
priorCompletedSwarms = self._hsState.getCompletedSwarms()
for (swarmId, genIdx, errScore) in completedSwarmGens:
# Don't need to report it if the swarm already completed
if swarmId in priorCompletedSwarms:
continue
completedList = self._swarmTerminator.recordDataPoint(
swarmId=swarmId, generation=genIdx, errScore=errScore)
# Update status message
statusMsg = "Completed generation #%d of swarm '%s' with a best" \
" errScore of %g" % (genIdx, swarmId, errScore)
if len(completedList) > 0:
statusMsg = "%s. Matured swarm(s): %s" % (statusMsg, completedList)
self.logger.info(statusMsg)
self._cjDAO.jobSetFields (jobID=self._jobID,
fields=dict(engStatus=statusMsg),
useConnectionID=False,
ignoreUnchanged=True)
# Special test mode to check which swarms have terminated
if 'NTA_TEST_recordSwarmTerminations' in os.environ:
while True:
resultsStr = self._cjDAO.jobGetFields(self._jobID, ['results'])[0]
if resultsStr is None:
results = {}
else:
results = json.loads(resultsStr)
if not 'terminatedSwarms' in results:
results['terminatedSwarms'] = {}
for swarm in completedList:
if swarm not in results['terminatedSwarms']:
results['terminatedSwarms'][swarm] = (genIdx,
self._swarmTerminator.swarmScores[swarm])
newResultsStr = json.dumps(results)
if newResultsStr == resultsStr:
break
updated = self._cjDAO.jobSetFieldIfEqual(jobID=self._jobID,
fieldName='results',
curValue=resultsStr,
newValue = json.dumps(results))
if updated:
break
if len(completedList) > 0:
for name in completedList:
self.logger.info("Swarm matured: %s. Score at generation %d: "
"%s" % (name, genIdx, errScore))
completedSwarms = completedSwarms.union(completedList)
if len(completedSwarms)==0 and (exhaustedSwarmId is None):
return
# We need to mark one or more swarms as completed, keep trying until
# successful, or until some other worker does it for us.
while True:
if exhaustedSwarmId is not None:
self._hsState.setSwarmState(exhaustedSwarmId, exhaustedSwarmStatus)
# Mark the completed swarms as completed
for swarmId in completedSwarms:
self._hsState.setSwarmState(swarmId, 'completed')
# If nothing changed, we're done
if not self._hsState.isDirty():
return
# Update the shared Hypersearch state now
# This will do nothing and return False if some other worker beat us to it
success = self._hsState.writeStateToDB()
if success:
# Go through and cancel all models that are still running, except for
# the best model. Once the best model changes, the one that used to be
# best (and has matured) will notice that and stop itself at that point.
jobResultsStr = self._cjDAO.jobGetFields(self._jobID, ['results'])[0]
if jobResultsStr is not None:
jobResults = json.loads(jobResultsStr)
bestModelId = jobResults.get('bestModel', None)
else:
bestModelId = None
for swarmId in list(completedSwarms):
(_, modelIds, _, _, _) = self._resultsDB.getParticleInfos(
swarmId=swarmId, completed=False)
if bestModelId in modelIds:
modelIds.remove(bestModelId)
if len(modelIds) == 0:
continue
self.logger.info("Killing the following models in swarm '%s' because"
"the swarm is being terminated: %s" % (swarmId,
str(modelIds)))
for modelId in modelIds:
self._cjDAO.modelSetFields(modelId,
dict(engStop=ClientJobsDAO.STOP_REASON_KILLED),
ignoreUnchanged = True)
return
# We were not able to change the state because some other worker beat us
# to it.
# Get the new state, and try again to apply our changes.
self._hsState.readStateFromDB()
self.logger.debug("New hsState has been set by some other worker to: "
" \n%s" % (pprint.pformat(self._hsState._state, indent=4))) | [
"def",
"_hsStatePeriodicUpdate",
"(",
"self",
",",
"exhaustedSwarmId",
"=",
"None",
")",
":",
"if",
"self",
".",
"_hsState",
"is",
"None",
":",
"self",
".",
"_hsState",
"=",
"HsState",
"(",
"self",
")",
"# Read in current state from the DB",
"self",
".",
"_hsState",
".",
"readStateFromDB",
"(",
")",
"# This will hold the list of completed swarms that we find",
"completedSwarms",
"=",
"set",
"(",
")",
"# Mark the exhausted swarm as completing/completed, if any",
"if",
"exhaustedSwarmId",
"is",
"not",
"None",
":",
"self",
".",
"logger",
".",
"info",
"(",
"\"Removing swarm %s from the active set \"",
"\"because we can't find any new unique particle \"",
"\"positions\"",
"%",
"(",
"exhaustedSwarmId",
")",
")",
"# Is it completing or completed?",
"(",
"particles",
",",
"_",
",",
"_",
",",
"_",
",",
"_",
")",
"=",
"self",
".",
"_resultsDB",
".",
"getParticleInfos",
"(",
"swarmId",
"=",
"exhaustedSwarmId",
",",
"matured",
"=",
"False",
")",
"if",
"len",
"(",
"particles",
")",
">",
"0",
":",
"exhaustedSwarmStatus",
"=",
"'completing'",
"else",
":",
"exhaustedSwarmStatus",
"=",
"'completed'",
"# Kill all swarms that don't need to be explored based on the most recent",
"# information.",
"if",
"self",
".",
"_killUselessSwarms",
":",
"self",
".",
"_hsState",
".",
"killUselessSwarms",
"(",
")",
"# For all swarms that were in the 'completing' state, see if they have",
"# completed yet.",
"#",
"# Note that we are not quite sure why this doesn't automatically get handled",
"# when we receive notification that a model finally completed in a swarm.",
"# But, we ARE running into a situation, when speculativeParticles is off,",
"# where we have one or more swarms in the 'completing' state even though all",
"# models have since finished. This logic will serve as a failsafe against",
"# this situation.",
"completingSwarms",
"=",
"self",
".",
"_hsState",
".",
"getCompletingSwarms",
"(",
")",
"for",
"swarmId",
"in",
"completingSwarms",
":",
"# Is it completed?",
"(",
"particles",
",",
"_",
",",
"_",
",",
"_",
",",
"_",
")",
"=",
"self",
".",
"_resultsDB",
".",
"getParticleInfos",
"(",
"swarmId",
"=",
"swarmId",
",",
"matured",
"=",
"False",
")",
"if",
"len",
"(",
"particles",
")",
"==",
"0",
":",
"completedSwarms",
".",
"add",
"(",
"swarmId",
")",
"# Are there any swarms we can remove (because they have matured)?",
"completedSwarmGens",
"=",
"self",
".",
"_resultsDB",
".",
"getMaturedSwarmGenerations",
"(",
")",
"priorCompletedSwarms",
"=",
"self",
".",
"_hsState",
".",
"getCompletedSwarms",
"(",
")",
"for",
"(",
"swarmId",
",",
"genIdx",
",",
"errScore",
")",
"in",
"completedSwarmGens",
":",
"# Don't need to report it if the swarm already completed",
"if",
"swarmId",
"in",
"priorCompletedSwarms",
":",
"continue",
"completedList",
"=",
"self",
".",
"_swarmTerminator",
".",
"recordDataPoint",
"(",
"swarmId",
"=",
"swarmId",
",",
"generation",
"=",
"genIdx",
",",
"errScore",
"=",
"errScore",
")",
"# Update status message",
"statusMsg",
"=",
"\"Completed generation #%d of swarm '%s' with a best\"",
"\" errScore of %g\"",
"%",
"(",
"genIdx",
",",
"swarmId",
",",
"errScore",
")",
"if",
"len",
"(",
"completedList",
")",
">",
"0",
":",
"statusMsg",
"=",
"\"%s. Matured swarm(s): %s\"",
"%",
"(",
"statusMsg",
",",
"completedList",
")",
"self",
".",
"logger",
".",
"info",
"(",
"statusMsg",
")",
"self",
".",
"_cjDAO",
".",
"jobSetFields",
"(",
"jobID",
"=",
"self",
".",
"_jobID",
",",
"fields",
"=",
"dict",
"(",
"engStatus",
"=",
"statusMsg",
")",
",",
"useConnectionID",
"=",
"False",
",",
"ignoreUnchanged",
"=",
"True",
")",
"# Special test mode to check which swarms have terminated",
"if",
"'NTA_TEST_recordSwarmTerminations'",
"in",
"os",
".",
"environ",
":",
"while",
"True",
":",
"resultsStr",
"=",
"self",
".",
"_cjDAO",
".",
"jobGetFields",
"(",
"self",
".",
"_jobID",
",",
"[",
"'results'",
"]",
")",
"[",
"0",
"]",
"if",
"resultsStr",
"is",
"None",
":",
"results",
"=",
"{",
"}",
"else",
":",
"results",
"=",
"json",
".",
"loads",
"(",
"resultsStr",
")",
"if",
"not",
"'terminatedSwarms'",
"in",
"results",
":",
"results",
"[",
"'terminatedSwarms'",
"]",
"=",
"{",
"}",
"for",
"swarm",
"in",
"completedList",
":",
"if",
"swarm",
"not",
"in",
"results",
"[",
"'terminatedSwarms'",
"]",
":",
"results",
"[",
"'terminatedSwarms'",
"]",
"[",
"swarm",
"]",
"=",
"(",
"genIdx",
",",
"self",
".",
"_swarmTerminator",
".",
"swarmScores",
"[",
"swarm",
"]",
")",
"newResultsStr",
"=",
"json",
".",
"dumps",
"(",
"results",
")",
"if",
"newResultsStr",
"==",
"resultsStr",
":",
"break",
"updated",
"=",
"self",
".",
"_cjDAO",
".",
"jobSetFieldIfEqual",
"(",
"jobID",
"=",
"self",
".",
"_jobID",
",",
"fieldName",
"=",
"'results'",
",",
"curValue",
"=",
"resultsStr",
",",
"newValue",
"=",
"json",
".",
"dumps",
"(",
"results",
")",
")",
"if",
"updated",
":",
"break",
"if",
"len",
"(",
"completedList",
")",
">",
"0",
":",
"for",
"name",
"in",
"completedList",
":",
"self",
".",
"logger",
".",
"info",
"(",
"\"Swarm matured: %s. Score at generation %d: \"",
"\"%s\"",
"%",
"(",
"name",
",",
"genIdx",
",",
"errScore",
")",
")",
"completedSwarms",
"=",
"completedSwarms",
".",
"union",
"(",
"completedList",
")",
"if",
"len",
"(",
"completedSwarms",
")",
"==",
"0",
"and",
"(",
"exhaustedSwarmId",
"is",
"None",
")",
":",
"return",
"# We need to mark one or more swarms as completed, keep trying until",
"# successful, or until some other worker does it for us.",
"while",
"True",
":",
"if",
"exhaustedSwarmId",
"is",
"not",
"None",
":",
"self",
".",
"_hsState",
".",
"setSwarmState",
"(",
"exhaustedSwarmId",
",",
"exhaustedSwarmStatus",
")",
"# Mark the completed swarms as completed",
"for",
"swarmId",
"in",
"completedSwarms",
":",
"self",
".",
"_hsState",
".",
"setSwarmState",
"(",
"swarmId",
",",
"'completed'",
")",
"# If nothing changed, we're done",
"if",
"not",
"self",
".",
"_hsState",
".",
"isDirty",
"(",
")",
":",
"return",
"# Update the shared Hypersearch state now",
"# This will do nothing and return False if some other worker beat us to it",
"success",
"=",
"self",
".",
"_hsState",
".",
"writeStateToDB",
"(",
")",
"if",
"success",
":",
"# Go through and cancel all models that are still running, except for",
"# the best model. Once the best model changes, the one that used to be",
"# best (and has matured) will notice that and stop itself at that point.",
"jobResultsStr",
"=",
"self",
".",
"_cjDAO",
".",
"jobGetFields",
"(",
"self",
".",
"_jobID",
",",
"[",
"'results'",
"]",
")",
"[",
"0",
"]",
"if",
"jobResultsStr",
"is",
"not",
"None",
":",
"jobResults",
"=",
"json",
".",
"loads",
"(",
"jobResultsStr",
")",
"bestModelId",
"=",
"jobResults",
".",
"get",
"(",
"'bestModel'",
",",
"None",
")",
"else",
":",
"bestModelId",
"=",
"None",
"for",
"swarmId",
"in",
"list",
"(",
"completedSwarms",
")",
":",
"(",
"_",
",",
"modelIds",
",",
"_",
",",
"_",
",",
"_",
")",
"=",
"self",
".",
"_resultsDB",
".",
"getParticleInfos",
"(",
"swarmId",
"=",
"swarmId",
",",
"completed",
"=",
"False",
")",
"if",
"bestModelId",
"in",
"modelIds",
":",
"modelIds",
".",
"remove",
"(",
"bestModelId",
")",
"if",
"len",
"(",
"modelIds",
")",
"==",
"0",
":",
"continue",
"self",
".",
"logger",
".",
"info",
"(",
"\"Killing the following models in swarm '%s' because\"",
"\"the swarm is being terminated: %s\"",
"%",
"(",
"swarmId",
",",
"str",
"(",
"modelIds",
")",
")",
")",
"for",
"modelId",
"in",
"modelIds",
":",
"self",
".",
"_cjDAO",
".",
"modelSetFields",
"(",
"modelId",
",",
"dict",
"(",
"engStop",
"=",
"ClientJobsDAO",
".",
"STOP_REASON_KILLED",
")",
",",
"ignoreUnchanged",
"=",
"True",
")",
"return",
"# We were not able to change the state because some other worker beat us",
"# to it.",
"# Get the new state, and try again to apply our changes.",
"self",
".",
"_hsState",
".",
"readStateFromDB",
"(",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"\"New hsState has been set by some other worker to: \"",
"\" \\n%s\"",
"%",
"(",
"pprint",
".",
"pformat",
"(",
"self",
".",
"_hsState",
".",
"_state",
",",
"indent",
"=",
"4",
")",
")",
")"
] | Periodically, check to see if we should remove a certain field combination
from evaluation (because it is doing so poorly) or move on to the next
sprint (add in more fields).
This method is called from _getCandidateParticleAndSwarm(), which is called
right before we try and create a new model to run.
Parameters:
-----------------------------------------------------------------------
exhaustedSwarmId: If not None, force a change to the current set of active
swarms by removing this swarm. This is used in situations
where we can't find any new unique models to create in
this swarm. In these situations, we update the hypersearch
state regardless of the timestamp of the last time another
worker updated it. | [
"Periodically",
"check",
"to",
"see",
"if",
"we",
"should",
"remove",
"a",
"certain",
"field",
"combination",
"from",
"evaluation",
"(",
"because",
"it",
"is",
"doing",
"so",
"poorly",
")",
"or",
"move",
"on",
"to",
"the",
"next",
"sprint",
"(",
"add",
"in",
"more",
"fields",
")",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/hypersearch_v2.py#L1525-L1698 | valid |
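The NTA_TEST_recordSwarmTerminations block in the row above is an optimistic-concurrency loop: read the shared 'results' JSON, modify it locally, then write it back only if no other worker changed it in the meantime (jobSetFieldIfEqual), re-reading and retrying otherwise. A minimal sketch of that compare-and-swap pattern, assuming the ClientJobsDAO interface used above (Python 3 here):

import json

def updateResultsField(dao, jobID, mutate):
  # Apply `mutate` (an in-place edit of the results dict) atomically with
  # respect to other workers racing on the same 'results' field.
  while True:
    curValue = dao.jobGetFields(jobID, ['results'])[0]
    results = json.loads(curValue) if curValue is not None else {}
    mutate(results)
    newValue = json.dumps(results)
    if newValue == curValue:
      return  # nothing changed; no write needed
    if dao.jobSetFieldIfEqual(jobID=jobID, fieldName='results',
                              curValue=curValue, newValue=newValue):
      return  # our compare-and-swap won
    # another worker updated the field first; loop re-reads and retries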
numenta/nupic | src/nupic/swarming/hypersearch_v2.py | HypersearchV2._getCandidateParticleAndSwarm | def _getCandidateParticleAndSwarm (self, exhaustedSwarmId=None):
"""Find or create a candidate particle to produce a new model.
At any one time, there is an active set of swarms in the current sprint, where
each swarm in the sprint represents a particular combination of fields.
Ideally, we should try to balance the number of models we have evaluated for
each swarm at any time.
This method will see how many models have been evaluated for each active
swarm in the current active sprint(s) and then try and choose a particle
from the least represented swarm in the first possible active sprint, with
the following constraints/rules:
for each active sprint:
for each active swarm (preference to those with least # of models so far):
1.) The particle will be created from new (generation #0) if there are not
already self._minParticlesPerSwarm particles in the swarm.
2.) Find the first gen that has a completed particle and evolve that
particle to the next generation.
3.) If we got to here, we know that we have satisfied the min # of
particles for the swarm, and they are all currently running (probably at
various generation indexes). Go on to the next swarm.
If we couldn't find a swarm to allocate a particle in, go on to the next
sprint and start allocating particles there....
Parameters:
----------------------------------------------------------------
exhaustedSwarmId: If not None, force a change to the current set of active
swarms by marking this swarm as either 'completing' or
'completed'. If there are still models being evaluated in
it, mark it as 'completing', else 'completed'. This is
used in situations where we can't find any new unique
models to create in this swarm. In these situations, we
force an update to the hypersearch state so no other
worker wastes time trying to use this swarm.
retval: (exit, particle, swarm)
exit: If true, this worker is ready to exit (particle and
swarm will be None)
particle: Which particle to run
swarm: which swarm the particle is in
NOTE: When particle and swarm are None and exit is False, it
means that we need to wait for one or more other worker(s) to
finish their respective models before we can pick a particle
to run. This will generally only happen when speculativeParticles
is set to False.
"""
# Cancel search?
jobCancel = self._cjDAO.jobGetFields(self._jobID, ['cancel'])[0]
if jobCancel:
self._jobCancelled = True
# Did a worker cancel the job because of an error?
(workerCmpReason, workerCmpMsg) = self._cjDAO.jobGetFields(self._jobID,
['workerCompletionReason', 'workerCompletionMsg'])
if workerCmpReason == ClientJobsDAO.CMPL_REASON_SUCCESS:
self.logger.info("Exiting due to job being cancelled")
self._cjDAO.jobSetFields(self._jobID,
dict(workerCompletionMsg="Job was cancelled"),
useConnectionID=False, ignoreUnchanged=True)
else:
self.logger.error("Exiting because some worker set the "
"workerCompletionReason to %s. WorkerCompletionMsg: %s" %
(workerCmpReason, workerCmpMsg))
return (True, None, None)
# Perform periodic updates on the Hypersearch state.
if self._hsState is not None:
priorActiveSwarms = self._hsState.getActiveSwarms()
else:
priorActiveSwarms = None
# Update the HypersearchState, checking for matured swarms, and marking
# the passed in swarm as exhausted, if any
self._hsStatePeriodicUpdate(exhaustedSwarmId=exhaustedSwarmId)
# The above call may have modified self._hsState['activeSwarmIds']
# Log the current set of active swarms
activeSwarms = self._hsState.getActiveSwarms()
if activeSwarms != priorActiveSwarms:
self.logger.info("Active swarms changed to %s (from %s)" % (activeSwarms,
priorActiveSwarms))
self.logger.debug("Active swarms: %s" % (activeSwarms))
# If too many model errors were detected, exit
totalCmpModels = self._resultsDB.getNumCompletedModels()
if totalCmpModels > 5:
numErrs = self._resultsDB.getNumErrModels()
if (float(numErrs) / totalCmpModels) > self._maxPctErrModels:
# Get one of the errors
errModelIds = self._resultsDB.getErrModelIds()
resInfo = self._cjDAO.modelsGetResultAndStatus([errModelIds[0]])[0]
modelErrMsg = resInfo.completionMsg
cmpMsg = "%s: Exiting due to receiving too many models failing" \
" from exceptions (%d out of %d). \nModel Exception: %s" % \
(ErrorCodes.tooManyModelErrs, numErrs, totalCmpModels,
modelErrMsg)
self.logger.error(cmpMsg)
# Cancel the entire job now, if it has not already been cancelled
workerCmpReason = self._cjDAO.jobGetFields(self._jobID,
['workerCompletionReason'])[0]
if workerCmpReason == ClientJobsDAO.CMPL_REASON_SUCCESS:
self._cjDAO.jobSetFields(
self._jobID,
fields=dict(
cancel=True,
workerCompletionReason = ClientJobsDAO.CMPL_REASON_ERROR,
workerCompletionMsg = cmpMsg),
useConnectionID=False,
ignoreUnchanged=True)
return (True, None, None)
# If HsState thinks the search is over, exit. It is seeing if the results
# on the sprint we just completed are worse than a prior sprint.
if self._hsState.isSearchOver():
cmpMsg = "Exiting because results did not improve in most recently" \
" completed sprint."
self.logger.info(cmpMsg)
self._cjDAO.jobSetFields(self._jobID,
dict(workerCompletionMsg=cmpMsg),
useConnectionID=False, ignoreUnchanged=True)
return (True, None, None)
# Search successive active sprints, until we can find a candidate particle
# to work with
sprintIdx = -1
while True:
# Is this sprint active?
sprintIdx += 1
(active, eos) = self._hsState.isSprintActive(sprintIdx)
# If no more sprints to explore:
if eos:
# If any prior ones are still being explored, finish up exploring them
if self._hsState.anyGoodSprintsActive():
self.logger.info("No more sprints to explore, waiting for prior"
" sprints to complete")
return (False, None, None)
# Else, we're done
else:
cmpMsg = "Exiting because we've evaluated all possible field " \
"combinations"
self._cjDAO.jobSetFields(self._jobID,
dict(workerCompletionMsg=cmpMsg),
useConnectionID=False, ignoreUnchanged=True)
self.logger.info(cmpMsg)
return (True, None, None)
if not active:
if not self._speculativeParticles:
if not self._hsState.isSprintCompleted(sprintIdx):
self.logger.info("Waiting for all particles in sprint %d to complete"
"before evolving any more particles" % (sprintIdx))
return (False, None, None)
continue
# ====================================================================
# Look for swarms that have particle "holes" in their generations. That is,
# an earlier generation with less than minParticlesPerSwarm. This can
# happen if a model that was started earlier got orphaned. If we detect
# this, start a new particle in that generation.
swarmIds = self._hsState.getActiveSwarms(sprintIdx)
for swarmId in swarmIds:
firstNonFullGenIdx = self._resultsDB.firstNonFullGeneration(
swarmId=swarmId,
minNumParticles=self._minParticlesPerSwarm)
if firstNonFullGenIdx is None:
continue
if firstNonFullGenIdx < self._resultsDB.highestGeneration(swarmId):
self.logger.info("Cloning an earlier model in generation %d of swarm "
"%s (sprintIdx=%s) to replace an orphaned model" % (
firstNonFullGenIdx, swarmId, sprintIdx))
# Clone a random orphaned particle from the incomplete generation
(allParticles, allModelIds, errScores, completed, matured) = \
self._resultsDB.getOrphanParticleInfos(swarmId, firstNonFullGenIdx)
if len(allModelIds) > 0:
# We have seen instances where we get stuck in a loop incessantly
# trying to clone earlier models (NUP-1511). My best guess is that
# we've already successfully cloned each of the orphaned models at
# least once, but still need at least one more. If we don't create
# a new particleID, we will never be able to instantiate another
# model (since particleID hash is a unique key in the models table).
# So, on 1/8/2013 this logic was changed to create a new particleID
# whenever we clone an orphan.
newParticleId = True
self.logger.info("Cloning an orphaned model")
# If there is no orphan, clone one of the other particles. We can
# have no orphan if this was a speculative generation that only
# continued particles completed in the prior generation.
else:
newParticleId = True
self.logger.info("No orphans found, so cloning a non-orphan")
(allParticles, allModelIds, errScores, completed, matured) = \
self._resultsDB.getParticleInfos(swarmId=swarmId,
genIdx=firstNonFullGenIdx)
# Clone that model
modelId = random.choice(allModelIds)
self.logger.info("Cloning model %r" % (modelId))
(particleState, _, _, _, _) = self._resultsDB.getParticleInfo(modelId)
particle = Particle(hsObj = self,
resultsDB = self._resultsDB,
flattenedPermuteVars=self._flattenedPermutations,
newFromClone=particleState,
newParticleId=newParticleId)
return (False, particle, swarmId)
# ====================================================================
# Sort the swarms in priority order, trying the ones with the least
# number of models first
swarmSizes = numpy.array([self._resultsDB.numModels(x) for x in swarmIds])
swarmSizeAndIdList = zip(swarmSizes, swarmIds)
swarmSizeAndIdList.sort()
for (_, swarmId) in swarmSizeAndIdList:
# -------------------------------------------------------------------
# 1.) The particle will be created from new (at generation #0) if there
# are not already self._minParticlesPerSwarm particles in the swarm.
(allParticles, allModelIds, errScores, completed, matured) = (
self._resultsDB.getParticleInfos(swarmId))
if len(allParticles) < self._minParticlesPerSwarm:
particle = Particle(hsObj=self,
resultsDB=self._resultsDB,
flattenedPermuteVars=self._flattenedPermutations,
swarmId=swarmId,
newFarFrom=allParticles)
# Jam in the best encoder state found from the first sprint
bestPriorModel = None
if sprintIdx >= 1:
(bestPriorModel, errScore) = self._hsState.bestModelInSprint(0)
if bestPriorModel is not None:
self.logger.info("Best model and errScore from previous sprint(%d):"
" %s, %g" % (0, str(bestPriorModel), errScore))
(baseState, modelId, errScore, completed, matured) \
= self._resultsDB.getParticleInfo(bestPriorModel)
particle.copyEncoderStatesFrom(baseState)
# Copy the best inference type from the earlier sprint
particle.copyVarStatesFrom(baseState, ['modelParams|inferenceType'])
# It's best to jiggle the best settings from the prior sprint, so
# compute a new position starting from that previous best
# Only jiggle the vars we copied from the prior model
whichVars = []
for varName in baseState['varStates']:
if ':' in varName:
whichVars.append(varName)
particle.newPosition(whichVars)
self.logger.debug("Particle after incorporating encoder vars from best "
"model in previous sprint: \n%s" % (str(particle)))
return (False, particle, swarmId)
# -------------------------------------------------------------------
# 2.) Look for a completed particle to evolve
# Note that we use lastDescendent. We only want to evolve particles that
# are at their most recent generation index.
(readyParticles, readyModelIds, readyErrScores, _, _) = (
self._resultsDB.getParticleInfos(swarmId, genIdx=None,
matured=True, lastDescendent=True))
# If we have at least 1 ready particle to evolve...
if len(readyParticles) > 0:
readyGenIdxs = [x['genIdx'] for x in readyParticles]
sortedGenIdxs = sorted(set(readyGenIdxs))
genIdx = sortedGenIdxs[0]
# Now, genIdx has the generation of the particle we want to run,
# Get a particle from that generation and evolve it.
useParticle = None
for particle in readyParticles:
if particle['genIdx'] == genIdx:
useParticle = particle
break
# If speculativeParticles is off, we don't want to evolve a particle
# into the next generation until all particles in the current
# generation have completed.
if not self._speculativeParticles:
(particles, _, _, _, _) = self._resultsDB.getParticleInfos(
swarmId, genIdx=genIdx, matured=False)
if len(particles) > 0:
continue
particle = Particle(hsObj=self,
resultsDB=self._resultsDB,
flattenedPermuteVars=self._flattenedPermutations,
evolveFromState=useParticle)
return (False, particle, swarmId)
# END: for (swarmSize, swarmId) in swarmSizeAndIdList:
# No success in this swarm, onto next swarm
# ====================================================================
# We couldn't find a particle in this sprint ready to evolve. If
# speculative particles is OFF, we have to wait for one or more other
# workers to finish up their particles before we can do anything.
if not self._speculativeParticles:
self.logger.info("Waiting for one or more of the %s swarms "
"to complete a generation before evolving any more particles" \
% (str(swarmIds)))
return (False, None, None) | python | def _getCandidateParticleAndSwarm (self, exhaustedSwarmId=None):
"""Find or create a candidate particle to produce a new model.
At any one time, there is an active set of swarms in the current sprint, where
each swarm in the sprint represents a particular combination of fields.
Ideally, we should try to balance the number of models we have evaluated for
each swarm at any time.
This method will see how many models have been evaluated for each active
swarm in the current active sprint(s) and then try and choose a particle
from the least represented swarm in the first possible active sprint, with
the following constraints/rules:
for each active sprint:
for each active swarm (preference to those with least # of models so far):
1.) The particle will be created from new (generation #0) if there are not
already self._minParticlesPerSwarm particles in the swarm.
2.) Find the first gen that has a completed particle and evolve that
particle to the next generation.
3.) If we got to here, we know that we have satisfied the min # of
particles for the swarm, and they are all currently running (probably at
various generation indexes). Go on to the next swarm.
If we couldn't find a swarm to allocate a particle in, go on to the next
sprint and start allocating particles there....
Parameters:
----------------------------------------------------------------
exhaustedSwarmId: If not None, force a change to the current set of active
swarms by marking this swarm as either 'completing' or
'completed'. If there are still models being evaluated in
it, mark it as 'completing', else 'completed'. This is
used in situations where we can't find any new unique
models to create in this swarm. In these situations, we
force an update to the hypersearch state so no other
worker wastes time trying to use this swarm.
retval: (exit, particle, swarm)
exit: If true, this worker is ready to exit (particle and
swarm will be None)
particle: Which particle to run
swarm: which swarm the particle is in
NOTE: When particle and swarm are None and exit is False, it
means that we need to wait for one or more other worker(s) to
finish their respective models before we can pick a particle
to run. This will generally only happen when speculativeParticles
is set to False.
"""
# Cancel search?
jobCancel = self._cjDAO.jobGetFields(self._jobID, ['cancel'])[0]
if jobCancel:
self._jobCancelled = True
# Did a worker cancel the job because of an error?
(workerCmpReason, workerCmpMsg) = self._cjDAO.jobGetFields(self._jobID,
['workerCompletionReason', 'workerCompletionMsg'])
if workerCmpReason == ClientJobsDAO.CMPL_REASON_SUCCESS:
self.logger.info("Exiting due to job being cancelled")
self._cjDAO.jobSetFields(self._jobID,
dict(workerCompletionMsg="Job was cancelled"),
useConnectionID=False, ignoreUnchanged=True)
else:
self.logger.error("Exiting because some worker set the "
"workerCompletionReason to %s. WorkerCompletionMsg: %s" %
(workerCmpReason, workerCmpMsg))
return (True, None, None)
# Perform periodic updates on the Hypersearch state.
if self._hsState is not None:
priorActiveSwarms = self._hsState.getActiveSwarms()
else:
priorActiveSwarms = None
# Update the HypersearchState, checking for matured swarms, and marking
# the passed in swarm as exhausted, if any
self._hsStatePeriodicUpdate(exhaustedSwarmId=exhaustedSwarmId)
# The above call may have modified self._hsState['activeSwarmIds']
# Log the current set of active swarms
activeSwarms = self._hsState.getActiveSwarms()
if activeSwarms != priorActiveSwarms:
self.logger.info("Active swarms changed to %s (from %s)" % (activeSwarms,
priorActiveSwarms))
self.logger.debug("Active swarms: %s" % (activeSwarms))
# If too many model errors were detected, exit
totalCmpModels = self._resultsDB.getNumCompletedModels()
if totalCmpModels > 5:
numErrs = self._resultsDB.getNumErrModels()
if (float(numErrs) / totalCmpModels) > self._maxPctErrModels:
# Get one of the errors
errModelIds = self._resultsDB.getErrModelIds()
resInfo = self._cjDAO.modelsGetResultAndStatus([errModelIds[0]])[0]
modelErrMsg = resInfo.completionMsg
cmpMsg = "%s: Exiting due to receiving too many models failing" \
" from exceptions (%d out of %d). \nModel Exception: %s" % \
(ErrorCodes.tooManyModelErrs, numErrs, totalCmpModels,
modelErrMsg)
self.logger.error(cmpMsg)
# Cancel the entire job now, if it has not already been cancelled
workerCmpReason = self._cjDAO.jobGetFields(self._jobID,
['workerCompletionReason'])[0]
if workerCmpReason == ClientJobsDAO.CMPL_REASON_SUCCESS:
self._cjDAO.jobSetFields(
self._jobID,
fields=dict(
cancel=True,
workerCompletionReason = ClientJobsDAO.CMPL_REASON_ERROR,
workerCompletionMsg = cmpMsg),
useConnectionID=False,
ignoreUnchanged=True)
return (True, None, None)
# If HsState thinks the search is over, exit. It is seeing if the results
# on the sprint we just completed are worse than a prior sprint.
if self._hsState.isSearchOver():
cmpMsg = "Exiting because results did not improve in most recently" \
" completed sprint."
self.logger.info(cmpMsg)
self._cjDAO.jobSetFields(self._jobID,
dict(workerCompletionMsg=cmpMsg),
useConnectionID=False, ignoreUnchanged=True)
return (True, None, None)
# Search successive active sprints, until we can find a candidate particle
# to work with
sprintIdx = -1
while True:
# Is this sprint active?
sprintIdx += 1
(active, eos) = self._hsState.isSprintActive(sprintIdx)
# If no more sprints to explore:
if eos:
# If any prior ones are still being explored, finish up exploring them
if self._hsState.anyGoodSprintsActive():
self.logger.info("No more sprints to explore, waiting for prior"
" sprints to complete")
return (False, None, None)
# Else, we're done
else:
cmpMsg = "Exiting because we've evaluated all possible field " \
"combinations"
self._cjDAO.jobSetFields(self._jobID,
dict(workerCompletionMsg=cmpMsg),
useConnectionID=False, ignoreUnchanged=True)
self.logger.info(cmpMsg)
return (True, None, None)
if not active:
if not self._speculativeParticles:
if not self._hsState.isSprintCompleted(sprintIdx):
self.logger.info("Waiting for all particles in sprint %d to complete"
"before evolving any more particles" % (sprintIdx))
return (False, None, None)
continue
# ====================================================================
# Look for swarms that have particle "holes" in their generations. That is,
# an earlier generation with less than minParticlesPerSwarm. This can
# happen if a model that was started earlier got orphaned. If we detect
# this, start a new particle in that generation.
swarmIds = self._hsState.getActiveSwarms(sprintIdx)
for swarmId in swarmIds:
firstNonFullGenIdx = self._resultsDB.firstNonFullGeneration(
swarmId=swarmId,
minNumParticles=self._minParticlesPerSwarm)
if firstNonFullGenIdx is None:
continue
if firstNonFullGenIdx < self._resultsDB.highestGeneration(swarmId):
self.logger.info("Cloning an earlier model in generation %d of swarm "
"%s (sprintIdx=%s) to replace an orphaned model" % (
firstNonFullGenIdx, swarmId, sprintIdx))
# Clone a random orphaned particle from the incomplete generation
(allParticles, allModelIds, errScores, completed, matured) = \
self._resultsDB.getOrphanParticleInfos(swarmId, firstNonFullGenIdx)
if len(allModelIds) > 0:
# We have seen instances where we get stuck in a loop incessantly
# trying to clone earlier models (NUP-1511). My best guess is that
# we've already successfully cloned each of the orphaned models at
# least once, but still need at least one more. If we don't create
# a new particleID, we will never be able to instantiate another
# model (since particleID hash is a unique key in the models table).
# So, on 1/8/2013 this logic was changed to create a new particleID
# whenever we clone an orphan.
newParticleId = True
self.logger.info("Cloning an orphaned model")
# If there is no orphan, clone one of the other particles. We can
# have no orphan if this was a speculative generation that only
# continued particles completed in the prior generation.
else:
newParticleId = True
self.logger.info("No orphans found, so cloning a non-orphan")
(allParticles, allModelIds, errScores, completed, matured) = \
self._resultsDB.getParticleInfos(swarmId=swarmId,
genIdx=firstNonFullGenIdx)
# Clone that model
modelId = random.choice(allModelIds)
self.logger.info("Cloning model %r" % (modelId))
(particleState, _, _, _, _) = self._resultsDB.getParticleInfo(modelId)
particle = Particle(hsObj = self,
resultsDB = self._resultsDB,
flattenedPermuteVars=self._flattenedPermutations,
newFromClone=particleState,
newParticleId=newParticleId)
return (False, particle, swarmId)
# ====================================================================
# Sort the swarms in priority order, trying the ones with the least
# number of models first
swarmSizes = numpy.array([self._resultsDB.numModels(x) for x in swarmIds])
swarmSizeAndIdList = zip(swarmSizes, swarmIds)
swarmSizeAndIdList.sort()
for (_, swarmId) in swarmSizeAndIdList:
# -------------------------------------------------------------------
# 1.) The particle will be created from new (at generation #0) if there
# are not already self._minParticlesPerSwarm particles in the swarm.
(allParticles, allModelIds, errScores, completed, matured) = (
self._resultsDB.getParticleInfos(swarmId))
if len(allParticles) < self._minParticlesPerSwarm:
particle = Particle(hsObj=self,
resultsDB=self._resultsDB,
flattenedPermuteVars=self._flattenedPermutations,
swarmId=swarmId,
newFarFrom=allParticles)
# Jam in the best encoder state found from the first sprint
bestPriorModel = None
if sprintIdx >= 1:
(bestPriorModel, errScore) = self._hsState.bestModelInSprint(0)
if bestPriorModel is not None:
self.logger.info("Best model and errScore from previous sprint(%d):"
" %s, %g" % (0, str(bestPriorModel), errScore))
(baseState, modelId, errScore, completed, matured) \
= self._resultsDB.getParticleInfo(bestPriorModel)
particle.copyEncoderStatesFrom(baseState)
# Copy the best inference type from the earlier sprint
particle.copyVarStatesFrom(baseState, ['modelParams|inferenceType'])
# It's best to jiggle the best settings from the prior sprint, so
# compute a new position starting from that previous best
# Only jiggle the vars we copied from the prior model
whichVars = []
for varName in baseState['varStates']:
if ':' in varName:
whichVars.append(varName)
particle.newPosition(whichVars)
self.logger.debug("Particle after incorporating encoder vars from best "
"model in previous sprint: \n%s" % (str(particle)))
return (False, particle, swarmId)
# -------------------------------------------------------------------
# 2.) Look for a completed particle to evolve
# Note that we use lastDescendent. We only want to evolve particles that
# are at their most recent generation index.
(readyParticles, readyModelIds, readyErrScores, _, _) = (
self._resultsDB.getParticleInfos(swarmId, genIdx=None,
matured=True, lastDescendent=True))
# If we have at least 1 ready particle to evolve...
if len(readyParticles) > 0:
readyGenIdxs = [x['genIdx'] for x in readyParticles]
sortedGenIdxs = sorted(set(readyGenIdxs))
genIdx = sortedGenIdxs[0]
# Now, genIdx has the generation of the particle we want to run,
# Get a particle from that generation and evolve it.
useParticle = None
for particle in readyParticles:
if particle['genIdx'] == genIdx:
useParticle = particle
break
# If speculativeParticles is off, we don't want to evolve a particle
# into the next generation until all particles in the current
# generation have completed.
if not self._speculativeParticles:
(particles, _, _, _, _) = self._resultsDB.getParticleInfos(
swarmId, genIdx=genIdx, matured=False)
if len(particles) > 0:
continue
particle = Particle(hsObj=self,
resultsDB=self._resultsDB,
flattenedPermuteVars=self._flattenedPermutations,
evolveFromState=useParticle)
return (False, particle, swarmId)
# END: for (swarmSize, swarmId) in swarmSizeAndIdList:
# No success in this swarm, onto next swarm
# ====================================================================
# We couldn't find a particle in this sprint ready to evolve. If
# speculative particles is OFF, we have to wait for one or more other
# workers to finish up their particles before we can do anything.
if not self._speculativeParticles:
self.logger.info("Waiting for one or more of the %s swarms "
"to complete a generation before evolving any more particles" \
% (str(swarmIds)))
return (False, None, None) | [
"def",
"_getCandidateParticleAndSwarm",
"(",
"self",
",",
"exhaustedSwarmId",
"=",
"None",
")",
":",
"# Cancel search?",
"jobCancel",
"=",
"self",
".",
"_cjDAO",
".",
"jobGetFields",
"(",
"self",
".",
"_jobID",
",",
"[",
"'cancel'",
"]",
")",
"[",
"0",
"]",
"if",
"jobCancel",
":",
"self",
".",
"_jobCancelled",
"=",
"True",
"# Did a worker cancel the job because of an error?",
"(",
"workerCmpReason",
",",
"workerCmpMsg",
")",
"=",
"self",
".",
"_cjDAO",
".",
"jobGetFields",
"(",
"self",
".",
"_jobID",
",",
"[",
"'workerCompletionReason'",
",",
"'workerCompletionMsg'",
"]",
")",
"if",
"workerCmpReason",
"==",
"ClientJobsDAO",
".",
"CMPL_REASON_SUCCESS",
":",
"self",
".",
"logger",
".",
"info",
"(",
"\"Exiting due to job being cancelled\"",
")",
"self",
".",
"_cjDAO",
".",
"jobSetFields",
"(",
"self",
".",
"_jobID",
",",
"dict",
"(",
"workerCompletionMsg",
"=",
"\"Job was cancelled\"",
")",
",",
"useConnectionID",
"=",
"False",
",",
"ignoreUnchanged",
"=",
"True",
")",
"else",
":",
"self",
".",
"logger",
".",
"error",
"(",
"\"Exiting because some worker set the \"",
"\"workerCompletionReason to %s. WorkerCompletionMsg: %s\"",
"%",
"(",
"workerCmpReason",
",",
"workerCmpMsg",
")",
")",
"return",
"(",
"True",
",",
"None",
",",
"None",
")",
"# Perform periodic updates on the Hypersearch state.",
"if",
"self",
".",
"_hsState",
"is",
"not",
"None",
":",
"priorActiveSwarms",
"=",
"self",
".",
"_hsState",
".",
"getActiveSwarms",
"(",
")",
"else",
":",
"priorActiveSwarms",
"=",
"None",
"# Update the HypersearchState, checking for matured swarms, and marking",
"# the passed in swarm as exhausted, if any",
"self",
".",
"_hsStatePeriodicUpdate",
"(",
"exhaustedSwarmId",
"=",
"exhaustedSwarmId",
")",
"# The above call may have modified self._hsState['activeSwarmIds']",
"# Log the current set of active swarms",
"activeSwarms",
"=",
"self",
".",
"_hsState",
".",
"getActiveSwarms",
"(",
")",
"if",
"activeSwarms",
"!=",
"priorActiveSwarms",
":",
"self",
".",
"logger",
".",
"info",
"(",
"\"Active swarms changed to %s (from %s)\"",
"%",
"(",
"activeSwarms",
",",
"priorActiveSwarms",
")",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Active swarms: %s\"",
"%",
"(",
"activeSwarms",
")",
")",
"# If too many model errors were detected, exit",
"totalCmpModels",
"=",
"self",
".",
"_resultsDB",
".",
"getNumCompletedModels",
"(",
")",
"if",
"totalCmpModels",
">",
"5",
":",
"numErrs",
"=",
"self",
".",
"_resultsDB",
".",
"getNumErrModels",
"(",
")",
"if",
"(",
"float",
"(",
"numErrs",
")",
"/",
"totalCmpModels",
")",
">",
"self",
".",
"_maxPctErrModels",
":",
"# Get one of the errors",
"errModelIds",
"=",
"self",
".",
"_resultsDB",
".",
"getErrModelIds",
"(",
")",
"resInfo",
"=",
"self",
".",
"_cjDAO",
".",
"modelsGetResultAndStatus",
"(",
"[",
"errModelIds",
"[",
"0",
"]",
"]",
")",
"[",
"0",
"]",
"modelErrMsg",
"=",
"resInfo",
".",
"completionMsg",
"cmpMsg",
"=",
"\"%s: Exiting due to receiving too many models failing\"",
"\" from exceptions (%d out of %d). \\nModel Exception: %s\"",
"%",
"(",
"ErrorCodes",
".",
"tooManyModelErrs",
",",
"numErrs",
",",
"totalCmpModels",
",",
"modelErrMsg",
")",
"self",
".",
"logger",
".",
"error",
"(",
"cmpMsg",
")",
"# Cancel the entire job now, if it has not already been cancelled",
"workerCmpReason",
"=",
"self",
".",
"_cjDAO",
".",
"jobGetFields",
"(",
"self",
".",
"_jobID",
",",
"[",
"'workerCompletionReason'",
"]",
")",
"[",
"0",
"]",
"if",
"workerCmpReason",
"==",
"ClientJobsDAO",
".",
"CMPL_REASON_SUCCESS",
":",
"self",
".",
"_cjDAO",
".",
"jobSetFields",
"(",
"self",
".",
"_jobID",
",",
"fields",
"=",
"dict",
"(",
"cancel",
"=",
"True",
",",
"workerCompletionReason",
"=",
"ClientJobsDAO",
".",
"CMPL_REASON_ERROR",
",",
"workerCompletionMsg",
"=",
"cmpMsg",
")",
",",
"useConnectionID",
"=",
"False",
",",
"ignoreUnchanged",
"=",
"True",
")",
"return",
"(",
"True",
",",
"None",
",",
"None",
")",
"# If HsState thinks the search is over, exit. It is seeing if the results",
"# on the sprint we just completed are worse than a prior sprint.",
"if",
"self",
".",
"_hsState",
".",
"isSearchOver",
"(",
")",
":",
"cmpMsg",
"=",
"\"Exiting because results did not improve in most recently\"",
"\" completed sprint.\"",
"self",
".",
"logger",
".",
"info",
"(",
"cmpMsg",
")",
"self",
".",
"_cjDAO",
".",
"jobSetFields",
"(",
"self",
".",
"_jobID",
",",
"dict",
"(",
"workerCompletionMsg",
"=",
"cmpMsg",
")",
",",
"useConnectionID",
"=",
"False",
",",
"ignoreUnchanged",
"=",
"True",
")",
"return",
"(",
"True",
",",
"None",
",",
"None",
")",
"# Search successive active sprints, until we can find a candidate particle",
"# to work with",
"sprintIdx",
"=",
"-",
"1",
"while",
"True",
":",
"# Is this sprint active?",
"sprintIdx",
"+=",
"1",
"(",
"active",
",",
"eos",
")",
"=",
"self",
".",
"_hsState",
".",
"isSprintActive",
"(",
"sprintIdx",
")",
"# If no more sprints to explore:",
"if",
"eos",
":",
"# If any prior ones are still being explored, finish up exploring them",
"if",
"self",
".",
"_hsState",
".",
"anyGoodSprintsActive",
"(",
")",
":",
"self",
".",
"logger",
".",
"info",
"(",
"\"No more sprints to explore, waiting for prior\"",
"\" sprints to complete\"",
")",
"return",
"(",
"False",
",",
"None",
",",
"None",
")",
"# Else, we're done",
"else",
":",
"cmpMsg",
"=",
"\"Exiting because we've evaluated all possible field \"",
"\"combinations\"",
"self",
".",
"_cjDAO",
".",
"jobSetFields",
"(",
"self",
".",
"_jobID",
",",
"dict",
"(",
"workerCompletionMsg",
"=",
"cmpMsg",
")",
",",
"useConnectionID",
"=",
"False",
",",
"ignoreUnchanged",
"=",
"True",
")",
"self",
".",
"logger",
".",
"info",
"(",
"cmpMsg",
")",
"return",
"(",
"True",
",",
"None",
",",
"None",
")",
"if",
"not",
"active",
":",
"if",
"not",
"self",
".",
"_speculativeParticles",
":",
"if",
"not",
"self",
".",
"_hsState",
".",
"isSprintCompleted",
"(",
"sprintIdx",
")",
":",
"self",
".",
"logger",
".",
"info",
"(",
"\"Waiting for all particles in sprint %d to complete\"",
"\"before evolving any more particles\"",
"%",
"(",
"sprintIdx",
")",
")",
"return",
"(",
"False",
",",
"None",
",",
"None",
")",
"continue",
"# ====================================================================",
"# Look for swarms that have particle \"holes\" in their generations. That is,",
"# an earlier generation with less than minParticlesPerSwarm. This can",
"# happen if a model that was started eariler got orphaned. If we detect",
"# this, start a new particle in that generation.",
"swarmIds",
"=",
"self",
".",
"_hsState",
".",
"getActiveSwarms",
"(",
"sprintIdx",
")",
"for",
"swarmId",
"in",
"swarmIds",
":",
"firstNonFullGenIdx",
"=",
"self",
".",
"_resultsDB",
".",
"firstNonFullGeneration",
"(",
"swarmId",
"=",
"swarmId",
",",
"minNumParticles",
"=",
"self",
".",
"_minParticlesPerSwarm",
")",
"if",
"firstNonFullGenIdx",
"is",
"None",
":",
"continue",
"if",
"firstNonFullGenIdx",
"<",
"self",
".",
"_resultsDB",
".",
"highestGeneration",
"(",
"swarmId",
")",
":",
"self",
".",
"logger",
".",
"info",
"(",
"\"Cloning an earlier model in generation %d of swarm \"",
"\"%s (sprintIdx=%s) to replace an orphaned model\"",
"%",
"(",
"firstNonFullGenIdx",
",",
"swarmId",
",",
"sprintIdx",
")",
")",
"# Clone a random orphaned particle from the incomplete generation",
"(",
"allParticles",
",",
"allModelIds",
",",
"errScores",
",",
"completed",
",",
"matured",
")",
"=",
"self",
".",
"_resultsDB",
".",
"getOrphanParticleInfos",
"(",
"swarmId",
",",
"firstNonFullGenIdx",
")",
"if",
"len",
"(",
"allModelIds",
")",
">",
"0",
":",
"# We have seen instances where we get stuck in a loop incessantly",
"# trying to clone earlier models (NUP-1511). My best guess is that",
"# we've already successfully cloned each of the orphaned models at",
"# least once, but still need at least one more. If we don't create",
"# a new particleID, we will never be able to instantiate another",
"# model (since particleID hash is a unique key in the models table).",
"# So, on 1/8/2013 this logic was changed to create a new particleID",
"# whenever we clone an orphan.",
"newParticleId",
"=",
"True",
"self",
".",
"logger",
".",
"info",
"(",
"\"Cloning an orphaned model\"",
")",
"# If there is no orphan, clone one of the other particles. We can",
"# have no orphan if this was a speculative generation that only",
"# continued particles completed in the prior generation.",
"else",
":",
"newParticleId",
"=",
"True",
"self",
".",
"logger",
".",
"info",
"(",
"\"No orphans found, so cloning a non-orphan\"",
")",
"(",
"allParticles",
",",
"allModelIds",
",",
"errScores",
",",
"completed",
",",
"matured",
")",
"=",
"self",
".",
"_resultsDB",
".",
"getParticleInfos",
"(",
"swarmId",
"=",
"swarmId",
",",
"genIdx",
"=",
"firstNonFullGenIdx",
")",
"# Clone that model",
"modelId",
"=",
"random",
".",
"choice",
"(",
"allModelIds",
")",
"self",
".",
"logger",
".",
"info",
"(",
"\"Cloning model %r\"",
"%",
"(",
"modelId",
")",
")",
"(",
"particleState",
",",
"_",
",",
"_",
",",
"_",
",",
"_",
")",
"=",
"self",
".",
"_resultsDB",
".",
"getParticleInfo",
"(",
"modelId",
")",
"particle",
"=",
"Particle",
"(",
"hsObj",
"=",
"self",
",",
"resultsDB",
"=",
"self",
".",
"_resultsDB",
",",
"flattenedPermuteVars",
"=",
"self",
".",
"_flattenedPermutations",
",",
"newFromClone",
"=",
"particleState",
",",
"newParticleId",
"=",
"newParticleId",
")",
"return",
"(",
"False",
",",
"particle",
",",
"swarmId",
")",
"# ====================================================================",
"# Sort the swarms in priority order, trying the ones with the least",
"# number of models first",
"swarmSizes",
"=",
"numpy",
".",
"array",
"(",
"[",
"self",
".",
"_resultsDB",
".",
"numModels",
"(",
"x",
")",
"for",
"x",
"in",
"swarmIds",
"]",
")",
"swarmSizeAndIdList",
"=",
"zip",
"(",
"swarmSizes",
",",
"swarmIds",
")",
"swarmSizeAndIdList",
".",
"sort",
"(",
")",
"for",
"(",
"_",
",",
"swarmId",
")",
"in",
"swarmSizeAndIdList",
":",
"# -------------------------------------------------------------------",
"# 1.) The particle will be created from new (at generation #0) if there",
"# are not already self._minParticlesPerSwarm particles in the swarm.",
"(",
"allParticles",
",",
"allModelIds",
",",
"errScores",
",",
"completed",
",",
"matured",
")",
"=",
"(",
"self",
".",
"_resultsDB",
".",
"getParticleInfos",
"(",
"swarmId",
")",
")",
"if",
"len",
"(",
"allParticles",
")",
"<",
"self",
".",
"_minParticlesPerSwarm",
":",
"particle",
"=",
"Particle",
"(",
"hsObj",
"=",
"self",
",",
"resultsDB",
"=",
"self",
".",
"_resultsDB",
",",
"flattenedPermuteVars",
"=",
"self",
".",
"_flattenedPermutations",
",",
"swarmId",
"=",
"swarmId",
",",
"newFarFrom",
"=",
"allParticles",
")",
"# Jam in the best encoder state found from the first sprint",
"bestPriorModel",
"=",
"None",
"if",
"sprintIdx",
">=",
"1",
":",
"(",
"bestPriorModel",
",",
"errScore",
")",
"=",
"self",
".",
"_hsState",
".",
"bestModelInSprint",
"(",
"0",
")",
"if",
"bestPriorModel",
"is",
"not",
"None",
":",
"self",
".",
"logger",
".",
"info",
"(",
"\"Best model and errScore from previous sprint(%d):\"",
"\" %s, %g\"",
"%",
"(",
"0",
",",
"str",
"(",
"bestPriorModel",
")",
",",
"errScore",
")",
")",
"(",
"baseState",
",",
"modelId",
",",
"errScore",
",",
"completed",
",",
"matured",
")",
"=",
"self",
".",
"_resultsDB",
".",
"getParticleInfo",
"(",
"bestPriorModel",
")",
"particle",
".",
"copyEncoderStatesFrom",
"(",
"baseState",
")",
"# Copy the best inference type from the earlier sprint",
"particle",
".",
"copyVarStatesFrom",
"(",
"baseState",
",",
"[",
"'modelParams|inferenceType'",
"]",
")",
"# It's best to jiggle the best settings from the prior sprint, so",
"# compute a new position starting from that previous best",
"# Only jiggle the vars we copied from the prior model",
"whichVars",
"=",
"[",
"]",
"for",
"varName",
"in",
"baseState",
"[",
"'varStates'",
"]",
":",
"if",
"':'",
"in",
"varName",
":",
"whichVars",
".",
"append",
"(",
"varName",
")",
"particle",
".",
"newPosition",
"(",
"whichVars",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Particle after incorporating encoder vars from best \"",
"\"model in previous sprint: \\n%s\"",
"%",
"(",
"str",
"(",
"particle",
")",
")",
")",
"return",
"(",
"False",
",",
"particle",
",",
"swarmId",
")",
"# -------------------------------------------------------------------",
"# 2.) Look for a completed particle to evolve",
"# Note that we use lastDescendent. We only want to evolve particles that",
"# are at their most recent generation index.",
"(",
"readyParticles",
",",
"readyModelIds",
",",
"readyErrScores",
",",
"_",
",",
"_",
")",
"=",
"(",
"self",
".",
"_resultsDB",
".",
"getParticleInfos",
"(",
"swarmId",
",",
"genIdx",
"=",
"None",
",",
"matured",
"=",
"True",
",",
"lastDescendent",
"=",
"True",
")",
")",
"# If we have at least 1 ready particle to evolve...",
"if",
"len",
"(",
"readyParticles",
")",
">",
"0",
":",
"readyGenIdxs",
"=",
"[",
"x",
"[",
"'genIdx'",
"]",
"for",
"x",
"in",
"readyParticles",
"]",
"sortedGenIdxs",
"=",
"sorted",
"(",
"set",
"(",
"readyGenIdxs",
")",
")",
"genIdx",
"=",
"sortedGenIdxs",
"[",
"0",
"]",
"# Now, genIdx has the generation of the particle we want to run,",
"# Get a particle from that generation and evolve it.",
"useParticle",
"=",
"None",
"for",
"particle",
"in",
"readyParticles",
":",
"if",
"particle",
"[",
"'genIdx'",
"]",
"==",
"genIdx",
":",
"useParticle",
"=",
"particle",
"break",
"# If speculativeParticles is off, we don't want to evolve a particle",
"# into the next generation until all particles in the current",
"# generation have completed.",
"if",
"not",
"self",
".",
"_speculativeParticles",
":",
"(",
"particles",
",",
"_",
",",
"_",
",",
"_",
",",
"_",
")",
"=",
"self",
".",
"_resultsDB",
".",
"getParticleInfos",
"(",
"swarmId",
",",
"genIdx",
"=",
"genIdx",
",",
"matured",
"=",
"False",
")",
"if",
"len",
"(",
"particles",
")",
">",
"0",
":",
"continue",
"particle",
"=",
"Particle",
"(",
"hsObj",
"=",
"self",
",",
"resultsDB",
"=",
"self",
".",
"_resultsDB",
",",
"flattenedPermuteVars",
"=",
"self",
".",
"_flattenedPermutations",
",",
"evolveFromState",
"=",
"useParticle",
")",
"return",
"(",
"False",
",",
"particle",
",",
"swarmId",
")",
"# END: for (swarmSize, swarmId) in swarmSizeAndIdList:",
"# No success in this swarm, onto next swarm",
"# ====================================================================",
"# We couldn't find a particle in this sprint ready to evolve. If",
"# speculative particles is OFF, we have to wait for one or more other",
"# workers to finish up their particles before we can do anything.",
"if",
"not",
"self",
".",
"_speculativeParticles",
":",
"self",
".",
"logger",
".",
"info",
"(",
"\"Waiting for one or more of the %s swarms \"",
"\"to complete a generation before evolving any more particles\"",
"%",
"(",
"str",
"(",
"swarmIds",
")",
")",
")",
"return",
"(",
"False",
",",
"None",
",",
"None",
")"
] | Find or create a candidate particle to produce a new model.
At any one time, there is an active set of swarms in the current sprint, where
each swarm in the sprint represents a particular combination of fields.
Ideally, we should try to balance the number of models we have evaluated for
each swarm at any time.
This method will see how many models have been evaluated for each active
swarm in the current active sprint(s) and then try to choose a particle
from the least represented swarm in the first possible active sprint, with
the following constraints/rules:
for each active sprint:
for each active swarm (preference to those with least # of models so far):
1.) The particle will be created from new (generation #0) if there are not
already self._minParticlesPerSwarm particles in the swarm.
2.) Find the first gen that has a completed particle and evolve that
particle to the next generation.
3.) If we get here, we know that we have satisfied the min # of
particles for the swarm, and they are all currently running (probably at
various generation indexes). Go on to the next swarm.
If we couldn't find a swarm to allocate a particle in, go on to the next
sprint and start allocating particles there....
Parameters:
----------------------------------------------------------------
exhaustedSwarmId: If not None, force a change to the current set of active
swarms by marking this swarm as either 'completing' or
'completed'. If there are still models being evaluated in
it, mark it as 'completing', else 'completed'. This is
used in situations where we can't find any new unique
models to create in this swarm. In these situations, we
force an update to the hypersearch state so no other
worker wastes time trying to use this swarm.
retval: (exit, particle, swarm)
exit: If true, this worker is ready to exit (particle and
swarm will be None)
particle: Which particle to run
swarm: which swarm the particle is in
NOTE: When particle and swarm are None and exit is False, it
means that we need to wait for one or more other worker(s) to
finish their respective models before we can pick a particle
to run. This will generally only happen when speculativeParticles
is set to False. | [
"Find",
"or",
"create",
"a",
"candidate",
"particle",
"to",
"produce",
"a",
"new",
"model",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/hypersearch_v2.py#L1700-L2016 | valid |
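The swarm-prioritization step inside _getCandidateParticleAndSwarm (build swarmSizes, zip with swarmIds, sort, then iterate) simply visits the swarms with the fewest evaluated models first. The sketch below shows that idea in isolation; numModelsPerSwarm and prioritizeSwarms are hypothetical names standing in for self._resultsDB.numModels() and the inline numpy/zip/sort logic in the source, not part of the dataset.

# Minimal sketch of least-represented-swarm prioritization.
# numModelsPerSwarm is a hypothetical stand-in for
# self._resultsDB.numModels(swarmId).
numModelsPerSwarm = {'fieldA': 12, 'fieldA.fieldB': 3, 'fieldC': 7}

def prioritizeSwarms(swarmIds, numModelsPerSwarm):
    # Pair each swarm with its model count and sort ascending by count
    # (ties broken by swarmId), mirroring zip(swarmSizes, swarmIds) + sort().
    sizeAndId = sorted((numModelsPerSwarm[s], s) for s in swarmIds)
    return [swarmId for (_, swarmId) in sizeAndId]

print(prioritizeSwarms(['fieldA', 'fieldA.fieldB', 'fieldC'],
                       numModelsPerSwarm))
# -> ['fieldA.fieldB', 'fieldC', 'fieldA']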
numenta/nupic | src/nupic/swarming/hypersearch_v2.py | HypersearchV2._okToExit | def _okToExit(self):
"""Test if it's OK to exit this worker. This is only called when we run
out of prospective new models to evaluate. This method sees if all models
have matured yet. If not, it will sleep for a bit and return False. This
will indicate to the hypersearch worker that we should keep running, and
check again later. This gives this worker a chance to pick up and adopt any
model which may become orphaned by another worker before it matures.
If all models have matured, this method will send a STOP message to all
matured, running models (presumably, there will be just one - the model
which thinks it's the best) before returning True.
"""
# Send an update status periodically to the JobTracker so that it doesn't
# think this worker is dead.
print >> sys.stderr, "reporter:status:In hypersearchV2: _okToExit"
# Any immature models still running?
if not self._jobCancelled:
(_, modelIds, _, _, _) = self._resultsDB.getParticleInfos(matured=False)
if len(modelIds) > 0:
self.logger.info("Ready to end hyperseach, but not all models have " \
"matured yet. Sleeping a bit to wait for all models " \
"to mature.")
# Sleep for a bit, no need to check for orphaned models very often
time.sleep(5.0 * random.random())
return False
# All particles have matured, send a STOP signal to any that are still
# running.
(_, modelIds, _, _, _) = self._resultsDB.getParticleInfos(completed=False)
for modelId in modelIds:
self.logger.info("Stopping model %d because the search has ended" \
% (modelId))
self._cjDAO.modelSetFields(modelId,
dict(engStop=ClientJobsDAO.STOP_REASON_STOPPED),
ignoreUnchanged = True)
# Update the HsState to get the accurate field contributions.
self._hsStatePeriodicUpdate()
pctFieldContributions, absFieldContributions = \
self._hsState.getFieldContributions()
# Update the results field with the new field contributions.
jobResultsStr = self._cjDAO.jobGetFields(self._jobID, ['results'])[0]
if jobResultsStr is not None:
jobResults = json.loads(jobResultsStr)
else:
jobResults = {}
# Update the fieldContributions field.
if pctFieldContributions != jobResults.get('fieldContributions', None):
jobResults['fieldContributions'] = pctFieldContributions
jobResults['absoluteFieldContributions'] = absFieldContributions
isUpdated = self._cjDAO.jobSetFieldIfEqual(self._jobID,
fieldName='results',
curValue=jobResultsStr,
newValue=json.dumps(jobResults))
if isUpdated:
self.logger.info('Successfully updated the field contributions:%s',
pctFieldContributions)
else:
self.logger.info('Failed updating the field contributions, ' \
'another hypersearch worker must have updated it')
return True | python | def _okToExit(self):
"""Test if it's OK to exit this worker. This is only called when we run
out of prospective new models to evaluate. This method sees if all models
have matured yet. If not, it will sleep for a bit and return False. This
will indicate to the hypersearch worker that we should keep running, and
check again later. This gives this worker a chance to pick up and adopt any
model which may become orphaned by another worker before it matures.
If all models have matured, this method will send a STOP message to all
matured, running models (presumably, there will be just one - the model
which thinks it's the best) before returning True.
"""
# Send an update status periodically to the JobTracker so that it doesn't
# think this worker is dead.
print >> sys.stderr, "reporter:status:In hypersearchV2: _okToExit"
# Any immature models still running?
if not self._jobCancelled:
(_, modelIds, _, _, _) = self._resultsDB.getParticleInfos(matured=False)
if len(modelIds) > 0:
self.logger.info("Ready to end hyperseach, but not all models have " \
"matured yet. Sleeping a bit to wait for all models " \
"to mature.")
# Sleep for a bit, no need to check for orphaned models very often
time.sleep(5.0 * random.random())
return False
# All particles have matured, send a STOP signal to any that are still
# running.
(_, modelIds, _, _, _) = self._resultsDB.getParticleInfos(completed=False)
for modelId in modelIds:
self.logger.info("Stopping model %d because the search has ended" \
% (modelId))
self._cjDAO.modelSetFields(modelId,
dict(engStop=ClientJobsDAO.STOP_REASON_STOPPED),
ignoreUnchanged = True)
# Update the HsState to get the accurate field contributions.
self._hsStatePeriodicUpdate()
pctFieldContributions, absFieldContributions = \
self._hsState.getFieldContributions()
# Update the results field with the new field contributions.
jobResultsStr = self._cjDAO.jobGetFields(self._jobID, ['results'])[0]
if jobResultsStr is not None:
jobResults = json.loads(jobResultsStr)
else:
jobResults = {}
# Update the fieldContributions field.
if pctFieldContributions != jobResults.get('fieldContributions', None):
jobResults['fieldContributions'] = pctFieldContributions
jobResults['absoluteFieldContributions'] = absFieldContributions
isUpdated = self._cjDAO.jobSetFieldIfEqual(self._jobID,
fieldName='results',
curValue=jobResultsStr,
newValue=json.dumps(jobResults))
if isUpdated:
self.logger.info('Successfully updated the field contributions:%s',
pctFieldContributions)
else:
self.logger.info('Failed updating the field contributions, ' \
'another hypersearch worker must have updated it')
return True | [
"def",
"_okToExit",
"(",
"self",
")",
":",
"# Send an update status periodically to the JobTracker so that it doesn't",
"# think this worker is dead.",
"print",
">>",
"sys",
".",
"stderr",
",",
"\"reporter:status:In hypersearchV2: _okToExit\"",
"# Any immature models still running?",
"if",
"not",
"self",
".",
"_jobCancelled",
":",
"(",
"_",
",",
"modelIds",
",",
"_",
",",
"_",
",",
"_",
")",
"=",
"self",
".",
"_resultsDB",
".",
"getParticleInfos",
"(",
"matured",
"=",
"False",
")",
"if",
"len",
"(",
"modelIds",
")",
">",
"0",
":",
"self",
".",
"logger",
".",
"info",
"(",
"\"Ready to end hyperseach, but not all models have \"",
"\"matured yet. Sleeping a bit to wait for all models \"",
"\"to mature.\"",
")",
"# Sleep for a bit, no need to check for orphaned models very often",
"time",
".",
"sleep",
"(",
"5.0",
"*",
"random",
".",
"random",
"(",
")",
")",
"return",
"False",
"# All particles have matured, send a STOP signal to any that are still",
"# running.",
"(",
"_",
",",
"modelIds",
",",
"_",
",",
"_",
",",
"_",
")",
"=",
"self",
".",
"_resultsDB",
".",
"getParticleInfos",
"(",
"completed",
"=",
"False",
")",
"for",
"modelId",
"in",
"modelIds",
":",
"self",
".",
"logger",
".",
"info",
"(",
"\"Stopping model %d because the search has ended\"",
"%",
"(",
"modelId",
")",
")",
"self",
".",
"_cjDAO",
".",
"modelSetFields",
"(",
"modelId",
",",
"dict",
"(",
"engStop",
"=",
"ClientJobsDAO",
".",
"STOP_REASON_STOPPED",
")",
",",
"ignoreUnchanged",
"=",
"True",
")",
"# Update the HsState to get the accurate field contributions.",
"self",
".",
"_hsStatePeriodicUpdate",
"(",
")",
"pctFieldContributions",
",",
"absFieldContributions",
"=",
"self",
".",
"_hsState",
".",
"getFieldContributions",
"(",
")",
"# Update the results field with the new field contributions.",
"jobResultsStr",
"=",
"self",
".",
"_cjDAO",
".",
"jobGetFields",
"(",
"self",
".",
"_jobID",
",",
"[",
"'results'",
"]",
")",
"[",
"0",
"]",
"if",
"jobResultsStr",
"is",
"not",
"None",
":",
"jobResults",
"=",
"json",
".",
"loads",
"(",
"jobResultsStr",
")",
"else",
":",
"jobResults",
"=",
"{",
"}",
"# Update the fieldContributions field.",
"if",
"pctFieldContributions",
"!=",
"jobResults",
".",
"get",
"(",
"'fieldContributions'",
",",
"None",
")",
":",
"jobResults",
"[",
"'fieldContributions'",
"]",
"=",
"pctFieldContributions",
"jobResults",
"[",
"'absoluteFieldContributions'",
"]",
"=",
"absFieldContributions",
"isUpdated",
"=",
"self",
".",
"_cjDAO",
".",
"jobSetFieldIfEqual",
"(",
"self",
".",
"_jobID",
",",
"fieldName",
"=",
"'results'",
",",
"curValue",
"=",
"jobResultsStr",
",",
"newValue",
"=",
"json",
".",
"dumps",
"(",
"jobResults",
")",
")",
"if",
"isUpdated",
":",
"self",
".",
"logger",
".",
"info",
"(",
"'Successfully updated the field contributions:%s'",
",",
"pctFieldContributions",
")",
"else",
":",
"self",
".",
"logger",
".",
"info",
"(",
"'Failed updating the field contributions, '",
"'another hypersearch worker must have updated it'",
")",
"return",
"True"
] | Test if it's OK to exit this worker. This is only called when we run
out of prospective new models to evaluate. This method sees if all models
have matured yet. If not, it will sleep for a bit and return False. This
will indicate to the hypersearch worker that we should keep running, and
check again later. This gives this worker a chance to pick up and adopt any
model which may become orphaned by another worker before it matures.
If all models have matured, this method will send a STOP message to all
matured, running models (presumably, there will be just one - the model
which thinks it's the best) before returning True. | [
"Test",
"if",
"it",
"s",
"OK",
"to",
"exit",
"this",
"worker",
".",
"This",
"is",
"only",
"called",
"when",
"we",
"run",
"out",
"of",
"prospective",
"new",
"models",
"to",
"evaluate",
".",
"This",
"method",
"sees",
"if",
"all",
"models",
"have",
"matured",
"yet",
".",
"If",
"not",
"it",
"will",
"sleep",
"for",
"a",
"bit",
"and",
"return",
"False",
".",
"This",
"will",
"indicate",
"to",
"the",
"hypersearch",
"worker",
"that",
"we",
"should",
"keep",
"running",
"and",
"check",
"again",
"later",
".",
"This",
"gives",
"this",
"worker",
"a",
"chance",
"to",
"pick",
"up",
"and",
"adopt",
"any",
"model",
"which",
"may",
"become",
"orphaned",
"by",
"another",
"worker",
"before",
"it",
"matures",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/hypersearch_v2.py#L2021-L2087 | valid |
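The jobSetFieldIfEqual call at the end of _okToExit is an optimistic-concurrency (compare-and-swap) write: the 'results' field is only overwritten if it still holds the value this worker read, so racing workers cannot clobber each other's field-contribution updates. Below is a minimal in-memory sketch of that contract; FakeJobsTable is a hypothetical stand-in, not the real ClientJobsDAO API.

import json

class FakeJobsTable(object):
    """In-memory stand-in illustrating jobSetFieldIfEqual semantics."""
    def __init__(self):
        self.fields = {'results': None}

    def jobSetFieldIfEqual(self, fieldName, curValue, newValue):
        # Only write if nobody else changed the field since we read it.
        if self.fields[fieldName] == curValue:
            self.fields[fieldName] = newValue
            return True
        return False

table = FakeJobsTable()
jobResultsStr = table.fields['results']                 # read
jobResults = json.loads(jobResultsStr) if jobResultsStr else {}
jobResults['fieldContributions'] = {'fieldA': 80.0}     # modify
updated = table.jobSetFieldIfEqual('results',
                                   curValue=jobResultsStr,
                                   newValue=json.dumps(jobResults))
print(updated)  # True; a second worker racing us would get False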
numenta/nupic | src/nupic/swarming/hypersearch_v2.py | HypersearchV2.createModels | def createModels(self, numModels=1):
"""Create one or more new models for evaluation. These should NOT be models
that we already know are in progress (i.e. those that have been sent to us
via recordModelProgress). We return a list of models to the caller
(HypersearchWorker) and if one can be successfully inserted into
the models table (i.e. it is not a duplicate) then HypersearchWorker will
turn around and call our runModel() method, passing in this model. If it
is a duplicate, HypersearchWorker will call this method again. A model
is a duplicate if either the modelParamsHash or particleHash is
identical to another entry in the model table.
The numModels is provided by HypersearchWorker as a suggestion as to how
many models to generate. This particular implementation only ever returns 1
model.
Before choosing some new models, we first do a sweep for any models that
may have been abandoned by failed workers. If/when we detect an abandoned
model, we mark it as complete and orphaned and hide it from any subsequent
queries to our ResultsDB. This effectively considers it as if it never
existed. We also change the paramsHash and particleHash in the model record
of the models table so that we can create another model with the same
params and particle status and run it (which we then do immediately).
The modelParamsHash returned for each model should be a hash (max allowed
size of ClientJobsDAO.hashMaxSize) that uniquely identifies this model by
its params and the optional particleHash should be a hash of the particleId
and generation index. Every model that gets placed into the models database,
either by this worker or another worker, will have these hashes computed for
it. The recordModelProgress gets called for every model in the database and
the hash is used to tell which, if any, are the same as the ones this worker
generated.
NOTE: We first check ourselves for possible duplicates using the paramsHash
before we return a model. If HypersearchWorker failed to insert it (because
some other worker beat us to it), it will turn around and call our
recordModelProgress with that other model so that we now know about it. It
will then call createModels() again.
This method returns an exit boolean and the model to evaluate. If there is
no model to evaluate, we may return False for exit because we want to stay
alive for a while, waiting for all other models to finish. This gives us
a chance to detect and pick up any model possibly orphaned by another
worker.
Parameters:
----------------------------------------------------------------------
numModels: number of models to generate
retval: (exit, models)
exit: true if this worker should exit.
models: list of tuples, one for each model. Each tuple contains:
(modelParams, modelParamsHash, particleHash)
modelParams is a dictionary containing the following elements:
structuredParams: dictionary containing all variables for
this model, with encoders represented as a dict within
this dict (or None if they are not included).
particleState: dictionary containing the state of this
particle. This includes the position and velocity of
each of its variables, the particleId, and the particle
generation index. It contains the following keys:
id: The particle Id of the particle we are using to
generate/track this model. This is a string of the
form <hypersearchWorkerId>.<particleIdx>
genIdx: the particle's generation index. This starts at 0
and increments every time we move the particle to a
new position.
swarmId: The swarmId, which is a string of the form
<encoder>.<encoder>... that describes this swarm
varStates: dict of the variable states. The key is the
variable name, the value is a dict of the variable's
position, velocity, bestPosition, bestResult, etc.
"""
# Check for and mark orphaned models
self._checkForOrphanedModels()
modelResults = []
for _ in xrange(numModels):
candidateParticle = None
# If we've reached the max # of models to evaluate, we're done.
if (self._maxModels is not None and
(self._resultsDB.numModels() - self._resultsDB.getNumErrModels()) >=
self._maxModels):
return (self._okToExit(), [])
# If we don't already have a particle to work on, get a candidate swarm and
# particle to work with. If None is returned for the particle it means
# either that the search is over (if exitNow is also True) or that we need
# to wait for other workers to finish up their models before we can pick
# another particle to run (if exitNow is False).
if candidateParticle is None:
(exitNow, candidateParticle, candidateSwarm) = (
self._getCandidateParticleAndSwarm())
if candidateParticle is None:
if exitNow:
return (self._okToExit(), [])
else:
# Send an update status periodically to the JobTracker so that it doesn't
# think this worker is dead.
print >> sys.stderr, "reporter:status:In hypersearchV2: speculativeWait"
time.sleep(self._speculativeWaitSecondsMax * random.random())
return (False, [])
useEncoders = candidateSwarm.split('.')
numAttempts = 0
# Loop until we can create a unique model that we haven't seen yet.
while True:
# If this is the Nth attempt with the same candidate, agitate it a bit
# to find a new unique position for it.
if numAttempts >= 1:
self.logger.debug("Agitating particle to get unique position after %d "
"failed attempts in a row" % (numAttempts))
candidateParticle.agitate()
# Create the hierarchical params expected by the base description. Note
# that this is where we incorporate encoders that have no permuted
# values in them.
position = candidateParticle.getPosition()
structuredParams = dict()
def _buildStructuredParams(value, keys):
flatKey = _flattenKeys(keys)
# If it's an encoder, either put in None if it's not used, or replace
# all permuted constructor params with the actual position.
if flatKey in self._encoderNames:
if flatKey in useEncoders:
# Form encoder dict, substituting in chosen permutation values.
return value.getDict(flatKey, position)
# Encoder not used.
else:
return None
# Regular top-level variable.
elif flatKey in position:
return position[flatKey]
# Fixed override of a parameter in the base description.
else:
return value
structuredParams = rCopy(self._permutations,
_buildStructuredParams,
discardNoneKeys=False)
# Create the modelParams.
modelParams = dict(
structuredParams=structuredParams,
particleState = candidateParticle.getState()
)
# And the hashes.
m = hashlib.md5()
m.update(sortedJSONDumpS(structuredParams))
m.update(self._baseDescriptionHash)
paramsHash = m.digest()
particleInst = "%s.%s" % (modelParams['particleState']['id'],
modelParams['particleState']['genIdx'])
particleHash = hashlib.md5(particleInst).digest()
# Increase attempt counter
numAttempts += 1
# If this is a new one, and passes the filter test, exit with it.
# TODO: There is currently a problem with this filter's implementation as
# it relates to self._maxUniqueModelAttempts. When there is a filter in
# effect, we should try a lot more times before we decide we have
# exhausted the parameter space for this swarm. The question is, how many
# more times?
if self._filterFunc and not self._filterFunc(structuredParams):
valid = False
else:
valid = True
if valid and self._resultsDB.getModelIDFromParamsHash(paramsHash) is None:
break
# If we've exceeded the max allowed number of attempts, mark this swarm
# as completing or completed, so we don't try and allocate any more new
# particles to it, and pick another.
if numAttempts >= self._maxUniqueModelAttempts:
(exitNow, candidateParticle, candidateSwarm) \
= self._getCandidateParticleAndSwarm(
exhaustedSwarmId=candidateSwarm)
if candidateParticle is None:
if exitNow:
return (self._okToExit(), [])
else:
time.sleep(self._speculativeWaitSecondsMax * random.random())
return (False, [])
numAttempts = 0
useEncoders = candidateSwarm.split('.')
# Log message
if self.logger.getEffectiveLevel() <= logging.DEBUG:
self.logger.debug("Submitting new potential model to HypersearchWorker: \n%s"
% (pprint.pformat(modelParams, indent=4)))
modelResults.append((modelParams, paramsHash, particleHash))
return (False, modelResults) | python | def createModels(self, numModels=1):
"""Create one or more new models for evaluation. These should NOT be models
that we already know are in progress (i.e. those that have been sent to us
via recordModelProgress). We return a list of models to the caller
(HypersearchWorker) and if one can be successfully inserted into
the models table (i.e. it is not a duplicate) then HypersearchWorker will
turn around and call our runModel() method, passing in this model. If it
is a duplicate, HypersearchWorker will call this method again. A model
is a duplicate if either the modelParamsHash or particleHash is
identical to another entry in the model table.
The numModels is provided by HypersearchWorker as a suggestion as to how
many models to generate. This particular implementation only ever returns 1
model.
Before choosing some new models, we first do a sweep for any models that
may have been abandoned by failed workers. If/when we detect an abandoned
model, we mark it as complete and orphaned and hide it from any subsequent
queries to our ResultsDB. This effectively considers it as if it never
existed. We also change the paramsHash and particleHash in the model record
of the models table so that we can create another model with the same
params and particle status and run it (which we then do immediately).
The modelParamsHash returned for each model should be a hash (max allowed
size of ClientJobsDAO.hashMaxSize) that uniquely identifies this model by
its params and the optional particleHash should be a hash of the particleId
and generation index. Every model that gets placed into the models database,
either by this worker or another worker, will have these hashes computed for
it. The recordModelProgress gets called for every model in the database and
the hash is used to tell which, if any, are the same as the ones this worker
generated.
NOTE: We first check ourselves for possible duplicates using the paramsHash
before we return a model. If HypersearchWorker failed to insert it (because
some other worker beat us to it), it will turn around and call our
recordModelProgress with that other model so that we now know about it. It
will then call createModels() again.
This method returns an exit boolean and the model to evaluate. If there is
no model to evaluate, we may return False for exit because we want to stay
alive for a while, waiting for all other models to finish. This gives us
a chance to detect and pick up any model possibly orphaned by another
worker.
Parameters:
----------------------------------------------------------------------
numModels: number of models to generate
retval: (exit, models)
exit: true if this worker should exit.
models: list of tuples, one for each model. Each tuple contains:
(modelParams, modelParamsHash, particleHash)
modelParams is a dictionary containing the following elements:
structuredParams: dictionary containing all variables for
this model, with encoders represented as a dict within
this dict (or None if they are not included).
particleState: dictionary containing the state of this
particle. This includes the position and velocity of
each of its variables, the particleId, and the particle
generation index. It contains the following keys:
id: The particle Id of the particle we are using to
generate/track this model. This is a string of the
form <hypersearchWorkerId>.<particleIdx>
genIdx: the particle's generation index. This starts at 0
and increments every time we move the particle to a
new position.
swarmId: The swarmId, which is a string of the form
<encoder>.<encoder>... that describes this swarm
varStates: dict of the variable states. The key is the
variable name, the value is a dict of the variable's
position, velocity, bestPosition, bestResult, etc.
"""
# Check for and mark orphaned models
self._checkForOrphanedModels()
modelResults = []
for _ in xrange(numModels):
candidateParticle = None
# If we've reached the max # of models to evaluate, we're done.
if (self._maxModels is not None and
(self._resultsDB.numModels() - self._resultsDB.getNumErrModels()) >=
self._maxModels):
return (self._okToExit(), [])
# If we don't already have a particle to work on, get a candidate swarm and
# particle to work with. If None is returned for the particle it means
# either that the search is over (if exitNow is also True) or that we need
# to wait for other workers to finish up their models before we can pick
# another particle to run (if exitNow is False).
if candidateParticle is None:
(exitNow, candidateParticle, candidateSwarm) = (
self._getCandidateParticleAndSwarm())
if candidateParticle is None:
if exitNow:
return (self._okToExit(), [])
else:
# Send an update status periodically to the JobTracker so that it doesn't
# think this worker is dead.
print >> sys.stderr, "reporter:status:In hypersearchV2: speculativeWait"
time.sleep(self._speculativeWaitSecondsMax * random.random())
return (False, [])
useEncoders = candidateSwarm.split('.')
numAttempts = 0
# Loop until we can create a unique model that we haven't seen yet.
while True:
# If this is the Nth attempt with the same candidate, agitate it a bit
# to find a new unique position for it.
if numAttempts >= 1:
self.logger.debug("Agitating particle to get unique position after %d "
"failed attempts in a row" % (numAttempts))
candidateParticle.agitate()
# Create the hierarchical params expected by the base description. Note
# that this is where we incorporate encoders that have no permuted
# values in them.
position = candidateParticle.getPosition()
structuredParams = dict()
def _buildStructuredParams(value, keys):
flatKey = _flattenKeys(keys)
# If it's an encoder, either put in None if it's not used, or replace
# all permuted constructor params with the actual position.
if flatKey in self._encoderNames:
if flatKey in useEncoders:
# Form encoder dict, substituting in chosen permutation values.
return value.getDict(flatKey, position)
# Encoder not used.
else:
return None
# Regular top-level variable.
elif flatKey in position:
return position[flatKey]
# Fixed override of a parameter in the base description.
else:
return value
structuredParams = rCopy(self._permutations,
_buildStructuredParams,
discardNoneKeys=False)
# Create the modelParams.
modelParams = dict(
structuredParams=structuredParams,
particleState = candidateParticle.getState()
)
# And the hashes.
m = hashlib.md5()
m.update(sortedJSONDumpS(structuredParams))
m.update(self._baseDescriptionHash)
paramsHash = m.digest()
particleInst = "%s.%s" % (modelParams['particleState']['id'],
modelParams['particleState']['genIdx'])
particleHash = hashlib.md5(particleInst).digest()
# Increase attempt counter
numAttempts += 1
# If this is a new one, and passes the filter test, exit with it.
# TODO: There is currently a problem with this filter's implementation as
# it relates to self._maxUniqueModelAttempts. When there is a filter in
# effect, we should try a lot more times before we decide we have
# exhausted the parameter space for this swarm. The question is, how many
# more times?
if self._filterFunc and not self._filterFunc(structuredParams):
valid = False
else:
valid = True
if valid and self._resultsDB.getModelIDFromParamsHash(paramsHash) is None:
break
# If we've exceeded the max allowed number of attempts, mark this swarm
# as completing or completed, so we don't try and allocate any more new
# particles to it, and pick another.
if numAttempts >= self._maxUniqueModelAttempts:
(exitNow, candidateParticle, candidateSwarm) \
= self._getCandidateParticleAndSwarm(
exhaustedSwarmId=candidateSwarm)
if candidateParticle is None:
if exitNow:
return (self._okToExit(), [])
else:
time.sleep(self._speculativeWaitSecondsMax * random.random())
return (False, [])
numAttempts = 0
useEncoders = candidateSwarm.split('.')
# Log message
if self.logger.getEffectiveLevel() <= logging.DEBUG:
self.logger.debug("Submitting new potential model to HypersearchWorker: \n%s"
% (pprint.pformat(modelParams, indent=4)))
modelResults.append((modelParams, paramsHash, particleHash))
return (False, modelResults) | [
"def",
"createModels",
"(",
"self",
",",
"numModels",
"=",
"1",
")",
":",
"# Check for and mark orphaned models",
"self",
".",
"_checkForOrphanedModels",
"(",
")",
"modelResults",
"=",
"[",
"]",
"for",
"_",
"in",
"xrange",
"(",
"numModels",
")",
":",
"candidateParticle",
"=",
"None",
"# If we've reached the max # of model to evaluate, we're done.",
"if",
"(",
"self",
".",
"_maxModels",
"is",
"not",
"None",
"and",
"(",
"self",
".",
"_resultsDB",
".",
"numModels",
"(",
")",
"-",
"self",
".",
"_resultsDB",
".",
"getNumErrModels",
"(",
")",
")",
">=",
"self",
".",
"_maxModels",
")",
":",
"return",
"(",
"self",
".",
"_okToExit",
"(",
")",
",",
"[",
"]",
")",
"# If we don't already have a particle to work on, get a candidate swarm and",
"# particle to work with. If None is returned for the particle it means",
"# either that the search is over (if exitNow is also True) or that we need",
"# to wait for other workers to finish up their models before we can pick",
"# another particle to run (if exitNow is False).",
"if",
"candidateParticle",
"is",
"None",
":",
"(",
"exitNow",
",",
"candidateParticle",
",",
"candidateSwarm",
")",
"=",
"(",
"self",
".",
"_getCandidateParticleAndSwarm",
"(",
")",
")",
"if",
"candidateParticle",
"is",
"None",
":",
"if",
"exitNow",
":",
"return",
"(",
"self",
".",
"_okToExit",
"(",
")",
",",
"[",
"]",
")",
"else",
":",
"# Send an update status periodically to the JobTracker so that it doesn't",
"# think this worker is dead.",
"print",
">>",
"sys",
".",
"stderr",
",",
"\"reporter:status:In hypersearchV2: speculativeWait\"",
"time",
".",
"sleep",
"(",
"self",
".",
"_speculativeWaitSecondsMax",
"*",
"random",
".",
"random",
"(",
")",
")",
"return",
"(",
"False",
",",
"[",
"]",
")",
"useEncoders",
"=",
"candidateSwarm",
".",
"split",
"(",
"'.'",
")",
"numAttempts",
"=",
"0",
"# Loop until we can create a unique model that we haven't seen yet.",
"while",
"True",
":",
"# If this is the Nth attempt with the same candidate, agitate it a bit",
"# to find a new unique position for it.",
"if",
"numAttempts",
">=",
"1",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Agitating particle to get unique position after %d \"",
"\"failed attempts in a row\"",
"%",
"(",
"numAttempts",
")",
")",
"candidateParticle",
".",
"agitate",
"(",
")",
"# Create the hierarchical params expected by the base description. Note",
"# that this is where we incorporate encoders that have no permuted",
"# values in them.",
"position",
"=",
"candidateParticle",
".",
"getPosition",
"(",
")",
"structuredParams",
"=",
"dict",
"(",
")",
"def",
"_buildStructuredParams",
"(",
"value",
",",
"keys",
")",
":",
"flatKey",
"=",
"_flattenKeys",
"(",
"keys",
")",
"# If it's an encoder, either put in None if it's not used, or replace",
"# all permuted constructor params with the actual position.",
"if",
"flatKey",
"in",
"self",
".",
"_encoderNames",
":",
"if",
"flatKey",
"in",
"useEncoders",
":",
"# Form encoder dict, substituting in chosen permutation values.",
"return",
"value",
".",
"getDict",
"(",
"flatKey",
",",
"position",
")",
"# Encoder not used.",
"else",
":",
"return",
"None",
"# Regular top-level variable.",
"elif",
"flatKey",
"in",
"position",
":",
"return",
"position",
"[",
"flatKey",
"]",
"# Fixed override of a parameter in the base description.",
"else",
":",
"return",
"value",
"structuredParams",
"=",
"rCopy",
"(",
"self",
".",
"_permutations",
",",
"_buildStructuredParams",
",",
"discardNoneKeys",
"=",
"False",
")",
"# Create the modelParams.",
"modelParams",
"=",
"dict",
"(",
"structuredParams",
"=",
"structuredParams",
",",
"particleState",
"=",
"candidateParticle",
".",
"getState",
"(",
")",
")",
"# And the hashes.",
"m",
"=",
"hashlib",
".",
"md5",
"(",
")",
"m",
".",
"update",
"(",
"sortedJSONDumpS",
"(",
"structuredParams",
")",
")",
"m",
".",
"update",
"(",
"self",
".",
"_baseDescriptionHash",
")",
"paramsHash",
"=",
"m",
".",
"digest",
"(",
")",
"particleInst",
"=",
"\"%s.%s\"",
"%",
"(",
"modelParams",
"[",
"'particleState'",
"]",
"[",
"'id'",
"]",
",",
"modelParams",
"[",
"'particleState'",
"]",
"[",
"'genIdx'",
"]",
")",
"particleHash",
"=",
"hashlib",
".",
"md5",
"(",
"particleInst",
")",
".",
"digest",
"(",
")",
"# Increase attempt counter",
"numAttempts",
"+=",
"1",
"# If this is a new one, and passes the filter test, exit with it.",
"# TODO: There is currently a problem with this filters implementation as",
"# it relates to self._maxUniqueModelAttempts. When there is a filter in",
"# effect, we should try a lot more times before we decide we have",
"# exhausted the parameter space for this swarm. The question is, how many",
"# more times?",
"if",
"self",
".",
"_filterFunc",
"and",
"not",
"self",
".",
"_filterFunc",
"(",
"structuredParams",
")",
":",
"valid",
"=",
"False",
"else",
":",
"valid",
"=",
"True",
"if",
"valid",
"and",
"self",
".",
"_resultsDB",
".",
"getModelIDFromParamsHash",
"(",
"paramsHash",
")",
"is",
"None",
":",
"break",
"# If we've exceeded the max allowed number of attempts, mark this swarm",
"# as completing or completed, so we don't try and allocate any more new",
"# particles to it, and pick another.",
"if",
"numAttempts",
">=",
"self",
".",
"_maxUniqueModelAttempts",
":",
"(",
"exitNow",
",",
"candidateParticle",
",",
"candidateSwarm",
")",
"=",
"self",
".",
"_getCandidateParticleAndSwarm",
"(",
"exhaustedSwarmId",
"=",
"candidateSwarm",
")",
"if",
"candidateParticle",
"is",
"None",
":",
"if",
"exitNow",
":",
"return",
"(",
"self",
".",
"_okToExit",
"(",
")",
",",
"[",
"]",
")",
"else",
":",
"time",
".",
"sleep",
"(",
"self",
".",
"_speculativeWaitSecondsMax",
"*",
"random",
".",
"random",
"(",
")",
")",
"return",
"(",
"False",
",",
"[",
"]",
")",
"numAttempts",
"=",
"0",
"useEncoders",
"=",
"candidateSwarm",
".",
"split",
"(",
"'.'",
")",
"# Log message",
"if",
"self",
".",
"logger",
".",
"getEffectiveLevel",
"(",
")",
"<=",
"logging",
".",
"DEBUG",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Submitting new potential model to HypersearchWorker: \\n%s\"",
"%",
"(",
"pprint",
".",
"pformat",
"(",
"modelParams",
",",
"indent",
"=",
"4",
")",
")",
")",
"modelResults",
".",
"append",
"(",
"(",
"modelParams",
",",
"paramsHash",
",",
"particleHash",
")",
")",
"return",
"(",
"False",
",",
"modelResults",
")"
] | Create one or more new models for evaluation. These should NOT be models
that we already know are in progress (i.e. those that have been sent to us
via recordModelProgress). We return a list of models to the caller
(HypersearchWorker) and if one can be successfully inserted into
the models table (i.e. it is not a duplicate) then HypersearchWorker will
turn around and call our runModel() method, passing in this model. If it
is a duplicate, HypersearchWorker will call this method again. A model
is a duplicate if either the modelParamsHash or particleHash is
identical to another entry in the model table.
The numModels is provided by HypersearchWorker as a suggestion as to how
many models to generate. This particular implementation only ever returns 1
model.
Before choosing some new models, we first do a sweep for any models that
may have been abandoned by failed workers. If/when we detect an abandoned
model, we mark it as complete and orphaned and hide it from any subsequent
queries to our ResultsDB. This effectively considers it as if it never
existed. We also change the paramsHash and particleHash in the model record
of the models table so that we can create another model with the same
params and particle status and run it (which we then do immediately).
The modelParamsHash returned for each model should be a hash (max allowed
size of ClientJobsDAO.hashMaxSize) that uniquely identifies this model by
its params and the optional particleHash should be a hash of the particleId
and generation index. Every model that gets placed into the models database,
either by this worker or another worker, will have these hashes computed for
it. The recordModelProgress gets called for every model in the database and
the hash is used to tell which, if any, are the same as the ones this worker
generated.
NOTE: We first check ourselves for possible duplicates using the paramsHash
before we return a model. If HypersearchWorker failed to insert it (because
some other worker beat us to it), it will turn around and call our
recordModelProgress with that other model so that we now know about it. It
will then call createModels() again.
This method returns an exit boolean and the model to evaluate. If there is
no model to evaluate, we may return False for exit because we want to stay
alive for a while, waiting for all other models to finish. This gives us
a chance to detect and pick up any model possibly orphaned by another
worker.
Parameters:
----------------------------------------------------------------------
numModels: number of models to generate
retval: (exit, models)
exit: true if this worker should exit.
models: list of tuples, one for each model. Each tuple contains:
(modelParams, modelParamsHash, particleHash)
modelParams is a dictionary containing the following elements:
structuredParams: dictionary containing all variables for
this model, with encoders represented as a dict within
this dict (or None if they are not included).
particleState: dictionary containing the state of this
particle. This includes the position and velocity of
each of its variables, the particleId, and the particle
generation index. It contains the following keys:
id: The particle Id of the particle we are using to
generate/track this model. This is a string of the
form <hypersearchWorkerId>.<particleIdx>
genIdx: the particle's generation index. This starts at 0
and increments every time we move the particle to a
new position.
swarmId: The swarmId, which is a string of the form
<encoder>.<encoder>... that describes this swarm
varStates: dict of the variable states. The key is the
variable name, the value is a dict of the variable's
position, velocity, bestPosition, bestResult, etc. | [
"Create",
"one",
"or",
"more",
"new",
"models",
"for",
"evaluation",
".",
"These",
"should",
"NOT",
"be",
"models",
"that",
"we",
"already",
"know",
"are",
"in",
"progress",
"(",
"i",
".",
"e",
".",
"those",
"that",
"have",
"been",
"sent",
"to",
"us",
"via",
"recordModelProgress",
")",
".",
"We",
"return",
"a",
"list",
"of",
"models",
"to",
"the",
"caller",
"(",
"HypersearchWorker",
")",
"and",
"if",
"one",
"can",
"be",
"successfully",
"inserted",
"into",
"the",
"models",
"table",
"(",
"i",
".",
"e",
".",
"it",
"is",
"not",
"a",
"duplicate",
")",
"then",
"HypersearchWorker",
"will",
"turn",
"around",
"and",
"call",
"our",
"runModel",
"()",
"method",
"passing",
"in",
"this",
"model",
".",
"If",
"it",
"is",
"a",
"duplicate",
"HypersearchWorker",
"will",
"call",
"this",
"method",
"again",
".",
"A",
"model",
"is",
"a",
"duplicate",
"if",
"either",
"the",
"modelParamsHash",
"or",
"particleHash",
"is",
"identical",
"to",
"another",
"entry",
"in",
"the",
"model",
"table",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/hypersearch_v2.py#L2100-L2300 | valid |
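The duplicate-detection hashes built in createModels are straightforward to reproduce: paramsHash is an md5 over a canonical JSON dump of structuredParams mixed with the base description hash, and particleHash is an md5 of "<particleId>.<genIdx>". In the sketch below, json.dumps(..., sort_keys=True) is assumed to approximate the source's sortedJSONDumpS helper, which may canonicalize differently; treat this as an illustration of the idea, not a byte-for-byte reimplementation.

import hashlib
import json

def computeHashes(structuredParams, baseDescriptionHash, particleState):
    # paramsHash: md5 over a sorted-key JSON dump of the structured params,
    # mixed with the hash of the base description. The .encode() calls are
    # added so this also runs on Python 3; the Python 2 source omits them.
    m = hashlib.md5()
    m.update(json.dumps(structuredParams, sort_keys=True).encode('utf-8'))
    m.update(baseDescriptionHash)
    paramsHash = m.digest()
    # particleHash: md5 of "<particleId>.<generationIndex>".
    particleInst = "%s.%s" % (particleState['id'], particleState['genIdx'])
    particleHash = hashlib.md5(particleInst.encode('utf-8')).digest()
    return paramsHash, particleHash

paramsHash, particleHash = computeHashes(
    {'modelParams|inferenceType': 'TemporalNextStep'},  # example params
    hashlib.md5(b'base description').digest(),          # example base hash
    {'id': '0.3', 'genIdx': 2})                         # example particle
print(len(paramsHash), len(particleHash))  # both 16-byte md5 digests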
numenta/nupic | src/nupic/swarming/hypersearch_v2.py | HypersearchV2.recordModelProgress | def recordModelProgress(self, modelID, modelParams, modelParamsHash, results,
completed, completionReason, matured, numRecords):
"""Record or update the results for a model. This is called by the
HSW whenever it gets results info for another model, or updated results
on a model that is still running.
The first time this is called for a given modelID, the modelParams will
contain the params dict for that model and the modelParamsHash will contain
the hash of the params. Subsequent updates of the same modelID will
have params and paramsHash values of None (in order to save overhead).
The Hypersearch object should save these results into its own working
memory, in some table, which it then uses to determine what kind of
new models to create next time createModels() is called.
Parameters:
----------------------------------------------------------------------
modelID: ID of this model in models table
modelParams: params dict for this model, or None if this is just an update
of a model that it already previously reported on.
See the comments for the createModels() method for a
description of this dict.
modelParamsHash: hash of the modelParams dict, generated by the worker
that put it into the model database.
results: tuple containing (allMetrics, optimizeMetric). Each is a
dict containing metricName:result pairs.
May be None if we have no results yet.
completed: True if the model has completed evaluation, False if it
is still running (and these are online results)
completionReason: One of the ClientJobsDAO.CMPL_REASON_XXX equates
matured: True if this model has matured. In most cases, once a
model matures, it will complete as well. The only time a
model matures and does not complete is if it's currently
the best model and we choose to keep it running to generate
predictions.
numRecords: Number of records that have been processed so far by this
model.
"""
if results is None:
metricResult = None
else:
metricResult = results[1].values()[0]
# Update our database.
errScore = self._resultsDB.update(modelID=modelID,
modelParams=modelParams, modelParamsHash=modelParamsHash,
metricResult=metricResult, completed=completed,
completionReason=completionReason, matured=matured,
numRecords=numRecords)
# Log message.
self.logger.debug('Received progress on model %d: completed: %s, '
'cmpReason: %s, numRecords: %d, errScore: %s',
modelID, completed, completionReason, numRecords, errScore)
# Log best so far.
(bestModelID, bestResult) = self._resultsDB.bestModelIdAndErrScore()
self.logger.debug('Best err score seen so far: %s on model %s' % \
(bestResult, bestModelID)) | python | def recordModelProgress(self, modelID, modelParams, modelParamsHash, results,
completed, completionReason, matured, numRecords):
"""Record or update the results for a model. This is called by the
HSW whenever it gets results info for another model, or updated results
on a model that is still running.
The first time this is called for a given modelID, the modelParams will
contain the params dict for that model and the modelParamsHash will contain
the hash of the params. Subsequent updates of the same modelID will
have params and paramsHash values of None (in order to save overhead).
The Hypersearch object should save these results into its own working
memory, in some table, which it then uses to determine what kind of
new models to create next time createModels() is called.
Parameters:
----------------------------------------------------------------------
modelID: ID of this model in models table
modelParams: params dict for this model, or None if this is just an update
of a model that it already previously reported on.
See the comments for the createModels() method for a
description of this dict.
modelParamsHash: hash of the modelParams dict, generated by the worker
that put it into the model database.
results: tuple containing (allMetrics, optimizeMetric). Each is a
dict containing metricName:result pairs.
May be None if we have no results yet.
completed: True if the model has completed evaluation, False if it
is still running (and these are online results)
completionReason: One of the ClientJobsDAO.CMPL_REASON_XXX equates
matured: True if this model has matured. In most cases, once a
model matures, it will complete as well. The only time a
model matures and does not complete is if it's currently
the best model and we choose to keep it running to generate
predictions.
numRecords: Number of records that have been processed so far by this
model.
"""
if results is None:
metricResult = None
else:
metricResult = results[1].values()[0]
# Update our database.
errScore = self._resultsDB.update(modelID=modelID,
modelParams=modelParams, modelParamsHash=modelParamsHash,
metricResult=metricResult, completed=completed,
completionReason=completionReason, matured=matured,
numRecords=numRecords)
# Log message.
self.logger.debug('Received progress on model %d: completed: %s, '
'cmpReason: %s, numRecords: %d, errScore: %s',
modelID, completed, completionReason, numRecords, errScore)
# Log best so far.
(bestModelID, bestResult) = self._resultsDB.bestModelIdAndErrScore()
self.logger.debug('Best err score seen so far: %s on model %s' % \
(bestResult, bestModelID)) | [
"def",
"recordModelProgress",
"(",
"self",
",",
"modelID",
",",
"modelParams",
",",
"modelParamsHash",
",",
"results",
",",
"completed",
",",
"completionReason",
",",
"matured",
",",
"numRecords",
")",
":",
"if",
"results",
"is",
"None",
":",
"metricResult",
"=",
"None",
"else",
":",
"metricResult",
"=",
"results",
"[",
"1",
"]",
".",
"values",
"(",
")",
"[",
"0",
"]",
"# Update our database.",
"errScore",
"=",
"self",
".",
"_resultsDB",
".",
"update",
"(",
"modelID",
"=",
"modelID",
",",
"modelParams",
"=",
"modelParams",
",",
"modelParamsHash",
"=",
"modelParamsHash",
",",
"metricResult",
"=",
"metricResult",
",",
"completed",
"=",
"completed",
",",
"completionReason",
"=",
"completionReason",
",",
"matured",
"=",
"matured",
",",
"numRecords",
"=",
"numRecords",
")",
"# Log message.",
"self",
".",
"logger",
".",
"debug",
"(",
"'Received progress on model %d: completed: %s, '",
"'cmpReason: %s, numRecords: %d, errScore: %s'",
",",
"modelID",
",",
"completed",
",",
"completionReason",
",",
"numRecords",
",",
"errScore",
")",
"# Log best so far.",
"(",
"bestModelID",
",",
"bestResult",
")",
"=",
"self",
".",
"_resultsDB",
".",
"bestModelIdAndErrScore",
"(",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"'Best err score seen so far: %s on model %s'",
"%",
"(",
"bestResult",
",",
"bestModelID",
")",
")"
] | Record or update the results for a model. This is called by the
HSW whenever it gets results info for another model, or updated results
on a model that is still running.
The first time this is called for a given modelID, the modelParams will
contain the params dict for that model and the modelParamsHash will contain
the hash of the params. Subsequent updates of the same modelID will
have params and paramsHash values of None (in order to save overhead).
The Hypersearch object should save these results into its own working
memory, in some table, which it then uses to determine what kind of
new models to create next time createModels() is called.
Parameters:
----------------------------------------------------------------------
modelID: ID of this model in models table
modelParams: params dict for this model, or None if this is just an update
of a model that was already reported on.
See the comments for the createModels() method for a
description of this dict.
modelParamsHash: hash of the modelParams dict, generated by the worker
that put it into the model database.
results: tuple containing (allMetrics, optimizeMetric). Each is a
dict containing metricName:result pairs.
May be None if we have no results yet.
completed: True if the model has completed evaluation, False if it
is still running (and these are online results)
completionReason: One of the ClientJobsDAO.CMPL_REASON_XXX equates
matured: True if this model has matured. In most cases, once a
model matures, it will complete as well. The only time a
model matures and does not complete is if it's currently
the best model and we choose to keep it running to generate
predictions.
numRecords: Number of records that have been processed so far by this
model. | [
"Record",
"or",
"update",
"the",
"results",
"for",
"a",
"model",
".",
"This",
"is",
"called",
"by",
"the",
"HSW",
"whenever",
"it",
"gets",
"results",
"info",
"for",
"another",
"model",
"or",
"updated",
"results",
"on",
"a",
"model",
"that",
"is",
"still",
"running",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/hypersearch_v2.py#L2302-L2362 | valid |
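
A minimal sketch may make the bookkeeping described above concrete: the Hypersearch object keeps a per-model table in working memory, stores params/paramsHash only on the first report for a model, and answers best-so-far queries from that table. The SimpleResultsDB name and the lower-error-is-better convention below are illustrative assumptions, not nupic's actual _ResultsDB implementation.

class SimpleResultsDB(object):
    # Hypothetical stand-in for the results table the docstring describes.
    def __init__(self):
        self._models = {}  # modelID -> latest known state

    def update(self, modelID, modelParams, modelParamsHash, metricResult,
               completed, completionReason, matured, numRecords):
        entry = self._models.setdefault(modelID, {"params": None, "hash": None})
        if modelParams is not None:          # only present on the first report
            entry["params"] = modelParams
            entry["hash"] = modelParamsHash
        entry.update(metricResult=metricResult, completed=completed,
                     completionReason=completionReason, matured=matured,
                     numRecords=numRecords)
        return metricResult                  # plays the role of errScore above

    def bestModelIdAndErrScore(self):
        scored = [(state["metricResult"], mid) for (mid, state)
                  in self._models.items() if state["metricResult"] is not None]
        if not scored:
            return (None, None)
        err, modelID = min(scored)           # assumes lower error is better
        return (modelID, err)
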
numenta/nupic | src/nupic/swarming/hypersearch_v2.py | HypersearchV2.runModel | def runModel(self, modelID, jobID, modelParams, modelParamsHash,
jobsDAO, modelCheckpointGUID):
"""Run the given model.
This runs the model described by 'modelParams'. Periodically, it updates
the results seen on the model to the model database using the databaseAO
(database Access Object) methods.
Parameters:
-------------------------------------------------------------------------
modelID: ID of this model in models table
jobID: ID for this hypersearch job in the jobs table
modelParams: parameters of this specific model
modelParams is a dictionary containing the name/value
pairs of each variable we are permuting over. Note that
variables within an encoder spec have their names
structured as:
<encoderName>.<encoderVarName>
modelParamsHash: hash of modelParamValues
jobsDAO: jobs data access object - the interface to the jobs
database where model information is stored
modelCheckpointGUID: A persistent, globally-unique identifier for
constructing the model checkpoint key
"""
# We're going to make an assumption that if we're not using streams, that
# we also don't need checkpoints saved. For now, this assumption is OK
# (if there are no streams, we're typically running on a single machine
# and just save models to files) but we may want to break this out as
# a separate controllable parameter in the future
if not self._createCheckpoints:
modelCheckpointGUID = None
# Register this model in our database
self._resultsDB.update(modelID=modelID,
modelParams=modelParams,
modelParamsHash=modelParamsHash,
metricResult = None,
completed = False,
completionReason = None,
matured = False,
numRecords = 0)
# Get the structured params, which we pass to the base description
structuredParams = modelParams['structuredParams']
if self.logger.getEffectiveLevel() <= logging.DEBUG:
self.logger.debug("Running Model. \nmodelParams: %s, \nmodelID=%s, " % \
(pprint.pformat(modelParams, indent=4), modelID))
# Record time.clock() so that we can report on cpu time
cpuTimeStart = time.clock()
# Run the experiment. This will report the results back to the models
# database for us as well.
logLevel = self.logger.getEffectiveLevel()
try:
if self._dummyModel is None or self._dummyModel is False:
(cmpReason, cmpMsg) = runModelGivenBaseAndParams(
modelID=modelID,
jobID=jobID,
baseDescription=self._baseDescription,
params=structuredParams,
predictedField=self._predictedField,
reportKeys=self._reportKeys,
optimizeKey=self._optimizeKey,
jobsDAO=jobsDAO,
modelCheckpointGUID=modelCheckpointGUID,
logLevel=logLevel,
predictionCacheMaxRecords=self._predictionCacheMaxRecords)
else:
dummyParams = dict(self._dummyModel)
dummyParams['permutationParams'] = structuredParams
if self._dummyModelParamsFunc is not None:
permInfo = dict(structuredParams)
permInfo ['generation'] = modelParams['particleState']['genIdx']
dummyParams.update(self._dummyModelParamsFunc(permInfo))
(cmpReason, cmpMsg) = runDummyModel(
modelID=modelID,
jobID=jobID,
params=dummyParams,
predictedField=self._predictedField,
reportKeys=self._reportKeys,
optimizeKey=self._optimizeKey,
jobsDAO=jobsDAO,
modelCheckpointGUID=modelCheckpointGUID,
logLevel=logLevel,
predictionCacheMaxRecords=self._predictionCacheMaxRecords)
# Write out the completion reason and message
jobsDAO.modelSetCompleted(modelID,
completionReason = cmpReason,
completionMsg = cmpMsg,
cpuTime = time.clock() - cpuTimeStart)
except InvalidConnectionException, e:
self.logger.warn("%s", e) | python | def runModel(self, modelID, jobID, modelParams, modelParamsHash,
jobsDAO, modelCheckpointGUID):
"""Run the given model.
This runs the model described by 'modelParams'. Periodically, it updates
the results seen on the model to the model database using the databaseAO
(database Access Object) methods.
Parameters:
-------------------------------------------------------------------------
modelID: ID of this model in models table
jobID: ID for this hypersearch job in the jobs table
modelParams: parameters of this specific model
modelParams is a dictionary containing the name/value
pairs of each variable we are permuting over. Note that
variables within an encoder spec have their names
structured as:
<encoderName>.<encoderVarName>
modelParamsHash: hash of modelParamValues
jobsDAO: jobs data access object - the interface to the jobs
database where model information is stored
modelCheckpointGUID: A persistent, globally-unique identifier for
constructing the model checkpoint key
"""
# We're going to make an assumption that if we're not using streams, that
# we also don't need checkpoints saved. For now, this assumption is OK
# (if there are no streams, we're typically running on a single machine
# and just save models to files) but we may want to break this out as
# a separate controllable parameter in the future
if not self._createCheckpoints:
modelCheckpointGUID = None
# Register this model in our database
self._resultsDB.update(modelID=modelID,
modelParams=modelParams,
modelParamsHash=modelParamsHash,
metricResult = None,
completed = False,
completionReason = None,
matured = False,
numRecords = 0)
# Get the structured params, which we pass to the base description
structuredParams = modelParams['structuredParams']
if self.logger.getEffectiveLevel() <= logging.DEBUG:
self.logger.debug("Running Model. \nmodelParams: %s, \nmodelID=%s, " % \
(pprint.pformat(modelParams, indent=4), modelID))
# Record time.clock() so that we can report on cpu time
cpuTimeStart = time.clock()
# Run the experiment. This will report the results back to the models
# database for us as well.
logLevel = self.logger.getEffectiveLevel()
try:
if self._dummyModel is None or self._dummyModel is False:
(cmpReason, cmpMsg) = runModelGivenBaseAndParams(
modelID=modelID,
jobID=jobID,
baseDescription=self._baseDescription,
params=structuredParams,
predictedField=self._predictedField,
reportKeys=self._reportKeys,
optimizeKey=self._optimizeKey,
jobsDAO=jobsDAO,
modelCheckpointGUID=modelCheckpointGUID,
logLevel=logLevel,
predictionCacheMaxRecords=self._predictionCacheMaxRecords)
else:
dummyParams = dict(self._dummyModel)
dummyParams['permutationParams'] = structuredParams
if self._dummyModelParamsFunc is not None:
permInfo = dict(structuredParams)
permInfo ['generation'] = modelParams['particleState']['genIdx']
dummyParams.update(self._dummyModelParamsFunc(permInfo))
(cmpReason, cmpMsg) = runDummyModel(
modelID=modelID,
jobID=jobID,
params=dummyParams,
predictedField=self._predictedField,
reportKeys=self._reportKeys,
optimizeKey=self._optimizeKey,
jobsDAO=jobsDAO,
modelCheckpointGUID=modelCheckpointGUID,
logLevel=logLevel,
predictionCacheMaxRecords=self._predictionCacheMaxRecords)
# Write out the completion reason and message
jobsDAO.modelSetCompleted(modelID,
completionReason = cmpReason,
completionMsg = cmpMsg,
cpuTime = time.clock() - cpuTimeStart)
except InvalidConnectionException, e:
self.logger.warn("%s", e) | [
"def",
"runModel",
"(",
"self",
",",
"modelID",
",",
"jobID",
",",
"modelParams",
",",
"modelParamsHash",
",",
"jobsDAO",
",",
"modelCheckpointGUID",
")",
":",
"# We're going to make an assumption that if we're not using streams, that",
"# we also don't need checkpoints saved. For now, this assumption is OK",
"# (if there are no streams, we're typically running on a single machine",
"# and just save models to files) but we may want to break this out as",
"# a separate controllable parameter in the future",
"if",
"not",
"self",
".",
"_createCheckpoints",
":",
"modelCheckpointGUID",
"=",
"None",
"# Register this model in our database",
"self",
".",
"_resultsDB",
".",
"update",
"(",
"modelID",
"=",
"modelID",
",",
"modelParams",
"=",
"modelParams",
",",
"modelParamsHash",
"=",
"modelParamsHash",
",",
"metricResult",
"=",
"None",
",",
"completed",
"=",
"False",
",",
"completionReason",
"=",
"None",
",",
"matured",
"=",
"False",
",",
"numRecords",
"=",
"0",
")",
"# Get the structured params, which we pass to the base description",
"structuredParams",
"=",
"modelParams",
"[",
"'structuredParams'",
"]",
"if",
"self",
".",
"logger",
".",
"getEffectiveLevel",
"(",
")",
"<=",
"logging",
".",
"DEBUG",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Running Model. \\nmodelParams: %s, \\nmodelID=%s, \"",
"%",
"(",
"pprint",
".",
"pformat",
"(",
"modelParams",
",",
"indent",
"=",
"4",
")",
",",
"modelID",
")",
")",
"# Record time.clock() so that we can report on cpu time",
"cpuTimeStart",
"=",
"time",
".",
"clock",
"(",
")",
"# Run the experiment. This will report the results back to the models",
"# database for us as well.",
"logLevel",
"=",
"self",
".",
"logger",
".",
"getEffectiveLevel",
"(",
")",
"try",
":",
"if",
"self",
".",
"_dummyModel",
"is",
"None",
"or",
"self",
".",
"_dummyModel",
"is",
"False",
":",
"(",
"cmpReason",
",",
"cmpMsg",
")",
"=",
"runModelGivenBaseAndParams",
"(",
"modelID",
"=",
"modelID",
",",
"jobID",
"=",
"jobID",
",",
"baseDescription",
"=",
"self",
".",
"_baseDescription",
",",
"params",
"=",
"structuredParams",
",",
"predictedField",
"=",
"self",
".",
"_predictedField",
",",
"reportKeys",
"=",
"self",
".",
"_reportKeys",
",",
"optimizeKey",
"=",
"self",
".",
"_optimizeKey",
",",
"jobsDAO",
"=",
"jobsDAO",
",",
"modelCheckpointGUID",
"=",
"modelCheckpointGUID",
",",
"logLevel",
"=",
"logLevel",
",",
"predictionCacheMaxRecords",
"=",
"self",
".",
"_predictionCacheMaxRecords",
")",
"else",
":",
"dummyParams",
"=",
"dict",
"(",
"self",
".",
"_dummyModel",
")",
"dummyParams",
"[",
"'permutationParams'",
"]",
"=",
"structuredParams",
"if",
"self",
".",
"_dummyModelParamsFunc",
"is",
"not",
"None",
":",
"permInfo",
"=",
"dict",
"(",
"structuredParams",
")",
"permInfo",
"[",
"'generation'",
"]",
"=",
"modelParams",
"[",
"'particleState'",
"]",
"[",
"'genIdx'",
"]",
"dummyParams",
".",
"update",
"(",
"self",
".",
"_dummyModelParamsFunc",
"(",
"permInfo",
")",
")",
"(",
"cmpReason",
",",
"cmpMsg",
")",
"=",
"runDummyModel",
"(",
"modelID",
"=",
"modelID",
",",
"jobID",
"=",
"jobID",
",",
"params",
"=",
"dummyParams",
",",
"predictedField",
"=",
"self",
".",
"_predictedField",
",",
"reportKeys",
"=",
"self",
".",
"_reportKeys",
",",
"optimizeKey",
"=",
"self",
".",
"_optimizeKey",
",",
"jobsDAO",
"=",
"jobsDAO",
",",
"modelCheckpointGUID",
"=",
"modelCheckpointGUID",
",",
"logLevel",
"=",
"logLevel",
",",
"predictionCacheMaxRecords",
"=",
"self",
".",
"_predictionCacheMaxRecords",
")",
"# Write out the completion reason and message",
"jobsDAO",
".",
"modelSetCompleted",
"(",
"modelID",
",",
"completionReason",
"=",
"cmpReason",
",",
"completionMsg",
"=",
"cmpMsg",
",",
"cpuTime",
"=",
"time",
".",
"clock",
"(",
")",
"-",
"cpuTimeStart",
")",
"except",
"InvalidConnectionException",
",",
"e",
":",
"self",
".",
"logger",
".",
"warn",
"(",
"\"%s\"",
",",
"e",
")"
] | Run the given model.
This runs the model described by 'modelParams'. Periodically, it updates
the results seen on the model to the model database using the databaseAO
(database Access Object) methods.
Parameters:
-------------------------------------------------------------------------
modelID: ID of this model in models table
jobID: ID for this hypersearch job in the jobs table
modelParams: parameters of this specific model
modelParams is a dictionary containing the name/value
pairs of each variable we are permuting over. Note that
variables within an encoder spec have their names
structured as:
<encoderName>.<encoderVarName>
modelParamsHash: hash of modelParamValues
jobsDAO: jobs data access object - the interface to the jobs
database where model information is stored
modelCheckpointGUID: A persistent, globally-unique identifier for
constructing the model checkpoint key | [
"Run",
"the",
"given",
"model",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/hypersearch_v2.py#L2364-L2467 | valid |
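
The <encoderName>.<encoderVarName> naming convention in the docstring above is easiest to see on a toy dict. The encoder and variable names below are made up for illustration:

# Hypothetical flattened permutation variables, named as described above.
permutedVars = {
    "consumption.w": 21,                 # encoder "consumption", variable "w"
    "consumption.n": 500,
    "timestamp_timeOfDay.radius": 3.5,
}
for name in sorted(permutedVars):
    encoderName, varName = name.split(".", 1)
    print("%s / %s = %r" % (encoderName, varName, permutedVars[name]))
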
numenta/nupic | src/nupic/swarming/permutations_runner.py | _escape | def _escape(s):
"""Escape commas, tabs, newlines and dashes in a string
Commas are encoded as tabs
"""
assert isinstance(s, str), \
"expected %s but got %s; value=%s" % (type(str), type(s), s)
s = s.replace("\\", "\\\\")
s = s.replace("\n", "\\n")
s = s.replace("\t", "\\t")
s = s.replace(",", "\t")
return s | python | def _escape(s):
"""Escape commas, tabs, newlines and dashes in a string
Commas are encoded as tabs
"""
assert isinstance(s, str), \
"expected %s but got %s; value=%s" % (type(str), type(s), s)
s = s.replace("\\", "\\\\")
s = s.replace("\n", "\\n")
s = s.replace("\t", "\\t")
s = s.replace(",", "\t")
return s | [
"def",
"_escape",
"(",
"s",
")",
":",
"assert",
"isinstance",
"(",
"s",
",",
"str",
")",
",",
"\"expected %s but got %s; value=%s\"",
"%",
"(",
"type",
"(",
"str",
")",
",",
"type",
"(",
"s",
")",
",",
"s",
")",
"s",
"=",
"s",
".",
"replace",
"(",
"\"\\\\\"",
",",
"\"\\\\\\\\\"",
")",
"s",
"=",
"s",
".",
"replace",
"(",
"\"\\n\"",
",",
"\"\\\\n\"",
")",
"s",
"=",
"s",
".",
"replace",
"(",
"\"\\t\"",
",",
"\"\\\\t\"",
")",
"s",
"=",
"s",
".",
"replace",
"(",
"\",\"",
",",
"\"\\t\"",
")",
"return",
"s"
] | Escape commas, tabs, newlines and backslashes in a string
Commas are encoded as tabs | [
"Escape",
"commas",
"tabs",
"newlines",
"and",
"dashes",
"in",
"a",
"string"
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/permutations_runner.py#L101-L112 | valid |
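
Since _escape is a private helper, a standalone copy is the simplest way to sanity-check its behavior. The expected output below is worked out by hand from the four replacements (order matters: backslashes must be doubled first, and commas become literal tabs last):

# Standalone sketch of the escaping rules above.
def escape(s):
    s = s.replace("\\", "\\\\")   # double backslashes first
    s = s.replace("\n", "\\n")    # newline -> backslash-n
    s = s.replace("\t", "\\t")    # tab -> backslash-t
    s = s.replace(",", "\t")      # comma -> literal tab, last
    return s

print(repr(escape("a,b\tc\nd\\e")))   # -> 'a\tb\\tc\\nd\\\\e'
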
numenta/nupic | src/nupic/swarming/permutations_runner.py | _engineServicesRunning | def _engineServicesRunning():
""" Return true if the engine services are running
"""
process = subprocess.Popen(["ps", "aux"], stdout=subprocess.PIPE)
stdout = process.communicate()[0]
result = process.returncode
if result != 0:
raise RuntimeError("Unable to check for running client job manager")
# See if the CJM is running
running = False
for line in stdout.split("\n"):
if "python" in line and "clientjobmanager.client_job_manager" in line:
running = True
break
return running | python | def _engineServicesRunning():
""" Return true if the engine services are running
"""
process = subprocess.Popen(["ps", "aux"], stdout=subprocess.PIPE)
stdout = process.communicate()[0]
result = process.returncode
if result != 0:
raise RuntimeError("Unable to check for running client job manager")
# See if the CJM is running
running = False
for line in stdout.split("\n"):
if "python" in line and "clientjobmanager.client_job_manager" in line:
running = True
break
return running | [
"def",
"_engineServicesRunning",
"(",
")",
":",
"process",
"=",
"subprocess",
".",
"Popen",
"(",
"[",
"\"ps\"",
",",
"\"aux\"",
"]",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
")",
"stdout",
"=",
"process",
".",
"communicate",
"(",
")",
"[",
"0",
"]",
"result",
"=",
"process",
".",
"returncode",
"if",
"result",
"!=",
"0",
":",
"raise",
"RuntimeError",
"(",
"\"Unable to check for running client job manager\"",
")",
"# See if the CJM is running",
"running",
"=",
"False",
"for",
"line",
"in",
"stdout",
".",
"split",
"(",
"\"\\n\"",
")",
":",
"if",
"\"python\"",
"in",
"line",
"and",
"\"clientjobmanager.client_job_manager\"",
"in",
"line",
":",
"running",
"=",
"True",
"break",
"return",
"running"
] | Return true if the engine services are running | [
"Return",
"true",
"if",
"the",
"engine",
"services",
"are",
"running"
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/permutations_runner.py#L116-L133 | valid |
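
The same ps-scan pattern generalizes to any marker string. The sketch below (serviceRunning is a made-up name) adds a bytes decode so it also runs under Python 3, where communicate() returns bytes; like the original, it is Unix-only:

import subprocess

def serviceRunning(marker):
    # Scan `ps aux` output for a marker substring, as the helper above does.
    process = subprocess.Popen(["ps", "aux"], stdout=subprocess.PIPE)
    stdout = process.communicate()[0]
    if process.returncode != 0:
        raise RuntimeError("unable to list processes")
    text = stdout.decode("utf-8", "replace") if isinstance(stdout, bytes) else stdout
    return any(marker in line for line in text.split("\n"))

print(serviceRunning("clientjobmanager.client_job_manager"))
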
numenta/nupic | src/nupic/swarming/permutations_runner.py | runWithConfig | def runWithConfig(swarmConfig, options,
outDir=None, outputLabel="default",
permWorkDir=None, verbosity=1):
"""
Starts a swarm, given a dictionary configuration.
@param swarmConfig {dict} A complete [swarm description](http://nupic.docs.numenta.org/0.7.0.dev0/guides/swarming/running.html#the-swarm-description) object.
@param outDir {string} Optional path to write swarm details (defaults to
current working directory).
@param outputLabel {string} Optional label for output (defaults to "default").
@param permWorkDir {string} Optional location of working directory (defaults
to current working directory).
@param verbosity {int} Optional (1,2,3) increasing verbosity of output.
@returns {object} Model parameters
"""
global g_currentVerbosityLevel
g_currentVerbosityLevel = verbosity
# Generate the description and permutations.py files in the same directory
# for reference.
if outDir is None:
outDir = os.getcwd()
if permWorkDir is None:
permWorkDir = os.getcwd()
_checkOverwrite(options, outDir)
_generateExpFilesFromSwarmDescription(swarmConfig, outDir)
options["expDescConfig"] = swarmConfig
options["outputLabel"] = outputLabel
options["outDir"] = outDir
options["permWorkDir"] = permWorkDir
runOptions = _injectDefaultOptions(options)
_validateOptions(runOptions)
return _runAction(runOptions) | python | def runWithConfig(swarmConfig, options,
outDir=None, outputLabel="default",
permWorkDir=None, verbosity=1):
"""
Starts a swarm, given a dictionary configuration.
@param swarmConfig {dict} A complete [swarm description](http://nupic.docs.numenta.org/0.7.0.dev0/guides/swarming/running.html#the-swarm-description) object.
@param outDir {string} Optional path to write swarm details (defaults to
current working directory).
@param outputLabel {string} Optional label for output (defaults to "default").
@param permWorkDir {string} Optional location of working directory (defaults
to current working directory).
@param verbosity {int} Optional (1,2,3) increasing verbosity of output.
@returns {object} Model parameters
"""
global g_currentVerbosityLevel
g_currentVerbosityLevel = verbosity
# Generate the description and permutations.py files in the same directory
# for reference.
if outDir is None:
outDir = os.getcwd()
if permWorkDir is None:
permWorkDir = os.getcwd()
_checkOverwrite(options, outDir)
_generateExpFilesFromSwarmDescription(swarmConfig, outDir)
options["expDescConfig"] = swarmConfig
options["outputLabel"] = outputLabel
options["outDir"] = outDir
options["permWorkDir"] = permWorkDir
runOptions = _injectDefaultOptions(options)
_validateOptions(runOptions)
return _runAction(runOptions) | [
"def",
"runWithConfig",
"(",
"swarmConfig",
",",
"options",
",",
"outDir",
"=",
"None",
",",
"outputLabel",
"=",
"\"default\"",
",",
"permWorkDir",
"=",
"None",
",",
"verbosity",
"=",
"1",
")",
":",
"global",
"g_currentVerbosityLevel",
"g_currentVerbosityLevel",
"=",
"verbosity",
"# Generate the description and permutations.py files in the same directory",
"# for reference.",
"if",
"outDir",
"is",
"None",
":",
"outDir",
"=",
"os",
".",
"getcwd",
"(",
")",
"if",
"permWorkDir",
"is",
"None",
":",
"permWorkDir",
"=",
"os",
".",
"getcwd",
"(",
")",
"_checkOverwrite",
"(",
"options",
",",
"outDir",
")",
"_generateExpFilesFromSwarmDescription",
"(",
"swarmConfig",
",",
"outDir",
")",
"options",
"[",
"\"expDescConfig\"",
"]",
"=",
"swarmConfig",
"options",
"[",
"\"outputLabel\"",
"]",
"=",
"outputLabel",
"options",
"[",
"\"outDir\"",
"]",
"=",
"outDir",
"options",
"[",
"\"permWorkDir\"",
"]",
"=",
"permWorkDir",
"runOptions",
"=",
"_injectDefaultOptions",
"(",
"options",
")",
"_validateOptions",
"(",
"runOptions",
")",
"return",
"_runAction",
"(",
"runOptions",
")"
] | Starts a swarm, given a dictionary configuration.
@param swarmConfig {dict} A complete [swarm description](http://nupic.docs.numenta.org/0.7.0.dev0/guides/swarming/running.html#the-swarm-description) object.
@param outDir {string} Optional path to write swarm details (defaults to
current working directory).
@param outputLabel {string} Optional label for output (defaults to "default").
@param permWorkDir {string} Optional location of working directory (defaults
to current working directory).
@param verbosity {int} Optional (1,2,3) increasing verbosity of output.
@returns {object} Model parameters | [
"Starts",
"a",
"swarm",
"given",
"an",
"dictionary",
"configuration",
".",
"@param",
"swarmConfig",
"{",
"dict",
"}",
"A",
"complete",
"[",
"swarm",
"description",
"]",
"(",
"http",
":",
"//",
"nupic",
".",
"docs",
".",
"numenta",
".",
"org",
"/",
"0",
".",
"7",
".",
"0",
".",
"dev0",
"/",
"guides",
"/",
"swarming",
"/",
"running",
".",
"html#the",
"-",
"swarm",
"-",
"description",
")",
"object",
".",
"@param",
"outDir",
"{",
"string",
"}",
"Optional",
"path",
"to",
"write",
"swarm",
"details",
"(",
"defaults",
"to",
"current",
"working",
"directory",
")",
".",
"@param",
"outputLabel",
"{",
"string",
"}",
"Optional",
"label",
"for",
"output",
"(",
"defaults",
"to",
"default",
")",
".",
"@param",
"permWorkDir",
"{",
"string",
"}",
"Optional",
"location",
"of",
"working",
"directory",
"(",
"defaults",
"to",
"current",
"working",
"directory",
")",
".",
"@param",
"verbosity",
"{",
"int",
"}",
"Optional",
"(",
"1",
"2",
"3",
")",
"increasing",
"verbosity",
"of",
"output",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/permutations_runner.py#L234-L271 | valid |
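
A hedged usage sketch for runWithConfig follows. The swarm description keys mirror the format linked in the docstring, but the concrete values (the hotgym stream, the consumption field) and the option keys (maxWorkers, overwrite) are illustrative assumptions; a real run needs a streamDef pointing at your data:

from nupic.swarming import permutations_runner

swarmConfig = {
    "includedFields": [
        {"fieldName": "consumption", "fieldType": "float"},
    ],
    "streamDef": {
        "info": "hotgym",
        "version": 1,
        "streams": [
            {"source": "file://extra/hotgym/hotgym.csv",  # assumed path
             "info": "hotgym.csv",
             "columns": ["*"]},
        ],
    },
    "inferenceType": "TemporalAnomaly",
    "inferenceArgs": {"predictionSteps": [1],
                      "predictedField": "consumption"},
    "swarmSize": "small",
}

modelParams = permutations_runner.runWithConfig(
    swarmConfig,
    {"maxWorkers": 4, "overwrite": True},  # options dict; keys assumed
    outputLabel="hotgym",
    verbosity=0)
print(modelParams)
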
numenta/nupic | src/nupic/swarming/permutations_runner.py | runWithJsonFile | def runWithJsonFile(expJsonFilePath, options, outputLabel, permWorkDir):
"""
Starts a swarm, given a path to a JSON file containing configuration.
This function is meant to be used with a CLI wrapper that passes command line
arguments in through the options parameter.
@param expJsonFilePath {string} Path to a JSON file containing the complete
[swarm description](http://nupic.docs.numenta.org/0.7.0.dev0/guides/swarming/running.html#the-swarm-description).
@param options {dict} CLI options.
@param outputLabel {string} Label for output.
@param permWorkDir {string} Location of working directory.
@returns {int} Swarm job id.
"""
if "verbosityCount" in options:
verbosity = options["verbosityCount"]
del options["verbosityCount"]
else:
verbosity = 1
_setupInterruptHandling()
with open(expJsonFilePath, "r") as jsonFile:
expJsonConfig = json.loads(jsonFile.read())
outDir = os.path.dirname(expJsonFilePath)
return runWithConfig(expJsonConfig, options, outDir=outDir,
outputLabel=outputLabel, permWorkDir=permWorkDir,
verbosity=verbosity) | python | def runWithJsonFile(expJsonFilePath, options, outputLabel, permWorkDir):
"""
Starts a swarm, given a path to a JSON file containing configuration.
This function is meant to be used with a CLI wrapper that passes command line
arguments in through the options parameter.
@param expJsonFilePath {string} Path to a JSON file containing the complete
[swarm description](http://nupic.docs.numenta.org/0.7.0.dev0/guides/swarming/running.html#the-swarm-description).
@param options {dict} CLI options.
@param outputLabel {string} Label for output.
@param permWorkDir {string} Location of working directory.
@returns {int} Swarm job id.
"""
if "verbosityCount" in options:
verbosity = options["verbosityCount"]
del options["verbosityCount"]
else:
verbosity = 1
_setupInterruptHandling()
with open(expJsonFilePath, "r") as jsonFile:
expJsonConfig = json.loads(jsonFile.read())
outDir = os.path.dirname(expJsonFilePath)
return runWithConfig(expJsonConfig, options, outDir=outDir,
outputLabel=outputLabel, permWorkDir=permWorkDir,
verbosity=verbosity) | [
"def",
"runWithJsonFile",
"(",
"expJsonFilePath",
",",
"options",
",",
"outputLabel",
",",
"permWorkDir",
")",
":",
"if",
"\"verbosityCount\"",
"in",
"options",
":",
"verbosity",
"=",
"options",
"[",
"\"verbosityCount\"",
"]",
"del",
"options",
"[",
"\"verbosityCount\"",
"]",
"else",
":",
"verbosity",
"=",
"1",
"_setupInterruptHandling",
"(",
")",
"with",
"open",
"(",
"expJsonFilePath",
",",
"\"r\"",
")",
"as",
"jsonFile",
":",
"expJsonConfig",
"=",
"json",
".",
"loads",
"(",
"jsonFile",
".",
"read",
"(",
")",
")",
"outDir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"expJsonFilePath",
")",
"return",
"runWithConfig",
"(",
"expJsonConfig",
",",
"options",
",",
"outDir",
"=",
"outDir",
",",
"outputLabel",
"=",
"outputLabel",
",",
"permWorkDir",
"=",
"permWorkDir",
",",
"verbosity",
"=",
"verbosity",
")"
] | Starts a swarm, given a path to a JSON file containing configuration.
This function is meant to be used with a CLI wrapper that passes command line
arguments in through the options parameter.
@param expJsonFilePath {string} Path to a JSON file containing the complete
[swarm description](http://nupic.docs.numenta.org/0.7.0.dev0/guides/swarming/running.html#the-swarm-description).
@param options {dict} CLI options.
@param outputLabel {string} Label for output.
@param permWorkDir {string} Location of working directory.
@returns {int} Swarm job id. | [
"Starts",
"a",
"swarm",
"given",
"a",
"path",
"to",
"a",
"JSON",
"file",
"containing",
"configuration",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/permutations_runner.py#L275-L304 | valid |
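
For the JSON entry point the sketch is the same shape; the file path and option values below are placeholders, and, per the code above, an optional verbosityCount key in options is consumed before the swarm starts:

from nupic.swarming import permutations_runner

jobID = permutations_runner.runWithJsonFile(
    "swarm_description.json",               # assumed to exist on disk
    {"maxWorkers": 2, "overwrite": True,
     "verbosityCount": 1},                  # popped off before runWithConfig
    outputLabel="my_swarm",
    permWorkDir="swarm_work")
print("swarm job id: %s" % jobID)
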
numenta/nupic | src/nupic/swarming/permutations_runner.py | runWithPermutationsScript | def runWithPermutationsScript(permutationsFilePath, options,
outputLabel, permWorkDir):
"""
Starts a swarm, given a path to a permutations.py script.
This function is meant to be used with a CLI wrapper that passes command line
arguments in through the options parameter.
@param permutationsFilePath {string} Path to permutations.py.
@param options {dict} CLI options.
@param outputLabel {string} Label for output.
@param permWorkDir {string} Location of working directory.
@returns {object} Model parameters.
"""
global g_currentVerbosityLevel
if "verbosityCount" in options:
g_currentVerbosityLevel = options["verbosityCount"]
del options["verbosityCount"]
else:
g_currentVerbosityLevel = 1
_setupInterruptHandling()
options["permutationsScriptPath"] = permutationsFilePath
options["outputLabel"] = outputLabel
options["outDir"] = permWorkDir
options["permWorkDir"] = permWorkDir
# Assume it's a permutations python script
runOptions = _injectDefaultOptions(options)
_validateOptions(runOptions)
return _runAction(runOptions) | python | def runWithPermutationsScript(permutationsFilePath, options,
outputLabel, permWorkDir):
"""
Starts a swarm, given a path to a permutations.py script.
This function is meant to be used with a CLI wrapper that passes command line
arguments in through the options parameter.
@param permutationsFilePath {string} Path to permutations.py.
@param options {dict} CLI options.
@param outputLabel {string} Label for output.
@param permWorkDir {string} Location of working directory.
@returns {object} Model parameters.
"""
global g_currentVerbosityLevel
if "verbosityCount" in options:
g_currentVerbosityLevel = options["verbosityCount"]
del options["verbosityCount"]
else:
g_currentVerbosityLevel = 1
_setupInterruptHandling()
options["permutationsScriptPath"] = permutationsFilePath
options["outputLabel"] = outputLabel
options["outDir"] = permWorkDir
options["permWorkDir"] = permWorkDir
# Assume it's a permutations python script
runOptions = _injectDefaultOptions(options)
_validateOptions(runOptions)
return _runAction(runOptions) | [
"def",
"runWithPermutationsScript",
"(",
"permutationsFilePath",
",",
"options",
",",
"outputLabel",
",",
"permWorkDir",
")",
":",
"global",
"g_currentVerbosityLevel",
"if",
"\"verbosityCount\"",
"in",
"options",
":",
"g_currentVerbosityLevel",
"=",
"options",
"[",
"\"verbosityCount\"",
"]",
"del",
"options",
"[",
"\"verbosityCount\"",
"]",
"else",
":",
"g_currentVerbosityLevel",
"=",
"1",
"_setupInterruptHandling",
"(",
")",
"options",
"[",
"\"permutationsScriptPath\"",
"]",
"=",
"permutationsFilePath",
"options",
"[",
"\"outputLabel\"",
"]",
"=",
"outputLabel",
"options",
"[",
"\"outDir\"",
"]",
"=",
"permWorkDir",
"options",
"[",
"\"permWorkDir\"",
"]",
"=",
"permWorkDir",
"# Assume it's a permutations python script",
"runOptions",
"=",
"_injectDefaultOptions",
"(",
"options",
")",
"_validateOptions",
"(",
"runOptions",
")",
"return",
"_runAction",
"(",
"runOptions",
")"
] | Starts a swarm, given a path to a permutations.py script.
This function is meant to be used with a CLI wrapper that passes command line
arguments in through the options parameter.
@param permutationsFilePath {string} Path to permutations.py.
@param options {dict} CLI options.
@param outputLabel {string} Label for output.
@param permWorkDir {string} Location of working directory.
@returns {object} Model parameters. | [
"Starts",
"a",
"swarm",
"given",
"a",
"path",
"to",
"a",
"permutations",
".",
"py",
"script",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/permutations_runner.py#L308-L341 | valid |
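
And for a classic permutations.py experiment, a hedged sketch (paths are illustrative; note that this entry point sets outDir to permWorkDir, unlike the JSON path, which writes next to the JSON file):

from nupic.swarming import permutations_runner

modelParams = permutations_runner.runWithPermutationsScript(
    "experiments/hotgym/permutations.py",   # assumed experiment layout
    {"maxWorkers": 2, "overwrite": True},
    outputLabel="hotgym",
    permWorkDir="experiments/hotgym")
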
numenta/nupic | src/nupic/swarming/permutations_runner.py | _backupFile | def _backupFile(filePath):
"""Back up a file
Parameters:
----------------------------------------------------------------------
retval: Filepath of the back-up
"""
assert os.path.exists(filePath)
stampNum = 0
(prefix, suffix) = os.path.splitext(filePath)
while True:
backupPath = "%s.%d%s" % (prefix, stampNum, suffix)
stampNum += 1
if not os.path.exists(backupPath):
break
shutil.copyfile(filePath, backupPath)
return backupPath | python | def _backupFile(filePath):
"""Back up a file
Parameters:
----------------------------------------------------------------------
retval: Filepath of the back-up
"""
assert os.path.exists(filePath)
stampNum = 0
(prefix, suffix) = os.path.splitext(filePath)
while True:
backupPath = "%s.%d%s" % (prefix, stampNum, suffix)
stampNum += 1
if not os.path.exists(backupPath):
break
shutil.copyfile(filePath, backupPath)
return backupPath | [
"def",
"_backupFile",
"(",
"filePath",
")",
":",
"assert",
"os",
".",
"path",
".",
"exists",
"(",
"filePath",
")",
"stampNum",
"=",
"0",
"(",
"prefix",
",",
"suffix",
")",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"filePath",
")",
"while",
"True",
":",
"backupPath",
"=",
"\"%s.%d%s\"",
"%",
"(",
"prefix",
",",
"stampNum",
",",
"suffix",
")",
"stampNum",
"+=",
"1",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"backupPath",
")",
":",
"break",
"shutil",
".",
"copyfile",
"(",
"filePath",
",",
"backupPath",
")",
"return",
"backupPath"
] | Back up a file
Parameters:
----------------------------------------------------------------------
retval: Filepath of the back-up | [
"Back",
"up",
"a",
"file"
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/permutations_runner.py#L1862-L1880 | valid |
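
A standalone copy shows the backup-naming scheme: the first free <name>.<N><ext> slot is used, so repeated backups never overwrite each other:

import os
import shutil
import tempfile

def backupFile(filePath):
    # Same scheme as _backupFile above: probe name.0.ext, name.1.ext, ...
    assert os.path.exists(filePath)
    prefix, suffix = os.path.splitext(filePath)
    stampNum = 0
    while os.path.exists("%s.%d%s" % (prefix, stampNum, suffix)):
        stampNum += 1
    backupPath = "%s.%d%s" % (prefix, stampNum, suffix)
    shutil.copyfile(filePath, backupPath)
    return backupPath

workDir = tempfile.mkdtemp()
target = os.path.join(workDir, "report.csv")
open(target, "w").close()
print(backupFile(target))   # .../report.0.csv
print(backupFile(target))   # .../report.1.csv
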
numenta/nupic | src/nupic/swarming/permutations_runner.py | _iterModels | def _iterModels(modelIDs):
"""Creates an iterator that returns ModelInfo elements for the given modelIDs
WARNING: The order of ModelInfo elements returned by the iterator
may not match the order of the given modelIDs
Parameters:
----------------------------------------------------------------------
modelIDs: A sequence of model identifiers (e.g., as returned by
_HyperSearchJob.queryModelIDs()).
retval: Iterator that returns ModelInfo elements for the given
modelIDs (NOTE: possibly in a different order)
"""
class ModelInfoIterator(object):
"""ModelInfo iterator implementation class
"""
# Maximum number of ModelInfo elements to load into cache whenever
# cache empties
__CACHE_LIMIT = 1000
debug=False
def __init__(self, modelIDs):
"""
Parameters:
----------------------------------------------------------------------
modelIDs: a sequence of Nupic model identifiers for which this
iterator will return _NupicModelInfo instances.
NOTE: The returned instances are NOT guaranteed to be in
the same order as the IDs in modelIDs sequence.
retval: nothing
"""
# Make our own copy in case caller changes model id list during iteration
self.__modelIDs = tuple(modelIDs)
if self.debug:
_emit(Verbosity.DEBUG,
"MODELITERATOR: __init__; numModelIDs=%s" % len(self.__modelIDs))
self.__nextIndex = 0
self.__modelCache = collections.deque()
return
def __iter__(self):
"""Iterator Protocol function
Parameters:
----------------------------------------------------------------------
retval: self
"""
return self
def next(self):
"""Iterator Protocol function
Parameters:
----------------------------------------------------------------------
retval: A _NupicModelInfo instance or raises StopIteration to
signal end of iteration.
"""
return self.__getNext()
def __getNext(self):
"""Implementation of the next() Iterator Protocol function.
When the modelInfo cache becomes empty, queries Nupic and fills the cache
with the next set of NupicModelInfo instances.
Parameters:
----------------------------------------------------------------------
retval: A _NupicModelInfo instance or raises StopIteration to
signal end of iteration.
"""
if self.debug:
_emit(Verbosity.DEBUG,
"MODELITERATOR: __getNext(); modelCacheLen=%s" % (
len(self.__modelCache)))
if not self.__modelCache:
self.__fillCache()
if not self.__modelCache:
raise StopIteration()
return self.__modelCache.popleft()
def __fillCache(self):
"""Queries Nupic and fills an empty modelInfo cache with the next set of
_NupicModelInfo instances
Parameters:
----------------------------------------------------------------------
retval: nothing
"""
assert (not self.__modelCache)
# Assemble a list of model IDs to look up
numModelIDs = len(self.__modelIDs) if self.__modelIDs else 0
if self.__nextIndex >= numModelIDs:
return
idRange = self.__nextIndex + self.__CACHE_LIMIT
if idRange > numModelIDs:
idRange = numModelIDs
lookupIDs = self.__modelIDs[self.__nextIndex:idRange]
self.__nextIndex += (idRange - self.__nextIndex)
# Query Nupic for model info of all models in the look-up list
# NOTE: the order of results may not be the same as lookupIDs
infoList = _clientJobsDB().modelsInfo(lookupIDs)
assert len(infoList) == len(lookupIDs), \
"modelsInfo returned %s elements; expected %s." % \
(len(infoList), len(lookupIDs))
# Create _NupicModelInfo instances and add them to cache
for rawInfo in infoList:
modelInfo = _NupicModelInfo(rawInfo=rawInfo)
self.__modelCache.append(modelInfo)
assert len(self.__modelCache) == len(lookupIDs), \
"Added %s elements to modelCache; expected %s." % \
(len(self.__modelCache), len(lookupIDs))
if self.debug:
_emit(Verbosity.DEBUG,
"MODELITERATOR: Leaving __fillCache(); modelCacheLen=%s" % \
(len(self.__modelCache),))
return ModelInfoIterator(modelIDs) | python | def _iterModels(modelIDs):
"""Creates an iterator that returns ModelInfo elements for the given modelIDs
WARNING: The order of ModelInfo elements returned by the iterator
may not match the order of the given modelIDs
Parameters:
----------------------------------------------------------------------
modelIDs: A sequence of model identifiers (e.g., as returned by
_HyperSearchJob.queryModelIDs()).
retval: Iterator that returns ModelInfo elements for the given
modelIDs (NOTE: possibly in a different order)
"""
class ModelInfoIterator(object):
"""ModelInfo iterator implementation class
"""
# Maximum number of ModelInfo elements to load into cache whenever
# cache empties
__CACHE_LIMIT = 1000
debug=False
def __init__(self, modelIDs):
"""
Parameters:
----------------------------------------------------------------------
modelIDs: a sequence of Nupic model identifiers for which this
iterator will return _NupicModelInfo instances.
NOTE: The returned instances are NOT guaranteed to be in
the same order as the IDs in modelIDs sequence.
retval: nothing
"""
# Make our own copy in case caller changes model id list during iteration
self.__modelIDs = tuple(modelIDs)
if self.debug:
_emit(Verbosity.DEBUG,
"MODELITERATOR: __init__; numModelIDs=%s" % len(self.__modelIDs))
self.__nextIndex = 0
self.__modelCache = collections.deque()
return
def __iter__(self):
"""Iterator Protocol function
Parameters:
----------------------------------------------------------------------
retval: self
"""
return self
def next(self):
"""Iterator Protocol function
Parameters:
----------------------------------------------------------------------
retval: A _NupicModelInfo instance or raises StopIteration to
signal end of iteration.
"""
return self.__getNext()
def __getNext(self):
"""Implementation of the next() Iterator Protocol function.
When the modelInfo cache becomes empty, queries Nupic and fills the cache
with the next set of NupicModelInfo instances.
Parameters:
----------------------------------------------------------------------
retval: A _NupicModelInfo instance or raises StopIteration to
signal end of iteration.
"""
if self.debug:
_emit(Verbosity.DEBUG,
"MODELITERATOR: __getNext(); modelCacheLen=%s" % (
len(self.__modelCache)))
if not self.__modelCache:
self.__fillCache()
if not self.__modelCache:
raise StopIteration()
return self.__modelCache.popleft()
def __fillCache(self):
"""Queries Nupic and fills an empty modelInfo cache with the next set of
_NupicModelInfo instances
Parameters:
----------------------------------------------------------------------
retval: nothing
"""
assert (not self.__modelCache)
# Assemble a list of model IDs to look up
numModelIDs = len(self.__modelIDs) if self.__modelIDs else 0
if self.__nextIndex >= numModelIDs:
return
idRange = self.__nextIndex + self.__CACHE_LIMIT
if idRange > numModelIDs:
idRange = numModelIDs
lookupIDs = self.__modelIDs[self.__nextIndex:idRange]
self.__nextIndex += (idRange - self.__nextIndex)
# Query Nupic for model info of all models in the look-up list
# NOTE: the order of results may not be the same as lookupIDs
infoList = _clientJobsDB().modelsInfo(lookupIDs)
assert len(infoList) == len(lookupIDs), \
"modelsInfo returned %s elements; expected %s." % \
(len(infoList), len(lookupIDs))
# Create _NupicModelInfo instances and add them to cache
for rawInfo in infoList:
modelInfo = _NupicModelInfo(rawInfo=rawInfo)
self.__modelCache.append(modelInfo)
assert len(self.__modelCache) == len(lookupIDs), \
"Added %s elements to modelCache; expected %s." % \
(len(self.__modelCache), len(lookupIDs))
if self.debug:
_emit(Verbosity.DEBUG,
"MODELITERATOR: Leaving __fillCache(); modelCacheLen=%s" % \
(len(self.__modelCache),))
return ModelInfoIterator(modelIDs) | [
"def",
"_iterModels",
"(",
"modelIDs",
")",
":",
"class",
"ModelInfoIterator",
"(",
"object",
")",
":",
"\"\"\"ModelInfo iterator implementation class\n \"\"\"",
"# Maximum number of ModelInfo elements to load into cache whenever",
"# cache empties",
"__CACHE_LIMIT",
"=",
"1000",
"debug",
"=",
"False",
"def",
"__init__",
"(",
"self",
",",
"modelIDs",
")",
":",
"\"\"\"\n Parameters:\n ----------------------------------------------------------------------\n modelIDs: a sequence of Nupic model identifiers for which this\n iterator will return _NupicModelInfo instances.\n NOTE: The returned instances are NOT guaranteed to be in\n the same order as the IDs in modelIDs sequence.\n retval: nothing\n \"\"\"",
"# Make our own copy in case caller changes model id list during iteration",
"self",
".",
"__modelIDs",
"=",
"tuple",
"(",
"modelIDs",
")",
"if",
"self",
".",
"debug",
":",
"_emit",
"(",
"Verbosity",
".",
"DEBUG",
",",
"\"MODELITERATOR: __init__; numModelIDs=%s\"",
"%",
"len",
"(",
"self",
".",
"__modelIDs",
")",
")",
"self",
".",
"__nextIndex",
"=",
"0",
"self",
".",
"__modelCache",
"=",
"collections",
".",
"deque",
"(",
")",
"return",
"def",
"__iter__",
"(",
"self",
")",
":",
"\"\"\"Iterator Protocol function\n\n Parameters:\n ----------------------------------------------------------------------\n retval: self\n \"\"\"",
"return",
"self",
"def",
"next",
"(",
"self",
")",
":",
"\"\"\"Iterator Protocol function\n\n Parameters:\n ----------------------------------------------------------------------\n retval: A _NupicModelInfo instance or raises StopIteration to\n signal end of iteration.\n \"\"\"",
"return",
"self",
".",
"__getNext",
"(",
")",
"def",
"__getNext",
"(",
"self",
")",
":",
"\"\"\"Implementation of the next() Iterator Protocol function.\n\n When the modelInfo cache becomes empty, queries Nupic and fills the cache\n with the next set of NupicModelInfo instances.\n\n Parameters:\n ----------------------------------------------------------------------\n retval: A _NupicModelInfo instance or raises StopIteration to\n signal end of iteration.\n \"\"\"",
"if",
"self",
".",
"debug",
":",
"_emit",
"(",
"Verbosity",
".",
"DEBUG",
",",
"\"MODELITERATOR: __getNext(); modelCacheLen=%s\"",
"%",
"(",
"len",
"(",
"self",
".",
"__modelCache",
")",
")",
")",
"if",
"not",
"self",
".",
"__modelCache",
":",
"self",
".",
"__fillCache",
"(",
")",
"if",
"not",
"self",
".",
"__modelCache",
":",
"raise",
"StopIteration",
"(",
")",
"return",
"self",
".",
"__modelCache",
".",
"popleft",
"(",
")",
"def",
"__fillCache",
"(",
"self",
")",
":",
"\"\"\"Queries Nupic and fills an empty modelInfo cache with the next set of\n _NupicModelInfo instances\n\n Parameters:\n ----------------------------------------------------------------------\n retval: nothing\n \"\"\"",
"assert",
"(",
"not",
"self",
".",
"__modelCache",
")",
"# Assemble a list of model IDs to look up",
"numModelIDs",
"=",
"len",
"(",
"self",
".",
"__modelIDs",
")",
"if",
"self",
".",
"__modelIDs",
"else",
"0",
"if",
"self",
".",
"__nextIndex",
">=",
"numModelIDs",
":",
"return",
"idRange",
"=",
"self",
".",
"__nextIndex",
"+",
"self",
".",
"__CACHE_LIMIT",
"if",
"idRange",
">",
"numModelIDs",
":",
"idRange",
"=",
"numModelIDs",
"lookupIDs",
"=",
"self",
".",
"__modelIDs",
"[",
"self",
".",
"__nextIndex",
":",
"idRange",
"]",
"self",
".",
"__nextIndex",
"+=",
"(",
"idRange",
"-",
"self",
".",
"__nextIndex",
")",
"# Query Nupic for model info of all models in the look-up list",
"# NOTE: the order of results may not be the same as lookupIDs",
"infoList",
"=",
"_clientJobsDB",
"(",
")",
".",
"modelsInfo",
"(",
"lookupIDs",
")",
"assert",
"len",
"(",
"infoList",
")",
"==",
"len",
"(",
"lookupIDs",
")",
",",
"\"modelsInfo returned %s elements; expected %s.\"",
"%",
"(",
"len",
"(",
"infoList",
")",
",",
"len",
"(",
"lookupIDs",
")",
")",
"# Create _NupicModelInfo instances and add them to cache",
"for",
"rawInfo",
"in",
"infoList",
":",
"modelInfo",
"=",
"_NupicModelInfo",
"(",
"rawInfo",
"=",
"rawInfo",
")",
"self",
".",
"__modelCache",
".",
"append",
"(",
"modelInfo",
")",
"assert",
"len",
"(",
"self",
".",
"__modelCache",
")",
"==",
"len",
"(",
"lookupIDs",
")",
",",
"\"Added %s elements to modelCache; expected %s.\"",
"%",
"(",
"len",
"(",
"self",
".",
"__modelCache",
")",
",",
"len",
"(",
"lookupIDs",
")",
")",
"if",
"self",
".",
"debug",
":",
"_emit",
"(",
"Verbosity",
".",
"DEBUG",
",",
"\"MODELITERATOR: Leaving __fillCache(); modelCacheLen=%s\"",
"%",
"(",
"len",
"(",
"self",
".",
"__modelCache",
")",
",",
")",
")",
"return",
"ModelInfoIterator",
"(",
"modelIDs",
")"
] | Creates an iterator that returns ModelInfo elements for the given modelIDs
WARNING: The order of ModelInfo elements returned by the iterator
may not match the order of the given modelIDs
Parameters:
----------------------------------------------------------------------
modelIDs: A sequence of model identifiers (e.g., as returned by
_HyperSearchJob.queryModelIDs()).
retval: Iterator that returns ModelInfo elements for the given
modelIDs (NOTE: possibly in a different order) | [
"Creates",
"an",
"iterator",
"that",
"returns",
"ModelInfo",
"elements",
"for",
"the",
"given",
"modelIDs"
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/permutations_runner.py#L1898-L2041 | valid |
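
The iterator above is an instance of a general chunk-and-cache pattern: fetch records in batches so one bulk query serves many next() calls, at the cost of losing per-chunk ordering. A generic sketch, where fetchMany stands in for _clientJobsDB().modelsInfo():

import collections

def iterInBatches(ids, fetchMany, cacheLimit=1000):
    # Drain a deque that is refilled one chunk at a time, like ModelInfoIterator.
    ids = tuple(ids)            # snapshot, in case the caller mutates the list
    nextIndex = 0
    cache = collections.deque()
    while True:
        if not cache:
            if nextIndex >= len(ids):
                return
            chunk = ids[nextIndex:nextIndex + cacheLimit]
            nextIndex += len(chunk)
            cache.extend(fetchMany(chunk))   # order within a chunk may differ
        yield cache.popleft()

# Toy usage: the "fetch" just echoes ids back as strings.
for info in iterInBatches(range(5), lambda chunk: ["info-%d" % i for i in chunk], 2):
    print(info)
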
numenta/nupic | src/nupic/swarming/permutations_runner.py | _HyperSearchRunner.pickupSearch | def pickupSearch(self):
"""Pick up the latest search from a saved jobID and monitor it to completion
Parameters:
----------------------------------------------------------------------
retval: nothing
"""
self.__searchJob = self.loadSavedHyperSearchJob(
permWorkDir=self._options["permWorkDir"],
outputLabel=self._options["outputLabel"])
self.monitorSearchJob() | python | def pickupSearch(self):
"""Pick up the latest search from a saved jobID and monitor it to completion
Parameters:
----------------------------------------------------------------------
retval: nothing
"""
self.__searchJob = self.loadSavedHyperSearchJob(
permWorkDir=self._options["permWorkDir"],
outputLabel=self._options["outputLabel"])
self.monitorSearchJob() | [
"def",
"pickupSearch",
"(",
"self",
")",
":",
"self",
".",
"__searchJob",
"=",
"self",
".",
"loadSavedHyperSearchJob",
"(",
"permWorkDir",
"=",
"self",
".",
"_options",
"[",
"\"permWorkDir\"",
"]",
",",
"outputLabel",
"=",
"self",
".",
"_options",
"[",
"\"outputLabel\"",
"]",
")",
"self",
".",
"monitorSearchJob",
"(",
")"
] | Pick up the latest search from a saved jobID and monitor it to completion
Parameters:
----------------------------------------------------------------------
retval: nothing | [
"Pick",
"up",
"the",
"latest",
"search",
"from",
"a",
"saved",
"jobID",
"and",
"monitor",
"it",
"to",
"completion",
"Parameters",
":",
"----------------------------------------------------------------------",
"retval",
":",
"nothing"
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/permutations_runner.py#L442-L453 | valid |
numenta/nupic | src/nupic/swarming/permutations_runner.py | _HyperSearchRunner.monitorSearchJob | def monitorSearchJob(self):
"""
Parameters:
----------------------------------------------------------------------
retval: nothing
"""
assert self.__searchJob is not None
jobID = self.__searchJob.getJobID()
startTime = time.time()
lastUpdateTime = datetime.now()
# Monitor HyperSearch and report progress
# NOTE: may be -1 if it can't be determined
expectedNumModels = self.__searchJob.getExpectedNumModels(
searchMethod = self._options["searchMethod"])
lastNumFinished = 0
finishedModelIDs = set()
finishedModelStats = _ModelStats()
# Keep track of the worker state, results, and milestones from the job
# record
lastWorkerState = None
lastJobResults = None
lastModelMilestones = None
lastEngStatus = None
hyperSearchFinished = False
while not hyperSearchFinished:
jobInfo = self.__searchJob.getJobStatus(self._workers)
# Check for job completion BEFORE processing models; NOTE: this permits us
# to process any models that we may not have accounted for in the
# previous iteration.
hyperSearchFinished = jobInfo.isFinished()
# Look for newly completed models, and process them
modelIDs = self.__searchJob.queryModelIDs()
_emit(Verbosity.DEBUG,
"Current number of models is %d (%d of them completed)" % (
len(modelIDs), len(finishedModelIDs)))
if len(modelIDs) > 0:
# Build a list of modelIDs to check for completion
checkModelIDs = []
for modelID in modelIDs:
if modelID not in finishedModelIDs:
checkModelIDs.append(modelID)
del modelIDs
# Process newly completed models
if checkModelIDs:
_emit(Verbosity.DEBUG,
"Checking %d models..." % (len(checkModelIDs)))
errorCompletionMsg = None
for (i, modelInfo) in enumerate(_iterModels(checkModelIDs)):
_emit(Verbosity.DEBUG,
"[%s] Checking completion: %s" % (i, modelInfo))
if modelInfo.isFinished():
finishedModelIDs.add(modelInfo.getModelID())
finishedModelStats.update(modelInfo)
if (modelInfo.getCompletionReason().isError() and
not errorCompletionMsg):
errorCompletionMsg = modelInfo.getCompletionMsg()
# Update the set of all encountered metrics keys (we will use
# these to print column names in reports.csv)
metrics = modelInfo.getReportMetrics()
self.__foundMetrcsKeySet.update(metrics.keys())
numFinished = len(finishedModelIDs)
# Print current completion stats
if numFinished != lastNumFinished:
lastNumFinished = numFinished
if expectedNumModels is None:
expModelsStr = ""
else:
expModelsStr = "of %s" % (expectedNumModels)
stats = finishedModelStats
print ("<jobID: %s> %s %s models finished [success: %s; %s: %s; %s: "
"%s; %s: %s; %s: %s; %s: %s; %s: %s]" % (
jobID,
numFinished,
expModelsStr,
#stats.numCompletedSuccess,
(stats.numCompletedEOF+stats.numCompletedStopped),
"EOF" if stats.numCompletedEOF else "eof",
stats.numCompletedEOF,
"STOPPED" if stats.numCompletedStopped else "stopped",
stats.numCompletedStopped,
"KILLED" if stats.numCompletedKilled else "killed",
stats.numCompletedKilled,
"ERROR" if stats.numCompletedError else "error",
stats.numCompletedError,
"ORPHANED" if stats.numCompletedError else "orphaned",
stats.numCompletedOrphaned,
"UNKNOWN" if stats.numCompletedOther else "unknown",
stats.numCompletedOther))
# Print the first error message from the latest batch of completed
# models
if errorCompletionMsg:
print "ERROR MESSAGE: %s" % errorCompletionMsg
# Print the new worker state, if it changed
workerState = jobInfo.getWorkerState()
if workerState != lastWorkerState:
print "##>> UPDATED WORKER STATE: \n%s" % (pprint.pformat(workerState,
indent=4))
lastWorkerState = workerState
# Print the new job results, if it changed
jobResults = jobInfo.getResults()
if jobResults != lastJobResults:
print "####>> UPDATED JOB RESULTS: \n%s (elapsed time: %g secs)" \
% (pprint.pformat(jobResults, indent=4), time.time()-startTime)
lastJobResults = jobResults
# Print the new model milestones if they changed
modelMilestones = jobInfo.getModelMilestones()
if modelMilestones != lastModelMilestones:
print "##>> UPDATED MODEL MILESTONES: \n%s" % (
pprint.pformat(modelMilestones, indent=4))
lastModelMilestones = modelMilestones
# Print the new engine status if it changed
engStatus = jobInfo.getEngStatus()
if engStatus != lastEngStatus:
print "##>> UPDATED STATUS: \n%s" % (engStatus)
lastEngStatus = engStatus
# Sleep before next check
if not hyperSearchFinished:
if self._options["timeout"] != None:
if ((datetime.now() - lastUpdateTime) >
timedelta(minutes=self._options["timeout"])):
print "Timeout reached, exiting"
self.__cjDAO.jobCancel(jobID)
sys.exit(1)
time.sleep(1)
# Tabulate results
modelIDs = self.__searchJob.queryModelIDs()
print "Evaluated %s models" % len(modelIDs)
print "HyperSearch finished!"
jobInfo = self.__searchJob.getJobStatus(self._workers)
print "Worker completion message: %s" % (jobInfo.getWorkerCompletionMsg()) | python | def monitorSearchJob(self):
"""
Parameters:
----------------------------------------------------------------------
retval: nothing
"""
assert self.__searchJob is not None
jobID = self.__searchJob.getJobID()
startTime = time.time()
lastUpdateTime = datetime.now()
# Monitor HyperSearch and report progress
# NOTE: may be -1 if it can't be determined
expectedNumModels = self.__searchJob.getExpectedNumModels(
searchMethod = self._options["searchMethod"])
lastNumFinished = 0
finishedModelIDs = set()
finishedModelStats = _ModelStats()
# Keep track of the worker state, results, and milestones from the job
# record
lastWorkerState = None
lastJobResults = None
lastModelMilestones = None
lastEngStatus = None
hyperSearchFinished = False
while not hyperSearchFinished:
jobInfo = self.__searchJob.getJobStatus(self._workers)
# Check for job completion BEFORE processing models; NOTE: this permits us
# to process any models that we may not have accounted for in the
# previous iteration.
hyperSearchFinished = jobInfo.isFinished()
# Look for newly completed models, and process them
modelIDs = self.__searchJob.queryModelIDs()
_emit(Verbosity.DEBUG,
"Current number of models is %d (%d of them completed)" % (
len(modelIDs), len(finishedModelIDs)))
if len(modelIDs) > 0:
# Build a list of modelIDs to check for completion
checkModelIDs = []
for modelID in modelIDs:
if modelID not in finishedModelIDs:
checkModelIDs.append(modelID)
del modelIDs
# Process newly completed models
if checkModelIDs:
_emit(Verbosity.DEBUG,
"Checking %d models..." % (len(checkModelIDs)))
errorCompletionMsg = None
for (i, modelInfo) in enumerate(_iterModels(checkModelIDs)):
_emit(Verbosity.DEBUG,
"[%s] Checking completion: %s" % (i, modelInfo))
if modelInfo.isFinished():
finishedModelIDs.add(modelInfo.getModelID())
finishedModelStats.update(modelInfo)
if (modelInfo.getCompletionReason().isError() and
not errorCompletionMsg):
errorCompletionMsg = modelInfo.getCompletionMsg()
# Update the set of all encountered metrics keys (we will use
# these to print column names in reports.csv)
metrics = modelInfo.getReportMetrics()
self.__foundMetrcsKeySet.update(metrics.keys())
numFinished = len(finishedModelIDs)
# Print current completion stats
if numFinished != lastNumFinished:
lastNumFinished = numFinished
if expectedNumModels is None:
expModelsStr = ""
else:
expModelsStr = "of %s" % (expectedNumModels)
stats = finishedModelStats
print ("<jobID: %s> %s %s models finished [success: %s; %s: %s; %s: "
"%s; %s: %s; %s: %s; %s: %s; %s: %s]" % (
jobID,
numFinished,
expModelsStr,
#stats.numCompletedSuccess,
(stats.numCompletedEOF+stats.numCompletedStopped),
"EOF" if stats.numCompletedEOF else "eof",
stats.numCompletedEOF,
"STOPPED" if stats.numCompletedStopped else "stopped",
stats.numCompletedStopped,
"KILLED" if stats.numCompletedKilled else "killed",
stats.numCompletedKilled,
"ERROR" if stats.numCompletedError else "error",
stats.numCompletedError,
"ORPHANED" if stats.numCompletedError else "orphaned",
stats.numCompletedOrphaned,
"UNKNOWN" if stats.numCompletedOther else "unknown",
stats.numCompletedOther))
# Print the first error message from the latest batch of completed
# models
if errorCompletionMsg:
print "ERROR MESSAGE: %s" % errorCompletionMsg
# Print the new worker state, if it changed
workerState = jobInfo.getWorkerState()
if workerState != lastWorkerState:
print "##>> UPDATED WORKER STATE: \n%s" % (pprint.pformat(workerState,
indent=4))
lastWorkerState = workerState
# Print the new job results, if it changed
jobResults = jobInfo.getResults()
if jobResults != lastJobResults:
print "####>> UPDATED JOB RESULTS: \n%s (elapsed time: %g secs)" \
% (pprint.pformat(jobResults, indent=4), time.time()-startTime)
lastJobResults = jobResults
# Print the new model milestones if they changed
modelMilestones = jobInfo.getModelMilestones()
if modelMilestones != lastModelMilestones:
print "##>> UPDATED MODEL MILESTONES: \n%s" % (
pprint.pformat(modelMilestones, indent=4))
lastModelMilestones = modelMilestones
# Print the new engine status if it changed
engStatus = jobInfo.getEngStatus()
if engStatus != lastEngStatus:
print "##>> UPDATED STATUS: \n%s" % (engStatus)
lastEngStatus = engStatus
# Sleep before next check
if not hyperSearchFinished:
if self._options["timeout"] != None:
if ((datetime.now() - lastUpdateTime) >
timedelta(minutes=self._options["timeout"])):
print "Timeout reached, exiting"
self.__cjDAO.jobCancel(jobID)
sys.exit(1)
time.sleep(1)
# Tabulate results
modelIDs = self.__searchJob.queryModelIDs()
print "Evaluated %s models" % len(modelIDs)
print "HyperSearch finished!"
jobInfo = self.__searchJob.getJobStatus(self._workers)
print "Worker completion message: %s" % (jobInfo.getWorkerCompletionMsg()) | [
"def",
"monitorSearchJob",
"(",
"self",
")",
":",
"assert",
"self",
".",
"__searchJob",
"is",
"not",
"None",
"jobID",
"=",
"self",
".",
"__searchJob",
".",
"getJobID",
"(",
")",
"startTime",
"=",
"time",
".",
"time",
"(",
")",
"lastUpdateTime",
"=",
"datetime",
".",
"now",
"(",
")",
"# Monitor HyperSearch and report progress",
"# NOTE: may be -1 if it can't be determined",
"expectedNumModels",
"=",
"self",
".",
"__searchJob",
".",
"getExpectedNumModels",
"(",
"searchMethod",
"=",
"self",
".",
"_options",
"[",
"\"searchMethod\"",
"]",
")",
"lastNumFinished",
"=",
"0",
"finishedModelIDs",
"=",
"set",
"(",
")",
"finishedModelStats",
"=",
"_ModelStats",
"(",
")",
"# Keep track of the worker state, results, and milestones from the job",
"# record",
"lastWorkerState",
"=",
"None",
"lastJobResults",
"=",
"None",
"lastModelMilestones",
"=",
"None",
"lastEngStatus",
"=",
"None",
"hyperSearchFinished",
"=",
"False",
"while",
"not",
"hyperSearchFinished",
":",
"jobInfo",
"=",
"self",
".",
"__searchJob",
".",
"getJobStatus",
"(",
"self",
".",
"_workers",
")",
"# Check for job completion BEFORE processing models; NOTE: this permits us",
"# to process any models that we may not have accounted for in the",
"# previous iteration.",
"hyperSearchFinished",
"=",
"jobInfo",
".",
"isFinished",
"(",
")",
"# Look for newly completed models, and process them",
"modelIDs",
"=",
"self",
".",
"__searchJob",
".",
"queryModelIDs",
"(",
")",
"_emit",
"(",
"Verbosity",
".",
"DEBUG",
",",
"\"Current number of models is %d (%d of them completed)\"",
"%",
"(",
"len",
"(",
"modelIDs",
")",
",",
"len",
"(",
"finishedModelIDs",
")",
")",
")",
"if",
"len",
"(",
"modelIDs",
")",
">",
"0",
":",
"# Build a list of modelIDs to check for completion",
"checkModelIDs",
"=",
"[",
"]",
"for",
"modelID",
"in",
"modelIDs",
":",
"if",
"modelID",
"not",
"in",
"finishedModelIDs",
":",
"checkModelIDs",
".",
"append",
"(",
"modelID",
")",
"del",
"modelIDs",
"# Process newly completed models",
"if",
"checkModelIDs",
":",
"_emit",
"(",
"Verbosity",
".",
"DEBUG",
",",
"\"Checking %d models...\"",
"%",
"(",
"len",
"(",
"checkModelIDs",
")",
")",
")",
"errorCompletionMsg",
"=",
"None",
"for",
"(",
"i",
",",
"modelInfo",
")",
"in",
"enumerate",
"(",
"_iterModels",
"(",
"checkModelIDs",
")",
")",
":",
"_emit",
"(",
"Verbosity",
".",
"DEBUG",
",",
"\"[%s] Checking completion: %s\"",
"%",
"(",
"i",
",",
"modelInfo",
")",
")",
"if",
"modelInfo",
".",
"isFinished",
"(",
")",
":",
"finishedModelIDs",
".",
"add",
"(",
"modelInfo",
".",
"getModelID",
"(",
")",
")",
"finishedModelStats",
".",
"update",
"(",
"modelInfo",
")",
"if",
"(",
"modelInfo",
".",
"getCompletionReason",
"(",
")",
".",
"isError",
"(",
")",
"and",
"not",
"errorCompletionMsg",
")",
":",
"errorCompletionMsg",
"=",
"modelInfo",
".",
"getCompletionMsg",
"(",
")",
"# Update the set of all encountered metrics keys (we will use",
"# these to print column names in reports.csv)",
"metrics",
"=",
"modelInfo",
".",
"getReportMetrics",
"(",
")",
"self",
".",
"__foundMetrcsKeySet",
".",
"update",
"(",
"metrics",
".",
"keys",
"(",
")",
")",
"numFinished",
"=",
"len",
"(",
"finishedModelIDs",
")",
"# Print current completion stats",
"if",
"numFinished",
"!=",
"lastNumFinished",
":",
"lastNumFinished",
"=",
"numFinished",
"if",
"expectedNumModels",
"is",
"None",
":",
"expModelsStr",
"=",
"\"\"",
"else",
":",
"expModelsStr",
"=",
"\"of %s\"",
"%",
"(",
"expectedNumModels",
")",
"stats",
"=",
"finishedModelStats",
"print",
"(",
"\"<jobID: %s> %s %s models finished [success: %s; %s: %s; %s: \"",
"\"%s; %s: %s; %s: %s; %s: %s; %s: %s]\"",
"%",
"(",
"jobID",
",",
"numFinished",
",",
"expModelsStr",
",",
"#stats.numCompletedSuccess,",
"(",
"stats",
".",
"numCompletedEOF",
"+",
"stats",
".",
"numCompletedStopped",
")",
",",
"\"EOF\"",
"if",
"stats",
".",
"numCompletedEOF",
"else",
"\"eof\"",
",",
"stats",
".",
"numCompletedEOF",
",",
"\"STOPPED\"",
"if",
"stats",
".",
"numCompletedStopped",
"else",
"\"stopped\"",
",",
"stats",
".",
"numCompletedStopped",
",",
"\"KILLED\"",
"if",
"stats",
".",
"numCompletedKilled",
"else",
"\"killed\"",
",",
"stats",
".",
"numCompletedKilled",
",",
"\"ERROR\"",
"if",
"stats",
".",
"numCompletedError",
"else",
"\"error\"",
",",
"stats",
".",
"numCompletedError",
",",
"\"ORPHANED\"",
"if",
"stats",
".",
"numCompletedError",
"else",
"\"orphaned\"",
",",
"stats",
".",
"numCompletedOrphaned",
",",
"\"UNKNOWN\"",
"if",
"stats",
".",
"numCompletedOther",
"else",
"\"unknown\"",
",",
"stats",
".",
"numCompletedOther",
")",
")",
"# Print the first error message from the latest batch of completed",
"# models",
"if",
"errorCompletionMsg",
":",
"print",
"\"ERROR MESSAGE: %s\"",
"%",
"errorCompletionMsg",
"# Print the new worker state, if it changed",
"workerState",
"=",
"jobInfo",
".",
"getWorkerState",
"(",
")",
"if",
"workerState",
"!=",
"lastWorkerState",
":",
"print",
"\"##>> UPDATED WORKER STATE: \\n%s\"",
"%",
"(",
"pprint",
".",
"pformat",
"(",
"workerState",
",",
"indent",
"=",
"4",
")",
")",
"lastWorkerState",
"=",
"workerState",
"# Print the new job results, if it changed",
"jobResults",
"=",
"jobInfo",
".",
"getResults",
"(",
")",
"if",
"jobResults",
"!=",
"lastJobResults",
":",
"print",
"\"####>> UPDATED JOB RESULTS: \\n%s (elapsed time: %g secs)\"",
"%",
"(",
"pprint",
".",
"pformat",
"(",
"jobResults",
",",
"indent",
"=",
"4",
")",
",",
"time",
".",
"time",
"(",
")",
"-",
"startTime",
")",
"lastJobResults",
"=",
"jobResults",
"# Print the new model milestones if they changed",
"modelMilestones",
"=",
"jobInfo",
".",
"getModelMilestones",
"(",
")",
"if",
"modelMilestones",
"!=",
"lastModelMilestones",
":",
"print",
"\"##>> UPDATED MODEL MILESTONES: \\n%s\"",
"%",
"(",
"pprint",
".",
"pformat",
"(",
"modelMilestones",
",",
"indent",
"=",
"4",
")",
")",
"lastModelMilestones",
"=",
"modelMilestones",
"# Print the new engine status if it changed",
"engStatus",
"=",
"jobInfo",
".",
"getEngStatus",
"(",
")",
"if",
"engStatus",
"!=",
"lastEngStatus",
":",
"print",
"\"##>> UPDATED STATUS: \\n%s\"",
"%",
"(",
"engStatus",
")",
"lastEngStatus",
"=",
"engStatus",
"# Sleep before next check",
"if",
"not",
"hyperSearchFinished",
":",
"if",
"self",
".",
"_options",
"[",
"\"timeout\"",
"]",
"!=",
"None",
":",
"if",
"(",
"(",
"datetime",
".",
"now",
"(",
")",
"-",
"lastUpdateTime",
")",
">",
"timedelta",
"(",
"minutes",
"=",
"self",
".",
"_options",
"[",
"\"timeout\"",
"]",
")",
")",
":",
"print",
"\"Timeout reached, exiting\"",
"self",
".",
"__cjDAO",
".",
"jobCancel",
"(",
"jobID",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"time",
".",
"sleep",
"(",
"1",
")",
"# Tabulate results",
"modelIDs",
"=",
"self",
".",
"__searchJob",
".",
"queryModelIDs",
"(",
")",
"print",
"\"Evaluated %s models\"",
"%",
"len",
"(",
"modelIDs",
")",
"print",
"\"HyperSearch finished!\"",
"jobInfo",
"=",
"self",
".",
"__searchJob",
".",
"getJobStatus",
"(",
"self",
".",
"_workers",
")",
"print",
"\"Worker completion message: %s\"",
"%",
"(",
"jobInfo",
".",
"getWorkerCompletionMsg",
"(",
")",
")"
] | Monitors the HyperSearch job, reporting progress until it completes.
Parameters:
----------------------------------------------------------------------
retval: nothing | [
"Monitors",
"the",
"HyperSearch",
"job",
",",
"reporting",
"progress",
"until",
"it",
"completes",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/permutations_runner.py#L457-L614 | valid |
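A minimal sketch of the poll-then-drain pattern that monitorSearchJob relies on, reduced to its essentials. The job object is assumed to expose the same isFinished() and queryModelIDs() probes used in the record above; everything else is illustrative.

import time

def pollUntilDone(job, delay=1.0):
  # Read the finished flag BEFORE scanning for models, exactly as the loop
  # above does: anything that completes between the scan and the flag read
  # is still picked up on the final pass through the loop body.
  seen = set()
  while True:
    done = job.isFinished()
    for modelID in job.queryModelIDs():
      if modelID not in seen:
        seen.add(modelID)        # process the newly observed model here
    if done:
      return seen
    time.sleep(delay)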
numenta/nupic | src/nupic/swarming/permutations_runner.py | _HyperSearchRunner._launchWorkers | def _launchWorkers(self, cmdLine, numWorkers):
""" Launch worker processes to execute the given command line
Parameters:
-----------------------------------------------
cmdLine: The command line for each worker
numWorkers: number of workers to launch
"""
self._workers = []
for i in range(numWorkers):
stdout = tempfile.NamedTemporaryFile(delete=False)
stderr = tempfile.NamedTemporaryFile(delete=False)
p = subprocess.Popen(cmdLine, bufsize=1, env=os.environ, shell=True,
stdin=None, stdout=stdout, stderr=stderr)
p._stderr_file = stderr
p._stdout_file = stdout
self._workers.append(p) | python | def _launchWorkers(self, cmdLine, numWorkers):
""" Launch worker processes to execute the given command line
Parameters:
-----------------------------------------------
cmdLine: The command line for each worker
numWorkers: number of workers to launch
"""
self._workers = []
for i in range(numWorkers):
stdout = tempfile.NamedTemporaryFile(delete=False)
stderr = tempfile.NamedTemporaryFile(delete=False)
p = subprocess.Popen(cmdLine, bufsize=1, env=os.environ, shell=True,
stdin=None, stdout=stdout, stderr=stderr)
p._stderr_file = stderr
p._stdout_file = stdout
self._workers.append(p) | [
"def",
"_launchWorkers",
"(",
"self",
",",
"cmdLine",
",",
"numWorkers",
")",
":",
"self",
".",
"_workers",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"numWorkers",
")",
":",
"stdout",
"=",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"delete",
"=",
"False",
")",
"stderr",
"=",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"delete",
"=",
"False",
")",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"cmdLine",
",",
"bufsize",
"=",
"1",
",",
"env",
"=",
"os",
".",
"environ",
",",
"shell",
"=",
"True",
",",
"stdin",
"=",
"None",
",",
"stdout",
"=",
"stdout",
",",
"stderr",
"=",
"stderr",
")",
"p",
".",
"_stderr_file",
"=",
"stderr",
"p",
".",
"_stdout_file",
"=",
"stdout",
"self",
".",
"_workers",
".",
"append",
"(",
"p",
")"
] | Launch worker processes to execute the given command line
Parameters:
-----------------------------------------------
cmdLine: The command line for each worker
numWorkers: number of workers to launch | [
"Launch",
"worker",
"processes",
"to",
"execute",
"the",
"given",
"command",
"line"
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/permutations_runner.py#L618-L635 | valid |
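A hypothetical companion to _launchWorkers that shows why delete=False matters: the temp files outlive the Popen objects, so the captured streams can be read back after the workers exit. Only the _stderr_file attribute attached above is assumed.

def drainWorkerLogs(workers):
  # Wait for each worker, then rewind and dump whatever it wrote to stderr.
  for p in workers:
    p.wait()
    p._stderr_file.seek(0)
    text = p._stderr_file.read()
    if text:
      print "worker %d stderr:\n%s" % (p.pid, text)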
numenta/nupic | src/nupic/swarming/permutations_runner.py | _HyperSearchRunner.__startSearch | def __startSearch(self):
"""Starts HyperSearch as a worker or runs it inline for the "dryRun" action
Parameters:
----------------------------------------------------------------------
retval: the new _HyperSearchJob instance representing the
HyperSearch job
"""
# This search uses a pre-existing permutations script
params = _ClientJobUtils.makeSearchJobParamsDict(options=self._options,
forRunning=True)
if self._options["action"] == "dryRun":
args = [sys.argv[0], "--params=%s" % (json.dumps(params))]
print
print "=================================================================="
print "RUNNING PERMUTATIONS INLINE as \"DRY RUN\"..."
print "=================================================================="
jobID = hypersearch_worker.main(args)
else:
cmdLine = _setUpExports(self._options["exports"])
# Begin the new search. The {JOBID} string is replaced by the actual
# jobID returned from jobInsert.
cmdLine += "$HYPERSEARCH"
maxWorkers = self._options["maxWorkers"]
jobID = self.__cjDAO.jobInsert(
client="GRP",
cmdLine=cmdLine,
params=json.dumps(params),
minimumWorkers=1,
maximumWorkers=maxWorkers,
jobType=self.__cjDAO.JOB_TYPE_HS)
cmdLine = "python -m nupic.swarming.hypersearch_worker" \
" --jobID=%d" % (jobID)
self._launchWorkers(cmdLine, maxWorkers)
searchJob = _HyperSearchJob(jobID)
# Save search ID to file (this is used for report generation)
self.__saveHyperSearchJobID(
permWorkDir=self._options["permWorkDir"],
outputLabel=self._options["outputLabel"],
hyperSearchJob=searchJob)
if self._options["action"] == "dryRun":
print "Successfully executed \"dry-run\" hypersearch, jobID=%d" % (jobID)
else:
print "Successfully submitted new HyperSearch job, jobID=%d" % (jobID)
_emit(Verbosity.DEBUG,
"Each worker executing the command line: %s" % (cmdLine,))
return searchJob | python | def __startSearch(self):
"""Starts HyperSearch as a worker or runs it inline for the "dryRun" action
Parameters:
----------------------------------------------------------------------
retval: the new _HyperSearchJob instance representing the
HyperSearch job
"""
# This search uses a pre-existing permutations script
params = _ClientJobUtils.makeSearchJobParamsDict(options=self._options,
forRunning=True)
if self._options["action"] == "dryRun":
args = [sys.argv[0], "--params=%s" % (json.dumps(params))]
print
print "=================================================================="
print "RUNNING PERMUTATIONS INLINE as \"DRY RUN\"..."
print "=================================================================="
jobID = hypersearch_worker.main(args)
else:
cmdLine = _setUpExports(self._options["exports"])
# Begin the new search. The {JOBID} string is replaced by the actual
# jobID returned from jobInsert.
cmdLine += "$HYPERSEARCH"
maxWorkers = self._options["maxWorkers"]
jobID = self.__cjDAO.jobInsert(
client="GRP",
cmdLine=cmdLine,
params=json.dumps(params),
minimumWorkers=1,
maximumWorkers=maxWorkers,
jobType=self.__cjDAO.JOB_TYPE_HS)
cmdLine = "python -m nupic.swarming.hypersearch_worker" \
" --jobID=%d" % (jobID)
self._launchWorkers(cmdLine, maxWorkers)
searchJob = _HyperSearchJob(jobID)
# Save search ID to file (this is used for report generation)
self.__saveHyperSearchJobID(
permWorkDir=self._options["permWorkDir"],
outputLabel=self._options["outputLabel"],
hyperSearchJob=searchJob)
if self._options["action"] == "dryRun":
print "Successfully executed \"dry-run\" hypersearch, jobID=%d" % (jobID)
else:
print "Successfully submitted new HyperSearch job, jobID=%d" % (jobID)
_emit(Verbosity.DEBUG,
"Each worker executing the command line: %s" % (cmdLine,))
return searchJob | [
"def",
"__startSearch",
"(",
"self",
")",
":",
"# This search uses a pre-existing permutations script",
"params",
"=",
"_ClientJobUtils",
".",
"makeSearchJobParamsDict",
"(",
"options",
"=",
"self",
".",
"_options",
",",
"forRunning",
"=",
"True",
")",
"if",
"self",
".",
"_options",
"[",
"\"action\"",
"]",
"==",
"\"dryRun\"",
":",
"args",
"=",
"[",
"sys",
".",
"argv",
"[",
"0",
"]",
",",
"\"--params=%s\"",
"%",
"(",
"json",
".",
"dumps",
"(",
"params",
")",
")",
"]",
"print",
"print",
"\"==================================================================\"",
"print",
"\"RUNNING PERMUTATIONS INLINE as \\\"DRY RUN\\\"...\"",
"print",
"\"==================================================================\"",
"jobID",
"=",
"hypersearch_worker",
".",
"main",
"(",
"args",
")",
"else",
":",
"cmdLine",
"=",
"_setUpExports",
"(",
"self",
".",
"_options",
"[",
"\"exports\"",
"]",
")",
"# Begin the new search. The {JOBID} string is replaced by the actual",
"# jobID returned from jobInsert.",
"cmdLine",
"+=",
"\"$HYPERSEARCH\"",
"maxWorkers",
"=",
"self",
".",
"_options",
"[",
"\"maxWorkers\"",
"]",
"jobID",
"=",
"self",
".",
"__cjDAO",
".",
"jobInsert",
"(",
"client",
"=",
"\"GRP\"",
",",
"cmdLine",
"=",
"cmdLine",
",",
"params",
"=",
"json",
".",
"dumps",
"(",
"params",
")",
",",
"minimumWorkers",
"=",
"1",
",",
"maximumWorkers",
"=",
"maxWorkers",
",",
"jobType",
"=",
"self",
".",
"__cjDAO",
".",
"JOB_TYPE_HS",
")",
"cmdLine",
"=",
"\"python -m nupic.swarming.hypersearch_worker\"",
"\" --jobID=%d\"",
"%",
"(",
"jobID",
")",
"self",
".",
"_launchWorkers",
"(",
"cmdLine",
",",
"maxWorkers",
")",
"searchJob",
"=",
"_HyperSearchJob",
"(",
"jobID",
")",
"# Save search ID to file (this is used for report generation)",
"self",
".",
"__saveHyperSearchJobID",
"(",
"permWorkDir",
"=",
"self",
".",
"_options",
"[",
"\"permWorkDir\"",
"]",
",",
"outputLabel",
"=",
"self",
".",
"_options",
"[",
"\"outputLabel\"",
"]",
",",
"hyperSearchJob",
"=",
"searchJob",
")",
"if",
"self",
".",
"_options",
"[",
"\"action\"",
"]",
"==",
"\"dryRun\"",
":",
"print",
"\"Successfully executed \\\"dry-run\\\" hypersearch, jobID=%d\"",
"%",
"(",
"jobID",
")",
"else",
":",
"print",
"\"Successfully submitted new HyperSearch job, jobID=%d\"",
"%",
"(",
"jobID",
")",
"_emit",
"(",
"Verbosity",
".",
"DEBUG",
",",
"\"Each worker executing the command line: %s\"",
"%",
"(",
"cmdLine",
",",
")",
")",
"return",
"searchJob"
] | Starts HyperSearch as a worker or runs it inline for the "dryRun" action
Parameters:
----------------------------------------------------------------------
retval: the new _HyperSearchJob instance representing the
HyperSearch job | [
"Starts",
"HyperSearch",
"as",
"a",
"worker",
"or",
"runs",
"it",
"inline",
"for",
"the",
"dryRun",
"action"
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/permutations_runner.py#L639-L694 | valid |
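For orientation, the dryRun branch above reduces to handing a JSON-serialized parameter blob to the worker entry point via --params. A sketch with an illustrative payload (the real dict comes from _ClientJobUtils.makeSearchJobParamsDict and carries many more keys):

import json
import sys

params = {"maxModels": 1}                        # illustrative only
args = [sys.argv[0], "--params=%s" % (json.dumps(params))]
# hypersearch_worker.main(args)                  # inline execution, as in dryRun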
numenta/nupic | src/nupic/swarming/permutations_runner.py | _HyperSearchRunner.generateReport | def generateReport(cls,
options,
replaceReport,
hyperSearchJob,
metricsKeys):
"""Prints all available results in the given HyperSearch job and emits
model information to the permutations report csv.
The job may be completed or still in progress.
Parameters:
----------------------------------------------------------------------
options: NupicRunPermutations options dict
replaceReport: True to replace existing report csv, if any; False to
append to existing report csv, if any
hyperSearchJob: _HyperSearchJob instance; if None, will get it from saved
jobID, if any
metricsKeys: sequence of report metrics key names to include in report;
if None, will pre-scan all modelInfos to generate a complete
list of metrics key names.
retval: model parameters
"""
# Load _HyperSearchJob instance from storage, if not provided
if hyperSearchJob is None:
hyperSearchJob = cls.loadSavedHyperSearchJob(
permWorkDir=options["permWorkDir"],
outputLabel=options["outputLabel"])
modelIDs = hyperSearchJob.queryModelIDs()
bestModel = None
# If metricsKeys was not provided, pre-scan modelInfos to create the list;
# this is needed by _ReportCSVWriter
# Also scan the parameters to generate a list of encoders and search
# parameters
metricstmp = set()
searchVar = set()
for modelInfo in _iterModels(modelIDs):
if modelInfo.isFinished():
vars = modelInfo.getParamLabels().keys()
searchVar.update(vars)
metrics = modelInfo.getReportMetrics()
metricstmp.update(metrics.keys())
if metricsKeys is None:
metricsKeys = metricstmp
# Create a csv report writer
reportWriter = _ReportCSVWriter(hyperSearchJob=hyperSearchJob,
metricsKeys=metricsKeys,
searchVar=searchVar,
outputDirAbsPath=options["permWorkDir"],
outputLabel=options["outputLabel"],
replaceReport=replaceReport)
# Tallies of experiment dispositions
modelStats = _ModelStats()
#numCompletedOther = long(0)
print "\nResults from all experiments:"
print "----------------------------------------------------------------"
# Get common optimization metric info from permutations script
searchParams = hyperSearchJob.getParams()
(optimizationMetricKey, maximizeMetric) = (
_PermutationUtils.getOptimizationMetricInfo(searchParams))
# Print metrics, while looking for the best model
formatStr = None
# NOTE: we may find additional metrics if HyperSearch is still running
foundMetricsKeySet = set(metricsKeys)
sortedMetricsKeys = []
# pull out best Model from jobs table
jobInfo = _clientJobsDB().jobInfo(hyperSearchJob.getJobID())
# Try to return a decent error message if the job was cancelled for some
# reason.
if jobInfo.cancel == 1:
raise Exception(jobInfo.workerCompletionMsg)
try:
results = json.loads(jobInfo.results)
except Exception, e:
print "json.loads(jobInfo.results) raised an exception. " \
"Here is some info to help with debugging:"
print "jobInfo: ", jobInfo
print "jobInfo.results: ", jobInfo.results
print "EXCEPTION: ", e
raise
bestModelNum = results["bestModel"]
bestModelIterIndex = None
# performance metrics for the entire job
totalWallTime = 0
totalRecords = 0
# At the end, we will sort the models by their score on the optimization
# metric
scoreModelIDDescList = []
for (i, modelInfo) in enumerate(_iterModels(modelIDs)):
# Output model info to report csv
reportWriter.emit(modelInfo)
# Update job metrics
totalRecords+=modelInfo.getNumRecords()
format = "%Y-%m-%d %H:%M:%S"
startTime = modelInfo.getStartTime()
if modelInfo.isFinished():
endTime = modelInfo.getEndTime()
st = datetime.strptime(startTime, format)
et = datetime.strptime(endTime, format)
totalWallTime+=(et-st).seconds
# Tabulate experiment dispositions
modelStats.update(modelInfo)
# For convenience
expDesc = modelInfo.getModelDescription()
reportMetrics = modelInfo.getReportMetrics()
optimizationMetrics = modelInfo.getOptimizationMetrics()
if modelInfo.getModelID() == bestModelNum:
bestModel = modelInfo
bestModelIterIndex=i
bestMetric = optimizationMetrics.values()[0]
# Keep track of the best-performing model
if optimizationMetrics:
assert len(optimizationMetrics) == 1, (
"expected 1 opt key, but got %d (%s) in %s" % (
len(optimizationMetrics), optimizationMetrics, modelInfo))
# Append to our list of modelIDs and scores
if modelInfo.getCompletionReason().isEOF():
scoreModelIDDescList.append((optimizationMetrics.values()[0],
modelInfo.getModelID(),
modelInfo.getGeneratedDescriptionFile(),
modelInfo.getParamLabels()))
print "[%d] Experiment %s\n(%s):" % (i, modelInfo, expDesc)
if (modelInfo.isFinished() and
          not (modelInfo.getCompletionReason().isStopped() or
modelInfo.getCompletionReason().isEOF())):
print ">> COMPLETION MESSAGE: %s" % modelInfo.getCompletionMsg()
if reportMetrics:
# Update our metrics key set and format string
foundMetricsKeySet.update(reportMetrics.iterkeys())
if len(sortedMetricsKeys) != len(foundMetricsKeySet):
sortedMetricsKeys = sorted(foundMetricsKeySet)
maxKeyLen = max([len(k) for k in sortedMetricsKeys])
formatStr = " %%-%ds" % (maxKeyLen+2)
# Print metrics
for key in sortedMetricsKeys:
if key in reportMetrics:
if key == optimizationMetricKey:
m = "%r (*)" % reportMetrics[key]
else:
m = "%r" % reportMetrics[key]
print formatStr % (key+":"), m
print
# Summarize results
print "--------------------------------------------------------------"
if len(modelIDs) > 0:
print "%d experiments total (%s).\n" % (
len(modelIDs),
("all completed successfully"
if (modelStats.numCompletedKilled + modelStats.numCompletedEOF) ==
len(modelIDs)
else "WARNING: %d models have not completed or there were errors" % (
len(modelIDs) - (
modelStats.numCompletedKilled + modelStats.numCompletedEOF +
modelStats.numCompletedStopped))))
if modelStats.numStatusOther > 0:
print "ERROR: models with unexpected status: %d" % (
modelStats.numStatusOther)
print "WaitingToStart: %d" % modelStats.numStatusWaitingToStart
print "Running: %d" % modelStats.numStatusRunning
print "Completed: %d" % modelStats.numStatusCompleted
if modelStats.numCompletedOther > 0:
print " ERROR: models with unexpected completion reason: %d" % (
modelStats.numCompletedOther)
print " ran to EOF: %d" % modelStats.numCompletedEOF
print " ran to stop signal: %d" % modelStats.numCompletedStopped
print " were orphaned: %d" % modelStats.numCompletedOrphaned
print " killed off: %d" % modelStats.numCompletedKilled
print " failed: %d" % modelStats.numCompletedError
assert modelStats.numStatusOther == 0, "numStatusOther=%s" % (
modelStats.numStatusOther)
assert modelStats.numCompletedOther == 0, "numCompletedOther=%s" % (
modelStats.numCompletedOther)
else:
print "0 experiments total."
# Print out the field contributions
print
global gCurrentSearch
jobStatus = hyperSearchJob.getJobStatus(gCurrentSearch._workers)
jobResults = jobStatus.getResults()
if "fieldContributions" in jobResults:
print "Field Contributions:"
pprint.pprint(jobResults["fieldContributions"], indent=4)
else:
print "Field contributions info not available"
# Did we have an optimize key?
if bestModel is not None:
maxKeyLen = max([len(k) for k in sortedMetricsKeys])
maxKeyLen = max(maxKeyLen, len(optimizationMetricKey))
formatStr = " %%-%ds" % (maxKeyLen+2)
bestMetricValue = bestModel.getOptimizationMetrics().values()[0]
optimizationMetricName = bestModel.getOptimizationMetrics().keys()[0]
print
print "Best results on the optimization metric %s (maximize=%s):" % (
optimizationMetricName, maximizeMetric)
print "[%d] Experiment %s (%s):" % (
bestModelIterIndex, bestModel, bestModel.getModelDescription())
print formatStr % (optimizationMetricName+":"), bestMetricValue
print
print "Total number of Records processed: %d" % totalRecords
print
print "Total wall time for all models: %d" % totalWallTime
hsJobParams = hyperSearchJob.getParams()
# Were we asked to write out the top N model description files?
if options["genTopNDescriptions"] > 0:
print "\nGenerating description files for top %d models..." % (
options["genTopNDescriptions"])
scoreModelIDDescList.sort()
scoreModelIDDescList = scoreModelIDDescList[
0:options["genTopNDescriptions"]]
i = -1
for (score, modelID, description, paramLabels) in scoreModelIDDescList:
i += 1
outDir = os.path.join(options["permWorkDir"], "model_%d" % (i))
print "Generating description file for model %s at %s" % \
(modelID, outDir)
if not os.path.exists(outDir):
os.makedirs(outDir)
# Fix up the location to the base description file.
# importBaseDescription() chooses the file relative to the calling file.
# The calling file is in outDir.
# The base description is in the user-specified "outDir"
base_description_path = os.path.join(options["outDir"],
"description.py")
base_description_relpath = os.path.relpath(base_description_path,
start=outDir)
description = description.replace(
"importBaseDescription('base.py', config)",
"importBaseDescription('%s', config)" % base_description_relpath)
fd = open(os.path.join(outDir, "description.py"), "wb")
fd.write(description)
fd.close()
# Generate a csv file with the parameter settings in it
fd = open(os.path.join(outDir, "params.csv"), "wb")
writer = csv.writer(fd)
colNames = paramLabels.keys()
colNames.sort()
writer.writerow(colNames)
row = [paramLabels[x] for x in colNames]
writer.writerow(row)
fd.close()
print "Generating model params file..."
# Generate a model params file alongside the description.py
mod = imp.load_source("description", os.path.join(outDir,
"description.py"))
model_description = mod.descriptionInterface.getModelDescription()
fd = open(os.path.join(outDir, "model_params.py"), "wb")
fd.write("%s\nMODEL_PARAMS = %s" % (getCopyrightHead(),
pprint.pformat(model_description)))
fd.close()
print
reportWriter.finalize()
return model_description | python | def generateReport(cls,
options,
replaceReport,
hyperSearchJob,
metricsKeys):
"""Prints all available results in the given HyperSearch job and emits
model information to the permutations report csv.
The job may be completed or still in progress.
Parameters:
----------------------------------------------------------------------
options: NupicRunPermutations options dict
replaceReport: True to replace existing report csv, if any; False to
append to existing report csv, if any
hyperSearchJob: _HyperSearchJob instance; if None, will get it from saved
jobID, if any
metricsKeys: sequence of report metrics key names to include in report;
if None, will pre-scan all modelInfos to generate a complete
list of metrics key names.
retval: model parameters
"""
# Load _HyperSearchJob instance from storage, if not provided
if hyperSearchJob is None:
hyperSearchJob = cls.loadSavedHyperSearchJob(
permWorkDir=options["permWorkDir"],
outputLabel=options["outputLabel"])
modelIDs = hyperSearchJob.queryModelIDs()
bestModel = None
# If metricsKeys was not provided, pre-scan modelInfos to create the list;
# this is needed by _ReportCSVWriter
# Also scan the parameters to generate a list of encoders and search
# parameters
metricstmp = set()
searchVar = set()
for modelInfo in _iterModels(modelIDs):
if modelInfo.isFinished():
vars = modelInfo.getParamLabels().keys()
searchVar.update(vars)
metrics = modelInfo.getReportMetrics()
metricstmp.update(metrics.keys())
if metricsKeys is None:
metricsKeys = metricstmp
# Create a csv report writer
reportWriter = _ReportCSVWriter(hyperSearchJob=hyperSearchJob,
metricsKeys=metricsKeys,
searchVar=searchVar,
outputDirAbsPath=options["permWorkDir"],
outputLabel=options["outputLabel"],
replaceReport=replaceReport)
# Tallies of experiment dispositions
modelStats = _ModelStats()
#numCompletedOther = long(0)
print "\nResults from all experiments:"
print "----------------------------------------------------------------"
# Get common optimization metric info from permutations script
searchParams = hyperSearchJob.getParams()
(optimizationMetricKey, maximizeMetric) = (
_PermutationUtils.getOptimizationMetricInfo(searchParams))
# Print metrics, while looking for the best model
formatStr = None
# NOTE: we may find additional metrics if HyperSearch is still running
foundMetricsKeySet = set(metricsKeys)
sortedMetricsKeys = []
# pull out best Model from jobs table
jobInfo = _clientJobsDB().jobInfo(hyperSearchJob.getJobID())
# Try to return a decent error message if the job was cancelled for some
# reason.
if jobInfo.cancel == 1:
raise Exception(jobInfo.workerCompletionMsg)
try:
results = json.loads(jobInfo.results)
except Exception, e:
print "json.loads(jobInfo.results) raised an exception. " \
"Here is some info to help with debugging:"
print "jobInfo: ", jobInfo
print "jobInfo.results: ", jobInfo.results
print "EXCEPTION: ", e
raise
bestModelNum = results["bestModel"]
bestModelIterIndex = None
# performance metrics for the entire job
totalWallTime = 0
totalRecords = 0
# At the end, we will sort the models by their score on the optimization
# metric
scoreModelIDDescList = []
for (i, modelInfo) in enumerate(_iterModels(modelIDs)):
# Output model info to report csv
reportWriter.emit(modelInfo)
# Update job metrics
totalRecords+=modelInfo.getNumRecords()
format = "%Y-%m-%d %H:%M:%S"
startTime = modelInfo.getStartTime()
if modelInfo.isFinished():
endTime = modelInfo.getEndTime()
st = datetime.strptime(startTime, format)
et = datetime.strptime(endTime, format)
totalWallTime+=(et-st).seconds
# Tabulate experiment dispositions
modelStats.update(modelInfo)
# For convenience
expDesc = modelInfo.getModelDescription()
reportMetrics = modelInfo.getReportMetrics()
optimizationMetrics = modelInfo.getOptimizationMetrics()
if modelInfo.getModelID() == bestModelNum:
bestModel = modelInfo
bestModelIterIndex=i
bestMetric = optimizationMetrics.values()[0]
# Keep track of the best-performing model
if optimizationMetrics:
assert len(optimizationMetrics) == 1, (
"expected 1 opt key, but got %d (%s) in %s" % (
len(optimizationMetrics), optimizationMetrics, modelInfo))
# Append to our list of modelIDs and scores
if modelInfo.getCompletionReason().isEOF():
scoreModelIDDescList.append((optimizationMetrics.values()[0],
modelInfo.getModelID(),
modelInfo.getGeneratedDescriptionFile(),
modelInfo.getParamLabels()))
print "[%d] Experiment %s\n(%s):" % (i, modelInfo, expDesc)
if (modelInfo.isFinished() and
          not (modelInfo.getCompletionReason().isStopped() or
modelInfo.getCompletionReason().isEOF())):
print ">> COMPLETION MESSAGE: %s" % modelInfo.getCompletionMsg()
if reportMetrics:
# Update our metrics key set and format string
foundMetricsKeySet.update(reportMetrics.iterkeys())
if len(sortedMetricsKeys) != len(foundMetricsKeySet):
sortedMetricsKeys = sorted(foundMetricsKeySet)
maxKeyLen = max([len(k) for k in sortedMetricsKeys])
formatStr = " %%-%ds" % (maxKeyLen+2)
# Print metrics
for key in sortedMetricsKeys:
if key in reportMetrics:
if key == optimizationMetricKey:
m = "%r (*)" % reportMetrics[key]
else:
m = "%r" % reportMetrics[key]
print formatStr % (key+":"), m
print
# Summarize results
print "--------------------------------------------------------------"
if len(modelIDs) > 0:
print "%d experiments total (%s).\n" % (
len(modelIDs),
("all completed successfully"
if (modelStats.numCompletedKilled + modelStats.numCompletedEOF) ==
len(modelIDs)
else "WARNING: %d models have not completed or there were errors" % (
len(modelIDs) - (
modelStats.numCompletedKilled + modelStats.numCompletedEOF +
modelStats.numCompletedStopped))))
if modelStats.numStatusOther > 0:
print "ERROR: models with unexpected status: %d" % (
modelStats.numStatusOther)
print "WaitingToStart: %d" % modelStats.numStatusWaitingToStart
print "Running: %d" % modelStats.numStatusRunning
print "Completed: %d" % modelStats.numStatusCompleted
if modelStats.numCompletedOther > 0:
print " ERROR: models with unexpected completion reason: %d" % (
modelStats.numCompletedOther)
print " ran to EOF: %d" % modelStats.numCompletedEOF
print " ran to stop signal: %d" % modelStats.numCompletedStopped
print " were orphaned: %d" % modelStats.numCompletedOrphaned
print " killed off: %d" % modelStats.numCompletedKilled
print " failed: %d" % modelStats.numCompletedError
assert modelStats.numStatusOther == 0, "numStatusOther=%s" % (
modelStats.numStatusOther)
assert modelStats.numCompletedOther == 0, "numCompletedOther=%s" % (
modelStats.numCompletedOther)
else:
print "0 experiments total."
# Print out the field contributions
print
global gCurrentSearch
jobStatus = hyperSearchJob.getJobStatus(gCurrentSearch._workers)
jobResults = jobStatus.getResults()
if "fieldContributions" in jobResults:
print "Field Contributions:"
pprint.pprint(jobResults["fieldContributions"], indent=4)
else:
print "Field contributions info not available"
# Did we have an optimize key?
if bestModel is not None:
maxKeyLen = max([len(k) for k in sortedMetricsKeys])
maxKeyLen = max(maxKeyLen, len(optimizationMetricKey))
formatStr = " %%-%ds" % (maxKeyLen+2)
bestMetricValue = bestModel.getOptimizationMetrics().values()[0]
optimizationMetricName = bestModel.getOptimizationMetrics().keys()[0]
print
print "Best results on the optimization metric %s (maximize=%s):" % (
optimizationMetricName, maximizeMetric)
print "[%d] Experiment %s (%s):" % (
bestModelIterIndex, bestModel, bestModel.getModelDescription())
print formatStr % (optimizationMetricName+":"), bestMetricValue
print
print "Total number of Records processed: %d" % totalRecords
print
print "Total wall time for all models: %d" % totalWallTime
hsJobParams = hyperSearchJob.getParams()
# Were we asked to write out the top N model description files?
if options["genTopNDescriptions"] > 0:
print "\nGenerating description files for top %d models..." % (
options["genTopNDescriptions"])
scoreModelIDDescList.sort()
scoreModelIDDescList = scoreModelIDDescList[
0:options["genTopNDescriptions"]]
i = -1
for (score, modelID, description, paramLabels) in scoreModelIDDescList:
i += 1
outDir = os.path.join(options["permWorkDir"], "model_%d" % (i))
print "Generating description file for model %s at %s" % \
(modelID, outDir)
if not os.path.exists(outDir):
os.makedirs(outDir)
# Fix up the location to the base description file.
# importBaseDescription() chooses the file relative to the calling file.
# The calling file is in outDir.
# The base description is in the user-specified "outDir"
base_description_path = os.path.join(options["outDir"],
"description.py")
base_description_relpath = os.path.relpath(base_description_path,
start=outDir)
description = description.replace(
"importBaseDescription('base.py', config)",
"importBaseDescription('%s', config)" % base_description_relpath)
fd = open(os.path.join(outDir, "description.py"), "wb")
fd.write(description)
fd.close()
# Generate a csv file with the parameter settings in it
fd = open(os.path.join(outDir, "params.csv"), "wb")
writer = csv.writer(fd)
colNames = paramLabels.keys()
colNames.sort()
writer.writerow(colNames)
row = [paramLabels[x] for x in colNames]
writer.writerow(row)
fd.close()
print "Generating model params file..."
# Generate a model params file alongside the description.py
mod = imp.load_source("description", os.path.join(outDir,
"description.py"))
model_description = mod.descriptionInterface.getModelDescription()
fd = open(os.path.join(outDir, "model_params.py"), "wb")
fd.write("%s\nMODEL_PARAMS = %s" % (getCopyrightHead(),
pprint.pformat(model_description)))
fd.close()
print
reportWriter.finalize()
return model_description | [
"def",
"generateReport",
"(",
"cls",
",",
"options",
",",
"replaceReport",
",",
"hyperSearchJob",
",",
"metricsKeys",
")",
":",
"# Load _HyperSearchJob instance from storage, if not provided",
"if",
"hyperSearchJob",
"is",
"None",
":",
"hyperSearchJob",
"=",
"cls",
".",
"loadSavedHyperSearchJob",
"(",
"permWorkDir",
"=",
"options",
"[",
"\"permWorkDir\"",
"]",
",",
"outputLabel",
"=",
"options",
"[",
"\"outputLabel\"",
"]",
")",
"modelIDs",
"=",
"hyperSearchJob",
".",
"queryModelIDs",
"(",
")",
"bestModel",
"=",
"None",
"# If metricsKeys was not provided, pre-scan modelInfos to create the list;",
"# this is needed by _ReportCSVWriter",
"# Also scan the parameters to generate a list of encoders and search",
"# parameters",
"metricstmp",
"=",
"set",
"(",
")",
"searchVar",
"=",
"set",
"(",
")",
"for",
"modelInfo",
"in",
"_iterModels",
"(",
"modelIDs",
")",
":",
"if",
"modelInfo",
".",
"isFinished",
"(",
")",
":",
"vars",
"=",
"modelInfo",
".",
"getParamLabels",
"(",
")",
".",
"keys",
"(",
")",
"searchVar",
".",
"update",
"(",
"vars",
")",
"metrics",
"=",
"modelInfo",
".",
"getReportMetrics",
"(",
")",
"metricstmp",
".",
"update",
"(",
"metrics",
".",
"keys",
"(",
")",
")",
"if",
"metricsKeys",
"is",
"None",
":",
"metricsKeys",
"=",
"metricstmp",
"# Create a csv report writer",
"reportWriter",
"=",
"_ReportCSVWriter",
"(",
"hyperSearchJob",
"=",
"hyperSearchJob",
",",
"metricsKeys",
"=",
"metricsKeys",
",",
"searchVar",
"=",
"searchVar",
",",
"outputDirAbsPath",
"=",
"options",
"[",
"\"permWorkDir\"",
"]",
",",
"outputLabel",
"=",
"options",
"[",
"\"outputLabel\"",
"]",
",",
"replaceReport",
"=",
"replaceReport",
")",
"# Tallies of experiment dispositions",
"modelStats",
"=",
"_ModelStats",
"(",
")",
"#numCompletedOther = long(0)",
"print",
"\"\\nResults from all experiments:\"",
"print",
"\"----------------------------------------------------------------\"",
"# Get common optimization metric info from permutations script",
"searchParams",
"=",
"hyperSearchJob",
".",
"getParams",
"(",
")",
"(",
"optimizationMetricKey",
",",
"maximizeMetric",
")",
"=",
"(",
"_PermutationUtils",
".",
"getOptimizationMetricInfo",
"(",
"searchParams",
")",
")",
"# Print metrics, while looking for the best model",
"formatStr",
"=",
"None",
"# NOTE: we may find additional metrics if HyperSearch is still running",
"foundMetricsKeySet",
"=",
"set",
"(",
"metricsKeys",
")",
"sortedMetricsKeys",
"=",
"[",
"]",
"# pull out best Model from jobs table",
"jobInfo",
"=",
"_clientJobsDB",
"(",
")",
".",
"jobInfo",
"(",
"hyperSearchJob",
".",
"getJobID",
"(",
")",
")",
"# Try to return a decent error message if the job was cancelled for some",
"# reason.",
"if",
"jobInfo",
".",
"cancel",
"==",
"1",
":",
"raise",
"Exception",
"(",
"jobInfo",
".",
"workerCompletionMsg",
")",
"try",
":",
"results",
"=",
"json",
".",
"loads",
"(",
"jobInfo",
".",
"results",
")",
"except",
"Exception",
",",
"e",
":",
"print",
"\"json.loads(jobInfo.results) raised an exception. \"",
"\"Here is some info to help with debugging:\"",
"print",
"\"jobInfo: \"",
",",
"jobInfo",
"print",
"\"jobInfo.results: \"",
",",
"jobInfo",
".",
"results",
"print",
"\"EXCEPTION: \"",
",",
"e",
"raise",
"bestModelNum",
"=",
"results",
"[",
"\"bestModel\"",
"]",
"bestModelIterIndex",
"=",
"None",
"# performance metrics for the entire job",
"totalWallTime",
"=",
"0",
"totalRecords",
"=",
"0",
"# At the end, we will sort the models by their score on the optimization",
"# metric",
"scoreModelIDDescList",
"=",
"[",
"]",
"for",
"(",
"i",
",",
"modelInfo",
")",
"in",
"enumerate",
"(",
"_iterModels",
"(",
"modelIDs",
")",
")",
":",
"# Output model info to report csv",
"reportWriter",
".",
"emit",
"(",
"modelInfo",
")",
"# Update job metrics",
"totalRecords",
"+=",
"modelInfo",
".",
"getNumRecords",
"(",
")",
"format",
"=",
"\"%Y-%m-%d %H:%M:%S\"",
"startTime",
"=",
"modelInfo",
".",
"getStartTime",
"(",
")",
"if",
"modelInfo",
".",
"isFinished",
"(",
")",
":",
"endTime",
"=",
"modelInfo",
".",
"getEndTime",
"(",
")",
"st",
"=",
"datetime",
".",
"strptime",
"(",
"startTime",
",",
"format",
")",
"et",
"=",
"datetime",
".",
"strptime",
"(",
"endTime",
",",
"format",
")",
"totalWallTime",
"+=",
"(",
"et",
"-",
"st",
")",
".",
"seconds",
"# Tabulate experiment dispositions",
"modelStats",
".",
"update",
"(",
"modelInfo",
")",
"# For convenience",
"expDesc",
"=",
"modelInfo",
".",
"getModelDescription",
"(",
")",
"reportMetrics",
"=",
"modelInfo",
".",
"getReportMetrics",
"(",
")",
"optimizationMetrics",
"=",
"modelInfo",
".",
"getOptimizationMetrics",
"(",
")",
"if",
"modelInfo",
".",
"getModelID",
"(",
")",
"==",
"bestModelNum",
":",
"bestModel",
"=",
"modelInfo",
"bestModelIterIndex",
"=",
"i",
"bestMetric",
"=",
"optimizationMetrics",
".",
"values",
"(",
")",
"[",
"0",
"]",
"# Keep track of the best-performing model",
"if",
"optimizationMetrics",
":",
"assert",
"len",
"(",
"optimizationMetrics",
")",
"==",
"1",
",",
"(",
"\"expected 1 opt key, but got %d (%s) in %s\"",
"%",
"(",
"len",
"(",
"optimizationMetrics",
")",
",",
"optimizationMetrics",
",",
"modelInfo",
")",
")",
"# Append to our list of modelIDs and scores",
"if",
"modelInfo",
".",
"getCompletionReason",
"(",
")",
".",
"isEOF",
"(",
")",
":",
"scoreModelIDDescList",
".",
"append",
"(",
"(",
"optimizationMetrics",
".",
"values",
"(",
")",
"[",
"0",
"]",
",",
"modelInfo",
".",
"getModelID",
"(",
")",
",",
"modelInfo",
".",
"getGeneratedDescriptionFile",
"(",
")",
",",
"modelInfo",
".",
"getParamLabels",
"(",
")",
")",
")",
"print",
"\"[%d] Experiment %s\\n(%s):\"",
"%",
"(",
"i",
",",
"modelInfo",
",",
"expDesc",
")",
"if",
"(",
"modelInfo",
".",
"isFinished",
"(",
")",
"and",
"not",
"(",
"modelInfo",
".",
"getCompletionReason",
"(",
")",
".",
"isStopped",
"or",
"modelInfo",
".",
"getCompletionReason",
"(",
")",
".",
"isEOF",
"(",
")",
")",
")",
":",
"print",
"\">> COMPLETION MESSAGE: %s\"",
"%",
"modelInfo",
".",
"getCompletionMsg",
"(",
")",
"if",
"reportMetrics",
":",
"# Update our metrics key set and format string",
"foundMetricsKeySet",
".",
"update",
"(",
"reportMetrics",
".",
"iterkeys",
"(",
")",
")",
"if",
"len",
"(",
"sortedMetricsKeys",
")",
"!=",
"len",
"(",
"foundMetricsKeySet",
")",
":",
"sortedMetricsKeys",
"=",
"sorted",
"(",
"foundMetricsKeySet",
")",
"maxKeyLen",
"=",
"max",
"(",
"[",
"len",
"(",
"k",
")",
"for",
"k",
"in",
"sortedMetricsKeys",
"]",
")",
"formatStr",
"=",
"\" %%-%ds\"",
"%",
"(",
"maxKeyLen",
"+",
"2",
")",
"# Print metrics",
"for",
"key",
"in",
"sortedMetricsKeys",
":",
"if",
"key",
"in",
"reportMetrics",
":",
"if",
"key",
"==",
"optimizationMetricKey",
":",
"m",
"=",
"\"%r (*)\"",
"%",
"reportMetrics",
"[",
"key",
"]",
"else",
":",
"m",
"=",
"\"%r\"",
"%",
"reportMetrics",
"[",
"key",
"]",
"print",
"formatStr",
"%",
"(",
"key",
"+",
"\":\"",
")",
",",
"m",
"print",
"# Summarize results",
"print",
"\"--------------------------------------------------------------\"",
"if",
"len",
"(",
"modelIDs",
")",
">",
"0",
":",
"print",
"\"%d experiments total (%s).\\n\"",
"%",
"(",
"len",
"(",
"modelIDs",
")",
",",
"(",
"\"all completed successfully\"",
"if",
"(",
"modelStats",
".",
"numCompletedKilled",
"+",
"modelStats",
".",
"numCompletedEOF",
")",
"==",
"len",
"(",
"modelIDs",
")",
"else",
"\"WARNING: %d models have not completed or there were errors\"",
"%",
"(",
"len",
"(",
"modelIDs",
")",
"-",
"(",
"modelStats",
".",
"numCompletedKilled",
"+",
"modelStats",
".",
"numCompletedEOF",
"+",
"modelStats",
".",
"numCompletedStopped",
")",
")",
")",
")",
"if",
"modelStats",
".",
"numStatusOther",
">",
"0",
":",
"print",
"\"ERROR: models with unexpected status: %d\"",
"%",
"(",
"modelStats",
".",
"numStatusOther",
")",
"print",
"\"WaitingToStart: %d\"",
"%",
"modelStats",
".",
"numStatusWaitingToStart",
"print",
"\"Running: %d\"",
"%",
"modelStats",
".",
"numStatusRunning",
"print",
"\"Completed: %d\"",
"%",
"modelStats",
".",
"numStatusCompleted",
"if",
"modelStats",
".",
"numCompletedOther",
">",
"0",
":",
"print",
"\" ERROR: models with unexpected completion reason: %d\"",
"%",
"(",
"modelStats",
".",
"numCompletedOther",
")",
"print",
"\" ran to EOF: %d\"",
"%",
"modelStats",
".",
"numCompletedEOF",
"print",
"\" ran to stop signal: %d\"",
"%",
"modelStats",
".",
"numCompletedStopped",
"print",
"\" were orphaned: %d\"",
"%",
"modelStats",
".",
"numCompletedOrphaned",
"print",
"\" killed off: %d\"",
"%",
"modelStats",
".",
"numCompletedKilled",
"print",
"\" failed: %d\"",
"%",
"modelStats",
".",
"numCompletedError",
"assert",
"modelStats",
".",
"numStatusOther",
"==",
"0",
",",
"\"numStatusOther=%s\"",
"%",
"(",
"modelStats",
".",
"numStatusOther",
")",
"assert",
"modelStats",
".",
"numCompletedOther",
"==",
"0",
",",
"\"numCompletedOther=%s\"",
"%",
"(",
"modelStats",
".",
"numCompletedOther",
")",
"else",
":",
"print",
"\"0 experiments total.\"",
"# Print out the field contributions",
"print",
"global",
"gCurrentSearch",
"jobStatus",
"=",
"hyperSearchJob",
".",
"getJobStatus",
"(",
"gCurrentSearch",
".",
"_workers",
")",
"jobResults",
"=",
"jobStatus",
".",
"getResults",
"(",
")",
"if",
"\"fieldContributions\"",
"in",
"jobResults",
":",
"print",
"\"Field Contributions:\"",
"pprint",
".",
"pprint",
"(",
"jobResults",
"[",
"\"fieldContributions\"",
"]",
",",
"indent",
"=",
"4",
")",
"else",
":",
"print",
"\"Field contributions info not available\"",
"# Did we have an optimize key?",
"if",
"bestModel",
"is",
"not",
"None",
":",
"maxKeyLen",
"=",
"max",
"(",
"[",
"len",
"(",
"k",
")",
"for",
"k",
"in",
"sortedMetricsKeys",
"]",
")",
"maxKeyLen",
"=",
"max",
"(",
"maxKeyLen",
",",
"len",
"(",
"optimizationMetricKey",
")",
")",
"formatStr",
"=",
"\" %%-%ds\"",
"%",
"(",
"maxKeyLen",
"+",
"2",
")",
"bestMetricValue",
"=",
"bestModel",
".",
"getOptimizationMetrics",
"(",
")",
".",
"values",
"(",
")",
"[",
"0",
"]",
"optimizationMetricName",
"=",
"bestModel",
".",
"getOptimizationMetrics",
"(",
")",
".",
"keys",
"(",
")",
"[",
"0",
"]",
"print",
"print",
"\"Best results on the optimization metric %s (maximize=%s):\"",
"%",
"(",
"optimizationMetricName",
",",
"maximizeMetric",
")",
"print",
"\"[%d] Experiment %s (%s):\"",
"%",
"(",
"bestModelIterIndex",
",",
"bestModel",
",",
"bestModel",
".",
"getModelDescription",
"(",
")",
")",
"print",
"formatStr",
"%",
"(",
"optimizationMetricName",
"+",
"\":\"",
")",
",",
"bestMetricValue",
"print",
"print",
"\"Total number of Records processed: %d\"",
"%",
"totalRecords",
"print",
"print",
"\"Total wall time for all models: %d\"",
"%",
"totalWallTime",
"hsJobParams",
"=",
"hyperSearchJob",
".",
"getParams",
"(",
")",
"# Were we asked to write out the top N model description files?",
"if",
"options",
"[",
"\"genTopNDescriptions\"",
"]",
">",
"0",
":",
"print",
"\"\\nGenerating description files for top %d models...\"",
"%",
"(",
"options",
"[",
"\"genTopNDescriptions\"",
"]",
")",
"scoreModelIDDescList",
".",
"sort",
"(",
")",
"scoreModelIDDescList",
"=",
"scoreModelIDDescList",
"[",
"0",
":",
"options",
"[",
"\"genTopNDescriptions\"",
"]",
"]",
"i",
"=",
"-",
"1",
"for",
"(",
"score",
",",
"modelID",
",",
"description",
",",
"paramLabels",
")",
"in",
"scoreModelIDDescList",
":",
"i",
"+=",
"1",
"outDir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"options",
"[",
"\"permWorkDir\"",
"]",
",",
"\"model_%d\"",
"%",
"(",
"i",
")",
")",
"print",
"\"Generating description file for model %s at %s\"",
"%",
"(",
"modelID",
",",
"outDir",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"outDir",
")",
":",
"os",
".",
"makedirs",
"(",
"outDir",
")",
"# Fix up the location to the base description file.",
"# importBaseDescription() chooses the file relative to the calling file.",
"# The calling file is in outDir.",
"# The base description is in the user-specified \"outDir\"",
"base_description_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"options",
"[",
"\"outDir\"",
"]",
",",
"\"description.py\"",
")",
"base_description_relpath",
"=",
"os",
".",
"path",
".",
"relpath",
"(",
"base_description_path",
",",
"start",
"=",
"outDir",
")",
"description",
"=",
"description",
".",
"replace",
"(",
"\"importBaseDescription('base.py', config)\"",
",",
"\"importBaseDescription('%s', config)\"",
"%",
"base_description_relpath",
")",
"fd",
"=",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"outDir",
",",
"\"description.py\"",
")",
",",
"\"wb\"",
")",
"fd",
".",
"write",
"(",
"description",
")",
"fd",
".",
"close",
"(",
")",
"# Generate a csv file with the parameter settings in it",
"fd",
"=",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"outDir",
",",
"\"params.csv\"",
")",
",",
"\"wb\"",
")",
"writer",
"=",
"csv",
".",
"writer",
"(",
"fd",
")",
"colNames",
"=",
"paramLabels",
".",
"keys",
"(",
")",
"colNames",
".",
"sort",
"(",
")",
"writer",
".",
"writerow",
"(",
"colNames",
")",
"row",
"=",
"[",
"paramLabels",
"[",
"x",
"]",
"for",
"x",
"in",
"colNames",
"]",
"writer",
".",
"writerow",
"(",
"row",
")",
"fd",
".",
"close",
"(",
")",
"print",
"\"Generating model params file...\"",
"# Generate a model params file alongside the description.py",
"mod",
"=",
"imp",
".",
"load_source",
"(",
"\"description\"",
",",
"os",
".",
"path",
".",
"join",
"(",
"outDir",
",",
"\"description.py\"",
")",
")",
"model_description",
"=",
"mod",
".",
"descriptionInterface",
".",
"getModelDescription",
"(",
")",
"fd",
"=",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"outDir",
",",
"\"model_params.py\"",
")",
",",
"\"wb\"",
")",
"fd",
".",
"write",
"(",
"\"%s\\nMODEL_PARAMS = %s\"",
"%",
"(",
"getCopyrightHead",
"(",
")",
",",
"pprint",
".",
"pformat",
"(",
"model_description",
")",
")",
")",
"fd",
".",
"close",
"(",
")",
"print",
"reportWriter",
".",
"finalize",
"(",
")",
"return",
"model_description"
] | Prints all available results in the given HyperSearch job and emits
model information to the permutations report csv.
The job may be completed or still in progress.
Parameters:
----------------------------------------------------------------------
options: NupicRunPermutations options dict
replaceReport: True to replace existing report csv, if any; False to
append to existing report csv, if any
hyperSearchJob: _HyperSearchJob instance; if None, will get it from saved
jobID, if any
metricsKeys: sequence of report metrics key names to include in report;
if None, will pre-scan all modelInfos to generate a complete
list of metrics key names.
retval: model parameters | [
"Prints",
"all",
"available",
"results",
"in",
"the",
"given",
"HyperSearch",
"job",
"and",
"emits",
"model",
"information",
"to",
"the",
"permutations",
"report",
"csv",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/permutations_runner.py#L743-L1031 | valid |
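The top-N selection in generateReport leans on Python's tuple ordering: (score, modelID, description, paramLabels) tuples sort on score first, and the unconditional ascending sort implies the optimization metric is oriented so that lower is better, putting the best model at index 0. A worked example with made-up scores:

scored = [(0.42, 11, "descB", {}), (0.17, 7, "descA", {}), (0.35, 3, "descC", {})]
scored.sort()
topN = scored[0:2]
print [modelID for (_score, modelID, _desc, _labels) in topN]   # -> [7, 3]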
numenta/nupic | src/nupic/swarming/permutations_runner.py | _HyperSearchRunner.loadSavedHyperSearchJob | def loadSavedHyperSearchJob(cls, permWorkDir, outputLabel):
"""Instantiates a _HyperSearchJob instance from info saved in file
Parameters:
----------------------------------------------------------------------
permWorkDir: Directory path for saved jobID file
outputLabel: Label string for incorporating into file name for saved jobID
retval: _HyperSearchJob instance; raises exception if not found
"""
jobID = cls.__loadHyperSearchJobID(permWorkDir=permWorkDir,
outputLabel=outputLabel)
searchJob = _HyperSearchJob(nupicJobID=jobID)
return searchJob | python | def loadSavedHyperSearchJob(cls, permWorkDir, outputLabel):
"""Instantiates a _HyperSearchJob instance from info saved in file
Parameters:
----------------------------------------------------------------------
permWorkDir: Directory path for saved jobID file
outputLabel: Label string for incorporating into file name for saved jobID
retval: _HyperSearchJob instance; raises exception if not found
"""
jobID = cls.__loadHyperSearchJobID(permWorkDir=permWorkDir,
outputLabel=outputLabel)
searchJob = _HyperSearchJob(nupicJobID=jobID)
return searchJob | [
"def",
"loadSavedHyperSearchJob",
"(",
"cls",
",",
"permWorkDir",
",",
"outputLabel",
")",
":",
"jobID",
"=",
"cls",
".",
"__loadHyperSearchJobID",
"(",
"permWorkDir",
"=",
"permWorkDir",
",",
"outputLabel",
"=",
"outputLabel",
")",
"searchJob",
"=",
"_HyperSearchJob",
"(",
"nupicJobID",
"=",
"jobID",
")",
"return",
"searchJob"
] | Instantiates a _HyperSearchJob instance from info saved in file
Parameters:
----------------------------------------------------------------------
permWorkDir: Directory path for saved jobID file
outputLabel: Label string for incorporating into file name for saved jobID
retval: _HyperSearchJob instance; raises exception if not found | [
"Instantiates",
"a",
"_HyperSearchJob",
"instance",
"from",
"info",
"saved",
"in",
"file"
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/permutations_runner.py#L1036-L1049 | valid |
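A hypothetical resume flow using the loader above: reattach to a search started earlier, given the same work directory and label that were in effect when the jobID was saved. The paths and label here are illustrative.

job = _HyperSearchRunner.loadSavedHyperSearchJob(
    permWorkDir="/tmp/perm_work",          # illustrative path
    outputLabel="swarm")                   # must match the label used at save time
print "resumed jobID: %s" % job.getJobID()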
numenta/nupic | src/nupic/swarming/permutations_runner.py | _HyperSearchRunner.__saveHyperSearchJobID | def __saveHyperSearchJobID(cls, permWorkDir, outputLabel, hyperSearchJob):
"""Saves the given _HyperSearchJob instance's jobID to file
Parameters:
----------------------------------------------------------------------
permWorkDir: Directory path for saved jobID file
outputLabel: Label string for incorporating into file name for saved jobID
hyperSearchJob: _HyperSearchJob instance
retval: nothing
"""
jobID = hyperSearchJob.getJobID()
filePath = cls.__getHyperSearchJobIDFilePath(permWorkDir=permWorkDir,
outputLabel=outputLabel)
if os.path.exists(filePath):
_backupFile(filePath)
d = dict(hyperSearchJobID = jobID)
with open(filePath, "wb") as jobIdPickleFile:
pickle.dump(d, jobIdPickleFile) | python | def __saveHyperSearchJobID(cls, permWorkDir, outputLabel, hyperSearchJob):
"""Saves the given _HyperSearchJob instance's jobID to file
Parameters:
----------------------------------------------------------------------
permWorkDir: Directory path for saved jobID file
outputLabel: Label string for incorporating into file name for saved jobID
hyperSearchJob: _HyperSearchJob instance
retval: nothing
"""
jobID = hyperSearchJob.getJobID()
filePath = cls.__getHyperSearchJobIDFilePath(permWorkDir=permWorkDir,
outputLabel=outputLabel)
if os.path.exists(filePath):
_backupFile(filePath)
d = dict(hyperSearchJobID = jobID)
with open(filePath, "wb") as jobIdPickleFile:
pickle.dump(d, jobIdPickleFile) | [
"def",
"__saveHyperSearchJobID",
"(",
"cls",
",",
"permWorkDir",
",",
"outputLabel",
",",
"hyperSearchJob",
")",
":",
"jobID",
"=",
"hyperSearchJob",
".",
"getJobID",
"(",
")",
"filePath",
"=",
"cls",
".",
"__getHyperSearchJobIDFilePath",
"(",
"permWorkDir",
"=",
"permWorkDir",
",",
"outputLabel",
"=",
"outputLabel",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"filePath",
")",
":",
"_backupFile",
"(",
"filePath",
")",
"d",
"=",
"dict",
"(",
"hyperSearchJobID",
"=",
"jobID",
")",
"with",
"open",
"(",
"filePath",
",",
"\"wb\"",
")",
"as",
"jobIdPickleFile",
":",
"pickle",
".",
"dump",
"(",
"d",
",",
"jobIdPickleFile",
")"
] | Saves the given _HyperSearchJob instance's jobID to file
Parameters:
----------------------------------------------------------------------
permWorkDir: Directory path for saved jobID file
outputLabel: Label string for incorporating into file name for saved jobID
hyperSearchJob: _HyperSearchJob instance
retval: nothing | [
"Saves",
"the",
"given",
"_HyperSearchJob",
"instance",
"s",
"jobID",
"to",
"file"
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/permutations_runner.py#L1054-L1074 | valid |
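The save path above follows a back-up-then-overwrite discipline and pickles a one-key dict rather than a bare integer, leaving room to add fields later without breaking old readers. A generic sketch, with the backup step passed in so nothing about _backupFile is assumed:

import os
import pickle

def savePickleWithBackup(filePath, payload, backup):
  # Preserve any existing file first, then write the new payload.
  if os.path.exists(filePath):
    backup(filePath)
  with open(filePath, "wb") as f:
    pickle.dump(payload, f)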
numenta/nupic | src/nupic/swarming/permutations_runner.py | _HyperSearchRunner.__loadHyperSearchJobID | def __loadHyperSearchJobID(cls, permWorkDir, outputLabel):
"""Loads a saved jobID from file
Parameters:
----------------------------------------------------------------------
permWorkDir: Directory path for saved jobID file
outputLabel: Label string for incorporating into file name for saved jobID
retval: HyperSearch jobID; raises exception if not found.
"""
filePath = cls.__getHyperSearchJobIDFilePath(permWorkDir=permWorkDir,
outputLabel=outputLabel)
jobID = None
with open(filePath, "r") as jobIdPickleFile:
jobInfo = pickle.load(jobIdPickleFile)
jobID = jobInfo["hyperSearchJobID"]
return jobID | python | def __loadHyperSearchJobID(cls, permWorkDir, outputLabel):
"""Loads a saved jobID from file
Parameters:
----------------------------------------------------------------------
permWorkDir: Directory path for saved jobID file
outputLabel: Label string for incorporating into file name for saved jobID
retval: HyperSearch jobID; raises exception if not found.
"""
filePath = cls.__getHyperSearchJobIDFilePath(permWorkDir=permWorkDir,
outputLabel=outputLabel)
jobID = None
with open(filePath, "r") as jobIdPickleFile:
jobInfo = pickle.load(jobIdPickleFile)
jobID = jobInfo["hyperSearchJobID"]
return jobID | [
"def",
"__loadHyperSearchJobID",
"(",
"cls",
",",
"permWorkDir",
",",
"outputLabel",
")",
":",
"filePath",
"=",
"cls",
".",
"__getHyperSearchJobIDFilePath",
"(",
"permWorkDir",
"=",
"permWorkDir",
",",
"outputLabel",
"=",
"outputLabel",
")",
"jobID",
"=",
"None",
"with",
"open",
"(",
"filePath",
",",
"\"r\"",
")",
"as",
"jobIdPickleFile",
":",
"jobInfo",
"=",
"pickle",
".",
"load",
"(",
"jobIdPickleFile",
")",
"jobID",
"=",
"jobInfo",
"[",
"\"hyperSearchJobID\"",
"]",
"return",
"jobID"
] | Loads a saved jobID from file
Parameters:
----------------------------------------------------------------------
permWorkDir: Directory path for saved jobID file
outputLabel: Label string for incorporating into file name for saved jobID
retval: HyperSearch jobID; raises exception if not found. | [
"Loads",
"a",
"saved",
"jobID",
"from",
"file"
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/permutations_runner.py#L1079-L1096 | valid |
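A minimal sketch of the save/load round trip implemented by __saveHyperSearchJobID and __loadHyperSearchJobID above, with the _HyperSearchJob machinery replaced by a bare integer jobID (the value 42 and the temp directory are fabricated); it only illustrates the one-key pickle format, not the real workflow:

import os
import pickle
import tempfile

workDir = tempfile.mkdtemp()
filePath = os.path.join(workDir, "demo_HyperSearchJobID.pkl")

# Save side: the helper pickles a one-key dict, not the bare jobID.
with open(filePath, "wb") as f:
    pickle.dump(dict(hyperSearchJobID=42), f)

# Load side: read the dict back and pull the jobID out of it.
with open(filePath, "rb") as f:
    assert pickle.load(f)["hyperSearchJobID"] == 42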
numenta/nupic | src/nupic/swarming/permutations_runner.py | _HyperSearchRunner.__getHyperSearchJobIDFilePath | def __getHyperSearchJobIDFilePath(cls, permWorkDir, outputLabel):
"""Returns filepath where to store HyperSearch JobID
Parameters:
----------------------------------------------------------------------
permWorkDir: Directory path for saved jobID file
outputLabel: Label string for incorporating into file name for saved jobID
retval: Filepath where to store HyperSearch JobID
"""
# Get the base path and figure out the path of the report file.
basePath = permWorkDir
# Form the name of the output csv file that will contain all the results
filename = "%s_HyperSearchJobID.pkl" % (outputLabel,)
filepath = os.path.join(basePath, filename)
return filepath | python | def __getHyperSearchJobIDFilePath(cls, permWorkDir, outputLabel):
"""Returns filepath where to store HyperSearch JobID
Parameters:
----------------------------------------------------------------------
permWorkDir: Directory path for saved jobID file
outputLabel: Label string for incorporating into file name for saved jobID
retval: Filepath where to store HyperSearch JobID
"""
# Get the base path and figure out the path of the report file.
basePath = permWorkDir
# Form the name of the output csv file that will contain all the results
filename = "%s_HyperSearchJobID.pkl" % (outputLabel,)
filepath = os.path.join(basePath, filename)
return filepath | [
"def",
"__getHyperSearchJobIDFilePath",
"(",
"cls",
",",
"permWorkDir",
",",
"outputLabel",
")",
":",
"# Get the base path and figure out the path of the report file.",
"basePath",
"=",
"permWorkDir",
"# Form the name of the output csv file that will contain all the results",
"filename",
"=",
"\"%s_HyperSearchJobID.pkl\"",
"%",
"(",
"outputLabel",
",",
")",
"filepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"basePath",
",",
"filename",
")",
"return",
"filepath"
] | Returns filepath where to store HyperSearch JobID
Parameters:
----------------------------------------------------------------------
permWorkDir: Directory path for saved jobID file
outputLabel: Label string for incorporating into file name for saved jobID
retval: Filepath where to store HyperSearch JobID | [
"Returns",
"filepath",
"where",
"to",
"store",
"HyperSearch",
"JobID"
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/permutations_runner.py#L1101-L1117 | valid |
numenta/nupic | src/nupic/swarming/permutations_runner.py | _ReportCSVWriter.emit | def emit(self, modelInfo):
"""Emit model info to csv file
Parameters:
----------------------------------------------------------------------
modelInfo: _NupicModelInfo instance
retval: nothing
"""
# Open/init csv file, if needed
if self.__csvFileObj is None:
# sets up self.__sortedVariableNames and self.__csvFileObj
self.__openAndInitCSVFile(modelInfo)
csv = self.__csvFileObj
# Emit model info row to report.csv
print >> csv, "%s, " % (self.__searchJobID),
print >> csv, "%s, " % (modelInfo.getModelID()),
print >> csv, "%s, " % (modelInfo.statusAsString()),
if modelInfo.isFinished():
print >> csv, "%s, " % (modelInfo.getCompletionReason()),
else:
print >> csv, "NA, ",
if not modelInfo.isWaitingToStart():
print >> csv, "%s, " % (modelInfo.getStartTime()),
else:
print >> csv, "NA, ",
if modelInfo.isFinished():
dateFormat = "%Y-%m-%d %H:%M:%S"
startTime = modelInfo.getStartTime()
endTime = modelInfo.getEndTime()
print >> csv, "%s, " % endTime,
st = datetime.strptime(startTime, dateFormat)
et = datetime.strptime(endTime, dateFormat)
print >> csv, "%s, " % (str((et - st).seconds)),
else:
print >> csv, "NA, ",
print >> csv, "NA, ",
print >> csv, "%s, " % str(modelInfo.getModelDescription()),
print >> csv, "%s, " % str(modelInfo.getNumRecords()),
paramLabelsDict = modelInfo.getParamLabels()
for key in self.__sortedVariableNames:
# Some values are complex structures, which need to be represented as
# strings
if key in paramLabelsDict:
print >> csv, "%s, " % (paramLabelsDict[key]),
else:
print >> csv, "None, ",
metrics = modelInfo.getReportMetrics()
for key in self.__sortedMetricsKeys:
value = metrics.get(key, "NA")
value = str(value)
value = value.replace("\n", " ")
print >> csv, "%s, " % (value),
print >> csv | python | def emit(self, modelInfo):
"""Emit model info to csv file
Parameters:
----------------------------------------------------------------------
modelInfo: _NupicModelInfo instance
retval: nothing
"""
# Open/init csv file, if needed
if self.__csvFileObj is None:
# sets up self.__sortedVariableNames and self.__csvFileObj
self.__openAndInitCSVFile(modelInfo)
csv = self.__csvFileObj
# Emit model info row to report.csv
print >> csv, "%s, " % (self.__searchJobID),
print >> csv, "%s, " % (modelInfo.getModelID()),
print >> csv, "%s, " % (modelInfo.statusAsString()),
if modelInfo.isFinished():
print >> csv, "%s, " % (modelInfo.getCompletionReason()),
else:
print >> csv, "NA, ",
if not modelInfo.isWaitingToStart():
print >> csv, "%s, " % (modelInfo.getStartTime()),
else:
print >> csv, "NA, ",
if modelInfo.isFinished():
dateFormat = "%Y-%m-%d %H:%M:%S"
startTime = modelInfo.getStartTime()
endTime = modelInfo.getEndTime()
print >> csv, "%s, " % endTime,
st = datetime.strptime(startTime, dateFormat)
et = datetime.strptime(endTime, dateFormat)
print >> csv, "%s, " % (str((et - st).seconds)),
else:
print >> csv, "NA, ",
print >> csv, "NA, ",
print >> csv, "%s, " % str(modelInfo.getModelDescription()),
print >> csv, "%s, " % str(modelInfo.getNumRecords()),
paramLabelsDict = modelInfo.getParamLabels()
for key in self.__sortedVariableNames:
# Some values are complex structures, which need to be represented as
# strings
if key in paramLabelsDict:
print >> csv, "%s, " % (paramLabelsDict[key]),
else:
print >> csv, "None, ",
metrics = modelInfo.getReportMetrics()
for key in self.__sortedMetricsKeys:
value = metrics.get(key, "NA")
value = str(value)
value = value.replace("\n", " ")
print >> csv, "%s, " % (value),
print >> csv | [
"def",
"emit",
"(",
"self",
",",
"modelInfo",
")",
":",
"# Open/init csv file, if needed",
"if",
"self",
".",
"__csvFileObj",
"is",
"None",
":",
"# sets up self.__sortedVariableNames and self.__csvFileObj",
"self",
".",
"__openAndInitCSVFile",
"(",
"modelInfo",
")",
"csv",
"=",
"self",
".",
"__csvFileObj",
"# Emit model info row to report.csv",
"print",
">>",
"csv",
",",
"\"%s, \"",
"%",
"(",
"self",
".",
"__searchJobID",
")",
",",
"print",
">>",
"csv",
",",
"\"%s, \"",
"%",
"(",
"modelInfo",
".",
"getModelID",
"(",
")",
")",
",",
"print",
">>",
"csv",
",",
"\"%s, \"",
"%",
"(",
"modelInfo",
".",
"statusAsString",
"(",
")",
")",
",",
"if",
"modelInfo",
".",
"isFinished",
"(",
")",
":",
"print",
">>",
"csv",
",",
"\"%s, \"",
"%",
"(",
"modelInfo",
".",
"getCompletionReason",
"(",
")",
")",
",",
"else",
":",
"print",
">>",
"csv",
",",
"\"NA, \"",
",",
"if",
"not",
"modelInfo",
".",
"isWaitingToStart",
"(",
")",
":",
"print",
">>",
"csv",
",",
"\"%s, \"",
"%",
"(",
"modelInfo",
".",
"getStartTime",
"(",
")",
")",
",",
"else",
":",
"print",
">>",
"csv",
",",
"\"NA, \"",
",",
"if",
"modelInfo",
".",
"isFinished",
"(",
")",
":",
"dateFormat",
"=",
"\"%Y-%m-%d %H:%M:%S\"",
"startTime",
"=",
"modelInfo",
".",
"getStartTime",
"(",
")",
"endTime",
"=",
"modelInfo",
".",
"getEndTime",
"(",
")",
"print",
">>",
"csv",
",",
"\"%s, \"",
"%",
"endTime",
",",
"st",
"=",
"datetime",
".",
"strptime",
"(",
"startTime",
",",
"dateFormat",
")",
"et",
"=",
"datetime",
".",
"strptime",
"(",
"endTime",
",",
"dateFormat",
")",
"print",
">>",
"csv",
",",
"\"%s, \"",
"%",
"(",
"str",
"(",
"(",
"et",
"-",
"st",
")",
".",
"seconds",
")",
")",
",",
"else",
":",
"print",
">>",
"csv",
",",
"\"NA, \"",
",",
"print",
">>",
"csv",
",",
"\"NA, \"",
",",
"print",
">>",
"csv",
",",
"\"%s, \"",
"%",
"str",
"(",
"modelInfo",
".",
"getModelDescription",
"(",
")",
")",
",",
"print",
">>",
"csv",
",",
"\"%s, \"",
"%",
"str",
"(",
"modelInfo",
".",
"getNumRecords",
"(",
")",
")",
",",
"paramLabelsDict",
"=",
"modelInfo",
".",
"getParamLabels",
"(",
")",
"for",
"key",
"in",
"self",
".",
"__sortedVariableNames",
":",
"# Some values are complex structures,.. which need to be represented as",
"# strings",
"if",
"key",
"in",
"paramLabelsDict",
":",
"print",
">>",
"csv",
",",
"\"%s, \"",
"%",
"(",
"paramLabelsDict",
"[",
"key",
"]",
")",
",",
"else",
":",
"print",
">>",
"csv",
",",
"\"None, \"",
",",
"metrics",
"=",
"modelInfo",
".",
"getReportMetrics",
"(",
")",
"for",
"key",
"in",
"self",
".",
"__sortedMetricsKeys",
":",
"value",
"=",
"metrics",
".",
"get",
"(",
"key",
",",
"\"NA\"",
")",
"value",
"=",
"str",
"(",
"value",
")",
"value",
"=",
"value",
".",
"replace",
"(",
"\"\\n\"",
",",
"\" \"",
")",
"print",
">>",
"csv",
",",
"\"%s, \"",
"%",
"(",
"value",
")",
",",
"print",
">>",
"csv"
] | Emit model info to csv file
Parameters:
----------------------------------------------------------------------
modelInfo: _NupicModelInfo instance
retval: nothing | [
"Emit",
"model",
"info",
"to",
"csv",
"file"
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/permutations_runner.py#L1212-L1267 | valid |
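The runtime(s) column emitted above is just the difference of two timestamps parsed with the same format string; a standalone sketch of that arithmetic (the timestamps are made up):

from datetime import datetime

dateFormat = "%Y-%m-%d %H:%M:%S"
st = datetime.strptime("2014-03-01 10:00:00", dateFormat)
et = datetime.strptime("2014-03-01 10:02:30", dateFormat)
assert (et - st).seconds == 150  # 2.5 minutes of model runtime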
numenta/nupic | src/nupic/swarming/permutations_runner.py | _ReportCSVWriter.finalize | def finalize(self):
"""Close file and print report/backup csv file paths
Parameters:
----------------------------------------------------------------------
retval: nothing
"""
if self.__csvFileObj is not None:
# Done with file
self.__csvFileObj.close()
self.__csvFileObj = None
print "Report csv saved in %s" % (self.__reportCSVPath,)
if self.__backupCSVPath:
print "Previous report csv file was backed up to %s" % \
(self.__backupCSVPath,)
else:
print "Nothing was written to report csv file." | python | def finalize(self):
"""Close file and print report/backup csv file paths
Parameters:
----------------------------------------------------------------------
retval: nothing
"""
if self.__csvFileObj is not None:
# Done with file
self.__csvFileObj.close()
self.__csvFileObj = None
print "Report csv saved in %s" % (self.__reportCSVPath,)
if self.__backupCSVPath:
print "Previous report csv file was backed up to %s" % \
(self.__backupCSVPath,)
else:
print "Nothing was written to report csv file." | [
"def",
"finalize",
"(",
"self",
")",
":",
"if",
"self",
".",
"__csvFileObj",
"is",
"not",
"None",
":",
"# Done with file",
"self",
".",
"__csvFileObj",
".",
"close",
"(",
")",
"self",
".",
"__csvFileObj",
"=",
"None",
"print",
"\"Report csv saved in %s\"",
"%",
"(",
"self",
".",
"__reportCSVPath",
",",
")",
"if",
"self",
".",
"__backupCSVPath",
":",
"print",
"\"Previous report csv file was backed up to %s\"",
"%",
"(",
"self",
".",
"__backupCSVPath",
",",
")",
"else",
":",
"print",
"\"Nothing was written to report csv file.\""
] | Close file and print report/backup csv file paths
Parameters:
----------------------------------------------------------------------
retval: nothing | [
"Close",
"file",
"and",
"print",
"report",
"/",
"backup",
"csv",
"file",
"paths"
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/permutations_runner.py#L1271-L1289 | valid |
numenta/nupic | src/nupic/swarming/permutations_runner.py | _ReportCSVWriter.__openAndInitCSVFile | def __openAndInitCSVFile(self, modelInfo):
"""
- Backs up old report csv file;
- opens the report csv file in append or overwrite mode (per
self.__replaceReport);
- emits column fields;
- sets up self.__sortedVariableNames, self.__csvFileObj,
self.__backupCSVPath, and self.__reportCSVPath
Parameters:
----------------------------------------------------------------------
modelInfo: First _NupicModelInfo instance passed to emit()
retval: nothing
"""
# Get the base path and figure out the path of the report file.
basePath = self.__outputDirAbsPath
# Form the name of the output csv file that will contain all the results
reportCSVName = "%s_Report.csv" % (self.__outputLabel,)
reportCSVPath = self.__reportCSVPath = os.path.join(basePath, reportCSVName)
# If a report CSV file already exists, back it up
backupCSVPath = None
if os.path.exists(reportCSVPath):
backupCSVPath = self.__backupCSVPath = _backupFile(reportCSVPath)
# Open report file
if self.__replaceReport:
mode = "w"
else:
mode = "a"
csv = self.__csvFileObj = open(reportCSVPath, mode)
# If we are appending, add some blank line separators
if not self.__replaceReport and backupCSVPath:
print >> csv
print >> csv
# Print the column names
print >> csv, "jobID, ",
print >> csv, "modelID, ",
print >> csv, "status, " ,
print >> csv, "completionReason, ",
print >> csv, "startTime, ",
print >> csv, "endTime, ",
print >> csv, "runtime(s), " ,
print >> csv, "expDesc, ",
print >> csv, "numRecords, ",
for key in self.__sortedVariableNames:
print >> csv, "%s, " % key,
for key in self.__sortedMetricsKeys:
print >> csv, "%s, " % key,
print >> csv | python | def __openAndInitCSVFile(self, modelInfo):
"""
- Backs up old report csv file;
- opens the report csv file in append or overwrite mode (per
self.__replaceReport);
- emits column fields;
- sets up self.__sortedVariableNames, self.__csvFileObj,
self.__backupCSVPath, and self.__reportCSVPath
Parameters:
----------------------------------------------------------------------
modelInfo: First _NupicModelInfo instance passed to emit()
retval: nothing
"""
# Get the base path and figure out the path of the report file.
basePath = self.__outputDirAbsPath
# Form the name of the output csv file that will contain all the results
reportCSVName = "%s_Report.csv" % (self.__outputLabel,)
reportCSVPath = self.__reportCSVPath = os.path.join(basePath, reportCSVName)
# If a report CSV file already exists, back it up
backupCSVPath = None
if os.path.exists(reportCSVPath):
backupCSVPath = self.__backupCSVPath = _backupFile(reportCSVPath)
# Open report file
if self.__replaceReport:
mode = "w"
else:
mode = "a"
csv = self.__csvFileObj = open(reportCSVPath, mode)
# If we are appending, add some blank line separators
if not self.__replaceReport and backupCSVPath:
print >> csv
print >> csv
# Print the column names
print >> csv, "jobID, ",
print >> csv, "modelID, ",
print >> csv, "status, " ,
print >> csv, "completionReason, ",
print >> csv, "startTime, ",
print >> csv, "endTime, ",
print >> csv, "runtime(s), " ,
print >> csv, "expDesc, ",
print >> csv, "numRecords, ",
for key in self.__sortedVariableNames:
print >> csv, "%s, " % key,
for key in self.__sortedMetricsKeys:
print >> csv, "%s, " % key,
print >> csv | [
"def",
"__openAndInitCSVFile",
"(",
"self",
",",
"modelInfo",
")",
":",
"# Get the base path and figure out the path of the report file.",
"basePath",
"=",
"self",
".",
"__outputDirAbsPath",
"# Form the name of the output csv file that will contain all the results",
"reportCSVName",
"=",
"\"%s_Report.csv\"",
"%",
"(",
"self",
".",
"__outputLabel",
",",
")",
"reportCSVPath",
"=",
"self",
".",
"__reportCSVPath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"basePath",
",",
"reportCSVName",
")",
"# If a report CSV file already exists, back it up",
"backupCSVPath",
"=",
"None",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"reportCSVPath",
")",
":",
"backupCSVPath",
"=",
"self",
".",
"__backupCSVPath",
"=",
"_backupFile",
"(",
"reportCSVPath",
")",
"# Open report file",
"if",
"self",
".",
"__replaceReport",
":",
"mode",
"=",
"\"w\"",
"else",
":",
"mode",
"=",
"\"a\"",
"csv",
"=",
"self",
".",
"__csvFileObj",
"=",
"open",
"(",
"reportCSVPath",
",",
"mode",
")",
"# If we are appending, add some blank line separators",
"if",
"not",
"self",
".",
"__replaceReport",
"and",
"backupCSVPath",
":",
"print",
">>",
"csv",
"print",
">>",
"csv",
"# Print the column names",
"print",
">>",
"csv",
",",
"\"jobID, \"",
",",
"print",
">>",
"csv",
",",
"\"modelID, \"",
",",
"print",
">>",
"csv",
",",
"\"status, \"",
",",
"print",
">>",
"csv",
",",
"\"completionReason, \"",
",",
"print",
">>",
"csv",
",",
"\"startTime, \"",
",",
"print",
">>",
"csv",
",",
"\"endTime, \"",
",",
"print",
">>",
"csv",
",",
"\"runtime(s), \"",
",",
"print",
">>",
"csv",
",",
"\"expDesc, \"",
",",
"print",
">>",
"csv",
",",
"\"numRecords, \"",
",",
"for",
"key",
"in",
"self",
".",
"__sortedVariableNames",
":",
"print",
">>",
"csv",
",",
"\"%s, \"",
"%",
"key",
",",
"for",
"key",
"in",
"self",
".",
"__sortedMetricsKeys",
":",
"print",
">>",
"csv",
",",
"\"%s, \"",
"%",
"key",
",",
"print",
">>",
"csv"
] | - Backs up old report csv file;
- opens the report csv file in append or overwrite mode (per
self.__replaceReport);
- emits column fields;
- sets up self.__sortedVariableNames, self.__csvFileObj,
self.__backupCSVPath, and self.__reportCSVPath
Parameters:
----------------------------------------------------------------------
modelInfo: First _NupicModelInfo instance passed to emit()
retval: nothing | [
"-",
"Backs",
"up",
"old",
"report",
"csv",
"file",
";",
"-",
"opens",
"the",
"report",
"csv",
"file",
"in",
"append",
"or",
"overwrite",
"mode",
"(",
"per",
"self",
".",
"__replaceReport",
")",
";",
"-",
"emits",
"column",
"fields",
";",
"-",
"sets",
"up",
"self",
".",
"__sortedVariableNames",
"self",
".",
"__csvFileObj",
"self",
".",
"__backupCSVPath",
"and",
"self",
".",
"__reportCSVPath"
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/permutations_runner.py#L1293-L1347 | valid |
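The header row written by __openAndInitCSVFile is the nine fixed columns followed by the sorted permutation-variable names and metric keys; a sketch with fabricated variable and metric names:

fixedColumns = ["jobID", "modelID", "status", "completionReason", "startTime",
                "endTime", "runtime(s)", "expDesc", "numRecords"]
sortedVariableNames = sorted(["modelParams|alpha", "modelParams|n"])  # hypothetical
sortedMetricsKeys = sorted(["errorMetric:altMAPE"])                   # hypothetical
header = ", ".join(fixedColumns + sortedVariableNames + sortedMetricsKeys)
print(header)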
numenta/nupic | src/nupic/swarming/permutations_runner.py | _NupicJob.getJobStatus | def getJobStatus(self, workers):
"""
Parameters:
----------------------------------------------------------------------
workers: If this job was launched outside of the nupic job engine, then this
is an array of subprocess Popen instances, one for each worker
retval: _NupicJob.JobStatus instance
"""
jobInfo = self.JobStatus(self.__nupicJobID, workers)
return jobInfo | python | def getJobStatus(self, workers):
"""
Parameters:
----------------------------------------------------------------------
workers: If this job was launched outside of the nupic job engine, then this
is an array of subprocess Popen instances, one for each worker
retval: _NupicJob.JobStatus instance
"""
jobInfo = self.JobStatus(self.__nupicJobID, workers)
return jobInfo | [
"def",
"getJobStatus",
"(",
"self",
",",
"workers",
")",
":",
"jobInfo",
"=",
"self",
".",
"JobStatus",
"(",
"self",
".",
"__nupicJobID",
",",
"workers",
")",
"return",
"jobInfo"
] | Parameters:
----------------------------------------------------------------------
workers: If this job was launched outside of the nupic job engine, then this
is an array of subprocess Popen instances, one for each worker
retval: _NupicJob.JobStatus instance | [
"Parameters",
":",
"----------------------------------------------------------------------",
"workers",
":",
"If",
"this",
"job",
"was",
"launched",
"outside",
"of",
"the",
"nupic",
"job",
"engine",
"then",
"this",
"is",
"an",
"array",
"of",
"subprocess",
"Popen",
"instances",
"one",
"for",
"each",
"worker",
"retval",
":",
"_NupicJob",
".",
"JobStatus",
"instance"
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/permutations_runner.py#L1387-L1397 | valid |
numenta/nupic | src/nupic/swarming/permutations_runner.py | _HyperSearchJob.queryModelIDs | def queryModelIDs(self):
"""Queuries DB for model IDs of all currently instantiated models
associated with this HyperSearch job.
See also: _iterModels()
Parameters:
----------------------------------------------------------------------
retval: A sequence of Nupic modelIDs
"""
jobID = self.getJobID()
modelCounterPairs = _clientJobsDB().modelsGetUpdateCounters(jobID)
modelIDs = tuple(x[0] for x in modelCounterPairs)
return modelIDs | python | def queryModelIDs(self):
"""Queuries DB for model IDs of all currently instantiated models
associated with this HyperSearch job.
See also: _iterModels()
Parameters:
----------------------------------------------------------------------
retval: A sequence of Nupic modelIDs
"""
jobID = self.getJobID()
modelCounterPairs = _clientJobsDB().modelsGetUpdateCounters(jobID)
modelIDs = tuple(x[0] for x in modelCounterPairs)
return modelIDs | [
"def",
"queryModelIDs",
"(",
"self",
")",
":",
"jobID",
"=",
"self",
".",
"getJobID",
"(",
")",
"modelCounterPairs",
"=",
"_clientJobsDB",
"(",
")",
".",
"modelsGetUpdateCounters",
"(",
"jobID",
")",
"modelIDs",
"=",
"tuple",
"(",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"modelCounterPairs",
")",
"return",
"modelIDs"
] | Queries DB for model IDs of all currently instantiated models
associated with this HyperSearch job.
See also: _iterModels()
Parameters:
----------------------------------------------------------------------
retval: A sequence of Nupic modelIDs | [
"Queuries",
"DB",
"for",
"model",
"IDs",
"of",
"all",
"currently",
"instantiated",
"models",
"associated",
"with",
"this",
"HyperSearch",
"job",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/permutations_runner.py#L1743-L1757 | valid |
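The modelID extraction in queryModelIDs is a one-liner over the (modelID, updateCounter) pairs that modelsGetUpdateCounters returns; with fabricated pairs:

modelCounterPairs = [(1001, 3), (1002, 7), (1003, 1)]
modelIDs = tuple(x[0] for x in modelCounterPairs)
assert modelIDs == (1001, 1002, 1003)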
numenta/nupic | src/nupic/swarming/permutations_runner.py | _ClientJobUtils.makeSearchJobParamsDict | def makeSearchJobParamsDict(cls, options, forRunning=False):
"""Constructs a dictionary of HyperSearch parameters suitable for converting
to json and passing as the params argument to ClientJobsDAO.jobInsert()
Parameters:
----------------------------------------------------------------------
options: NupicRunPermutations options dict
forRunning: True if the params are for running a Hypersearch job; False
if params are for introspection only.
retval: A dictionary of HyperSearch parameters for
ClientJobsDAO.jobInsert()
"""
if options["searchMethod"] == "v2":
hsVersion = "v2"
else:
raise Exception("Unsupported search method: %r" % options["searchMethod"])
maxModels = options["maxPermutations"]
if options["action"] == "dryRun" and maxModels is None:
maxModels = 1
useTerminators = options["useTerminators"]
if useTerminators is None:
params = {
"hsVersion": hsVersion,
"maxModels": maxModels,
}
else:
params = {
"hsVersion": hsVersion,
"useTerminators": useTerminators,
"maxModels": maxModels,
}
if forRunning:
params["persistentJobGUID"] = str(uuid.uuid1())
if options["permutationsScriptPath"]:
params["permutationsPyFilename"] = options["permutationsScriptPath"]
elif options["expDescConfig"]:
params["description"] = options["expDescConfig"]
else:
with open(options["expDescJsonPath"], mode="r") as fp:
params["description"] = json.load(fp)
return params | python | def makeSearchJobParamsDict(cls, options, forRunning=False):
"""Constructs a dictionary of HyperSearch parameters suitable for converting
to json and passing as the params argument to ClientJobsDAO.jobInsert()
Parameters:
----------------------------------------------------------------------
options: NupicRunPermutations options dict
forRunning: True if the params are for running a Hypersearch job; False
if params are for introspection only.
retval: A dictionary of HyperSearch parameters for
ClientJobsDAO.jobInsert()
"""
if options["searchMethod"] == "v2":
hsVersion = "v2"
else:
raise Exception("Unsupported search method: %r" % options["searchMethod"])
maxModels = options["maxPermutations"]
if options["action"] == "dryRun" and maxModels is None:
maxModels = 1
useTerminators = options["useTerminators"]
if useTerminators is None:
params = {
"hsVersion": hsVersion,
"maxModels": maxModels,
}
else:
params = {
"hsVersion": hsVersion,
"useTerminators": useTerminators,
"maxModels": maxModels,
}
if forRunning:
params["persistentJobGUID"] = str(uuid.uuid1())
if options["permutationsScriptPath"]:
params["permutationsPyFilename"] = options["permutationsScriptPath"]
elif options["expDescConfig"]:
params["description"] = options["expDescConfig"]
else:
with open(options["expDescJsonPath"], mode="r") as fp:
params["description"] = json.load(fp)
return params | [
"def",
"makeSearchJobParamsDict",
"(",
"cls",
",",
"options",
",",
"forRunning",
"=",
"False",
")",
":",
"if",
"options",
"[",
"\"searchMethod\"",
"]",
"==",
"\"v2\"",
":",
"hsVersion",
"=",
"\"v2\"",
"else",
":",
"raise",
"Exception",
"(",
"\"Unsupported search method: %r\"",
"%",
"options",
"[",
"\"searchMethod\"",
"]",
")",
"maxModels",
"=",
"options",
"[",
"\"maxPermutations\"",
"]",
"if",
"options",
"[",
"\"action\"",
"]",
"==",
"\"dryRun\"",
"and",
"maxModels",
"is",
"None",
":",
"maxModels",
"=",
"1",
"useTerminators",
"=",
"options",
"[",
"\"useTerminators\"",
"]",
"if",
"useTerminators",
"is",
"None",
":",
"params",
"=",
"{",
"\"hsVersion\"",
":",
"hsVersion",
",",
"\"maxModels\"",
":",
"maxModels",
",",
"}",
"else",
":",
"params",
"=",
"{",
"\"hsVersion\"",
":",
"hsVersion",
",",
"\"useTerminators\"",
":",
"useTerminators",
",",
"\"maxModels\"",
":",
"maxModels",
",",
"}",
"if",
"forRunning",
":",
"params",
"[",
"\"persistentJobGUID\"",
"]",
"=",
"str",
"(",
"uuid",
".",
"uuid1",
"(",
")",
")",
"if",
"options",
"[",
"\"permutationsScriptPath\"",
"]",
":",
"params",
"[",
"\"permutationsPyFilename\"",
"]",
"=",
"options",
"[",
"\"permutationsScriptPath\"",
"]",
"elif",
"options",
"[",
"\"expDescConfig\"",
"]",
":",
"params",
"[",
"\"description\"",
"]",
"=",
"options",
"[",
"\"expDescConfig\"",
"]",
"else",
":",
"with",
"open",
"(",
"options",
"[",
"\"expDescJsonPath\"",
"]",
",",
"mode",
"=",
"\"r\"",
")",
"as",
"fp",
":",
"params",
"[",
"\"description\"",
"]",
"=",
"json",
".",
"load",
"(",
"fp",
")",
"return",
"params"
] | Constructs a dictionary of HyperSearch parameters suitable for converting
to json and passing as the params argument to ClientJobsDAO.jobInsert()
Parameters:
----------------------------------------------------------------------
options: NupicRunPermutations options dict
forRunning: True if the params are for running a Hypersearch job; False
if params are for introspection only.
retval: A dictionary of HyperSearch parameters for
ClientJobsDAO.jobInsert() | [
"Constructs",
"a",
"dictionary",
"of",
"HyperSearch",
"parameters",
"suitable",
"for",
"converting",
"to",
"json",
"and",
"passing",
"as",
"the",
"params",
"argument",
"to",
"ClientJobsDAO",
".",
"jobInsert",
"()",
"Parameters",
":",
"----------------------------------------------------------------------",
"options",
":",
"NupicRunPermutations",
"options",
"dict",
"forRunning",
":",
"True",
"if",
"the",
"params",
"are",
"for",
"running",
"a",
"Hypersearch",
"job",
";",
"False",
"if",
"params",
"are",
"for",
"introspection",
"only",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/permutations_runner.py#L1783-L1828 | valid |
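The dryRun branch of makeSearchJobParamsDict pins maxModels to 1 when no permutation cap was given; a self-contained replay of just that branch (the options dict is a fabricated minimal subset):

options = dict(searchMethod="v2", action="dryRun",
               maxPermutations=None, useTerminators=None)
maxModels = options["maxPermutations"]
if options["action"] == "dryRun" and maxModels is None:
    maxModels = 1  # a dry run builds exactly one model
params = {"hsVersion": "v2", "maxModels": maxModels}
assert params == {"hsVersion": "v2", "maxModels": 1}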
numenta/nupic | src/nupic/swarming/permutations_runner.py | _PermutationUtils.getOptimizationMetricInfo | def getOptimizationMetricInfo(cls, searchJobParams):
"""Retrives the optimization key name and optimization function.
Parameters:
---------------------------------------------------------
searchJobParams:
Parameter for passing as the searchParams arg to
Hypersearch constructor.
retval: (optimizationMetricKey, maximize)
optimizationMetricKey: which report key to optimize for
maximize: True if we should try and maximize the optimizeKey
metric. False if we should minimize it.
"""
if searchJobParams["hsVersion"] == "v2":
search = HypersearchV2(searchParams=searchJobParams)
else:
raise RuntimeError("Unsupported hypersearch version \"%s\"" % \
(searchJobParams["hsVersion"]))
info = search.getOptimizationMetricInfo()
return info | python | def getOptimizationMetricInfo(cls, searchJobParams):
"""Retrives the optimization key name and optimization function.
Parameters:
---------------------------------------------------------
searchJobParams:
Parameter for passing as the searchParams arg to
Hypersearch constructor.
retval: (optimizationMetricKey, maximize)
optimizationMetricKey: which report key to optimize for
maximize: True if we should try and maximize the optimizeKey
metric. False if we should minimize it.
"""
if searchJobParams["hsVersion"] == "v2":
search = HypersearchV2(searchParams=searchJobParams)
else:
raise RuntimeError("Unsupported hypersearch version \"%s\"" % \
(searchJobParams["hsVersion"]))
info = search.getOptimizationMetricInfo()
return info | [
"def",
"getOptimizationMetricInfo",
"(",
"cls",
",",
"searchJobParams",
")",
":",
"if",
"searchJobParams",
"[",
"\"hsVersion\"",
"]",
"==",
"\"v2\"",
":",
"search",
"=",
"HypersearchV2",
"(",
"searchParams",
"=",
"searchJobParams",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"Unsupported hypersearch version \\\"%s\\\"\"",
"%",
"(",
"searchJobParams",
"[",
"\"hsVersion\"",
"]",
")",
")",
"info",
"=",
"search",
".",
"getOptimizationMetricInfo",
"(",
")",
"return",
"info"
] | Retrieves the optimization key name and optimization function.
Parameters:
---------------------------------------------------------
searchJobParams:
Parameter for passing as the searchParams arg to
Hypersearch constructor.
retval: (optimizationMetricKey, maximize)
optimizationMetricKey: which report key to optimize for
maximize: True if we should try and maximize the optimizeKey
metric. False if we should minimize it. | [
"Retrives",
"the",
"optimization",
"key",
"name",
"and",
"optimization",
"function",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/permutations_runner.py#L1838-L1858 | valid |
numenta/nupic | src/nupic/swarming/permutations_runner.py | _NupicModelInfo.getModelDescription | def getModelDescription(self):
"""
Parameters:
----------------------------------------------------------------------
retval: Printable description of the model.
"""
params = self.__unwrapParams()
if "experimentName" in params:
return params["experimentName"]
else:
paramSettings = self.getParamLabels()
# Form a csv friendly string representation of this model
items = []
for key, value in paramSettings.items():
items.append("%s_%s" % (key, value))
return ".".join(items) | python | def getModelDescription(self):
"""
Parameters:
----------------------------------------------------------------------
retval: Printable description of the model.
"""
params = self.__unwrapParams()
if "experimentName" in params:
return params["experimentName"]
else:
paramSettings = self.getParamLabels()
# Form a csv friendly string representation of this model
items = []
for key, value in paramSettings.items():
items.append("%s_%s" % (key, value))
return ".".join(items) | [
"def",
"getModelDescription",
"(",
"self",
")",
":",
"params",
"=",
"self",
".",
"__unwrapParams",
"(",
")",
"if",
"\"experimentName\"",
"in",
"params",
":",
"return",
"params",
"[",
"\"experimentName\"",
"]",
"else",
":",
"paramSettings",
"=",
"self",
".",
"getParamLabels",
"(",
")",
"# Form a csv friendly string representation of this model",
"items",
"=",
"[",
"]",
"for",
"key",
",",
"value",
"in",
"paramSettings",
".",
"items",
"(",
")",
":",
"items",
".",
"append",
"(",
"\"%s_%s\"",
"%",
"(",
"key",
",",
"value",
")",
")",
"return",
"\".\"",
".",
"join",
"(",
"items",
")"
] | Parameters:
----------------------------------------------------------------------
retval: Printable description of the model. | [
"Parameters",
":",
"----------------------------------------------------------------------",
"retval",
":",
"Printable",
"description",
"of",
"the",
"model",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/permutations_runner.py#L2116-L2133 | valid |
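When no experimentName is present, getModelDescription joins the parameter labels into one dotted, csv-friendly string; with a fabricated paramSettings dict (sorted here so the output is deterministic):

paramSettings = {"alpha": 0.05, "n": 2048}
items = ["%s_%s" % (key, value) for key, value in sorted(paramSettings.items())]
assert ".".join(items) == "alpha_0.05.n_2048"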
numenta/nupic | src/nupic/swarming/permutations_runner.py | _NupicModelInfo.getParamLabels | def getParamLabels(self):
"""
Parameters:
----------------------------------------------------------------------
retval: a dictionary of model parameter labels. For each entry
the key is the name of the parameter and the value
is the value chosen for it.
"""
params = self.__unwrapParams()
# Hypersearch v2 stores the flattened parameter settings in "particleState"
if "particleState" in params:
retval = dict()
queue = [(pair, retval) for pair in
params["particleState"]["varStates"].iteritems()]
while len(queue) > 0:
pair, output = queue.pop()
k, v = pair
if ("position" in v and "bestPosition" in v and
"velocity" in v):
output[k] = v["position"]
else:
if k not in output:
output[k] = dict()
queue.extend((pair, output[k]) for pair in v.iteritems())
return retval | python | def getParamLabels(self):
"""
Parameters:
----------------------------------------------------------------------
retval: a dictionary of model parameter labels. For each entry
the key is the name of the parameter and the value
is the value chosen for it.
"""
params = self.__unwrapParams()
# Hypersearch v2 stores the flattened parameter settings in "particleState"
if "particleState" in params:
retval = dict()
queue = [(pair, retval) for pair in
params["particleState"]["varStates"].iteritems()]
while len(queue) > 0:
pair, output = queue.pop()
k, v = pair
if ("position" in v and "bestPosition" in v and
"velocity" in v):
output[k] = v["position"]
else:
if k not in output:
output[k] = dict()
queue.extend((pair, output[k]) for pair in v.iteritems())
return retval | [
"def",
"getParamLabels",
"(",
"self",
")",
":",
"params",
"=",
"self",
".",
"__unwrapParams",
"(",
")",
"# Hypersearch v2 stores the flattened parameter settings in \"particleState\"",
"if",
"\"particleState\"",
"in",
"params",
":",
"retval",
"=",
"dict",
"(",
")",
"queue",
"=",
"[",
"(",
"pair",
",",
"retval",
")",
"for",
"pair",
"in",
"params",
"[",
"\"particleState\"",
"]",
"[",
"\"varStates\"",
"]",
".",
"iteritems",
"(",
")",
"]",
"while",
"len",
"(",
"queue",
")",
">",
"0",
":",
"pair",
",",
"output",
"=",
"queue",
".",
"pop",
"(",
")",
"k",
",",
"v",
"=",
"pair",
"if",
"(",
"\"position\"",
"in",
"v",
"and",
"\"bestPosition\"",
"in",
"v",
"and",
"\"velocity\"",
"in",
"v",
")",
":",
"output",
"[",
"k",
"]",
"=",
"v",
"[",
"\"position\"",
"]",
"else",
":",
"if",
"k",
"not",
"in",
"output",
":",
"output",
"[",
"k",
"]",
"=",
"dict",
"(",
")",
"queue",
".",
"extend",
"(",
"(",
"pair",
",",
"output",
"[",
"k",
"]",
")",
"for",
"pair",
"in",
"v",
".",
"iteritems",
"(",
")",
")",
"return",
"retval"
] | Parameters:
----------------------------------------------------------------------
retval: a dictionary of model parameter labels. For each entry
the key is the name of the parameter and the value
is the value chosen for it. | [
"Parameters",
":",
"----------------------------------------------------------------------",
"retval",
":",
"a",
"dictionary",
"of",
"model",
"parameter",
"labels",
".",
"For",
"each",
"entry",
"the",
"key",
"is",
"the",
"name",
"of",
"the",
"parameter",
"and",
"the",
"value",
"is",
"the",
"value",
"chosen",
"for",
"it",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/permutations_runner.py#L2158-L2183 | valid |
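The varStates walk in getParamLabels flattens a nested dict, treating any node that carries position/bestPosition/velocity as a leaf particle state; a self-contained sketch with a fabricated particleState fragment:

varStates = {
    "modelParams": {
        "alpha": {"position": 0.05, "bestPosition": 0.04, "velocity": 0.01},
    },
}
retval = {}
queue = [(pair, retval) for pair in varStates.items()]
while queue:
    (k, v), output = queue.pop()
    if "position" in v and "bestPosition" in v and "velocity" in v:
        output[k] = v["position"]  # leaf: keep the chosen position
    else:
        output.setdefault(k, {})
        queue.extend((pair, output[k]) for pair in v.items())
assert retval == {"modelParams": {"alpha": 0.05}}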
numenta/nupic | src/nupic/swarming/permutations_runner.py | _NupicModelInfo.__unwrapParams | def __unwrapParams(self):
"""Unwraps self.__rawInfo.params into the equivalent python dictionary
and caches it in self.__cachedParams. Returns the unwrapped params
Parameters:
----------------------------------------------------------------------
retval: Model params dictionary corresponding to the json
as returned in ClientJobsDAO.modelsInfo()[x].params
"""
if self.__cachedParams is None:
self.__cachedParams = json.loads(self.__rawInfo.params)
assert self.__cachedParams is not None, \
"%s resulted in None" % self.__rawInfo.params
return self.__cachedParams | python | def __unwrapParams(self):
"""Unwraps self.__rawInfo.params into the equivalent python dictionary
and caches it in self.__cachedParams. Returns the unwrapped params
Parameters:
----------------------------------------------------------------------
retval: Model params dictionary corresponding to the json
as returned in ClientJobsDAO.modelsInfo()[x].params
"""
if self.__cachedParams is None:
self.__cachedParams = json.loads(self.__rawInfo.params)
assert self.__cachedParams is not None, \
"%s resulted in None" % self.__rawInfo.params
return self.__cachedParams | [
"def",
"__unwrapParams",
"(",
"self",
")",
":",
"if",
"self",
".",
"__cachedParams",
"is",
"None",
":",
"self",
".",
"__cachedParams",
"=",
"json",
".",
"loads",
"(",
"self",
".",
"__rawInfo",
".",
"params",
")",
"assert",
"self",
".",
"__cachedParams",
"is",
"not",
"None",
",",
"\"%s resulted in None\"",
"%",
"self",
".",
"__rawInfo",
".",
"params",
"return",
"self",
".",
"__cachedParams"
] | Unwraps self.__rawInfo.params into the equivalent python dictionary
and caches it in self.__cachedParams. Returns the unwrapped params
Parameters:
----------------------------------------------------------------------
retval: Model params dictionary corresponding to the json
as returned in ClientJobsDAO.modelsInfo()[x].params | [
"Unwraps",
"self",
".",
"__rawInfo",
".",
"params",
"into",
"the",
"equivalent",
"python",
"dictionary",
"and",
"caches",
"it",
"in",
"self",
".",
"__cachedParams",
".",
"Returns",
"the",
"unwrapped",
"params"
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/permutations_runner.py#L2187-L2201 | valid |
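__unwrapParams is a parse-once cache around a json string; a toy class showing the same memoization shape (the json payload is fabricated):

import json

class ParamsHolder(object):
    def __init__(self, rawParams):
        self._raw = rawParams
        self._cached = None
    def params(self):
        if self._cached is None:  # first call parses, later calls reuse
            self._cached = json.loads(self._raw)
        return self._cached

holder = ParamsHolder('{"experimentName": "demo"}')
assert holder.params() is holder.params()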
numenta/nupic | src/nupic/swarming/permutations_runner.py | _NupicModelInfo.getAllMetrics | def getAllMetrics(self):
"""Retrives a dictionary of metrics that combines all report and
optimization metrics
Parameters:
----------------------------------------------------------------------
retval: a dictionary of optimization metrics that were collected
for the model; an empty dictionary if there aren't any.
"""
result = self.getReportMetrics()
result.update(self.getOptimizationMetrics())
return result | python | def getAllMetrics(self):
"""Retrives a dictionary of metrics that combines all report and
optimization metrics
Parameters:
----------------------------------------------------------------------
retval: a dictionary of optimization metrics that were collected
for the model; an empty dictionary if there aren't any.
"""
result = self.getReportMetrics()
result.update(self.getOptimizationMetrics())
return result | [
"def",
"getAllMetrics",
"(",
"self",
")",
":",
"result",
"=",
"self",
".",
"getReportMetrics",
"(",
")",
"result",
".",
"update",
"(",
"self",
".",
"getOptimizationMetrics",
"(",
")",
")",
"return",
"result"
] | Retrieves a dictionary of metrics that combines all report and
optimization metrics
Parameters:
----------------------------------------------------------------------
retval: a dictionary of optimization metrics that were collected
for the model; an empty dictionary if there aren't any. | [
"Retrives",
"a",
"dictionary",
"of",
"metrics",
"that",
"combines",
"all",
"report",
"and",
"optimization",
"metrics"
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/permutations_runner.py#L2227-L2238 | valid |
numenta/nupic | src/nupic/swarming/permutations_runner.py | _NupicModelInfo.__unwrapResults | def __unwrapResults(self):
"""Unwraps self.__rawInfo.results and caches it in self.__cachedResults;
Returns the unwrapped results
Parameters:
----------------------------------------------------------------------
retval: ModelResults namedtuple instance
"""
if self.__cachedResults is None:
if self.__rawInfo.results is not None:
resultList = json.loads(self.__rawInfo.results)
assert len(resultList) == 2, \
"Expected 2 elements, but got %s (%s)." % (
len(resultList), resultList)
self.__cachedResults = self.ModelResults(
reportMetrics=resultList[0],
optimizationMetrics=resultList[1])
else:
self.__cachedResults = self.ModelResults(
reportMetrics={},
optimizationMetrics={})
return self.__cachedResults | python | def __unwrapResults(self):
"""Unwraps self.__rawInfo.results and caches it in self.__cachedResults;
Returns the unwrapped results
Parameters:
----------------------------------------------------------------------
retval: ModelResults namedtuple instance
"""
if self.__cachedResults is None:
if self.__rawInfo.results is not None:
resultList = json.loads(self.__rawInfo.results)
assert len(resultList) == 2, \
"Expected 2 elements, but got %s (%s)." % (
len(resultList), resultList)
self.__cachedResults = self.ModelResults(
reportMetrics=resultList[0],
optimizationMetrics=resultList[1])
else:
self.__cachedResults = self.ModelResults(
reportMetrics={},
optimizationMetrics={})
return self.__cachedResults | [
"def",
"__unwrapResults",
"(",
"self",
")",
":",
"if",
"self",
".",
"__cachedResults",
"is",
"None",
":",
"if",
"self",
".",
"__rawInfo",
".",
"results",
"is",
"not",
"None",
":",
"resultList",
"=",
"json",
".",
"loads",
"(",
"self",
".",
"__rawInfo",
".",
"results",
")",
"assert",
"len",
"(",
"resultList",
")",
"==",
"2",
",",
"\"Expected 2 elements, but got %s (%s).\"",
"%",
"(",
"len",
"(",
"resultList",
")",
",",
"resultList",
")",
"self",
".",
"__cachedResults",
"=",
"self",
".",
"ModelResults",
"(",
"reportMetrics",
"=",
"resultList",
"[",
"0",
"]",
",",
"optimizationMetrics",
"=",
"resultList",
"[",
"1",
"]",
")",
"else",
":",
"self",
".",
"__cachedResults",
"=",
"self",
".",
"ModelResults",
"(",
"reportMetrics",
"=",
"{",
"}",
",",
"optimizationMetrics",
"=",
"{",
"}",
")",
"return",
"self",
".",
"__cachedResults"
] | Unwraps self.__rawInfo.results and caches it in self.__cachedResults;
Returns the unwrapped results
Parameters:
----------------------------------------------------------------------
retval: ModelResults namedtuple instance | [
"Unwraps",
"self",
".",
"__rawInfo",
".",
"results",
"and",
"caches",
"it",
"in",
"self",
".",
"__cachedResults",
";",
"Returns",
"the",
"unwrapped",
"params"
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/permutations_runner.py#L2250-L2273 | valid |
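The results column unwrapped above is a two-element json list, report metrics first and optimization metrics second; a sketch with fabricated metric names:

import json

rawResults = '[{"altMAPE": 12.3}, {"errorMetric:altMAPE": 12.3}]'
resultList = json.loads(rawResults)
assert len(resultList) == 2
reportMetrics, optimizationMetrics = resultList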
numenta/nupic | src/nupic/data/generators/distributions.py | Distributions.getData | def getData(self, n):
"""Returns the next n values for the distribution as a list."""
records = [self.getNext() for x in range(n)]
return records | python | def getData(self, n):
"""Returns the next n values for the distribution as a list."""
records = [self.getNext() for x in range(n)]
return records | [
"def",
"getData",
"(",
"self",
",",
"n",
")",
":",
"records",
"=",
"[",
"self",
".",
"getNext",
"(",
")",
"for",
"x",
"in",
"range",
"(",
"n",
")",
"]",
"return",
"records"
] | Returns the next n values for the distribution as a list. | [
"Returns",
"the",
"next",
"n",
"values",
"for",
"the",
"distribution",
"as",
"a",
"list",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/generators/distributions.py#L50-L54 | valid |
numenta/nupic | src/nupic/swarming/hypersearch/model_terminator.py | ModelTerminator.getTerminationCallbacks | def getTerminationCallbacks(self, terminationFunc):
""" Returns the periodic checks to see if the model should
continue running.
Parameters:
-----------------------------------------------------------------------
terminationFunc: The function that will be called in the model main loop
as a wrapper around this function. Must have a parameter
called 'index'
Returns: A list of PeriodicActivityRequest objects.
"""
activities = [None] * len(ModelTerminator._MILESTONES)
for index, (iteration, _) in enumerate(ModelTerminator._MILESTONES):
cb = functools.partial(terminationFunc, index=index)
activities[index] = PeriodicActivityRequest(repeating =False,
period = iteration,
cb=cb)
return activities | python | def getTerminationCallbacks(self, terminationFunc):
""" Returns the periodic checks to see if the model should
continue running.
Parameters:
-----------------------------------------------------------------------
terminationFunc: The function that will be called in the model main loop
as a wrapper around this function. Must have a parameter
called 'index'
Returns: A list of PeriodicActivityRequest objects.
"""
activities = [None] * len(ModelTerminator._MILESTONES)
for index, (iteration, _) in enumerate(ModelTerminator._MILESTONES):
cb = functools.partial(terminationFunc, index=index)
activities[index] = PeriodicActivityRequest(repeating =False,
period = iteration,
cb=cb)
return activities | [
"def",
"getTerminationCallbacks",
"(",
"self",
",",
"terminationFunc",
")",
":",
"activities",
"=",
"[",
"None",
"]",
"*",
"len",
"(",
"ModelTerminator",
".",
"_MILESTONES",
")",
"for",
"index",
",",
"(",
"iteration",
",",
"_",
")",
"in",
"enumerate",
"(",
"ModelTerminator",
".",
"_MILESTONES",
")",
":",
"cb",
"=",
"functools",
".",
"partial",
"(",
"terminationFunc",
",",
"index",
"=",
"index",
")",
"activities",
"[",
"index",
"]",
"=",
"PeriodicActivityRequest",
"(",
"repeating",
"=",
"False",
",",
"period",
"=",
"iteration",
",",
"cb",
"=",
"cb",
")"
] | Returns the periodic checks to see if the model should
continue running.
Parameters:
-----------------------------------------------------------------------
terminationFunc: The function that will be called in the model main loop
as a wrapper around this function. Must have a parameter
called 'index'
Returns: A list of PeriodicActivityRequest objects. | [
"Returns",
"the",
"periodic",
"checks",
"to",
"see",
"if",
"the",
"model",
"should",
"continue",
"running",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/hypersearch/model_terminator.py#L59-L76 | valid |
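The callbacks built in getTerminationCallbacks differ only in the index bound via functools.partial; a stripped-down illustration of that binding (the milestones and termination function are fabricated):

import functools

def terminationFunc(index):
    return "checking milestone %d" % index

milestones = [(250, None), (500, None)]
callbacks = [functools.partial(terminationFunc, index=i)
             for i, _ in enumerate(milestones)]
assert callbacks[1]() == "checking milestone 1"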
numenta/nupic | src/nupic/support/group_by.py | groupby2 | def groupby2(*args):
""" Like itertools.groupby, with the following additions:
- Supports multiple sequences. Instead of returning (k, g), each iteration
returns (k, g0, g1, ...), with one `g` for each input sequence. The value of
each `g` is either a non-empty iterator or `None`.
- It treats the value `None` as an empty sequence. So you can make subsequent
calls to groupby2 on any `g` value.
.. note:: Read up on groupby here:
https://docs.python.org/dev/library/itertools.html#itertools.groupby
:param args: (list) Parameters alternating between sorted lists and their
respective key functions. The lists should be sorted with
respect to their key function.
:returns: (tuple) An n + 1 dimensional tuple, where the first element is the
key of the iteration, and the other n entries are groups of
objects that share this key. Each group corresponds to an
input sequence. `groupby2` is a generator that returns a tuple
for every iteration. If an input sequence has no members with
the current key, None is returned in place of a generator.
"""
generatorList = [] # list of each list's (k, group) tuples
if len(args) % 2 == 1:
raise ValueError("Must have a key function for every list.")
advanceList = []
# populate above lists
for i in xrange(0, len(args), 2):
listn = args[i]
fn = args[i + 1]
if listn is not None:
generatorList.append(groupby(listn, fn))
advanceList.append(True) # start by advancing everyone.
else:
generatorList.append(None)
advanceList.append(False)
n = len(generatorList)
nextList = [None] * n
# while all lists aren't exhausted walk through each group in order
while True:
for i in xrange(n):
if advanceList[i]:
try:
nextList[i] = generatorList[i].next()
except StopIteration:
nextList[i] = None
# no more values to process in any of the generators
if all(entry is None for entry in nextList):
break
# the minimum key value in the nextList
minKeyVal = min(nextVal[0] for nextVal in nextList
if nextVal is not None)
# populate the tuple to return based on minKeyVal
retGroups = [minKeyVal]
for i in xrange(n):
if nextList[i] is not None and nextList[i][0] == minKeyVal:
retGroups.append(nextList[i][1])
advanceList[i] = True
else:
advanceList[i] = False
retGroups.append(None)
yield tuple(retGroups) | python | def groupby2(*args):
""" Like itertools.groupby, with the following additions:
- Supports multiple sequences. Instead of returning (k, g), each iteration
returns (k, g0, g1, ...), with one `g` for each input sequence. The value of
each `g` is either a non-empty iterator or `None`.
- It treats the value `None` as an empty sequence. So you can make subsequent
calls to groupby2 on any `g` value.
.. note:: Read up on groupby here:
https://docs.python.org/dev/library/itertools.html#itertools.groupby
:param args: (list) Parameters alternating between sorted lists and their
respective key functions. The lists should be sorted with
respect to their key function.
:returns: (tuple) An n + 1 dimensional tuple, where the first element is the
key of the iteration, and the other n entries are groups of
objects that share this key. Each group corresponds to an
input sequence. `groupby2` is a generator that returns a tuple
for every iteration. If an input sequence has no members with
the current key, None is returned in place of a generator.
"""
generatorList = [] # list of each list's (k, group) tuples
if len(args) % 2 == 1:
raise ValueError("Must have a key function for every list.")
advanceList = []
# populate above lists
for i in xrange(0, len(args), 2):
listn = args[i]
fn = args[i + 1]
if listn is not None:
generatorList.append(groupby(listn, fn))
advanceList.append(True) # start by advancing everyone.
else:
generatorList.append(None)
advanceList.append(False)
n = len(generatorList)
nextList = [None] * n
# while all lists aren't exhausted walk through each group in order
while True:
for i in xrange(n):
if advanceList[i]:
try:
nextList[i] = generatorList[i].next()
except StopIteration:
nextList[i] = None
# no more values to process in any of the generators
if all(entry is None for entry in nextList):
break
# the minimum key value in the nextList
minKeyVal = min(nextVal[0] for nextVal in nextList
if nextVal is not None)
# populate the tuple to return based on minKeyVal
retGroups = [minKeyVal]
for i in xrange(n):
if nextList[i] is not None and nextList[i][0] == minKeyVal:
retGroups.append(nextList[i][1])
advanceList[i] = True
else:
advanceList[i] = False
retGroups.append(None)
yield tuple(retGroups) | [
"def",
"groupby2",
"(",
"*",
"args",
")",
":",
"generatorList",
"=",
"[",
"]",
"# list of each list's (k, group) tuples",
"if",
"len",
"(",
"args",
")",
"%",
"2",
"==",
"1",
":",
"raise",
"ValueError",
"(",
"\"Must have a key function for every list.\"",
")",
"advanceList",
"=",
"[",
"]",
"# populate above lists",
"for",
"i",
"in",
"xrange",
"(",
"0",
",",
"len",
"(",
"args",
")",
",",
"2",
")",
":",
"listn",
"=",
"args",
"[",
"i",
"]",
"fn",
"=",
"args",
"[",
"i",
"+",
"1",
"]",
"if",
"listn",
"is",
"not",
"None",
":",
"generatorList",
".",
"append",
"(",
"groupby",
"(",
"listn",
",",
"fn",
")",
")",
"advanceList",
".",
"append",
"(",
"True",
")",
"# start by advancing everyone.",
"else",
":",
"generatorList",
".",
"append",
"(",
"None",
")",
"advanceList",
".",
"append",
"(",
"False",
")",
"n",
"=",
"len",
"(",
"generatorList",
")",
"nextList",
"=",
"[",
"None",
"]",
"*",
"n",
"# while all lists aren't exhausted walk through each group in order",
"while",
"True",
":",
"for",
"i",
"in",
"xrange",
"(",
"n",
")",
":",
"if",
"advanceList",
"[",
"i",
"]",
":",
"try",
":",
"nextList",
"[",
"i",
"]",
"=",
"generatorList",
"[",
"i",
"]",
".",
"next",
"(",
")",
"except",
"StopIteration",
":",
"nextList",
"[",
"i",
"]",
"=",
"None",
"# no more values to process in any of the generators",
"if",
"all",
"(",
"entry",
"is",
"None",
"for",
"entry",
"in",
"nextList",
")",
":",
"break",
"# the minimum key value in the nextList",
"minKeyVal",
"=",
"min",
"(",
"nextVal",
"[",
"0",
"]",
"for",
"nextVal",
"in",
"nextList",
"if",
"nextVal",
"is",
"not",
"None",
")",
"# populate the tuple to return based on minKeyVal",
"retGroups",
"=",
"[",
"minKeyVal",
"]",
"for",
"i",
"in",
"xrange",
"(",
"n",
")",
":",
"if",
"nextList",
"[",
"i",
"]",
"is",
"not",
"None",
"and",
"nextList",
"[",
"i",
"]",
"[",
"0",
"]",
"==",
"minKeyVal",
":",
"retGroups",
".",
"append",
"(",
"nextList",
"[",
"i",
"]",
"[",
"1",
"]",
")",
"advanceList",
"[",
"i",
"]",
"=",
"True",
"else",
":",
"advanceList",
"[",
"i",
"]",
"=",
"False",
"retGroups",
".",
"append",
"(",
"None",
")",
"yield",
"tuple",
"(",
"retGroups",
")"
] | Like itertools.groupby, with the following additions:
- Supports multiple sequences. Instead of returning (k, g), each iteration
returns (k, g0, g1, ...), with one `g` for each input sequence. The value of
each `g` is either a non-empty iterator or `None`.
- It treats the value `None` as an empty sequence. So you can make subsequent
calls to groupby2 on any `g` value.
.. note:: Read up on groupby here:
https://docs.python.org/dev/library/itertools.html#itertools.groupby
:param args: (list) Parameters alternating between sorted lists and their
respective key functions. The lists should be sorted with
respect to their key function.
:returns: (tuple) An n + 1 dimensional tuple, where the first element is the
key of the iteration, and the other n entries are groups of
objects that share this key. Each group corresponds to an
input sequence. `groupby2` is a generator that returns a tuple
for every iteration. If an input sequence has no members with
the current key, None is returned in place of a generator. | [
"Like",
"itertools",
".",
"groupby",
"with",
"the",
"following",
"additions",
":"
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/support/group_by.py#L25-L96 | valid |
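A small worked call to groupby2, assuming both input lists are already sorted under their (identity) key functions; each yielded group is drained before the next iteration, as itertools.groupby requires:

left = [1, 1, 2, 4]
right = [2, 2, 3]
identity = lambda x: x

result = [(k,
           None if g0 is None else list(g0),
           None if g1 is None else list(g1))
          for k, g0, g1 in groupby2(left, identity, right, identity)]
assert result == [(1, [1, 1], None), (2, [2], [2, 2]),
                  (3, None, [3]), (4, [4], None)]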
numenta/nupic | src/nupic/data/stream_reader.py | StreamReader._openStream | def _openStream(dataUrl,
isBlocking, # pylint: disable=W0613
maxTimeout, # pylint: disable=W0613
bookmark,
firstRecordIdx):
"""Open the underlying file stream
This only supports 'file://' prefixed paths.
:returns: record stream instance
:rtype: FileRecordStream
"""
filePath = dataUrl[len(FILE_PREF):]
if not os.path.isabs(filePath):
filePath = os.path.join(os.getcwd(), filePath)
return FileRecordStream(streamID=filePath,
write=False,
bookmark=bookmark,
firstRecord=firstRecordIdx) | python | def _openStream(dataUrl,
isBlocking, # pylint: disable=W0613
maxTimeout, # pylint: disable=W0613
bookmark,
firstRecordIdx):
"""Open the underlying file stream
This only supports 'file://' prefixed paths.
:returns: record stream instance
:rtype: FileRecordStream
"""
filePath = dataUrl[len(FILE_PREF):]
if not os.path.isabs(filePath):
filePath = os.path.join(os.getcwd(), filePath)
return FileRecordStream(streamID=filePath,
write=False,
bookmark=bookmark,
firstRecord=firstRecordIdx) | [
"def",
"_openStream",
"(",
"dataUrl",
",",
"isBlocking",
",",
"# pylint: disable=W0613",
"maxTimeout",
",",
"# pylint: disable=W0613",
"bookmark",
",",
"firstRecordIdx",
")",
":",
"filePath",
"=",
"dataUrl",
"[",
"len",
"(",
"FILE_PREF",
")",
":",
"]",
"if",
"not",
"os",
".",
"path",
".",
"isabs",
"(",
"filePath",
")",
":",
"filePath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"getcwd",
"(",
")",
",",
"filePath",
")",
"return",
"FileRecordStream",
"(",
"streamID",
"=",
"filePath",
",",
"write",
"=",
"False",
",",
"bookmark",
"=",
"bookmark",
",",
"firstRecord",
"=",
"firstRecordIdx",
")"
] | Open the underlying file stream
This only supports 'file://' prefixed paths.
:returns: record stream instance
:rtype: FileRecordStream | [
"Open",
"the",
"underlying",
"file",
"stream",
"This",
"only",
"supports",
"file",
":",
"//",
"prefixed",
"paths",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/stream_reader.py#L281-L298 | valid |
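The url handling in _openStream strips the file:// prefix and absolutizes relative paths against the working directory; just that path logic, with a fabricated url:

import os

FILE_PREF = "file://"
dataUrl = "file://data/hotgym.csv"
filePath = dataUrl[len(FILE_PREF):]
if not os.path.isabs(filePath):
    filePath = os.path.join(os.getcwd(), filePath)
# filePath is now an absolute path ending in data/hotgym.csv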
numenta/nupic | src/nupic/data/stream_reader.py | StreamReader.getNextRecord | def getNextRecord(self):
""" Returns combined data from all sources (values only).
:returns: None on EOF; empty sequence on timeout.
"""
# Keep reading from the raw input till we get enough for an aggregated
# record
while True:
# Reached EOF due to lastRow constraint?
if self._sourceLastRecordIdx is not None and \
self._recordStore.getNextRecordIdx() >= self._sourceLastRecordIdx:
preAggValues = None # indicates EOF
bookmark = self._recordStore.getBookmark()
else:
# Get the raw record and bookmark
preAggValues = self._recordStore.getNextRecord()
bookmark = self._recordStore.getBookmark()
if preAggValues == (): # means timeout error occurred
if self._eofOnTimeout:
preAggValues = None # act as if we got EOF
else:
return preAggValues # Timeout indicator
self._logger.debug('Read source record #%d: %r',
self._recordStore.getNextRecordIdx()-1, preAggValues)
# Perform aggregation
(fieldValues, aggBookmark) = self._aggregator.next(preAggValues, bookmark)
# Update the aggregated record bookmark if we got a real record back
if fieldValues is not None:
self._aggBookmark = aggBookmark
# Reached EOF?
if preAggValues is None and fieldValues is None:
return None
# Return it if we have a record
if fieldValues is not None:
break
# Do we need to re-order the fields in the record?
if self._needFieldsFiltering:
values = []
srcDict = dict(zip(self._recordStoreFieldNames, fieldValues))
for name in self._streamFieldNames:
values.append(srcDict[name])
fieldValues = values
# Write to debug output?
if self._writer is not None:
self._writer.appendRecord(fieldValues)
self._recordCount += 1
self._logger.debug('Returning aggregated record #%d from getNextRecord(): '
'%r. Bookmark: %r',
self._recordCount-1, fieldValues, self._aggBookmark)
return fieldValues | python | def getNextRecord(self):
""" Returns combined data from all sources (values only).
:returns: None on EOF; empty sequence on timeout.
"""
# Keep reading from the raw input till we get enough for an aggregated
# record
while True:
# Reached EOF due to lastRow constraint?
if self._sourceLastRecordIdx is not None and \
self._recordStore.getNextRecordIdx() >= self._sourceLastRecordIdx:
preAggValues = None # indicates EOF
bookmark = self._recordStore.getBookmark()
else:
# Get the raw record and bookmark
preAggValues = self._recordStore.getNextRecord()
bookmark = self._recordStore.getBookmark()
if preAggValues == (): # means timeout error occurred
if self._eofOnTimeout:
preAggValues = None # act as if we got EOF
else:
return preAggValues # Timeout indicator
self._logger.debug('Read source record #%d: %r',
self._recordStore.getNextRecordIdx()-1, preAggValues)
# Perform aggregation
(fieldValues, aggBookmark) = self._aggregator.next(preAggValues, bookmark)
# Update the aggregated record bookmark if we got a real record back
if fieldValues is not None:
self._aggBookmark = aggBookmark
# Reached EOF?
if preAggValues is None and fieldValues is None:
return None
# Return it if we have a record
if fieldValues is not None:
break
# Do we need to re-order the fields in the record?
if self._needFieldsFiltering:
values = []
srcDict = dict(zip(self._recordStoreFieldNames, fieldValues))
for name in self._streamFieldNames:
values.append(srcDict[name])
fieldValues = values
# Write to debug output?
if self._writer is not None:
self._writer.appendRecord(fieldValues)
self._recordCount += 1
self._logger.debug('Returning aggregated record #%d from getNextRecord(): '
'%r. Bookmark: %r',
self._recordCount-1, fieldValues, self._aggBookmark)
return fieldValues | [
"def",
"getNextRecord",
"(",
"self",
")",
":",
"# Keep reading from the raw input till we get enough for an aggregated",
"# record",
"while",
"True",
":",
"# Reached EOF due to lastRow constraint?",
"if",
"self",
".",
"_sourceLastRecordIdx",
"is",
"not",
"None",
"and",
"self",
".",
"_recordStore",
".",
"getNextRecordIdx",
"(",
")",
">=",
"self",
".",
"_sourceLastRecordIdx",
":",
"preAggValues",
"=",
"None",
"# indicates EOF",
"bookmark",
"=",
"self",
".",
"_recordStore",
".",
"getBookmark",
"(",
")",
"else",
":",
"# Get the raw record and bookmark",
"preAggValues",
"=",
"self",
".",
"_recordStore",
".",
"getNextRecord",
"(",
")",
"bookmark",
"=",
"self",
".",
"_recordStore",
".",
"getBookmark",
"(",
")",
"if",
"preAggValues",
"==",
"(",
")",
":",
"# means timeout error occurred",
"if",
"self",
".",
"_eofOnTimeout",
":",
"preAggValues",
"=",
"None",
"# act as if we got EOF",
"else",
":",
"return",
"preAggValues",
"# Timeout indicator",
"self",
".",
"_logger",
".",
"debug",
"(",
"'Read source record #%d: %r'",
",",
"self",
".",
"_recordStore",
".",
"getNextRecordIdx",
"(",
")",
"-",
"1",
",",
"preAggValues",
")",
"# Perform aggregation",
"(",
"fieldValues",
",",
"aggBookmark",
")",
"=",
"self",
".",
"_aggregator",
".",
"next",
"(",
"preAggValues",
",",
"bookmark",
")",
"# Update the aggregated record bookmark if we got a real record back",
"if",
"fieldValues",
"is",
"not",
"None",
":",
"self",
".",
"_aggBookmark",
"=",
"aggBookmark",
"# Reached EOF?",
"if",
"preAggValues",
"is",
"None",
"and",
"fieldValues",
"is",
"None",
":",
"return",
"None",
"# Return it if we have a record",
"if",
"fieldValues",
"is",
"not",
"None",
":",
"break",
"# Do we need to re-order the fields in the record?",
"if",
"self",
".",
"_needFieldsFiltering",
":",
"values",
"=",
"[",
"]",
"srcDict",
"=",
"dict",
"(",
"zip",
"(",
"self",
".",
"_recordStoreFieldNames",
",",
"fieldValues",
")",
")",
"for",
"name",
"in",
"self",
".",
"_streamFieldNames",
":",
"values",
".",
"append",
"(",
"srcDict",
"[",
"name",
"]",
")",
"fieldValues",
"=",
"values",
"# Write to debug output?",
"if",
"self",
".",
"_writer",
"is",
"not",
"None",
":",
"self",
".",
"_writer",
".",
"appendRecord",
"(",
"fieldValues",
")",
"self",
".",
"_recordCount",
"+=",
"1",
"self",
".",
"_logger",
".",
"debug",
"(",
"'Returning aggregated record #%d from getNextRecord(): '",
"'%r. Bookmark: %r'",
",",
"self",
".",
"_recordCount",
"-",
"1",
",",
"fieldValues",
",",
"self",
".",
"_aggBookmark",
")",
"return",
"fieldValues"
] | Returns combined data from all sources (values only).
:returns: None on EOF; empty sequence on timeout. | [
"Returns",
"combined",
"data",
"from",
"all",
"sources",
"(",
"values",
"only",
")",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/stream_reader.py#L307-L372 | valid |
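A hedged consumption loop matching the semantics documented above (`None` on EOF, empty sequence on timeout); `stream` and `process` are assumptions, not part of this record:

.. code-block:: python

   # `stream` is an already-constructed StreamReader instance and
   # `process` is a hypothetical per-record handler.
   while True:
       record = stream.getNextRecord()
       if record is None:        # EOF
           break
       if record == ():          # timeout (eofOnTimeout disabled)
           continue              # or back off before retrying
       process(record)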
numenta/nupic | src/nupic/data/stream_reader.py | StreamReader.getDataRowCount | def getDataRowCount(self):
"""
Iterates through stream to calculate total records after aggregation.
This will alter the bookmark state.
"""
inputRowCountAfterAggregation = 0
while True:
record = self.getNextRecord()
if record is None:
return inputRowCountAfterAggregation
inputRowCountAfterAggregation += 1
if inputRowCountAfterAggregation > 10000:
raise RuntimeError('No end of datastream found.') | python | def getDataRowCount(self):
"""
Iterates through stream to calculate total records after aggregation.
This will alter the bookmark state.
"""
inputRowCountAfterAggregation = 0
while True:
record = self.getNextRecord()
if record is None:
return inputRowCountAfterAggregation
inputRowCountAfterAggregation += 1
if inputRowCountAfterAggregation > 10000:
raise RuntimeError('No end of datastream found.') | [
"def",
"getDataRowCount",
"(",
"self",
")",
":",
"inputRowCountAfterAggregation",
"=",
"0",
"while",
"True",
":",
"record",
"=",
"self",
".",
"getNextRecord",
"(",
")",
"if",
"record",
"is",
"None",
":",
"return",
"inputRowCountAfterAggregation",
"inputRowCountAfterAggregation",
"+=",
"1",
"if",
"inputRowCountAfterAggregation",
">",
"10000",
":",
"raise",
"RuntimeError",
"(",
"'No end of datastream found.'",
")"
] | Iterates through stream to calculate total records after aggregation.
This will alter the bookmark state. | [
"Iterates",
"through",
"stream",
"to",
"calculate",
"total",
"records",
"after",
"aggregation",
".",
"This",
"will",
"alter",
"the",
"bookmark",
"state",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/stream_reader.py#L375-L388 | valid |
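Since the counting loop consumes every record (and so advances the bookmark), a usage sketch is to count on a reader you can afford to discard; construction of `stream` is assumed:

.. code-block:: python

   # Counting exhausts the reader, so re-open the stream afterwards for
   # the real pass. The method raises RuntimeError past 10000 records.
   nRows = stream.getDataRowCount()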
numenta/nupic | src/nupic/data/stream_reader.py | StreamReader.getStats | def getStats(self):
"""
TODO: This method needs to be enhanced to get the stats on the *aggregated*
records.
:returns: stats (like min and max values of the fields).
"""
# The record store returns a dict of stats, each value in this dict is
# a list with one item per field of the record store
# {
# 'min' : [f1_min, f2_min, f3_min],
# 'max' : [f1_max, f2_max, f3_max]
# }
recordStoreStats = self._recordStore.getStats()
# We need to convert each item to represent the fields of the *stream*
streamStats = dict()
for (key, values) in recordStoreStats.items():
fieldStats = dict(zip(self._recordStoreFieldNames, values))
streamValues = []
for name in self._streamFieldNames:
streamValues.append(fieldStats[name])
streamStats[key] = streamValues
return streamStats | python | def getStats(self):
"""
TODO: This method needs to be enhanced to get the stats on the *aggregated*
records.
:returns: stats (like min and max values of the fields).
"""
# The record store returns a dict of stats, each value in this dict is
# a list with one item per field of the record store
# {
# 'min' : [f1_min, f2_min, f3_min],
# 'max' : [f1_max, f2_max, f3_max]
# }
recordStoreStats = self._recordStore.getStats()
# We need to convert each item to represent the fields of the *stream*
streamStats = dict()
for (key, values) in recordStoreStats.items():
fieldStats = dict(zip(self._recordStoreFieldNames, values))
streamValues = []
for name in self._streamFieldNames:
streamValues.append(fieldStats[name])
streamStats[key] = streamValues
return streamStats | [
"def",
"getStats",
"(",
"self",
")",
":",
"# The record store returns a dict of stats, each value in this dict is",
"# a list with one item per field of the record store",
"# {",
"# 'min' : [f1_min, f2_min, f3_min],",
"# 'max' : [f1_max, f2_max, f3_max]",
"# }",
"recordStoreStats",
"=",
"self",
".",
"_recordStore",
".",
"getStats",
"(",
")",
"# We need to convert each item to represent the fields of the *stream*",
"streamStats",
"=",
"dict",
"(",
")",
"for",
"(",
"key",
",",
"values",
")",
"in",
"recordStoreStats",
".",
"items",
"(",
")",
":",
"fieldStats",
"=",
"dict",
"(",
"zip",
"(",
"self",
".",
"_recordStoreFieldNames",
",",
"values",
")",
")",
"streamValues",
"=",
"[",
"]",
"for",
"name",
"in",
"self",
".",
"_streamFieldNames",
":",
"streamValues",
".",
"append",
"(",
"fieldStats",
"[",
"name",
"]",
")",
"streamStats",
"[",
"key",
"]",
"=",
"streamValues",
"return",
"streamStats"
] | TODO: This method needs to be enhanced to get the stats on the *aggregated*
records.
:returns: stats (like min and max values of the fields). | [
"TODO",
":",
"This",
"method",
"needs",
"to",
"be",
"enhanced",
"to",
"get",
"the",
"stats",
"on",
"the",
"*",
"aggregated",
"*",
"records",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/stream_reader.py#L468-L493 | valid |
numenta/nupic | src/nupic/data/generators/pattern_machine.py | PatternMachine.get | def get(self, number):
"""
Return a pattern for a number.
@param number (int) Number of pattern
@return (set) Indices of on bits
"""
if not number in self._patterns:
raise IndexError("Invalid number")
return self._patterns[number] | python | def get(self, number):
"""
Return a pattern for a number.
@param number (int) Number of pattern
@return (set) Indices of on bits
"""
if not number in self._patterns:
raise IndexError("Invalid number")
return self._patterns[number] | [
"def",
"get",
"(",
"self",
",",
"number",
")",
":",
"if",
"not",
"number",
"in",
"self",
".",
"_patterns",
":",
"raise",
"IndexError",
"(",
"\"Invalid number\"",
")",
"return",
"self",
".",
"_patterns",
"[",
"number",
"]"
] | Return a pattern for a number.
@param number (int) Number of pattern
@return (set) Indices of on bits | [
"Return",
"a",
"pattern",
"for",
"a",
"number",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/generators/pattern_machine.py#L61-L72 | valid |
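A basic usage sketch; the constructor arguments mirror the private attributes the class reads (`_n`, `_w`, `_num`), so the exact signature is an assumption:

.. code-block:: python

   from nupic.data.generators.pattern_machine import PatternMachine

   patternMachine = PatternMachine(n=1024, w=20, num=100)
   pattern = patternMachine.get(0)  # set of 20 on-bit indices in [0, 1024)
   # patternMachine.get(100) would raise IndexError: only 100 patterns exist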
numenta/nupic | src/nupic/data/generators/pattern_machine.py | PatternMachine.addNoise | def addNoise(self, bits, amount):
"""
Add noise to pattern.
@param bits (set) Indices of on bits
@param amount (float) Probability of switching an on bit with a random bit
@return (set) Indices of on bits in noisy pattern
"""
newBits = set()
for bit in bits:
if self._random.getReal64() < amount:
newBits.add(self._random.getUInt32(self._n))
else:
newBits.add(bit)
return newBits | python | def addNoise(self, bits, amount):
"""
Add noise to pattern.
@param bits (set) Indices of on bits
@param amount (float) Probability of switching an on bit with a random bit
@return (set) Indices of on bits in noisy pattern
"""
newBits = set()
for bit in bits:
if self._random.getReal64() < amount:
newBits.add(self._random.getUInt32(self._n))
else:
newBits.add(bit)
return newBits | [
"def",
"addNoise",
"(",
"self",
",",
"bits",
",",
"amount",
")",
":",
"newBits",
"=",
"set",
"(",
")",
"for",
"bit",
"in",
"bits",
":",
"if",
"self",
".",
"_random",
".",
"getReal64",
"(",
")",
"<",
"amount",
":",
"newBits",
".",
"add",
"(",
"self",
".",
"_random",
".",
"getUInt32",
"(",
"self",
".",
"_n",
")",
")",
"else",
":",
"newBits",
".",
"add",
"(",
"bit",
")",
"return",
"newBits"
] | Add noise to pattern.
@param bits (set) Indices of on bits
@param amount (float) Probability of switching an on bit with a random bit
@return (set) Indices of on bits in noisy pattern | [
"Add",
"noise",
"to",
"pattern",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/generators/pattern_machine.py#L75-L92 | valid |
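Continuing the sketch above, `addNoise()` replaces each on bit independently with probability `amount`:

.. code-block:: python

   noisy = patternMachine.addNoise(pattern, 0.1)
   # Each on bit had a 10% chance of being swapped for a uniformly random
   # bit, so `noisy` typically shares ~90% of `pattern`. Because the
   # result is a set, it can be slightly smaller than the input when a
   # random replacement collides with a bit that is already on.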
numenta/nupic | src/nupic/data/generators/pattern_machine.py | PatternMachine.numbersForBit | def numbersForBit(self, bit):
"""
Return the set of pattern numbers that match a bit.
@param bit (int) Index of bit
@return (set) Indices of numbers
"""
if bit >= self._n:
raise IndexError("Invalid bit")
numbers = set()
for index, pattern in self._patterns.iteritems():
if bit in pattern:
numbers.add(index)
return numbers | python | def numbersForBit(self, bit):
"""
Return the set of pattern numbers that match a bit.
@param bit (int) Index of bit
@return (set) Indices of numbers
"""
if bit >= self._n:
raise IndexError("Invalid bit")
numbers = set()
for index, pattern in self._patterns.iteritems():
if bit in pattern:
numbers.add(index)
return numbers | [
"def",
"numbersForBit",
"(",
"self",
",",
"bit",
")",
":",
"if",
"bit",
">=",
"self",
".",
"_n",
":",
"raise",
"IndexError",
"(",
"\"Invalid bit\"",
")",
"numbers",
"=",
"set",
"(",
")",
"for",
"index",
",",
"pattern",
"in",
"self",
".",
"_patterns",
".",
"iteritems",
"(",
")",
":",
"if",
"bit",
"in",
"pattern",
":",
"numbers",
".",
"add",
"(",
"index",
")",
"return",
"numbers"
] | Return the set of pattern numbers that match a bit.
@param bit (int) Index of bit
@return (set) Indices of numbers | [
"Return",
"the",
"set",
"of",
"pattern",
"numbers",
"that",
"match",
"a",
"bit",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/generators/pattern_machine.py#L95-L112 | valid |
numenta/nupic | src/nupic/data/generators/pattern_machine.py | PatternMachine.numberMapForBits | def numberMapForBits(self, bits):
"""
Return a map from number to matching on bits,
for all numbers that match a set of bits.
@param bits (set) Indices of bits
@return (dict) Mapping from number => on bits.
"""
numberMap = dict()
for bit in bits:
numbers = self.numbersForBit(bit)
for number in numbers:
if not number in numberMap:
numberMap[number] = set()
numberMap[number].add(bit)
return numberMap | python | def numberMapForBits(self, bits):
"""
Return a map from number to matching on bits,
for all numbers that match a set of bits.
@param bits (set) Indices of bits
@return (dict) Mapping from number => on bits.
"""
numberMap = dict()
for bit in bits:
numbers = self.numbersForBit(bit)
for number in numbers:
if not number in numberMap:
numberMap[number] = set()
numberMap[number].add(bit)
return numberMap | [
"def",
"numberMapForBits",
"(",
"self",
",",
"bits",
")",
":",
"numberMap",
"=",
"dict",
"(",
")",
"for",
"bit",
"in",
"bits",
":",
"numbers",
"=",
"self",
".",
"numbersForBit",
"(",
"bit",
")",
"for",
"number",
"in",
"numbers",
":",
"if",
"not",
"number",
"in",
"numberMap",
":",
"numberMap",
"[",
"number",
"]",
"=",
"set",
"(",
")",
"numberMap",
"[",
"number",
"]",
".",
"add",
"(",
"bit",
")",
"return",
"numberMap"
] | Return a map from number to matching on bits,
for all numbers that match a set of bits.
@param bits (set) Indices of bits
@return (dict) Mapping from number => on bits. | [
"Return",
"a",
"map",
"from",
"number",
"to",
"matching",
"on",
"bits",
"for",
"all",
"numbers",
"that",
"match",
"a",
"set",
"of",
"bits",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/generators/pattern_machine.py#L115-L135 | valid |
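The two reverse-lookup methods compose naturally; a sketch reusing `patternMachine` and `pattern` from the earlier sketches:

.. code-block:: python

   owners = patternMachine.numbersForBit(42)  # patterns containing bit 42
   overlaps = patternMachine.numberMapForBits(pattern)
   # overlaps maps each matching pattern number to the subset of `pattern`
   # it shares; a stored pattern's own entry is the full pattern.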
numenta/nupic | src/nupic/data/generators/pattern_machine.py | PatternMachine.prettyPrintPattern | def prettyPrintPattern(self, bits, verbosity=1):
"""
Pretty print a pattern.
@param bits (set) Indices of on bits
@param verbosity (int) Verbosity level
@return (string) Pretty-printed text
"""
numberMap = self.numberMapForBits(bits)
text = ""
numberList = []
numberItems = sorted(numberMap.iteritems(),
key=lambda (number, bits): len(bits),
reverse=True)
for number, bits in numberItems:
if verbosity > 2:
strBits = [str(n) for n in bits]
numberText = "{0} (bits: {1})".format(number, ",".join(strBits))
elif verbosity > 1:
numberText = "{0} ({1} bits)".format(number, len(bits))
else:
numberText = str(number)
numberList.append(numberText)
text += "[{0}]".format(", ".join(numberList))
return text | python | def prettyPrintPattern(self, bits, verbosity=1):
"""
Pretty print a pattern.
@param bits (set) Indices of on bits
@param verbosity (int) Verbosity level
@return (string) Pretty-printed text
"""
numberMap = self.numberMapForBits(bits)
text = ""
numberList = []
numberItems = sorted(numberMap.iteritems(),
key=lambda (number, bits): len(bits),
reverse=True)
for number, bits in numberItems:
if verbosity > 2:
strBits = [str(n) for n in bits]
numberText = "{0} (bits: {1})".format(number, ",".join(strBits))
elif verbosity > 1:
numberText = "{0} ({1} bits)".format(number, len(bits))
else:
numberText = str(number)
numberList.append(numberText)
text += "[{0}]".format(", ".join(numberList))
return text | [
"def",
"prettyPrintPattern",
"(",
"self",
",",
"bits",
",",
"verbosity",
"=",
"1",
")",
":",
"numberMap",
"=",
"self",
".",
"numberMapForBits",
"(",
"bits",
")",
"text",
"=",
"\"\"",
"numberList",
"=",
"[",
"]",
"numberItems",
"=",
"sorted",
"(",
"numberMap",
".",
"iteritems",
"(",
")",
",",
"key",
"=",
"lambda",
"(",
"number",
",",
"bits",
")",
":",
"len",
"(",
"bits",
")",
",",
"reverse",
"=",
"True",
")",
"for",
"number",
",",
"bits",
"in",
"numberItems",
":",
"if",
"verbosity",
">",
"2",
":",
"strBits",
"=",
"[",
"str",
"(",
"n",
")",
"for",
"n",
"in",
"bits",
"]",
"numberText",
"=",
"\"{0} (bits: {1})\"",
".",
"format",
"(",
"number",
",",
"\",\"",
".",
"join",
"(",
"strBits",
")",
")",
"elif",
"verbosity",
">",
"1",
":",
"numberText",
"=",
"\"{0} ({1} bits)\"",
".",
"format",
"(",
"number",
",",
"len",
"(",
"bits",
")",
")",
"else",
":",
"numberText",
"=",
"str",
"(",
"number",
")",
"numberList",
".",
"append",
"(",
"numberText",
")",
"text",
"+=",
"\"[{0}]\"",
".",
"format",
"(",
"\", \"",
".",
"join",
"(",
"numberList",
")",
")",
"return",
"text"
] | Pretty print a pattern.
@param bits (set) Indices of on bits
@param verbosity (int) Verbosity level
@return (string) Pretty-printed text | [
"Pretty",
"print",
"a",
"pattern",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/generators/pattern_machine.py#L138-L169 | valid |
numenta/nupic | src/nupic/data/generators/pattern_machine.py | PatternMachine._generate | def _generate(self):
"""
Generates set of random patterns.
"""
candidates = np.array(range(self._n), np.uint32)
for i in xrange(self._num):
self._random.shuffle(candidates)
pattern = candidates[0:self._getW()]
self._patterns[i] = set(pattern) | python | def _generate(self):
"""
Generates set of random patterns.
"""
candidates = np.array(range(self._n), np.uint32)
for i in xrange(self._num):
self._random.shuffle(candidates)
pattern = candidates[0:self._getW()]
self._patterns[i] = set(pattern) | [
"def",
"_generate",
"(",
"self",
")",
":",
"candidates",
"=",
"np",
".",
"array",
"(",
"range",
"(",
"self",
".",
"_n",
")",
",",
"np",
".",
"uint32",
")",
"for",
"i",
"in",
"xrange",
"(",
"self",
".",
"_num",
")",
":",
"self",
".",
"_random",
".",
"shuffle",
"(",
"candidates",
")",
"pattern",
"=",
"candidates",
"[",
"0",
":",
"self",
".",
"_getW",
"(",
")",
"]",
"self",
".",
"_patterns",
"[",
"i",
"]",
"=",
"set",
"(",
"pattern",
")"
] | Generates set of random patterns. | [
"Generates",
"set",
"of",
"random",
"patterns",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/generators/pattern_machine.py#L172-L180 | valid |
numenta/nupic | src/nupic/data/generators/pattern_machine.py | PatternMachine._getW | def _getW(self):
"""
Gets a value of `w` for use in generating a pattern.
"""
w = self._w
if type(w) is list:
return w[self._random.getUInt32(len(w))]
else:
return w | python | def _getW(self):
"""
Gets a value of `w` for use in generating a pattern.
"""
w = self._w
if type(w) is list:
return w[self._random.getUInt32(len(w))]
else:
return w | [
"def",
"_getW",
"(",
"self",
")",
":",
"w",
"=",
"self",
".",
"_w",
"if",
"type",
"(",
"w",
")",
"is",
"list",
":",
"return",
"w",
"[",
"self",
".",
"_random",
".",
"getUInt32",
"(",
"len",
"(",
"w",
")",
")",
"]",
"else",
":",
"return",
"w"
] | Gets a value of `w` for use in generating a pattern. | [
"Gets",
"a",
"value",
"of",
"w",
"for",
"use",
"in",
"generating",
"a",
"pattern",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/generators/pattern_machine.py#L183-L192 | valid |
numenta/nupic | src/nupic/data/generators/pattern_machine.py | ConsecutivePatternMachine._generate | def _generate(self):
"""
Generates set of consecutive patterns.
"""
n = self._n
w = self._w
assert type(w) is int, "List for w not supported"
for i in xrange(n / w):
pattern = set(xrange(i * w, (i+1) * w))
self._patterns[i] = pattern | python | def _generate(self):
"""
Generates set of consecutive patterns.
"""
n = self._n
w = self._w
assert type(w) is int, "List for w not supported"
for i in xrange(n / w):
pattern = set(xrange(i * w, (i+1) * w))
self._patterns[i] = pattern | [
"def",
"_generate",
"(",
"self",
")",
":",
"n",
"=",
"self",
".",
"_n",
"w",
"=",
"self",
".",
"_w",
"assert",
"type",
"(",
"w",
")",
"is",
"int",
",",
"\"List for w not supported\"",
"for",
"i",
"in",
"xrange",
"(",
"n",
"/",
"w",
")",
":",
"pattern",
"=",
"set",
"(",
"xrange",
"(",
"i",
"*",
"w",
",",
"(",
"i",
"+",
"1",
")",
"*",
"w",
")",
")",
"self",
".",
"_patterns",
"[",
"i",
"]",
"=",
"pattern"
] | Generates set of consecutive patterns. | [
"Generates",
"set",
"of",
"consecutive",
"patterns",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/generators/pattern_machine.py#L202-L213 | valid |
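A small worked example of the consecutive layout; the constructor is assumed to be inherited unchanged from PatternMachine:

.. code-block:: python

   from nupic.data.generators.pattern_machine import ConsecutivePatternMachine

   # n=100, w=5 yields n/w = 20 disjoint patterns:
   # pattern 0 -> {0..4}, pattern 1 -> {5..9}, ..., pattern 19 -> {95..99}
   cpm = ConsecutivePatternMachine(n=100, w=5)
   assert cpm.get(1) == set([5, 6, 7, 8, 9])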
numenta/nupic | src/nupic/algorithms/sdr_classifier.py | SDRClassifier.compute | def compute(self, recordNum, patternNZ, classification, learn, infer):
"""
Process one input sample.
This method is called by outer loop code outside the nupic-engine. We
use this instead of the nupic engine compute() because our inputs and
outputs aren't fixed size vectors of reals.
:param recordNum: Record number of this input pattern. Record numbers
normally increase sequentially by 1 each time unless there are missing
records in the dataset. Knowing this information ensures that we don't get
confused by missing records.
:param patternNZ: List of the active indices from the output below. When the
input is from TemporalMemory, this list should be the indices of the
active cells.
:param classification: Dict of the classification information where:
- bucketIdx: list of indices of the encoder bucket
- actValue: list of actual values going into the encoder
Classification could be None for inference mode.
:param learn: (bool) if true, learn this sample
:param infer: (bool) if true, perform inference
:return: Dict containing inference results; there is one entry for each
step in self.steps, where the key is the number of steps, and
the value is an array containing the relative likelihood for
each bucketIdx starting from bucketIdx 0.
There is also an entry containing the average actual value to
use for each bucket. The key is 'actualValues'.
for example:
.. code-block:: python
{1 : [0.1, 0.3, 0.2, 0.7],
4 : [0.2, 0.4, 0.3, 0.5],
'actualValues': [1.5, 3.5, 5.5, 7.6],
}
"""
if self.verbosity >= 1:
print " learn:", learn
print " recordNum:", recordNum
print " patternNZ (%d):" % len(patternNZ), patternNZ
print " classificationIn:", classification
# ensures that recordNum increases monotonically
if len(self._patternNZHistory) > 0:
if recordNum < self._patternNZHistory[-1][0]:
raise ValueError("the record number has to increase monotonically")
# Store pattern in our history if this is a new record
if len(self._patternNZHistory) == 0 or \
recordNum > self._patternNZHistory[-1][0]:
self._patternNZHistory.append((recordNum, patternNZ))
# To allow multi-class classification, we need to be able to run learning
# without inference being on. So initialize retval outside
# of the inference block.
retval = {}
# Update maxInputIdx and augment weight matrix with zero padding
if max(patternNZ) > self._maxInputIdx:
newMaxInputIdx = max(patternNZ)
for nSteps in self.steps:
self._weightMatrix[nSteps] = numpy.concatenate((
self._weightMatrix[nSteps],
numpy.zeros(shape=(newMaxInputIdx-self._maxInputIdx,
self._maxBucketIdx+1))), axis=0)
self._maxInputIdx = int(newMaxInputIdx)
# Get classification info
if classification is not None:
if type(classification["bucketIdx"]) is not list:
bucketIdxList = [classification["bucketIdx"]]
actValueList = [classification["actValue"]]
numCategory = 1
else:
bucketIdxList = classification["bucketIdx"]
actValueList = classification["actValue"]
numCategory = len(classification["bucketIdx"])
else:
if learn:
raise ValueError("classification cannot be None when learn=True")
actValueList = None
bucketIdxList = None
# ------------------------------------------------------------------------
# Inference:
# For each active bit in the activationPattern, get the classification
# votes
if infer:
retval = self.infer(patternNZ, actValueList)
if learn and classification["bucketIdx"] is not None:
for categoryI in range(numCategory):
bucketIdx = bucketIdxList[categoryI]
actValue = actValueList[categoryI]
# Update maxBucketIndex and augment weight matrix with zero padding
if bucketIdx > self._maxBucketIdx:
for nSteps in self.steps:
self._weightMatrix[nSteps] = numpy.concatenate((
self._weightMatrix[nSteps],
numpy.zeros(shape=(self._maxInputIdx+1,
bucketIdx-self._maxBucketIdx))), axis=1)
self._maxBucketIdx = int(bucketIdx)
# Update rolling average of actual values if it's a scalar. If it's
# not, it must be a category, in which case each bucket only ever
# sees one category so we don't need a running average.
while self._maxBucketIdx > len(self._actualValues) - 1:
self._actualValues.append(None)
if self._actualValues[bucketIdx] is None:
self._actualValues[bucketIdx] = actValue
else:
if (isinstance(actValue, int) or
isinstance(actValue, float) or
isinstance(actValue, long)):
self._actualValues[bucketIdx] = ((1.0 - self.actValueAlpha)
* self._actualValues[bucketIdx]
+ self.actValueAlpha * actValue)
else:
self._actualValues[bucketIdx] = actValue
for (learnRecordNum, learnPatternNZ) in self._patternNZHistory:
error = self._calculateError(recordNum, bucketIdxList)
nSteps = recordNum - learnRecordNum
if nSteps in self.steps:
for bit in learnPatternNZ:
self._weightMatrix[nSteps][bit, :] += self.alpha * error[nSteps]
# ------------------------------------------------------------------------
# Verbose print
if infer and self.verbosity >= 1:
print " inference: combined bucket likelihoods:"
print " actual bucket values:", retval["actualValues"]
for (nSteps, votes) in retval.items():
if nSteps == "actualValues":
continue
print " %d steps: " % (nSteps), _pFormatArray(votes)
bestBucketIdx = votes.argmax()
print (" most likely bucket idx: "
"%d, value: %s" % (bestBucketIdx,
retval["actualValues"][bestBucketIdx]))
print
return retval | python | def compute(self, recordNum, patternNZ, classification, learn, infer):
"""
Process one input sample.
This method is called by outer loop code outside the nupic-engine. We
use this instead of the nupic engine compute() because our inputs and
outputs aren't fixed size vectors of reals.
:param recordNum: Record number of this input pattern. Record numbers
normally increase sequentially by 1 each time unless there are missing
records in the dataset. Knowing this information ensures that we don't get
confused by missing records.
:param patternNZ: List of the active indices from the output below. When the
input is from TemporalMemory, this list should be the indices of the
active cells.
:param classification: Dict of the classification information where:
- bucketIdx: list of indices of the encoder bucket
- actValue: list of actual values going into the encoder
Classification could be None for inference mode.
:param learn: (bool) if true, learn this sample
:param infer: (bool) if true, perform inference
:return: Dict containing inference results; there is one entry for each
step in self.steps, where the key is the number of steps, and
the value is an array containing the relative likelihood for
each bucketIdx starting from bucketIdx 0.
There is also an entry containing the average actual value to
use for each bucket. The key is 'actualValues'.
for example:
.. code-block:: python
{1 : [0.1, 0.3, 0.2, 0.7],
4 : [0.2, 0.4, 0.3, 0.5],
'actualValues': [1.5, 3.5, 5.5, 7.6],
}
"""
if self.verbosity >= 1:
print " learn:", learn
print " recordNum:", recordNum
print " patternNZ (%d):" % len(patternNZ), patternNZ
print " classificationIn:", classification
# ensures that recordNum increases monotonically
if len(self._patternNZHistory) > 0:
if recordNum < self._patternNZHistory[-1][0]:
raise ValueError("the record number has to increase monotonically")
# Store pattern in our history if this is a new record
if len(self._patternNZHistory) == 0 or \
recordNum > self._patternNZHistory[-1][0]:
self._patternNZHistory.append((recordNum, patternNZ))
# To allow multi-class classification, we need to be able to run learning
# without inference being on. So initialize retval outside
# of the inference block.
retval = {}
# Update maxInputIdx and augment weight matrix with zero padding
if max(patternNZ) > self._maxInputIdx:
newMaxInputIdx = max(patternNZ)
for nSteps in self.steps:
self._weightMatrix[nSteps] = numpy.concatenate((
self._weightMatrix[nSteps],
numpy.zeros(shape=(newMaxInputIdx-self._maxInputIdx,
self._maxBucketIdx+1))), axis=0)
self._maxInputIdx = int(newMaxInputIdx)
# Get classification info
if classification is not None:
if type(classification["bucketIdx"]) is not list:
bucketIdxList = [classification["bucketIdx"]]
actValueList = [classification["actValue"]]
numCategory = 1
else:
bucketIdxList = classification["bucketIdx"]
actValueList = classification["actValue"]
numCategory = len(classification["bucketIdx"])
else:
if learn:
raise ValueError("classification cannot be None when learn=True")
actValueList = None
bucketIdxList = None
# ------------------------------------------------------------------------
# Inference:
# For each active bit in the activationPattern, get the classification
# votes
if infer:
retval = self.infer(patternNZ, actValueList)
if learn and classification["bucketIdx"] is not None:
for categoryI in range(numCategory):
bucketIdx = bucketIdxList[categoryI]
actValue = actValueList[categoryI]
# Update maxBucketIndex and augment weight matrix with zero padding
if bucketIdx > self._maxBucketIdx:
for nSteps in self.steps:
self._weightMatrix[nSteps] = numpy.concatenate((
self._weightMatrix[nSteps],
numpy.zeros(shape=(self._maxInputIdx+1,
bucketIdx-self._maxBucketIdx))), axis=1)
self._maxBucketIdx = int(bucketIdx)
# Update rolling average of actual values if it's a scalar. If it's
# not, it must be a category, in which case each bucket only ever
# sees one category so we don't need a running average.
while self._maxBucketIdx > len(self._actualValues) - 1:
self._actualValues.append(None)
if self._actualValues[bucketIdx] is None:
self._actualValues[bucketIdx] = actValue
else:
if (isinstance(actValue, int) or
isinstance(actValue, float) or
isinstance(actValue, long)):
self._actualValues[bucketIdx] = ((1.0 - self.actValueAlpha)
* self._actualValues[bucketIdx]
+ self.actValueAlpha * actValue)
else:
self._actualValues[bucketIdx] = actValue
for (learnRecordNum, learnPatternNZ) in self._patternNZHistory:
error = self._calculateError(recordNum, bucketIdxList)
nSteps = recordNum - learnRecordNum
if nSteps in self.steps:
for bit in learnPatternNZ:
self._weightMatrix[nSteps][bit, :] += self.alpha * error[nSteps]
# ------------------------------------------------------------------------
# Verbose print
if infer and self.verbosity >= 1:
print " inference: combined bucket likelihoods:"
print " actual bucket values:", retval["actualValues"]
for (nSteps, votes) in retval.items():
if nSteps == "actualValues":
continue
print " %d steps: " % (nSteps), _pFormatArray(votes)
bestBucketIdx = votes.argmax()
print (" most likely bucket idx: "
"%d, value: %s" % (bestBucketIdx,
retval["actualValues"][bestBucketIdx]))
print
return retval | [
"def",
"compute",
"(",
"self",
",",
"recordNum",
",",
"patternNZ",
",",
"classification",
",",
"learn",
",",
"infer",
")",
":",
"if",
"self",
".",
"verbosity",
">=",
"1",
":",
"print",
"\" learn:\"",
",",
"learn",
"print",
"\" recordNum:\"",
",",
"recordNum",
"print",
"\" patternNZ (%d):\"",
"%",
"len",
"(",
"patternNZ",
")",
",",
"patternNZ",
"print",
"\" classificationIn:\"",
",",
"classification",
"# ensures that recordNum increases monotonically",
"if",
"len",
"(",
"self",
".",
"_patternNZHistory",
")",
">",
"0",
":",
"if",
"recordNum",
"<",
"self",
".",
"_patternNZHistory",
"[",
"-",
"1",
"]",
"[",
"0",
"]",
":",
"raise",
"ValueError",
"(",
"\"the record number has to increase monotonically\"",
")",
"# Store pattern in our history if this is a new record",
"if",
"len",
"(",
"self",
".",
"_patternNZHistory",
")",
"==",
"0",
"or",
"recordNum",
">",
"self",
".",
"_patternNZHistory",
"[",
"-",
"1",
"]",
"[",
"0",
"]",
":",
"self",
".",
"_patternNZHistory",
".",
"append",
"(",
"(",
"recordNum",
",",
"patternNZ",
")",
")",
"# To allow multi-class classification, we need to be able to run learning",
"# without inference being on. So initialize retval outside",
"# of the inference block.",
"retval",
"=",
"{",
"}",
"# Update maxInputIdx and augment weight matrix with zero padding",
"if",
"max",
"(",
"patternNZ",
")",
">",
"self",
".",
"_maxInputIdx",
":",
"newMaxInputIdx",
"=",
"max",
"(",
"patternNZ",
")",
"for",
"nSteps",
"in",
"self",
".",
"steps",
":",
"self",
".",
"_weightMatrix",
"[",
"nSteps",
"]",
"=",
"numpy",
".",
"concatenate",
"(",
"(",
"self",
".",
"_weightMatrix",
"[",
"nSteps",
"]",
",",
"numpy",
".",
"zeros",
"(",
"shape",
"=",
"(",
"newMaxInputIdx",
"-",
"self",
".",
"_maxInputIdx",
",",
"self",
".",
"_maxBucketIdx",
"+",
"1",
")",
")",
")",
",",
"axis",
"=",
"0",
")",
"self",
".",
"_maxInputIdx",
"=",
"int",
"(",
"newMaxInputIdx",
")",
"# Get classification info",
"if",
"classification",
"is",
"not",
"None",
":",
"if",
"type",
"(",
"classification",
"[",
"\"bucketIdx\"",
"]",
")",
"is",
"not",
"list",
":",
"bucketIdxList",
"=",
"[",
"classification",
"[",
"\"bucketIdx\"",
"]",
"]",
"actValueList",
"=",
"[",
"classification",
"[",
"\"actValue\"",
"]",
"]",
"numCategory",
"=",
"1",
"else",
":",
"bucketIdxList",
"=",
"classification",
"[",
"\"bucketIdx\"",
"]",
"actValueList",
"=",
"classification",
"[",
"\"actValue\"",
"]",
"numCategory",
"=",
"len",
"(",
"classification",
"[",
"\"bucketIdx\"",
"]",
")",
"else",
":",
"if",
"learn",
":",
"raise",
"ValueError",
"(",
"\"classification cannot be None when learn=True\"",
")",
"actValueList",
"=",
"None",
"bucketIdxList",
"=",
"None",
"# ------------------------------------------------------------------------",
"# Inference:",
"# For each active bit in the activationPattern, get the classification",
"# votes",
"if",
"infer",
":",
"retval",
"=",
"self",
".",
"infer",
"(",
"patternNZ",
",",
"actValueList",
")",
"if",
"learn",
"and",
"classification",
"[",
"\"bucketIdx\"",
"]",
"is",
"not",
"None",
":",
"for",
"categoryI",
"in",
"range",
"(",
"numCategory",
")",
":",
"bucketIdx",
"=",
"bucketIdxList",
"[",
"categoryI",
"]",
"actValue",
"=",
"actValueList",
"[",
"categoryI",
"]",
"# Update maxBucketIndex and augment weight matrix with zero padding",
"if",
"bucketIdx",
">",
"self",
".",
"_maxBucketIdx",
":",
"for",
"nSteps",
"in",
"self",
".",
"steps",
":",
"self",
".",
"_weightMatrix",
"[",
"nSteps",
"]",
"=",
"numpy",
".",
"concatenate",
"(",
"(",
"self",
".",
"_weightMatrix",
"[",
"nSteps",
"]",
",",
"numpy",
".",
"zeros",
"(",
"shape",
"=",
"(",
"self",
".",
"_maxInputIdx",
"+",
"1",
",",
"bucketIdx",
"-",
"self",
".",
"_maxBucketIdx",
")",
")",
")",
",",
"axis",
"=",
"1",
")",
"self",
".",
"_maxBucketIdx",
"=",
"int",
"(",
"bucketIdx",
")",
"# Update rolling average of actual values if it's a scalar. If it's",
"# not, it must be a category, in which case each bucket only ever",
"# sees one category so we don't need a running average.",
"while",
"self",
".",
"_maxBucketIdx",
">",
"len",
"(",
"self",
".",
"_actualValues",
")",
"-",
"1",
":",
"self",
".",
"_actualValues",
".",
"append",
"(",
"None",
")",
"if",
"self",
".",
"_actualValues",
"[",
"bucketIdx",
"]",
"is",
"None",
":",
"self",
".",
"_actualValues",
"[",
"bucketIdx",
"]",
"=",
"actValue",
"else",
":",
"if",
"(",
"isinstance",
"(",
"actValue",
",",
"int",
")",
"or",
"isinstance",
"(",
"actValue",
",",
"float",
")",
"or",
"isinstance",
"(",
"actValue",
",",
"long",
")",
")",
":",
"self",
".",
"_actualValues",
"[",
"bucketIdx",
"]",
"=",
"(",
"(",
"1.0",
"-",
"self",
".",
"actValueAlpha",
")",
"*",
"self",
".",
"_actualValues",
"[",
"bucketIdx",
"]",
"+",
"self",
".",
"actValueAlpha",
"*",
"actValue",
")",
"else",
":",
"self",
".",
"_actualValues",
"[",
"bucketIdx",
"]",
"=",
"actValue",
"for",
"(",
"learnRecordNum",
",",
"learnPatternNZ",
")",
"in",
"self",
".",
"_patternNZHistory",
":",
"error",
"=",
"self",
".",
"_calculateError",
"(",
"recordNum",
",",
"bucketIdxList",
")",
"nSteps",
"=",
"recordNum",
"-",
"learnRecordNum",
"if",
"nSteps",
"in",
"self",
".",
"steps",
":",
"for",
"bit",
"in",
"learnPatternNZ",
":",
"self",
".",
"_weightMatrix",
"[",
"nSteps",
"]",
"[",
"bit",
",",
":",
"]",
"+=",
"self",
".",
"alpha",
"*",
"error",
"[",
"nSteps",
"]",
"# ------------------------------------------------------------------------",
"# Verbose print",
"if",
"infer",
"and",
"self",
".",
"verbosity",
">=",
"1",
":",
"print",
"\" inference: combined bucket likelihoods:\"",
"print",
"\" actual bucket values:\"",
",",
"retval",
"[",
"\"actualValues\"",
"]",
"for",
"(",
"nSteps",
",",
"votes",
")",
"in",
"retval",
".",
"items",
"(",
")",
":",
"if",
"nSteps",
"==",
"\"actualValues\"",
":",
"continue",
"print",
"\" %d steps: \"",
"%",
"(",
"nSteps",
")",
",",
"_pFormatArray",
"(",
"votes",
")",
"bestBucketIdx",
"=",
"votes",
".",
"argmax",
"(",
")",
"print",
"(",
"\" most likely bucket idx: \"",
"\"%d, value: %s\"",
"%",
"(",
"bestBucketIdx",
",",
"retval",
"[",
"\"actualValues\"",
"]",
"[",
"bestBucketIdx",
"]",
")",
")",
"print",
"return",
"retval"
] | Process one input sample.
This method is called by outer loop code outside the nupic-engine. We
use this instead of the nupic engine compute() because our inputs and
outputs aren't fixed size vectors of reals.
:param recordNum: Record number of this input pattern. Record numbers
normally increase sequentially by 1 each time unless there are missing
records in the dataset. Knowing this information ensures that we don't get
confused by missing records.
:param patternNZ: List of the active indices from the output below. When the
input is from TemporalMemory, this list should be the indices of the
active cells.
:param classification: Dict of the classification information where:
- bucketIdx: list of indices of the encoder bucket
- actValue: list of actual values going into the encoder
Classification could be None for inference mode.
:param learn: (bool) if true, learn this sample
:param infer: (bool) if true, perform inference
:return: Dict containing inference results; there is one entry for each
step in self.steps, where the key is the number of steps, and
the value is an array containing the relative likelihood for
each bucketIdx starting from bucketIdx 0.
There is also an entry containing the average actual value to
use for each bucket. The key is 'actualValues'.
for example:
.. code-block:: python
{1 : [0.1, 0.3, 0.2, 0.7],
4 : [0.2, 0.4, 0.3, 0.5],
'actualValues': [1.5, 3.5, 5.5, 7.6],
} | [
"Process",
"one",
"input",
"sample",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/sdr_classifier.py#L162-L315 | valid |
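A minimal end-to-end sketch of the online loop; the SDRClassifier constructor arguments are inferred from the attributes compute() reads (`steps`, `alpha`, `actValueAlpha`, `verbosity`) and should be treated as assumptions:

.. code-block:: python

   from nupic.algorithms.sdr_classifier import SDRClassifier

   clf = SDRClassifier(steps=[1], alpha=0.1, actValueAlpha=0.1, verbosity=0)
   result = clf.compute(recordNum=0,
                        patternNZ=[1, 5, 9],
                        classification={"bucketIdx": 4, "actValue": 34.7},
                        learn=True, infer=True)
   print result[1]                # likelihood per bucket, one step ahead
   print result["actualValues"]   # running actual value per bucket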
numenta/nupic | src/nupic/algorithms/sdr_classifier.py | SDRClassifier.infer | def infer(self, patternNZ, actValueList):
"""
Return the inference value from one input sample. The actual
learning happens in compute().
:param patternNZ: list of the active indices from the output below
:param actValueList: list of actual values (one per category), used only as
a fallback actual value for buckets that do not yet have one stored; may
be None
:return: dict containing inference results, one entry for each step in
self.steps. The key is the number of steps, the value is an
array containing the relative likelihood for each bucketIdx
starting from bucketIdx 0.
for example:
.. code-block:: python
{'actualValues': [0.0, 1.0, 2.0, 3.0]
1 : [0.1, 0.3, 0.2, 0.7]
4 : [0.2, 0.4, 0.3, 0.5]}
"""
# Return value dict. For buckets which we don't have an actual value
# for yet, just plug in any valid actual value. It doesn't matter what
# we use because that bucket won't have non-zero likelihood anyways.
# NOTE: If doing 0-step prediction, we shouldn't use any knowledge
# of the classification input during inference.
if self.steps[0] == 0 or actValueList is None:
defaultValue = 0
else:
defaultValue = actValueList[0]
actValues = [x if x is not None else defaultValue
for x in self._actualValues]
retval = {"actualValues": actValues}
for nSteps in self.steps:
predictDist = self.inferSingleStep(patternNZ, self._weightMatrix[nSteps])
retval[nSteps] = predictDist
return retval | python | def infer(self, patternNZ, actValueList):
"""
Return the inference value from one input sample. The actual
learning happens in compute().
:param patternNZ: list of the active indices from the output below
:param actValueList: list of actual values (one per category), used only as
a fallback actual value for buckets that do not yet have one stored; may
be None
:return: dict containing inference results, one entry for each step in
self.steps. The key is the number of steps, the value is an
array containing the relative likelihood for each bucketIdx
starting from bucketIdx 0.
for example:
.. code-block:: python
{'actualValues': [0.0, 1.0, 2.0, 3.0]
1 : [0.1, 0.3, 0.2, 0.7]
4 : [0.2, 0.4, 0.3, 0.5]}
"""
# Return value dict. For buckets which we don't have an actual value
# for yet, just plug in any valid actual value. It doesn't matter what
# we use because that bucket won't have non-zero likelihood anyways.
# NOTE: If doing 0-step prediction, we shouldn't use any knowledge
# of the classification input during inference.
if self.steps[0] == 0 or actValueList is None:
defaultValue = 0
else:
defaultValue = actValueList[0]
actValues = [x if x is not None else defaultValue
for x in self._actualValues]
retval = {"actualValues": actValues}
for nSteps in self.steps:
predictDist = self.inferSingleStep(patternNZ, self._weightMatrix[nSteps])
retval[nSteps] = predictDist
return retval | [
"def",
"infer",
"(",
"self",
",",
"patternNZ",
",",
"actValueList",
")",
":",
"# Return value dict. For buckets which we don't have an actual value",
"# for yet, just plug in any valid actual value. It doesn't matter what",
"# we use because that bucket won't have non-zero likelihood anyways.",
"# NOTE: If doing 0-step prediction, we shouldn't use any knowledge",
"# of the classification input during inference.",
"if",
"self",
".",
"steps",
"[",
"0",
"]",
"==",
"0",
"or",
"actValueList",
"is",
"None",
":",
"defaultValue",
"=",
"0",
"else",
":",
"defaultValue",
"=",
"actValueList",
"[",
"0",
"]",
"actValues",
"=",
"[",
"x",
"if",
"x",
"is",
"not",
"None",
"else",
"defaultValue",
"for",
"x",
"in",
"self",
".",
"_actualValues",
"]",
"retval",
"=",
"{",
"\"actualValues\"",
":",
"actValues",
"}",
"for",
"nSteps",
"in",
"self",
".",
"steps",
":",
"predictDist",
"=",
"self",
".",
"inferSingleStep",
"(",
"patternNZ",
",",
"self",
".",
"_weightMatrix",
"[",
"nSteps",
"]",
")",
"retval",
"[",
"nSteps",
"]",
"=",
"predictDist",
"return",
"retval"
] | Return the inference value from one input sample. The actual
learning happens in compute().
:param patternNZ: list of the active indices from the output below
:param actValueList: list of actual values (one per category), used only as
a fallback actual value for buckets that do not yet have one stored; may
be None
:return: dict containing inference results, one entry for each step in
self.steps. The key is the number of steps, the value is an
array containing the relative likelihood for each bucketIdx
starting from bucketIdx 0.
for example:
.. code-block:: python
{'actualValues': [0.0, 1.0, 2.0, 3.0]
1 : [0.1, 0.3, 0.2, 0.7]
4 : [0.2, 0.4, 0.3, 0.5]} | [
"Return",
"the",
"inference",
"value",
"from",
"one",
"input",
"sample",
".",
"The",
"actual",
"learning",
"happens",
"in",
"compute",
"()",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/sdr_classifier.py#L319-L362 | valid |
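Inference without learning (for example at evaluation time) can call infer() directly; `clf` is the classifier from the previous sketch:

.. code-block:: python

   dist = clf.infer([1, 5, 9], actValueList=None)
   bestBucket = dist[1].argmax()
   predictedValue = dist["actualValues"][bestBucket]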
numenta/nupic | src/nupic/algorithms/sdr_classifier.py | SDRClassifier.inferSingleStep | def inferSingleStep(self, patternNZ, weightMatrix):
"""
Perform inference for a single step. Given an SDR input and a weight
matrix, return a predicted distribution.
:param patternNZ: list of the active indices from the output below
:param weightMatrix: numpy array of the weight matrix
:return: numpy array of the predicted class label distribution
"""
outputActivation = weightMatrix[patternNZ].sum(axis=0)
# softmax normalization
outputActivation = outputActivation - numpy.max(outputActivation)
expOutputActivation = numpy.exp(outputActivation)
predictDist = expOutputActivation / numpy.sum(expOutputActivation)
return predictDist | python | def inferSingleStep(self, patternNZ, weightMatrix):
"""
Perform inference for a single step. Given an SDR input and a weight
matrix, return a predicted distribution.
:param patternNZ: list of the active indices from the output below
:param weightMatrix: numpy array of the weight matrix
:return: numpy array of the predicted class label distribution
"""
outputActivation = weightMatrix[patternNZ].sum(axis=0)
# softmax normalization
outputActivation = outputActivation - numpy.max(outputActivation)
expOutputActivation = numpy.exp(outputActivation)
predictDist = expOutputActivation / numpy.sum(expOutputActivation)
return predictDist | [
"def",
"inferSingleStep",
"(",
"self",
",",
"patternNZ",
",",
"weightMatrix",
")",
":",
"outputActivation",
"=",
"weightMatrix",
"[",
"patternNZ",
"]",
".",
"sum",
"(",
"axis",
"=",
"0",
")",
"# softmax normalization",
"outputActivation",
"=",
"outputActivation",
"-",
"numpy",
".",
"max",
"(",
"outputActivation",
")",
"expOutputActivation",
"=",
"numpy",
".",
"exp",
"(",
"outputActivation",
")",
"predictDist",
"=",
"expOutputActivation",
"/",
"numpy",
".",
"sum",
"(",
"expOutputActivation",
")",
"return",
"predictDist"
] | Perform inference for a single step. Given an SDR input and a weight
matrix, return a predicted distribution.
:param patternNZ: list of the active indices from the output below
:param weightMatrix: numpy array of the weight matrix
:return: numpy array of the predicted class label distribution | [
"Perform",
"inference",
"for",
"a",
"single",
"step",
".",
"Given",
"an",
"SDR",
"input",
"and",
"a",
"weight",
"matrix",
"return",
"a",
"predicted",
"distribution",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/sdr_classifier.py#L365-L380 | valid |
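The same computation spelled out with plain numpy, making the numerically stable softmax explicit:

.. code-block:: python

   import numpy

   weights = numpy.random.rand(10, 4)  # 10 input bits, 4 buckets
   activation = weights[[1, 5, 9]].sum(axis=0)
   activation -= activation.max()      # guards numpy.exp against overflow
   dist = numpy.exp(activation) / numpy.exp(activation).sum()
   assert abs(dist.sum() - 1.0) < 1e-9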
numenta/nupic | src/nupic/algorithms/sdr_classifier.py | SDRClassifier._calculateError | def _calculateError(self, recordNum, bucketIdxList):
"""
Calculate error signal
:param bucketIdxList: list of encoder buckets
:return: dict containing error. The key is the number of steps
The value is a numpy array of error at the output layer
"""
error = dict()
targetDist = numpy.zeros(self._maxBucketIdx + 1)
numCategories = len(bucketIdxList)
for bucketIdx in bucketIdxList:
targetDist[bucketIdx] = 1.0/numCategories
for (learnRecordNum, learnPatternNZ) in self._patternNZHistory:
nSteps = recordNum - learnRecordNum
if nSteps in self.steps:
predictDist = self.inferSingleStep(learnPatternNZ,
self._weightMatrix[nSteps])
error[nSteps] = targetDist - predictDist
return error | python | def _calculateError(self, recordNum, bucketIdxList):
"""
Calculate error signal
:param bucketIdxList: list of encoder buckets
:return: dict containing error. The key is the number of steps
The value is a numpy array of error at the output layer
"""
error = dict()
targetDist = numpy.zeros(self._maxBucketIdx + 1)
numCategories = len(bucketIdxList)
for bucketIdx in bucketIdxList:
targetDist[bucketIdx] = 1.0/numCategories
for (learnRecordNum, learnPatternNZ) in self._patternNZHistory:
nSteps = recordNum - learnRecordNum
if nSteps in self.steps:
predictDist = self.inferSingleStep(learnPatternNZ,
self._weightMatrix[nSteps])
error[nSteps] = targetDist - predictDist
return error | [
"def",
"_calculateError",
"(",
"self",
",",
"recordNum",
",",
"bucketIdxList",
")",
":",
"error",
"=",
"dict",
"(",
")",
"targetDist",
"=",
"numpy",
".",
"zeros",
"(",
"self",
".",
"_maxBucketIdx",
"+",
"1",
")",
"numCategories",
"=",
"len",
"(",
"bucketIdxList",
")",
"for",
"bucketIdx",
"in",
"bucketIdxList",
":",
"targetDist",
"[",
"bucketIdx",
"]",
"=",
"1.0",
"/",
"numCategories",
"for",
"(",
"learnRecordNum",
",",
"learnPatternNZ",
")",
"in",
"self",
".",
"_patternNZHistory",
":",
"nSteps",
"=",
"recordNum",
"-",
"learnRecordNum",
"if",
"nSteps",
"in",
"self",
".",
"steps",
":",
"predictDist",
"=",
"self",
".",
"inferSingleStep",
"(",
"learnPatternNZ",
",",
"self",
".",
"_weightMatrix",
"[",
"nSteps",
"]",
")",
"error",
"[",
"nSteps",
"]",
"=",
"targetDist",
"-",
"predictDist",
"return",
"error"
] | Calculate error signal
:param bucketIdxList: list of encoder buckets
:return: dict containing error. The key is the number of steps
The value is a numpy array of error at the output layer | [
"Calculate",
"error",
"signal"
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/sdr_classifier.py#L478-L500 | valid |
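For a single target bucket the error reduces to the familiar softmax/cross-entropy gradient, one-hot(target) minus the predicted distribution; a self-contained sketch:

.. code-block:: python

   import numpy

   predictDist = numpy.array([0.1, 0.2, 0.3, 0.4])  # softmax output
   targetDist = numpy.zeros(4)
   targetDist[2] = 1.0                              # single target bucket
   error = targetDist - predictDist                 # same sign convention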
numenta/nupic | src/nupic/data/sorter.py | sort | def sort(filename, key, outputFile, fields=None, watermark=1024 * 1024 * 100):
"""Sort a potentially big file
filename - the input file (standard File format)
key - a list of field names to sort by
outputFile - the name of the output file
fields - a list of fields that should be included (all fields if None)
watermark - when available memory goes below the watermark create a new chunk
sort() works by reading records from the file into memory
and calling _sortChunk() on each chunk. In the process it gets
rid of unneeded fields if any. Once all the chunks have been sorted and
written to chunk files it calls _merge() to merge all the chunks into a
single sorted file.
Note that sort() gets a key that contains field names, which it converts
into field indices for _sortChunk() because _sortChunk() doesn't need to know
the field name.
sort() figures out by itself how many chunk files to use by reading records
from the file until the low watermark value of available memory is hit and
then it sorts the current records, generates a chunk file, clears the sorted
records and starts on a new chunk.
The key field names are turned into indices
"""
if fields is not None:
assert set(key).issubset(set([f[0] for f in fields]))
with FileRecordStream(filename) as f:
# Find the indices of the requested fields
if fields:
fieldNames = [ff[0] for ff in fields]
indices = [f.getFieldNames().index(name) for name in fieldNames]
assert len(indices) == len(fields)
else:
fields = f.getFields()
fieldNames = f.getFieldNames()
indices = None
# turn key fields to key indices
key = [fieldNames.index(name) for name in key]
chunk = 0
records = []
for i, r in enumerate(f):
# Select requested fields only
if indices:
temp = []
for i in indices:
temp.append(r[i])
r = temp
# Store processed record
records.append(r)
# Check memory
available_memory = psutil.avail_phymem()
# If below the watermark create a new chunk, reset and keep going
if available_memory < watermark:
_sortChunk(records, key, chunk, fields)
records = []
chunk += 1
# Sort and write the remainder
if len(records) > 0:
_sortChunk(records, key, chunk, fields)
chunk += 1
# Merge all the files
_mergeFiles(key, chunk, outputFile, fields) | python | def sort(filename, key, outputFile, fields=None, watermark=1024 * 1024 * 100):
"""Sort a potentially big file
filename - the input file (standard File format)
key - a list of field names to sort by
outputFile - the name of the output file
fields - a list of fields that should be included (all fields if None)
watermark - when available memory goes below the watermark create a new chunk
sort() works by reading records from the file into memory
and calling _sortChunk() on each chunk. In the process it gets
rid of unneeded fields if any. Once all the chunks have been sorted and
written to chunk files it calls _merge() to merge all the chunks into a
single sorted file.
Note that sort() gets a key that contains field names, which it converts
into field indices for _sortChunk() because _sortChunk() doesn't need to know
the field name.
sort() figures out by itself how many chunk files to use by reading records
from the file until the low watermark value of available memory is hit and
then it sorts the current records, generates a chunk file, clears the sorted
records and starts on a new chunk.
The key field names are turned into indices
"""
if fields is not None:
assert set(key).issubset(set([f[0] for f in fields]))
with FileRecordStream(filename) as f:
# Find the indices of the requested fields
if fields:
fieldNames = [ff[0] for ff in fields]
indices = [f.getFieldNames().index(name) for name in fieldNames]
assert len(indices) == len(fields)
else:
fields = f.getFields()
fieldNames = f.getFieldNames()
indices = None
# turn key fields to key indices
key = [fieldNames.index(name) for name in key]
chunk = 0
records = []
for i, r in enumerate(f):
# Select requested fields only
if indices:
temp = []
for i in indices:
temp.append(r[i])
r = temp
# Store processed record
records.append(r)
# Check memory
available_memory = psutil.avail_phymem()
# If below the watermark create a new chunk, reset and keep going
if available_memory < watermark:
_sortChunk(records, key, chunk, fields)
records = []
chunk += 1
# Sort and write the remainder
if len(records) > 0:
_sortChunk(records, key, chunk, fields)
chunk += 1
# Merge all the files
_mergeFiles(key, chunk, outputFile, fields) | [
"def",
"sort",
"(",
"filename",
",",
"key",
",",
"outputFile",
",",
"fields",
"=",
"None",
",",
"watermark",
"=",
"1024",
"*",
"1024",
"*",
"100",
")",
":",
"if",
"fields",
"is",
"not",
"None",
":",
"assert",
"set",
"(",
"key",
")",
".",
"issubset",
"(",
"set",
"(",
"[",
"f",
"[",
"0",
"]",
"for",
"f",
"in",
"fields",
"]",
")",
")",
"with",
"FileRecordStream",
"(",
"filename",
")",
"as",
"f",
":",
"# Find the indices of the requested fields",
"if",
"fields",
":",
"fieldNames",
"=",
"[",
"ff",
"[",
"0",
"]",
"for",
"ff",
"in",
"fields",
"]",
"indices",
"=",
"[",
"f",
".",
"getFieldNames",
"(",
")",
".",
"index",
"(",
"name",
")",
"for",
"name",
"in",
"fieldNames",
"]",
"assert",
"len",
"(",
"indices",
")",
"==",
"len",
"(",
"fields",
")",
"else",
":",
"fileds",
"=",
"f",
".",
"getFields",
"(",
")",
"fieldNames",
"=",
"f",
".",
"getFieldNames",
"(",
")",
"indices",
"=",
"None",
"# turn key fields to key indices",
"key",
"=",
"[",
"fieldNames",
".",
"index",
"(",
"name",
")",
"for",
"name",
"in",
"key",
"]",
"chunk",
"=",
"0",
"records",
"=",
"[",
"]",
"for",
"i",
",",
"r",
"in",
"enumerate",
"(",
"f",
")",
":",
"# Select requested fields only",
"if",
"indices",
":",
"temp",
"=",
"[",
"]",
"for",
"i",
"in",
"indices",
":",
"temp",
".",
"append",
"(",
"r",
"[",
"i",
"]",
")",
"r",
"=",
"temp",
"# Store processed record",
"records",
".",
"append",
"(",
"r",
")",
"# Check memory",
"available_memory",
"=",
"psutil",
".",
"avail_phymem",
"(",
")",
"# If bellow the watermark create a new chunk, reset and keep going",
"if",
"available_memory",
"<",
"watermark",
":",
"_sortChunk",
"(",
"records",
",",
"key",
",",
"chunk",
",",
"fields",
")",
"records",
"=",
"[",
"]",
"chunk",
"+=",
"1",
"# Sort and write the remainder",
"if",
"len",
"(",
"records",
")",
">",
"0",
":",
"_sortChunk",
"(",
"records",
",",
"key",
",",
"chunk",
",",
"fields",
")",
"chunk",
"+=",
"1",
"# Marge all the files",
"_mergeFiles",
"(",
"key",
",",
"chunk",
",",
"outputFile",
",",
"fields",
")"
] | Sort a potentially big file
filename - the input file (standard File format)
key - a list of field names to sort by
outputFile - the name of the output file
fields - a list of fields that should be included (all fields if None)
watermark - when available memory goes below the watermark create a new chunk
sort() works by reading records from the file into memory
and calling _sortChunk() on each chunk. In the process it gets
rid of unneeded fields if any. Once all the chunks have been sorted and
written to chunk files it calls _merge() to merge all the chunks into a
single sorted file.
Note that sort() gets a key that contains field names, which it converts
into field indices for _sortChunk() because _sortChunk() doesn't need to know
the field name.
sort() figures out by itself how many chunk files to use by reading records
from the file until the low watermark value of available memory is hit and
then it sorts the current records, generates a chunk file, clears the sorted
records and starts on a new chunk.
The key field names are turned into indices | [
"Sort",
"a",
"potentially",
"big",
"file"
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/sorter.py#L41-L113 | valid |
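The watermark-driven chunking described above is a form of external merge sort. For orientation, here is a minimal, self-contained sketch of the same idea; the helper name external_sort is hypothetical, a fixed chunk_size stands in for the psutil memory watermark, and in-memory lists stand in for the chunk_<n>.csv files:

import heapq
from operator import itemgetter

def external_sort(rows, key_indices, chunk_size=1000):
  # Sort fixed-size chunks in memory, then k-way merge the sorted chunks.
  keyfunc = itemgetter(*key_indices)
  chunks, buf = [], []
  for row in rows:
    buf.append(row)
    if len(buf) >= chunk_size:
      buf.sort(key=keyfunc)
      chunks.append(buf)
      buf = []
  if buf:
    buf.sort(key=keyfunc)
    chunks.append(buf)
  # Decorate each record with its sort key so heapq.merge() also works on
  # Python 2, where it lacks a key= argument; tuples compare key-first.
  decorated = [[(keyfunc(r), r) for r in chunk] for chunk in chunks]
  return [row for _, row in heapq.merge(*decorated)]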
numenta/nupic | src/nupic/data/sorter.py | _sortChunk | def _sortChunk(records, key, chunkIndex, fields):
"""Sort in memory chunk of records
records - a list of records read from the original dataset
key - a list of indices to sort the records by
chunkIndex - the index of the current chunk
The records contain only the fields requested by the user.
_sortChunk() will write the sorted records to a standard File
named "chunk_<chunk index>.csv" (chunk_0.csv, chunk_1.csv,...).
"""
title(additional='(key=%s, chunkIndex=%d)' % (str(key), chunkIndex))
assert len(records) > 0
# Sort the current records
records.sort(key=itemgetter(*key))
# Write to a chunk file
if chunkIndex is not None:
filename = 'chunk_%d.csv' % chunkIndex
with FileRecordStream(filename, write=True, fields=fields) as o:
for r in records:
o.appendRecord(r)
assert os.path.getsize(filename) > 0
return records | python | def _sortChunk(records, key, chunkIndex, fields):
"""Sort in memory chunk of records
records - a list of records read from the original dataset
key - a list of indices to sort the records by
chunkIndex - the index of the current chunk
The records contain only the fields requested by the user.
_sortChunk() will write the sorted records to a standard File
named "chunk_<chunk index>.csv" (chunk_0.csv, chunk_1.csv,...).
"""
title(additional='(key=%s, chunkIndex=%d)' % (str(key), chunkIndex))
assert len(records) > 0
# Sort the current records
records.sort(key=itemgetter(*key))
# Write to a chunk file
if chunkIndex is not None:
filename = 'chunk_%d.csv' % chunkIndex
with FileRecordStream(filename, write=True, fields=fields) as o:
for r in records:
o.appendRecord(r)
assert os.path.getsize(filename) > 0
return records | [
"def",
"_sortChunk",
"(",
"records",
",",
"key",
",",
"chunkIndex",
",",
"fields",
")",
":",
"title",
"(",
"additional",
"=",
"'(key=%s, chunkIndex=%d)'",
"%",
"(",
"str",
"(",
"key",
")",
",",
"chunkIndex",
")",
")",
"assert",
"len",
"(",
"records",
")",
">",
"0",
"# Sort the current records",
"records",
".",
"sort",
"(",
"key",
"=",
"itemgetter",
"(",
"*",
"key",
")",
")",
"# Write to a chunk file",
"if",
"chunkIndex",
"is",
"not",
"None",
":",
"filename",
"=",
"'chunk_%d.csv'",
"%",
"chunkIndex",
"with",
"FileRecordStream",
"(",
"filename",
",",
"write",
"=",
"True",
",",
"fields",
"=",
"fields",
")",
"as",
"o",
":",
"for",
"r",
"in",
"records",
":",
"o",
".",
"appendRecord",
"(",
"r",
")",
"assert",
"os",
".",
"path",
".",
"getsize",
"(",
"filename",
")",
">",
"0",
"return",
"records"
] | Sort in memory chunk of records
records - a list of records read from the original dataset
key - a list of indices to sort the records by
chunkIndex - the index of the current chunk
The records contain only the fields requested by the user.
_sortChunk() will write the sorted records to a standard File
named "chunk_<chunk index>.csv" (chunk_0.csv, chunk_1.csv,...). | [
"Sort",
"in",
"memory",
"chunk",
"of",
"records"
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/sorter.py#L115-L143 | valid |
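A small aside on the records.sort(key=itemgetter(*key)) call above: itemgetter with several indices builds a callable that extracts those columns as a tuple, giving a composite (lexicographic) sort:

from operator import itemgetter

records = [[3, 'b'], [1, 'c'], [1, 'a']]
records.sort(key=itemgetter(0, 1))  # by column 0, ties broken by column 1
# records is now [[1, 'a'], [1, 'c'], [3, 'b']]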
numenta/nupic | src/nupic/data/sorter.py | _mergeFiles | def _mergeFiles(key, chunkCount, outputFile, fields):
"""Merge sorted chunk files into a sorted output file
chunkCount - the number of available chunk files
  outputFile - the name of the sorted output file
"""
title()
# Open output file
with FileRecordStream(outputFile, write=True, fields=fields) as o:
# Open all chunk files
files = [FileRecordStream('chunk_%d.csv' % i) for i in range(chunkCount)]
records = [f.getNextRecord() for f in files]
# This loop will run until all files are exhausted
while not all(r is None for r in records):
# Cleanup None values (files that were exhausted)
indices = [i for i,r in enumerate(records) if r is not None]
records = [records[i] for i in indices]
files = [files[i] for i in indices]
# Find the current record
r = min(records, key=itemgetter(*key))
# Write it to the file
o.appendRecord(r)
# Find the index of file that produced the current record
index = records.index(r)
# Read a new record from the file
records[index] = files[index].getNextRecord()
# Cleanup chunk files
for i, f in enumerate(files):
f.close()
os.remove('chunk_%d.csv' % i) | python | def _mergeFiles(key, chunkCount, outputFile, fields):
"""Merge sorted chunk files into a sorted output file
chunkCount - the number of available chunk files
  outputFile - the name of the sorted output file
"""
title()
# Open output file
with FileRecordStream(outputFile, write=True, fields=fields) as o:
# Open all chunk files
files = [FileRecordStream('chunk_%d.csv' % i) for i in range(chunkCount)]
records = [f.getNextRecord() for f in files]
# This loop will run until all files are exhausted
while not all(r is None for r in records):
# Cleanup None values (files that were exhausted)
indices = [i for i,r in enumerate(records) if r is not None]
records = [records[i] for i in indices]
files = [files[i] for i in indices]
# Find the current record
r = min(records, key=itemgetter(*key))
# Write it to the file
o.appendRecord(r)
# Find the index of file that produced the current record
index = records.index(r)
# Read a new record from the file
records[index] = files[index].getNextRecord()
# Cleanup chunk files
for i, f in enumerate(files):
f.close()
os.remove('chunk_%d.csv' % i) | [
"def",
"_mergeFiles",
"(",
"key",
",",
"chunkCount",
",",
"outputFile",
",",
"fields",
")",
":",
"title",
"(",
")",
"# Open all chun files",
"files",
"=",
"[",
"FileRecordStream",
"(",
"'chunk_%d.csv'",
"%",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"chunkCount",
")",
"]",
"# Open output file",
"with",
"FileRecordStream",
"(",
"outputFile",
",",
"write",
"=",
"True",
",",
"fields",
"=",
"fields",
")",
"as",
"o",
":",
"# Open all chunk files",
"files",
"=",
"[",
"FileRecordStream",
"(",
"'chunk_%d.csv'",
"%",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"chunkCount",
")",
"]",
"records",
"=",
"[",
"f",
".",
"getNextRecord",
"(",
")",
"for",
"f",
"in",
"files",
"]",
"# This loop will run until all files are exhausted",
"while",
"not",
"all",
"(",
"r",
"is",
"None",
"for",
"r",
"in",
"records",
")",
":",
"# Cleanup None values (files that were exhausted)",
"indices",
"=",
"[",
"i",
"for",
"i",
",",
"r",
"in",
"enumerate",
"(",
"records",
")",
"if",
"r",
"is",
"not",
"None",
"]",
"records",
"=",
"[",
"records",
"[",
"i",
"]",
"for",
"i",
"in",
"indices",
"]",
"files",
"=",
"[",
"files",
"[",
"i",
"]",
"for",
"i",
"in",
"indices",
"]",
"# Find the current record",
"r",
"=",
"min",
"(",
"records",
",",
"key",
"=",
"itemgetter",
"(",
"*",
"key",
")",
")",
"# Write it to the file",
"o",
".",
"appendRecord",
"(",
"r",
")",
"# Find the index of file that produced the current record",
"index",
"=",
"records",
".",
"index",
"(",
"r",
")",
"# Read a new record from the file",
"records",
"[",
"index",
"]",
"=",
"files",
"[",
"index",
"]",
".",
"getNextRecord",
"(",
")",
"# Cleanup chunk files",
"for",
"i",
",",
"f",
"in",
"enumerate",
"(",
"files",
")",
":",
"f",
".",
"close",
"(",
")",
"os",
".",
"remove",
"(",
"'chunk_%d.csv'",
"%",
"i",
")"
] | Merge sorted chunk files into a sorted output file
chunkCount - the number of available chunk files
outputFile - the name of the sorted output file | [
"Merge",
"sorted",
"chunk",
"files",
"into",
"a",
"sorted",
"output",
"file"
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/sorter.py#L145-L185 | valid |
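The merge loop above rescans every open chunk with min() for each output record, O(k) work per record for k chunks. With many chunks the usual refinement is a heap, bringing that to O(log k) per record. A sketch against the same getNextRecord()/appendRecord() interface (the stream and output objects are assumed duck types, not necessarily FileRecordStream):

import heapq
from operator import itemgetter

def merge_streams(streams, key, output):
  # streams: objects whose getNextRecord() returns a record, or None once
  # exhausted; output: any object with appendRecord(record).
  keyfunc = itemgetter(*key)
  heap = []
  for i, s in enumerate(streams):
    r = s.getNextRecord()
    if r is not None:
      # The stream index i breaks key ties, so records themselves never
      # need to be comparable.
      heapq.heappush(heap, (keyfunc(r), i, r))
  while heap:
    _, i, r = heapq.heappop(heap)
    output.appendRecord(r)
    r = streams[i].getNextRecord()
    if r is not None:
      heapq.heappush(heap, (keyfunc(r), i, r))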
numenta/nupic | src/nupic/algorithms/temporal_memory_shim.py | TemporalMemoryShim.compute | def compute(self, activeColumns, learn=True):
"""
Feeds input record through TM, performing inference and learning.
Updates member variables with new state.
@param activeColumns (set) Indices of active columns in `t`
"""
bottomUpInput = numpy.zeros(self.numberOfCols, dtype=dtype)
bottomUpInput[list(activeColumns)] = 1
super(TemporalMemoryShim, self).compute(bottomUpInput,
enableLearn=learn,
enableInference=True)
predictedState = self.getPredictedState()
self.predictiveCells = set(numpy.flatnonzero(predictedState)) | python | def compute(self, activeColumns, learn=True):
"""
Feeds input record through TM, performing inference and learning.
Updates member variables with new state.
@param activeColumns (set) Indices of active columns in `t`
"""
bottomUpInput = numpy.zeros(self.numberOfCols, dtype=dtype)
bottomUpInput[list(activeColumns)] = 1
super(TemporalMemoryShim, self).compute(bottomUpInput,
enableLearn=learn,
enableInference=True)
predictedState = self.getPredictedState()
self.predictiveCells = set(numpy.flatnonzero(predictedState)) | [
"def",
"compute",
"(",
"self",
",",
"activeColumns",
",",
"learn",
"=",
"True",
")",
":",
"bottomUpInput",
"=",
"numpy",
".",
"zeros",
"(",
"self",
".",
"numberOfCols",
",",
"dtype",
"=",
"dtype",
")",
"bottomUpInput",
"[",
"list",
"(",
"activeColumns",
")",
"]",
"=",
"1",
"super",
"(",
"TemporalMemoryShim",
",",
"self",
")",
".",
"compute",
"(",
"bottomUpInput",
",",
"enableLearn",
"=",
"learn",
",",
"enableInference",
"=",
"True",
")",
"predictedState",
"=",
"self",
".",
"getPredictedState",
"(",
")",
"self",
".",
"predictiveCells",
"=",
"set",
"(",
"numpy",
".",
"flatnonzero",
"(",
"predictedState",
")",
")"
] | Feeds input record through TM, performing inference and learning.
Updates member variables with new state.
@param activeColumns (set) Indices of active columns in `t` | [
"Feeds",
"input",
"record",
"through",
"TM",
"performing",
"inference",
"and",
"learning",
".",
"Updates",
"member",
"variables",
"with",
"new",
"state",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/temporal_memory_shim.py#L89-L103 | valid |
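The first two lines of compute() turn the sparse set of active column indices into a dense vector for the base class (float32 assumed here for the module-level dtype). The same conversion in isolation, with an arbitrary example column count:

import numpy

numberOfCols = 10
activeColumns = {1, 4, 7}
bottomUpInput = numpy.zeros(numberOfCols, dtype='float32')
bottomUpInput[list(activeColumns)] = 1
# bottomUpInput is now [0. 1. 0. 0. 1. 0. 0. 1. 0. 0.]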
numenta/nupic | src/nupic/algorithms/temporal_memory_shim.py | TemporalMemoryShim.read | def read(cls, proto):
"""Deserialize from proto instance.
:param proto: (TemporalMemoryShimProto) the proto instance to read from
"""
tm = super(TemporalMemoryShim, cls).read(proto.baseTM)
tm.predictiveCells = set(proto.predictedState)
  tm.connections = Connections.read(proto.connections)
  return tm | python | def read(cls, proto):
"""Deserialize from proto instance.
:param proto: (TemporalMemoryShimProto) the proto instance to read from
"""
tm = super(TemporalMemoryShim, cls).read(proto.baseTM)
tm.predictiveCells = set(proto.predictedState)
  tm.connections = Connections.read(proto.connections)
  return tm | [
"def",
"read",
"(",
"cls",
",",
"proto",
")",
":",
"tm",
"=",
"super",
"(",
"TemporalMemoryShim",
",",
"cls",
")",
".",
"read",
"(",
"proto",
".",
"baseTM",
")",
"tm",
".",
"predictiveCells",
"=",
"set",
"(",
"proto",
".",
"predictedState",
")",
"tm",
".",
"connections",
"=",
"Connections",
".",
"read",
"(",
"proto",
".",
"conncetions",
")"
] | Deserialize from proto instance.
:param proto: (TemporalMemoryShimProto) the proto instance to read from | [
"Deserialize",
"from",
"proto",
"instance",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/temporal_memory_shim.py#L112-L119 | valid |
numenta/nupic | src/nupic/algorithms/temporal_memory_shim.py | TemporalMemoryShim.write | def write(self, proto):
"""Populate serialization proto instance.
:param proto: (TemporalMemoryShimProto) the proto instance to populate
"""
super(TemporalMemoryShim, self).write(proto.baseTM)
proto.connections.write(self.connections)
proto.predictiveCells = self.predictiveCells | python | def write(self, proto):
"""Populate serialization proto instance.
:param proto: (TemporalMemoryShimProto) the proto instance to populate
"""
super(TemporalMemoryShim, self).write(proto.baseTM)
proto.connections.write(self.connections)
proto.predictiveCells = self.predictiveCells | [
"def",
"write",
"(",
"self",
",",
"proto",
")",
":",
"super",
"(",
"TemporalMemoryShim",
",",
"self",
")",
".",
"write",
"(",
"proto",
".",
"baseTM",
")",
"proto",
".",
"connections",
".",
"write",
"(",
"self",
".",
"connections",
")",
"proto",
".",
"predictiveCells",
"=",
"self",
".",
"predictiveCells"
] | Populate serialization proto instance.
:param proto: (TemporalMemoryShimProto) the proto instance to populate | [
"Populate",
"serialization",
"proto",
"instance",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/temporal_memory_shim.py#L122-L129 | valid |
numenta/nupic | src/nupic/support/console_printer.py | ConsolePrinterMixin.cPrint | def cPrint(self, level, message, *args, **kw):
"""Print a message to the console.
Prints only if level <= self.consolePrinterVerbosity
Printing with level 0 is equivalent to using a print statement,
and should normally be avoided.
:param level: (int) indicating the urgency of the message with
lower values meaning more urgent (messages at level 0 are the most
urgent and are always printed)
:param message: (string) possibly with format specifiers
:param args: specifies the values for any format specifiers in message
:param kw: newline is the only keyword argument. True (default) if a newline
should be printed
"""
if level > self.consolePrinterVerbosity:
return
if len(kw) > 1:
raise KeyError("Invalid keywords for cPrint: %s" % str(kw.keys()))
newline = kw.get("newline", True)
if len(kw) == 1 and 'newline' not in kw:
raise KeyError("Invalid keyword for cPrint: %s" % kw.keys()[0])
if len(args) == 0:
if newline:
print message
else:
print message,
else:
if newline:
print message % args
else:
print message % args, | python | def cPrint(self, level, message, *args, **kw):
"""Print a message to the console.
Prints only if level <= self.consolePrinterVerbosity
Printing with level 0 is equivalent to using a print statement,
and should normally be avoided.
:param level: (int) indicating the urgency of the message with
lower values meaning more urgent (messages at level 0 are the most
urgent and are always printed)
:param message: (string) possibly with format specifiers
:param args: specifies the values for any format specifiers in message
:param kw: newline is the only keyword argument. True (default) if a newline
should be printed
"""
if level > self.consolePrinterVerbosity:
return
if len(kw) > 1:
raise KeyError("Invalid keywords for cPrint: %s" % str(kw.keys()))
newline = kw.get("newline", True)
if len(kw) == 1 and 'newline' not in kw:
raise KeyError("Invalid keyword for cPrint: %s" % kw.keys()[0])
if len(args) == 0:
if newline:
print message
else:
print message,
else:
if newline:
print message % args
else:
print message % args, | [
"def",
"cPrint",
"(",
"self",
",",
"level",
",",
"message",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"if",
"level",
">",
"self",
".",
"consolePrinterVerbosity",
":",
"return",
"if",
"len",
"(",
"kw",
")",
">",
"1",
":",
"raise",
"KeyError",
"(",
"\"Invalid keywords for cPrint: %s\"",
"%",
"str",
"(",
"kw",
".",
"keys",
"(",
")",
")",
")",
"newline",
"=",
"kw",
".",
"get",
"(",
"\"newline\"",
",",
"True",
")",
"if",
"len",
"(",
"kw",
")",
"==",
"1",
"and",
"'newline'",
"not",
"in",
"kw",
":",
"raise",
"KeyError",
"(",
"\"Invalid keyword for cPrint: %s\"",
"%",
"kw",
".",
"keys",
"(",
")",
"[",
"0",
"]",
")",
"if",
"len",
"(",
"args",
")",
"==",
"0",
":",
"if",
"newline",
":",
"print",
"message",
"else",
":",
"print",
"message",
",",
"else",
":",
"if",
"newline",
":",
"print",
"message",
"%",
"args",
"else",
":",
"print",
"message",
"%",
"args",
","
] | Print a message to the console.
Prints only if level <= self.consolePrinterVerbosity
Printing with level 0 is equivalent to using a print statement,
and should normally be avoided.
:param level: (int) indicating the urgency of the message with
lower values meaning more urgent (messages at level 0 are the most
urgent and are always printed)
:param message: (string) possibly with format specifiers
:param args: specifies the values for any format specifiers in message
:param kw: newline is the only keyword argument. True (default) if a newline
should be printed | [
"Print",
"a",
"message",
"to",
"the",
"console",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/support/console_printer.py#L52-L90 | valid |
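Typical use of the mixin looks like the sketch below. The ConsolePrinterMixin constructor is not shown in this excerpt, so the sketch sets consolePrinterVerbosity directly; that attribute is the only state cPrint() consults:

class MyThing(ConsolePrinterMixin):
  def __init__(self):
    self.consolePrinterVerbosity = 1  # normally set via the mixin's __init__

thing = MyThing()
thing.cPrint(0, "always printed, %d item(s)", 42)
thing.cPrint(1, "printed at verbosity >= 1", newline=False)
thing.cPrint(2, "suppressed at verbosity 1")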
numenta/nupic | scripts/profiling/tm_profile.py | profileTM | def profileTM(tmClass, tmDim, nRuns):
"""
profiling performance of TemporalMemory (TM)
using the python cProfile module and ordered by cumulative time,
see how to run on command-line above.
@param tmClass implementation of TM (cpp, py, ..)
@param tmDim number of columns in TM
@param nRuns number of calls of the profiled code (epochs)
"""
# create TM instance to measure
tm = tmClass(numberOfCols=tmDim)
# generate input data
data = numpy.random.randint(0, 2, [tmDim, nRuns]).astype('float32')
for i in xrange(nRuns):
# new data every time, this is the worst case performance
# real performance would be better, as the input data would not be completely random
d = data[:,i]
# the actual function to profile!
tm.compute(d, True) | python | def profileTM(tmClass, tmDim, nRuns):
"""
profiling performance of TemporalMemory (TM)
using the python cProfile module and ordered by cumulative time,
see how to run on command-line above.
@param tmClass implementation of TM (cpp, py, ..)
@param tmDim number of columns in TM
@param nRuns number of calls of the profiled code (epochs)
"""
# create TM instance to measure
tm = tmClass(numberOfCols=tmDim)
# generate input data
data = numpy.random.randint(0, 2, [tmDim, nRuns]).astype('float32')
for i in xrange(nRuns):
# new data every time, this is the worst case performance
# real performance would be better, as the input data would not be completely random
d = data[:,i]
# the actual function to profile!
tm.compute(d, True) | [
"def",
"profileTM",
"(",
"tmClass",
",",
"tmDim",
",",
"nRuns",
")",
":",
"# create TM instance to measure",
"tm",
"=",
"tmClass",
"(",
"numberOfCols",
"=",
"tmDim",
")",
"# generate input data",
"data",
"=",
"numpy",
".",
"random",
".",
"randint",
"(",
"0",
",",
"2",
",",
"[",
"tmDim",
",",
"nRuns",
"]",
")",
".",
"astype",
"(",
"'float32'",
")",
"for",
"i",
"in",
"xrange",
"(",
"nRuns",
")",
":",
"# new data every time, this is the worst case performance",
"# real performance would be better, as the input data would not be completely random",
"d",
"=",
"data",
"[",
":",
",",
"i",
"]",
"# the actual function to profile!",
"tm",
".",
"compute",
"(",
"d",
",",
"True",
")"
] | profiling performance of TemporalMemory (TM)
using the python cProfile module and ordered by cumulative time,
see how to run on command-line above.
@param tmClass implementation of TM (cpp, py, ..)
@param tmDim number of columns in TM
@param nRuns number of calls of the profiled code (epochs) | [
"profiling",
"performance",
"of",
"TemporalMemory",
"(",
"TM",
")",
"using",
"the",
"python",
"cProfile",
"module",
"and",
"ordered",
"by",
"cumulative",
"time",
"see",
"how",
"to",
"run",
"on",
"command",
"-",
"line",
"above",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/scripts/profiling/tm_profile.py#L31-L54 | valid |
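Outside this script's own command-line wrapper, the same measurement can be reproduced with cProfile directly. A sketch, where the BacktrackingTM import path is an assumption and any TM class exposing compute() would do:

import cProfile
import pstats

from nupic.algorithms.backtracking_tm import BacktrackingTM  # assumed class

cProfile.run('profileTM(BacktrackingTM, tmDim=2048, nRuns=10)', 'tm.prof')
pstats.Stats('tm.prof').sort_stats('cumulative').print_stats(20)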
numenta/nupic | scripts/run_swarm.py | runPermutations | def runPermutations(args):
"""
The main function of the RunPermutations utility.
This utility will automatically generate and run multiple prediction framework
experiments that are permutations of a base experiment via the Grok engine.
For example, if you have an experiment that you want to test with 3 possible
values of variable A and 2 possible values of variable B, this utility will
automatically generate the experiment directories and description files for
each of the 6 different experiments.
Here is an example permutations file which is read by this script below. The
permutations file must be in the same directory as the description.py for the
base experiment that you want to permute. It contains a permutations dict, an
optional list of the result items to report on for each experiment, and an
optional result item to optimize for.
When an 'optimize' entry is provided, this tool will attempt to prioritize the
order in which the various permutations are run in order to improve the odds
of running the best permutations sooner. It does this by watching the results
for various parameter values and putting parameter values that give generally
better results at the head of the queue.
In addition, when the optimize key is provided, we periodically update the UI
with the best results obtained so far on that metric.
---------------------------------------------------------------------------
permutations = dict(
iterationCount = [1000, 5000],
coincCount = [50, 100],
trainTP = [False],
)
report = ['.*reconstructErrAvg',
'.*inputPredScore.*',
]
optimize = 'postProc_gym1_baseline:inputPredScore'
Parameters:
----------------------------------------------------------------------
args: Command-line args; the equivalent of sys.argv[1:]
retval: for the actions 'run', 'pickup', and 'dryRun', returns the
  Hypersearch job ID (in ClientJobs table); otherwise returns
None
"""
helpString = (
"\n\n%prog [options] permutationsScript\n"
"%prog [options] expDescription.json\n\n"
"This script runs permutations of an experiment via Grok engine, as "
"defined in a\npermutations.py script or an expGenerator experiment "
"description json file.\nIn the expDescription.json form, the json file "
"MUST have the file extension\n'.json' and MUST conform to "
"expGenerator/experimentDescriptionSchema.json.")
parser = optparse.OptionParser(usage=helpString)
parser.add_option(
"--replaceReport", dest="replaceReport", action="store_true",
default=DEFAULT_OPTIONS["replaceReport"],
help="Replace existing csv report file if it exists. Default is to "
"append to the existing file. [default: %default].")
parser.add_option(
"--action", dest="action", default=DEFAULT_OPTIONS["action"],
choices=["run", "pickup", "report", "dryRun"],
help="Which action to perform. Possible actions are run, pickup, choices, "
"report, list. "
"run: run a new HyperSearch via Grok. "
"pickup: pick up the latest run of a HyperSearch job. "
"dryRun: run a single HypersearchWorker inline within the application "
"process without the Grok infrastructure to flush out bugs in "
"description and permutations scripts; defaults to "
"maxPermutations=1: use --maxPermutations to change this; "
"report: just print results from the last or current run. "
"[default: %default].")
parser.add_option(
"--maxPermutations", dest="maxPermutations",
default=DEFAULT_OPTIONS["maxPermutations"], type="int",
help="Maximum number of models to search. Applies only to the 'run' and "
"'dryRun' actions. [default: %default].")
parser.add_option(
"--exports", dest="exports", default=DEFAULT_OPTIONS["exports"],
type="string",
help="json dump of environment variable settings that should be applied"
"for the job before running. [default: %default].")
parser.add_option(
"--useTerminators", dest="useTerminators", action="store_true",
default=DEFAULT_OPTIONS["useTerminators"], help="Use early model terminators in HyperSearch"
"[default: %default].")
parser.add_option(
"--maxWorkers", dest="maxWorkers", default=DEFAULT_OPTIONS["maxWorkers"],
type="int",
help="Maximum number of concurrent workers to launch. Applies only to "
"the 'run' action. [default: %default].")
parser.add_option(
"-v", dest="verbosityCount", action="count", default=0,
help="Increase verbosity of the output. Specify multiple times for "
"increased verbosity. e.g., -vv is more verbose than -v.")
parser.add_option(
"--timeout", dest="timeout", default=DEFAULT_OPTIONS["timeout"], type="int",
help="Time out for this search in minutes"
"[default: %default].")
parser.add_option(
"--overwrite", default=DEFAULT_OPTIONS["overwrite"], action="store_true",
help="If 'yes', overwrite existing description.py and permutations.py"
" (in the same directory as the <expDescription.json> file) if they"
" already exist. [default: %default].")
parser.add_option(
"--genTopNDescriptions", dest="genTopNDescriptions",
default=DEFAULT_OPTIONS["genTopNDescriptions"], type="int",
help="Generate description files for the top N models. Each one will be"
" placed into it's own subdirectory under the base description file."
"[default: %default].")
(options, positionalArgs) = parser.parse_args(args)
# Get the permutations script's filepath
if len(positionalArgs) != 1:
parser.error("You must supply the name of exactly one permutations script "
"or JSON description file.")
fileArgPath = os.path.expanduser(positionalArgs[0])
fileArgPath = os.path.expandvars(fileArgPath)
fileArgPath = os.path.abspath(fileArgPath)
permWorkDir = os.path.dirname(fileArgPath)
outputLabel = os.path.splitext(os.path.basename(fileArgPath))[0]
basename = os.path.basename(fileArgPath)
fileExtension = os.path.splitext(basename)[1]
optionsDict = vars(options)
if fileExtension == ".json":
returnValue = permutations_runner.runWithJsonFile(
fileArgPath, optionsDict, outputLabel, permWorkDir)
else:
returnValue = permutations_runner.runWithPermutationsScript(
fileArgPath, optionsDict, outputLabel, permWorkDir)
return returnValue | python | def runPermutations(args):
"""
The main function of the RunPermutations utility.
This utility will automatically generate and run multiple prediction framework
experiments that are permutations of a base experiment via the Grok engine.
For example, if you have an experiment that you want to test with 3 possible
values of variable A and 2 possible values of variable B, this utility will
automatically generate the experiment directories and description files for
each of the 6 different experiments.
Here is an example permutations file which is read by this script below. The
permutations file must be in the same directory as the description.py for the
base experiment that you want to permute. It contains a permutations dict, an
optional list of the result items to report on for each experiment, and an
optional result item to optimize for.
When an 'optimize' entry is provided, this tool will attempt to prioritize the
order in which the various permutations are run in order to improve the odds
of running the best permutations sooner. It does this by watching the results
for various parameter values and putting parameter values that give generally
better results at the head of the queue.
In addition, when the optimize key is provided, we periodically update the UI
with the best results obtained so far on that metric.
---------------------------------------------------------------------------
permutations = dict(
iterationCount = [1000, 5000],
coincCount = [50, 100],
trainTP = [False],
)
report = ['.*reconstructErrAvg',
'.*inputPredScore.*',
]
optimize = 'postProc_gym1_baseline:inputPredScore'
Parameters:
----------------------------------------------------------------------
args: Command-line args; the equivalent of sys.argv[1:]
retval: for the actions 'run', 'pickup', and 'dryRun', returns the
  Hypersearch job ID (in ClientJobs table); otherwise returns
None
"""
helpString = (
"\n\n%prog [options] permutationsScript\n"
"%prog [options] expDescription.json\n\n"
"This script runs permutations of an experiment via Grok engine, as "
"defined in a\npermutations.py script or an expGenerator experiment "
"description json file.\nIn the expDescription.json form, the json file "
"MUST have the file extension\n'.json' and MUST conform to "
"expGenerator/experimentDescriptionSchema.json.")
parser = optparse.OptionParser(usage=helpString)
parser.add_option(
"--replaceReport", dest="replaceReport", action="store_true",
default=DEFAULT_OPTIONS["replaceReport"],
help="Replace existing csv report file if it exists. Default is to "
"append to the existing file. [default: %default].")
parser.add_option(
"--action", dest="action", default=DEFAULT_OPTIONS["action"],
choices=["run", "pickup", "report", "dryRun"],
help="Which action to perform. Possible actions are run, pickup, choices, "
"report, list. "
"run: run a new HyperSearch via Grok. "
"pickup: pick up the latest run of a HyperSearch job. "
"dryRun: run a single HypersearchWorker inline within the application "
"process without the Grok infrastructure to flush out bugs in "
"description and permutations scripts; defaults to "
"maxPermutations=1: use --maxPermutations to change this; "
"report: just print results from the last or current run. "
"[default: %default].")
parser.add_option(
"--maxPermutations", dest="maxPermutations",
default=DEFAULT_OPTIONS["maxPermutations"], type="int",
help="Maximum number of models to search. Applies only to the 'run' and "
"'dryRun' actions. [default: %default].")
parser.add_option(
"--exports", dest="exports", default=DEFAULT_OPTIONS["exports"],
type="string",
help="json dump of environment variable settings that should be applied"
"for the job before running. [default: %default].")
parser.add_option(
"--useTerminators", dest="useTerminators", action="store_true",
default=DEFAULT_OPTIONS["useTerminators"], help="Use early model terminators in HyperSearch"
"[default: %default].")
parser.add_option(
"--maxWorkers", dest="maxWorkers", default=DEFAULT_OPTIONS["maxWorkers"],
type="int",
help="Maximum number of concurrent workers to launch. Applies only to "
"the 'run' action. [default: %default].")
parser.add_option(
"-v", dest="verbosityCount", action="count", default=0,
help="Increase verbosity of the output. Specify multiple times for "
"increased verbosity. e.g., -vv is more verbose than -v.")
parser.add_option(
"--timeout", dest="timeout", default=DEFAULT_OPTIONS["timeout"], type="int",
help="Time out for this search in minutes"
"[default: %default].")
parser.add_option(
"--overwrite", default=DEFAULT_OPTIONS["overwrite"], action="store_true",
help="If 'yes', overwrite existing description.py and permutations.py"
" (in the same directory as the <expDescription.json> file) if they"
" already exist. [default: %default].")
parser.add_option(
"--genTopNDescriptions", dest="genTopNDescriptions",
default=DEFAULT_OPTIONS["genTopNDescriptions"], type="int",
help="Generate description files for the top N models. Each one will be"
" placed into it's own subdirectory under the base description file."
"[default: %default].")
(options, positionalArgs) = parser.parse_args(args)
# Get the permutations script's filepath
if len(positionalArgs) != 1:
parser.error("You must supply the name of exactly one permutations script "
"or JSON description file.")
fileArgPath = os.path.expanduser(positionalArgs[0])
fileArgPath = os.path.expandvars(fileArgPath)
fileArgPath = os.path.abspath(fileArgPath)
permWorkDir = os.path.dirname(fileArgPath)
outputLabel = os.path.splitext(os.path.basename(fileArgPath))[0]
basename = os.path.basename(fileArgPath)
fileExtension = os.path.splitext(basename)[1]
optionsDict = vars(options)
if fileExtension == ".json":
returnValue = permutations_runner.runWithJsonFile(
fileArgPath, optionsDict, outputLabel, permWorkDir)
else:
returnValue = permutations_runner.runWithPermutationsScript(
fileArgPath, optionsDict, outputLabel, permWorkDir)
return returnValue | [
"def",
"runPermutations",
"(",
"args",
")",
":",
"helpString",
"=",
"(",
"\"\\n\\n%prog [options] permutationsScript\\n\"",
"\"%prog [options] expDescription.json\\n\\n\"",
"\"This script runs permutations of an experiment via Grok engine, as \"",
"\"defined in a\\npermutations.py script or an expGenerator experiment \"",
"\"description json file.\\nIn the expDescription.json form, the json file \"",
"\"MUST have the file extension\\n'.json' and MUST conform to \"",
"\"expGenerator/experimentDescriptionSchema.json.\"",
")",
"parser",
"=",
"optparse",
".",
"OptionParser",
"(",
"usage",
"=",
"helpString",
")",
"parser",
".",
"add_option",
"(",
"\"--replaceReport\"",
",",
"dest",
"=",
"\"replaceReport\"",
",",
"action",
"=",
"\"store_true\"",
",",
"default",
"=",
"DEFAULT_OPTIONS",
"[",
"\"replaceReport\"",
"]",
",",
"help",
"=",
"\"Replace existing csv report file if it exists. Default is to \"",
"\"append to the existing file. [default: %default].\"",
")",
"parser",
".",
"add_option",
"(",
"\"--action\"",
",",
"dest",
"=",
"\"action\"",
",",
"default",
"=",
"DEFAULT_OPTIONS",
"[",
"\"action\"",
"]",
",",
"choices",
"=",
"[",
"\"run\"",
",",
"\"pickup\"",
",",
"\"report\"",
",",
"\"dryRun\"",
"]",
",",
"help",
"=",
"\"Which action to perform. Possible actions are run, pickup, choices, \"",
"\"report, list. \"",
"\"run: run a new HyperSearch via Grok. \"",
"\"pickup: pick up the latest run of a HyperSearch job. \"",
"\"dryRun: run a single HypersearchWorker inline within the application \"",
"\"process without the Grok infrastructure to flush out bugs in \"",
"\"description and permutations scripts; defaults to \"",
"\"maxPermutations=1: use --maxPermutations to change this; \"",
"\"report: just print results from the last or current run. \"",
"\"[default: %default].\"",
")",
"parser",
".",
"add_option",
"(",
"\"--maxPermutations\"",
",",
"dest",
"=",
"\"maxPermutations\"",
",",
"default",
"=",
"DEFAULT_OPTIONS",
"[",
"\"maxPermutations\"",
"]",
",",
"type",
"=",
"\"int\"",
",",
"help",
"=",
"\"Maximum number of models to search. Applies only to the 'run' and \"",
"\"'dryRun' actions. [default: %default].\"",
")",
"parser",
".",
"add_option",
"(",
"\"--exports\"",
",",
"dest",
"=",
"\"exports\"",
",",
"default",
"=",
"DEFAULT_OPTIONS",
"[",
"\"exports\"",
"]",
",",
"type",
"=",
"\"string\"",
",",
"help",
"=",
"\"json dump of environment variable settings that should be applied\"",
"\"for the job before running. [default: %default].\"",
")",
"parser",
".",
"add_option",
"(",
"\"--useTerminators\"",
",",
"dest",
"=",
"\"useTerminators\"",
",",
"action",
"=",
"\"store_true\"",
",",
"default",
"=",
"DEFAULT_OPTIONS",
"[",
"\"useTerminators\"",
"]",
",",
"help",
"=",
"\"Use early model terminators in HyperSearch\"",
"\"[default: %default].\"",
")",
"parser",
".",
"add_option",
"(",
"\"--maxWorkers\"",
",",
"dest",
"=",
"\"maxWorkers\"",
",",
"default",
"=",
"DEFAULT_OPTIONS",
"[",
"\"maxWorkers\"",
"]",
",",
"type",
"=",
"\"int\"",
",",
"help",
"=",
"\"Maximum number of concurrent workers to launch. Applies only to \"",
"\"the 'run' action. [default: %default].\"",
")",
"parser",
".",
"add_option",
"(",
"\"-v\"",
",",
"dest",
"=",
"\"verbosityCount\"",
",",
"action",
"=",
"\"count\"",
",",
"default",
"=",
"0",
",",
"help",
"=",
"\"Increase verbosity of the output. Specify multiple times for \"",
"\"increased verbosity. e.g., -vv is more verbose than -v.\"",
")",
"parser",
".",
"add_option",
"(",
"\"--timeout\"",
",",
"dest",
"=",
"\"timeout\"",
",",
"default",
"=",
"DEFAULT_OPTIONS",
"[",
"\"timeout\"",
"]",
",",
"type",
"=",
"\"int\"",
",",
"help",
"=",
"\"Time out for this search in minutes\"",
"\"[default: %default].\"",
")",
"parser",
".",
"add_option",
"(",
"\"--overwrite\"",
",",
"default",
"=",
"DEFAULT_OPTIONS",
"[",
"\"overwrite\"",
"]",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"If 'yes', overwrite existing description.py and permutations.py\"",
"\" (in the same directory as the <expDescription.json> file) if they\"",
"\" already exist. [default: %default].\"",
")",
"parser",
".",
"add_option",
"(",
"\"--genTopNDescriptions\"",
",",
"dest",
"=",
"\"genTopNDescriptions\"",
",",
"default",
"=",
"DEFAULT_OPTIONS",
"[",
"\"genTopNDescriptions\"",
"]",
",",
"type",
"=",
"\"int\"",
",",
"help",
"=",
"\"Generate description files for the top N models. Each one will be\"",
"\" placed into it's own subdirectory under the base description file.\"",
"\"[default: %default].\"",
")",
"(",
"options",
",",
"positionalArgs",
")",
"=",
"parser",
".",
"parse_args",
"(",
"args",
")",
"# Get the permutations script's filepath",
"if",
"len",
"(",
"positionalArgs",
")",
"!=",
"1",
":",
"parser",
".",
"error",
"(",
"\"You must supply the name of exactly one permutations script \"",
"\"or JSON description file.\"",
")",
"fileArgPath",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"positionalArgs",
"[",
"0",
"]",
")",
"fileArgPath",
"=",
"os",
".",
"path",
".",
"expandvars",
"(",
"fileArgPath",
")",
"fileArgPath",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"fileArgPath",
")",
"permWorkDir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"fileArgPath",
")",
"outputLabel",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"fileArgPath",
")",
")",
"[",
"0",
"]",
"basename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"fileArgPath",
")",
"fileExtension",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"basename",
")",
"[",
"1",
"]",
"optionsDict",
"=",
"vars",
"(",
"options",
")",
"if",
"fileExtension",
"==",
"\".json\"",
":",
"returnValue",
"=",
"permutations_runner",
".",
"runWithJsonFile",
"(",
"fileArgPath",
",",
"optionsDict",
",",
"outputLabel",
",",
"permWorkDir",
")",
"else",
":",
"returnValue",
"=",
"permutations_runner",
".",
"runWithPermutationsScript",
"(",
"fileArgPath",
",",
"optionsDict",
",",
"outputLabel",
",",
"permWorkDir",
")",
"return",
"returnValue"
] | The main function of the RunPermutations utility.
This utility will automatically generate and run multiple prediction framework
experiments that are permutations of a base experiment via the Grok engine.
For example, if you have an experiment that you want to test with 3 possible
values of variable A and 2 possible values of variable B, this utility will
automatically generate the experiment directories and description files for
each of the 6 different experiments.
Here is an example permutations file which is read by this script below. The
permutations file must be in the same directory as the description.py for the
base experiment that you want to permute. It contains a permutations dict, an
optional list of the result items to report on for each experiment, and an
optional result item to optimize for.
When an 'optimize' entry is provided, this tool will attempt to prioritize the
order in which the various permutations are run in order to improve the odds
of running the best permutations sooner. It does this by watching the results
for various parameter values and putting parameter values that give generally
better results at the head of the queue.
In addition, when the optimize key is provided, we periodically update the UI
with the best results obtained so far on that metric.
---------------------------------------------------------------------------
permutations = dict(
iterationCount = [1000, 5000],
coincCount = [50, 100],
trainTP = [False],
)
report = ['.*reconstructErrAvg',
'.*inputPredScore.*',
]
optimize = 'postProc_gym1_baseline:inputPredScore'
Parameters:
----------------------------------------------------------------------
args: Command-line args; the equivalent of sys.argv[1:]
retval: for the actions 'run', 'pickup', and 'dryRun', returns the
Hypersearch job ID (in ClientJobs table); otherwise returns
None | [
"The",
"main",
"function",
"of",
"the",
"RunPermutations",
"utility",
".",
"This",
"utility",
"will",
"automatically",
"generate",
"and",
"run",
"multiple",
"prediction",
"framework",
"experiments",
"that",
"are",
"permutations",
"of",
"a",
"base",
"experiment",
"via",
"the",
"Grok",
"engine",
".",
"For",
"example",
"if",
"you",
"have",
"an",
"experiment",
"that",
"you",
"want",
"to",
"test",
"with",
"3",
"possible",
"values",
"of",
"variable",
"A",
"and",
"2",
"possible",
"values",
"of",
"variable",
"B",
"this",
"utility",
"will",
"automatically",
"generate",
"the",
"experiment",
"directories",
"and",
"description",
"files",
"for",
"each",
"of",
"the",
"6",
"different",
"experiments",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/scripts/run_swarm.py#L35-L184 | valid |
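Because runPermutations() takes the equivalent of sys.argv[1:], it can also be driven programmatically. A sketch with a hypothetical permutations.py path:

# Equivalent to: python run_swarm.py permutations.py --maxWorkers=4
jobID = runPermutations(['permutations.py', '--maxWorkers=4'])

# Flush out description/permutations script bugs without launching workers:
jobID = runPermutations(['permutations.py', '--action=dryRun'])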
numenta/nupic | examples/opf/experiments/classification/makeDatasets.py | _generateCategory | def _generateCategory(filename="simple.csv", numSequences=2, elementsPerSeq=1,
numRepeats=10, resets=False):
""" Generate a simple dataset. This contains a bunch of non-overlapping
sequences.
Parameters:
----------------------------------------------------
filename: name of the file to produce, including extension. It will
be created in a 'datasets' sub-directory within the
directory containing this script.
numSequences: how many sequences to generate
elementsPerSeq: length of each sequence
numRepeats: how many times to repeat each sequence in the output
resets: if True, turn on reset at start of each sequence
"""
# Create the output file
scriptDir = os.path.dirname(__file__)
pathname = os.path.join(scriptDir, 'datasets', filename)
print "Creating %s..." % (pathname)
fields = [('reset', 'int', 'R'), ('category', 'int', 'C'),
('field1', 'string', '')]
outFile = FileRecordStream(pathname, write=True, fields=fields)
# Create the sequences
sequences = []
for i in range(numSequences):
seq = [x for x in range(i*elementsPerSeq, (i+1)*elementsPerSeq)]
sequences.append(seq)
# Write out the sequences in random order
seqIdxs = []
for i in range(numRepeats):
seqIdxs += range(numSequences)
random.shuffle(seqIdxs)
for seqIdx in seqIdxs:
reset = int(resets)
seq = sequences[seqIdx]
for x in seq:
outFile.appendRecord([reset, str(seqIdx), str(x)])
reset = 0
outFile.close() | python | def _generateCategory(filename="simple.csv", numSequences=2, elementsPerSeq=1,
numRepeats=10, resets=False):
""" Generate a simple dataset. This contains a bunch of non-overlapping
sequences.
Parameters:
----------------------------------------------------
filename: name of the file to produce, including extension. It will
be created in a 'datasets' sub-directory within the
directory containing this script.
numSequences: how many sequences to generate
elementsPerSeq: length of each sequence
numRepeats: how many times to repeat each sequence in the output
resets: if True, turn on reset at start of each sequence
"""
# Create the output file
scriptDir = os.path.dirname(__file__)
pathname = os.path.join(scriptDir, 'datasets', filename)
print "Creating %s..." % (pathname)
fields = [('reset', 'int', 'R'), ('category', 'int', 'C'),
('field1', 'string', '')]
outFile = FileRecordStream(pathname, write=True, fields=fields)
# Create the sequences
sequences = []
for i in range(numSequences):
seq = [x for x in range(i*elementsPerSeq, (i+1)*elementsPerSeq)]
sequences.append(seq)
# Write out the sequences in random order
seqIdxs = []
for i in range(numRepeats):
seqIdxs += range(numSequences)
random.shuffle(seqIdxs)
for seqIdx in seqIdxs:
reset = int(resets)
seq = sequences[seqIdx]
for x in seq:
outFile.appendRecord([reset, str(seqIdx), str(x)])
reset = 0
outFile.close() | [
"def",
"_generateCategory",
"(",
"filename",
"=",
"\"simple.csv\"",
",",
"numSequences",
"=",
"2",
",",
"elementsPerSeq",
"=",
"1",
",",
"numRepeats",
"=",
"10",
",",
"resets",
"=",
"False",
")",
":",
"# Create the output file",
"scriptDir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
"pathname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"scriptDir",
",",
"'datasets'",
",",
"filename",
")",
"print",
"\"Creating %s...\"",
"%",
"(",
"pathname",
")",
"fields",
"=",
"[",
"(",
"'reset'",
",",
"'int'",
",",
"'R'",
")",
",",
"(",
"'category'",
",",
"'int'",
",",
"'C'",
")",
",",
"(",
"'field1'",
",",
"'string'",
",",
"''",
")",
"]",
"outFile",
"=",
"FileRecordStream",
"(",
"pathname",
",",
"write",
"=",
"True",
",",
"fields",
"=",
"fields",
")",
"# Create the sequences",
"sequences",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"numSequences",
")",
":",
"seq",
"=",
"[",
"x",
"for",
"x",
"in",
"range",
"(",
"i",
"*",
"elementsPerSeq",
",",
"(",
"i",
"+",
"1",
")",
"*",
"elementsPerSeq",
")",
"]",
"sequences",
".",
"append",
"(",
"seq",
")",
"# Write out the sequences in random order",
"seqIdxs",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"numRepeats",
")",
":",
"seqIdxs",
"+=",
"range",
"(",
"numSequences",
")",
"random",
".",
"shuffle",
"(",
"seqIdxs",
")",
"for",
"seqIdx",
"in",
"seqIdxs",
":",
"reset",
"=",
"int",
"(",
"resets",
")",
"seq",
"=",
"sequences",
"[",
"seqIdx",
"]",
"for",
"x",
"in",
"seq",
":",
"outFile",
".",
"appendRecord",
"(",
"[",
"reset",
",",
"str",
"(",
"seqIdx",
")",
",",
"str",
"(",
"x",
")",
"]",
")",
"reset",
"=",
"0",
"outFile",
".",
"close",
"(",
")"
] | Generate a simple dataset. This contains a bunch of non-overlapping
sequences.
Parameters:
----------------------------------------------------
filename: name of the file to produce, including extension. It will
be created in a 'datasets' sub-directory within the
directory containing this script.
numSequences: how many sequences to generate
elementsPerSeq: length of each sequence
numRepeats: how many times to repeat each sequence in the output
resets: if True, turn on reset at start of each sequence | [
"Generate",
"a",
"simple",
"dataset",
".",
"This",
"contains",
"a",
"bunch",
"of",
"non",
"-",
"overlapping",
"sequences",
".",
"Parameters",
":",
"----------------------------------------------------",
"filename",
":",
"name",
"of",
"the",
"file",
"to",
"produce",
"including",
"extension",
".",
"It",
"will",
"be",
"created",
"in",
"a",
"datasets",
"sub",
"-",
"directory",
"within",
"the",
"directory",
"containing",
"this",
"script",
".",
"numSequences",
":",
"how",
"many",
"sequences",
"to",
"generate",
"elementsPerSeq",
":",
"length",
"of",
"each",
"sequence",
"numRepeats",
":",
"how",
"many",
"times",
"to",
"repeat",
"each",
"sequence",
"in",
"the",
"output",
"resets",
":",
"if",
"True",
"turn",
"on",
"reset",
"at",
"start",
"of",
"each",
"sequence"
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/examples/opf/experiments/classification/makeDatasets.py#L36-L79 | valid |
numenta/nupic | src/nupic/encoders/geospatial_coordinate.py | GeospatialCoordinateEncoder.encodeIntoArray | def encodeIntoArray(self, inputData, output):
"""
See `nupic.encoders.base.Encoder` for more information.
:param: inputData (tuple) Contains speed (float), longitude (float),
latitude (float), altitude (float)
:param: output (numpy.array) Stores encoded SDR in this numpy array
"""
altitude = None
if len(inputData) == 4:
(speed, longitude, latitude, altitude) = inputData
else:
(speed, longitude, latitude) = inputData
coordinate = self.coordinateForPosition(longitude, latitude, altitude)
radius = self.radiusForSpeed(speed)
super(GeospatialCoordinateEncoder, self).encodeIntoArray(
(coordinate, radius), output) | python | def encodeIntoArray(self, inputData, output):
"""
See `nupic.encoders.base.Encoder` for more information.
:param: inputData (tuple) Contains speed (float), longitude (float),
latitude (float), altitude (float)
:param: output (numpy.array) Stores encoded SDR in this numpy array
"""
altitude = None
if len(inputData) == 4:
(speed, longitude, latitude, altitude) = inputData
else:
(speed, longitude, latitude) = inputData
coordinate = self.coordinateForPosition(longitude, latitude, altitude)
radius = self.radiusForSpeed(speed)
super(GeospatialCoordinateEncoder, self).encodeIntoArray(
(coordinate, radius), output) | [
"def",
"encodeIntoArray",
"(",
"self",
",",
"inputData",
",",
"output",
")",
":",
"altitude",
"=",
"None",
"if",
"len",
"(",
"inputData",
")",
"==",
"4",
":",
"(",
"speed",
",",
"longitude",
",",
"latitude",
",",
"altitude",
")",
"=",
"inputData",
"else",
":",
"(",
"speed",
",",
"longitude",
",",
"latitude",
")",
"=",
"inputData",
"coordinate",
"=",
"self",
".",
"coordinateForPosition",
"(",
"longitude",
",",
"latitude",
",",
"altitude",
")",
"radius",
"=",
"self",
".",
"radiusForSpeed",
"(",
"speed",
")",
"super",
"(",
"GeospatialCoordinateEncoder",
",",
"self",
")",
".",
"encodeIntoArray",
"(",
"(",
"coordinate",
",",
"radius",
")",
",",
"output",
")"
] | See `nupic.encoders.base.Encoder` for more information.
:param: inputData (tuple) Contains speed (float), longitude (float),
latitude (float), altitude (float)
:param: output (numpy.array) Stores encoded SDR in this numpy array | [
"See",
"nupic",
".",
"encoders",
".",
"base",
".",
"Encoder",
"for",
"more",
"information",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/encoders/geospatial_coordinate.py#L82-L98 | valid |
numenta/nupic | src/nupic/encoders/geospatial_coordinate.py | GeospatialCoordinateEncoder.coordinateForPosition | def coordinateForPosition(self, longitude, latitude, altitude=None):
"""
Returns coordinate for given GPS position.
:param: longitude (float) Longitude of position
:param: latitude (float) Latitude of position
:param: altitude (float) Altitude of position
:returns: (numpy.array) Coordinate that the given GPS position
maps to
"""
coords = PROJ(longitude, latitude)
if altitude is not None:
coords = transform(PROJ, geocentric, coords[0], coords[1], altitude)
coordinate = numpy.array(coords)
coordinate = coordinate / self.scale
return coordinate.astype(int) | python | def coordinateForPosition(self, longitude, latitude, altitude=None):
"""
Returns coordinate for given GPS position.
:param: longitude (float) Longitude of position
:param: latitude (float) Latitude of position
:param: altitude (float) Altitude of position
:returns: (numpy.array) Coordinate that the given GPS position
maps to
"""
coords = PROJ(longitude, latitude)
if altitude is not None:
coords = transform(PROJ, geocentric, coords[0], coords[1], altitude)
coordinate = numpy.array(coords)
coordinate = coordinate / self.scale
return coordinate.astype(int) | [
"def",
"coordinateForPosition",
"(",
"self",
",",
"longitude",
",",
"latitude",
",",
"altitude",
"=",
"None",
")",
":",
"coords",
"=",
"PROJ",
"(",
"longitude",
",",
"latitude",
")",
"if",
"altitude",
"is",
"not",
"None",
":",
"coords",
"=",
"transform",
"(",
"PROJ",
",",
"geocentric",
",",
"coords",
"[",
"0",
"]",
",",
"coords",
"[",
"1",
"]",
",",
"altitude",
")",
"coordinate",
"=",
"numpy",
".",
"array",
"(",
"coords",
")",
"coordinate",
"=",
"coordinate",
"/",
"self",
".",
"scale",
"return",
"coordinate",
".",
"astype",
"(",
"int",
")"
] | Returns coordinate for given GPS position.
:param: longitude (float) Longitude of position
:param: latitude (float) Latitude of position
:param: altitude (float) Altitude of position
:returns: (numpy.array) Coordinate that the given GPS position
maps to | [
"Returns",
"coordinate",
"for",
"given",
"GPS",
"position",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/encoders/geospatial_coordinate.py#L101-L118 | valid |
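PROJ and geocentric in coordinateForPosition() are module-level pyproj projection objects defined near the top of geospatial_coordinate.py, outside this excerpt. A self-contained sketch of the same project-then-quantize step, where the EPSG code and scale value are illustrative assumptions:

import numpy
from pyproj import Proj

proj = Proj(init='epsg:3785')  # spherical-mercator stand-in for PROJ
scale = 30                     # meters per coordinate cell, illustrative

x, y = proj(-122.229194, 37.486782)  # longitude, latitude -> meters
coordinate = (numpy.array([x, y]) / scale).astype(int)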
numenta/nupic | src/nupic/encoders/geospatial_coordinate.py | GeospatialCoordinateEncoder.radiusForSpeed | def radiusForSpeed(self, speed):
"""
Returns radius for given speed.
Tries to get the encodings of consecutive readings to be
adjacent with some overlap.
:param: speed (float) Speed (in meters per second)
:returns: (int) Radius for given speed
"""
overlap = 1.5
coordinatesPerTimestep = speed * self.timestep / self.scale
radius = int(round(float(coordinatesPerTimestep) / 2 * overlap))
minRadius = int(math.ceil((math.sqrt(self.w) - 1) / 2))
return max(radius, minRadius) | python | def radiusForSpeed(self, speed):
"""
Returns radius for given speed.
Tries to get the encodings of consecutive readings to be
adjacent with some overlap.
:param: speed (float) Speed (in meters per second)
:returns: (int) Radius for given speed
"""
overlap = 1.5
coordinatesPerTimestep = speed * self.timestep / self.scale
radius = int(round(float(coordinatesPerTimestep) / 2 * overlap))
minRadius = int(math.ceil((math.sqrt(self.w) - 1) / 2))
return max(radius, minRadius) | [
"def",
"radiusForSpeed",
"(",
"self",
",",
"speed",
")",
":",
"overlap",
"=",
"1.5",
"coordinatesPerTimestep",
"=",
"speed",
"*",
"self",
".",
"timestep",
"/",
"self",
".",
"scale",
"radius",
"=",
"int",
"(",
"round",
"(",
"float",
"(",
"coordinatesPerTimestep",
")",
"/",
"2",
"*",
"overlap",
")",
")",
"minRadius",
"=",
"int",
"(",
"math",
".",
"ceil",
"(",
"(",
"math",
".",
"sqrt",
"(",
"self",
".",
"w",
")",
"-",
"1",
")",
"/",
"2",
")",
")",
"return",
"max",
"(",
"radius",
",",
"minRadius",
")"
] | Returns radius for given speed.
Tries to get the encodings of consecutive readings to be
adjacent with some overlap.
:param: speed (float) Speed (in meters per second)
:returns: (int) Radius for given speed | [
"Returns",
"radius",
"for",
"given",
"speed",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/encoders/geospatial_coordinate.py#L121-L135 | valid |
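Working the radius formula through with concrete numbers makes the overlap behavior easier to see; scale, timestep, and w are instance attributes set at construction, and the values below are assumptions for illustration:

import math

scale, timestep, w = 30, 10, 9
speed = 9.0                                             # meters per second
coordinatesPerTimestep = speed * timestep / scale       # 3.0
radius = int(round(coordinatesPerTimestep / 2 * 1.5))   # round(2.25) -> 2
minRadius = int(math.ceil((math.sqrt(w) - 1) / 2))      # ceil(1.0) -> 1
print max(radius, minRadius)                            # 2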
numenta/nupic | examples/opf/experiments/spatial_classification/auto_generated/searchDef.py | getSearch | def getSearch(rootDir):
""" This method returns search description. See the following file for the
schema of the dictionary this method returns:
py/nupic/swarming/exp_generator/experimentDescriptionSchema.json
The streamDef element defines the stream for this model. The schema for this
element can be found at:
py/nupicengine/cluster/database/StreamDef.json
"""
# Form the stream definition
dataPath = os.path.abspath(os.path.join(rootDir, 'datasets', 'scalar_1.csv'))
streamDef = dict(
version = 1,
info = "testSpatialClassification",
streams = [
dict(source="file://%s" % (dataPath),
info="scalar_1.csv",
columns=["*"],
),
],
)
# Generate the experiment description
expDesc = {
"environment": 'nupic',
"inferenceArgs":{
"predictedField":"classification",
"predictionSteps": [0],
},
"inferenceType": "MultiStep",
"streamDef": streamDef,
"includedFields": [
{ "fieldName": "field1",
"fieldType": "float",
},
{ "fieldName": "classification",
"fieldType": "string",
},
{ "fieldName": "randomData",
"fieldType": "float",
},
],
"iterationCount": -1,
}
return expDesc | python | def getSearch(rootDir):
""" This method returns search description. See the following file for the
schema of the dictionary this method returns:
py/nupic/swarming/exp_generator/experimentDescriptionSchema.json
The streamDef element defines the stream for this model. The schema for this
element can be found at:
py/nupicengine/cluster/database/StreamDef.json
"""
# Form the stream definition
dataPath = os.path.abspath(os.path.join(rootDir, 'datasets', 'scalar_1.csv'))
streamDef = dict(
version = 1,
info = "testSpatialClassification",
streams = [
dict(source="file://%s" % (dataPath),
info="scalar_1.csv",
columns=["*"],
),
],
)
# Generate the experiment description
expDesc = {
"environment": 'nupic',
"inferenceArgs":{
"predictedField":"classification",
"predictionSteps": [0],
},
"inferenceType": "MultiStep",
"streamDef": streamDef,
"includedFields": [
{ "fieldName": "field1",
"fieldType": "float",
},
{ "fieldName": "classification",
"fieldType": "string",
},
{ "fieldName": "randomData",
"fieldType": "float",
},
],
"iterationCount": -1,
}
return expDesc | [
"def",
"getSearch",
"(",
"rootDir",
")",
":",
"# Form the stream definition",
"dataPath",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"rootDir",
",",
"'datasets'",
",",
"'scalar_1.csv'",
")",
")",
"streamDef",
"=",
"dict",
"(",
"version",
"=",
"1",
",",
"info",
"=",
"\"testSpatialClassification\"",
",",
"streams",
"=",
"[",
"dict",
"(",
"source",
"=",
"\"file://%s\"",
"%",
"(",
"dataPath",
")",
",",
"info",
"=",
"\"scalar_1.csv\"",
",",
"columns",
"=",
"[",
"\"*\"",
"]",
",",
")",
",",
"]",
",",
")",
"# Generate the experiment description",
"expDesc",
"=",
"{",
"\"environment\"",
":",
"'nupic'",
",",
"\"inferenceArgs\"",
":",
"{",
"\"predictedField\"",
":",
"\"classification\"",
",",
"\"predictionSteps\"",
":",
"[",
"0",
"]",
",",
"}",
",",
"\"inferenceType\"",
":",
"\"MultiStep\"",
",",
"\"streamDef\"",
":",
"streamDef",
",",
"\"includedFields\"",
":",
"[",
"{",
"\"fieldName\"",
":",
"\"field1\"",
",",
"\"fieldType\"",
":",
"\"float\"",
",",
"}",
",",
"{",
"\"fieldName\"",
":",
"\"classification\"",
",",
"\"fieldType\"",
":",
"\"string\"",
",",
"}",
",",
"{",
"\"fieldName\"",
":",
"\"randomData\"",
",",
"\"fieldType\"",
":",
"\"float\"",
",",
"}",
",",
"]",
",",
"\"iterationCount\"",
":",
"-",
"1",
",",
"}",
"return",
"expDesc"
] | This method returns the search description. See the following file for the
schema of the dictionary this method returns:
py/nupic/swarming/exp_generator/experimentDescriptionSchema.json
The streamDef element defines the stream for this model. The schema for this
element can be found at:
py/nupicengine/cluster/database/StreamDef.json | [
"This",
"method",
"returns",
"search",
"description",
".",
"See",
"the",
"following",
"file",
"for",
"the",
"schema",
"of",
"the",
"dictionary",
"this",
"method",
"returns",
":",
"py",
"/",
"nupic",
"/",
"swarming",
"/",
"exp_generator",
"/",
"experimentDescriptionSchema",
".",
"json",
"The",
"streamDef",
"element",
"defines",
"the",
"stream",
"for",
"this",
"model",
".",
"The",
"schema",
"for",
"this",
"element",
"can",
"be",
"found",
"at",
":",
"py",
"/",
"nupicengine",
"/",
"cluster",
"/",
"database",
"/",
"StreamDef",
".",
"json"
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/examples/opf/experiments/spatial_classification/auto_generated/searchDef.py#L27-L75 | valid |
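A short usage sketch for the entry above, assuming the getSearch function defined there is in scope. getSearch only builds the description dictionary, so the referenced CSV does not need to exist to inspect the result.

import os

expDesc = getSearch(os.getcwd())
print(expDesc["inferenceArgs"]["predictedField"])    # classification
print(expDesc["streamDef"]["streams"][0]["source"])  # file://<root>/datasets/scalar_1.csv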
numenta/nupic | src/nupic/encoders/sparse_pass_through.py | SparsePassThroughEncoder.encodeIntoArray | def encodeIntoArray(self, value, output):
""" See method description in base.py """
denseInput = numpy.zeros(output.shape)
try:
denseInput[value] = 1
except IndexError:
if isinstance(value, numpy.ndarray):
raise ValueError(
"Numpy array must have integer dtype but got {}".format(
value.dtype))
raise
super(SparsePassThroughEncoder, self).encodeIntoArray(denseInput, output) | python | def encodeIntoArray(self, value, output):
""" See method description in base.py """
denseInput = numpy.zeros(output.shape)
try:
denseInput[value] = 1
except IndexError:
if isinstance(value, numpy.ndarray):
raise ValueError(
"Numpy array must have integer dtype but got {}".format(
value.dtype))
raise
super(SparsePassThroughEncoder, self).encodeIntoArray(denseInput, output) | [
"def",
"encodeIntoArray",
"(",
"self",
",",
"value",
",",
"output",
")",
":",
"denseInput",
"=",
"numpy",
".",
"zeros",
"(",
"output",
".",
"shape",
")",
"try",
":",
"denseInput",
"[",
"value",
"]",
"=",
"1",
"except",
"IndexError",
":",
"if",
"isinstance",
"(",
"value",
",",
"numpy",
".",
"ndarray",
")",
":",
"raise",
"ValueError",
"(",
"\"Numpy array must have integer dtype but got {}\"",
".",
"format",
"(",
"value",
".",
"dtype",
")",
")",
"raise",
"super",
"(",
"SparsePassThroughEncoder",
",",
"self",
")",
".",
"encodeIntoArray",
"(",
"denseInput",
",",
"output",
")"
] | See method description in base.py | [
"See",
"method",
"description",
"in",
"base",
".",
"py"
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/encoders/sparse_pass_through.py#L71-L82 | valid |
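A standalone sketch of the sparse-to-dense expansion this method performs before delegating to the base class; only the indexing step is reproduced, without constructing the encoder itself.

import numpy

output = numpy.zeros(10)
value = numpy.array([1, 4, 7])   # active bit indices; must have an integer dtype

denseInput = numpy.zeros(output.shape)
denseInput[value] = 1            # a float-dtype index array raises IndexError, per the entry
print(denseInput)                # [0. 1. 0. 0. 1. 0. 0. 1. 0. 0.]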
numenta/nupic | src/nupic/serializable.py | Serializable.readFromFile | def readFromFile(cls, f, packed=True):
"""
Read serialized object from file.
:param f: input file
:param packed: If true, will assume content is packed
:return: first-class instance initialized from proto obj
"""
# Get capnproto schema from instance
schema = cls.getSchema()
# Read from file
if packed:
proto = schema.read_packed(f)
else:
proto = schema.read(f)
# Return first-class instance initialized from proto obj
return cls.read(proto) | python | def readFromFile(cls, f, packed=True):
"""
Read serialized object from file.
:param f: input file
:param packed: If true, will assume content is packed
:return: first-class instance initialized from proto obj
"""
# Get capnproto schema from instance
schema = cls.getSchema()
# Read from file
if packed:
proto = schema.read_packed(f)
else:
proto = schema.read(f)
# Return first-class instance initialized from proto obj
return cls.read(proto) | [
"def",
"readFromFile",
"(",
"cls",
",",
"f",
",",
"packed",
"=",
"True",
")",
":",
"# Get capnproto schema from instance",
"schema",
"=",
"cls",
".",
"getSchema",
"(",
")",
"# Read from file",
"if",
"packed",
":",
"proto",
"=",
"schema",
".",
"read_packed",
"(",
"f",
")",
"else",
":",
"proto",
"=",
"schema",
".",
"read",
"(",
"f",
")",
"# Return first-class instance initialized from proto obj",
"return",
"cls",
".",
"read",
"(",
"proto",
")"
] | Read serialized object from file.
:param f: input file
:param packed: If true, will assume content is packed
:return: first-class instance initialized from proto obj | [
"Read",
"serialized",
"object",
"from",
"file",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/serializable.py#L81-L99 | valid |
numenta/nupic | src/nupic/serializable.py | Serializable.writeToFile | def writeToFile(self, f, packed=True):
"""
Write serialized object to file.
:param f: output file
:param packed: If true, will pack contents.
"""
# Get capnproto schema from instance
schema = self.getSchema()
    # Construct new message, otherwise referred to as `proto`
proto = schema.new_message()
# Populate message w/ `write()` instance method
self.write(proto)
# Finally, write to file
if packed:
proto.write_packed(f)
else:
proto.write(f) | python | def writeToFile(self, f, packed=True):
"""
Write serialized object to file.
:param f: output file
:param packed: If true, will pack contents.
"""
# Get capnproto schema from instance
schema = self.getSchema()
    # Construct new message, otherwise referred to as `proto`
proto = schema.new_message()
# Populate message w/ `write()` instance method
self.write(proto)
# Finally, write to file
if packed:
proto.write_packed(f)
else:
proto.write(f) | [
"def",
"writeToFile",
"(",
"self",
",",
"f",
",",
"packed",
"=",
"True",
")",
":",
"# Get capnproto schema from instance",
"schema",
"=",
"self",
".",
"getSchema",
"(",
")",
"# Construct new message, otherwise refered to as `proto`",
"proto",
"=",
"schema",
".",
"new_message",
"(",
")",
"# Populate message w/ `write()` instance method",
"self",
".",
"write",
"(",
"proto",
")",
"# Finally, write to file",
"if",
"packed",
":",
"proto",
".",
"write_packed",
"(",
"f",
")",
"else",
":",
"proto",
".",
"write",
"(",
"f",
")"
] | Write serialized object to file.
:param f: output file
:param packed: If true, will pack contents. | [
"Write",
"serialized",
"object",
"to",
"file",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/serializable.py#L102-L122 | valid |
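A self-contained round-trip sketch of the control flow shared by this entry and the readFromFile entry above. Every class here is a hypothetical stub standing in for the pycapnp schema/message objects, not the nupic or capnp API.

import io

class FakeProto(object):
  def __init__(self, payload=b""):
    self.payload = payload
  def write_packed(self, f):
    f.write(self.payload)

class FakeSchema(object):
  def new_message(self):
    return FakeProto()
  def read_packed(self, f):
    return FakeProto(f.read())

class Model(object):
  @classmethod
  def getSchema(cls):
    return FakeSchema()
  @classmethod
  def read(cls, proto):
    m = cls()
    m.data = proto.payload
    return m
  def write(self, proto):
    proto.payload = b"state"
  # Same shape as the Serializable methods in the entries above (packed path only):
  def writeToFile(self, f, packed=True):
    proto = self.getSchema().new_message()
    self.write(proto)
    proto.write_packed(f)
  @classmethod
  def readFromFile(cls, f, packed=True):
    return cls.read(cls.getSchema().read_packed(f))

buf = io.BytesIO()
Model().writeToFile(buf)
buf.seek(0)
print(Model.readFromFile(buf).data)   # b'state'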
numenta/nupic | src/nupic/frameworks/opf/two_gram_model.py | TwoGramModel.read | def read(cls, proto):
"""
:param proto: capnp TwoGramModelProto message reader
"""
instance = object.__new__(cls)
super(TwoGramModel, instance).__init__(proto=proto.modelBase)
instance._logger = opf_utils.initLogger(instance)
instance._reset = proto.reset
instance._hashToValueDict = {x.hash: x.value
for x in proto.hashToValueDict}
instance._learningEnabled = proto.learningEnabled
instance._encoder = encoders.MultiEncoder.read(proto.encoder)
instance._fieldNames = instance._encoder.getScalarNames()
instance._prevValues = list(proto.prevValues)
instance._twoGramDicts = [dict() for _ in xrange(len(proto.twoGramDicts))]
for idx, field in enumerate(proto.twoGramDicts):
for entry in field:
prev = None if entry.value == -1 else entry.value
instance._twoGramDicts[idx][prev] = collections.defaultdict(int)
for bucket in entry.buckets:
instance._twoGramDicts[idx][prev][bucket.index] = bucket.count
return instance | python | def read(cls, proto):
"""
:param proto: capnp TwoGramModelProto message reader
"""
instance = object.__new__(cls)
super(TwoGramModel, instance).__init__(proto=proto.modelBase)
instance._logger = opf_utils.initLogger(instance)
instance._reset = proto.reset
instance._hashToValueDict = {x.hash: x.value
for x in proto.hashToValueDict}
instance._learningEnabled = proto.learningEnabled
instance._encoder = encoders.MultiEncoder.read(proto.encoder)
instance._fieldNames = instance._encoder.getScalarNames()
instance._prevValues = list(proto.prevValues)
instance._twoGramDicts = [dict() for _ in xrange(len(proto.twoGramDicts))]
for idx, field in enumerate(proto.twoGramDicts):
for entry in field:
prev = None if entry.value == -1 else entry.value
instance._twoGramDicts[idx][prev] = collections.defaultdict(int)
for bucket in entry.buckets:
instance._twoGramDicts[idx][prev][bucket.index] = bucket.count
return instance | [
"def",
"read",
"(",
"cls",
",",
"proto",
")",
":",
"instance",
"=",
"object",
".",
"__new__",
"(",
"cls",
")",
"super",
"(",
"TwoGramModel",
",",
"instance",
")",
".",
"__init__",
"(",
"proto",
"=",
"proto",
".",
"modelBase",
")",
"instance",
".",
"_logger",
"=",
"opf_utils",
".",
"initLogger",
"(",
"instance",
")",
"instance",
".",
"_reset",
"=",
"proto",
".",
"reset",
"instance",
".",
"_hashToValueDict",
"=",
"{",
"x",
".",
"hash",
":",
"x",
".",
"value",
"for",
"x",
"in",
"proto",
".",
"hashToValueDict",
"}",
"instance",
".",
"_learningEnabled",
"=",
"proto",
".",
"learningEnabled",
"instance",
".",
"_encoder",
"=",
"encoders",
".",
"MultiEncoder",
".",
"read",
"(",
"proto",
".",
"encoder",
")",
"instance",
".",
"_fieldNames",
"=",
"instance",
".",
"_encoder",
".",
"getScalarNames",
"(",
")",
"instance",
".",
"_prevValues",
"=",
"list",
"(",
"proto",
".",
"prevValues",
")",
"instance",
".",
"_twoGramDicts",
"=",
"[",
"dict",
"(",
")",
"for",
"_",
"in",
"xrange",
"(",
"len",
"(",
"proto",
".",
"twoGramDicts",
")",
")",
"]",
"for",
"idx",
",",
"field",
"in",
"enumerate",
"(",
"proto",
".",
"twoGramDicts",
")",
":",
"for",
"entry",
"in",
"field",
":",
"prev",
"=",
"None",
"if",
"entry",
".",
"value",
"==",
"-",
"1",
"else",
"entry",
".",
"value",
"instance",
".",
"_twoGramDicts",
"[",
"idx",
"]",
"[",
"prev",
"]",
"=",
"collections",
".",
"defaultdict",
"(",
"int",
")",
"for",
"bucket",
"in",
"entry",
".",
"buckets",
":",
"instance",
".",
"_twoGramDicts",
"[",
"idx",
"]",
"[",
"prev",
"]",
"[",
"bucket",
".",
"index",
"]",
"=",
"bucket",
".",
"count",
"return",
"instance"
] | :param proto: capnp TwoGramModelProto message reader | [
":",
"param",
"proto",
":",
"capnp",
"TwoGramModelProto",
"message",
"reader"
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/frameworks/opf/two_gram_model.py#L152-L176 | valid |
numenta/nupic | src/nupic/frameworks/opf/two_gram_model.py | TwoGramModel.write | def write(self, proto):
"""
:param proto: capnp TwoGramModelProto message builder
"""
super(TwoGramModel, self).writeBaseToProto(proto.modelBase)
proto.reset = self._reset
proto.learningEnabled = self._learningEnabled
proto.prevValues = self._prevValues
self._encoder.write(proto.encoder)
proto.hashToValueDict = [{"hash": h, "value": v}
for h, v in self._hashToValueDict.items()]
twoGramDicts = []
for items in self._twoGramDicts:
twoGramArr = []
for prev, values in items.iteritems():
buckets = [{"index": index, "count": count}
for index, count in values.iteritems()]
if prev is None:
prev = -1
twoGramArr.append({"value": prev, "buckets": buckets})
twoGramDicts.append(twoGramArr)
proto.twoGramDicts = twoGramDicts | python | def write(self, proto):
"""
:param proto: capnp TwoGramModelProto message builder
"""
super(TwoGramModel, self).writeBaseToProto(proto.modelBase)
proto.reset = self._reset
proto.learningEnabled = self._learningEnabled
proto.prevValues = self._prevValues
self._encoder.write(proto.encoder)
proto.hashToValueDict = [{"hash": h, "value": v}
for h, v in self._hashToValueDict.items()]
twoGramDicts = []
for items in self._twoGramDicts:
twoGramArr = []
for prev, values in items.iteritems():
buckets = [{"index": index, "count": count}
for index, count in values.iteritems()]
if prev is None:
prev = -1
twoGramArr.append({"value": prev, "buckets": buckets})
twoGramDicts.append(twoGramArr)
proto.twoGramDicts = twoGramDicts | [
"def",
"write",
"(",
"self",
",",
"proto",
")",
":",
"super",
"(",
"TwoGramModel",
",",
"self",
")",
".",
"writeBaseToProto",
"(",
"proto",
".",
"modelBase",
")",
"proto",
".",
"reset",
"=",
"self",
".",
"_reset",
"proto",
".",
"learningEnabled",
"=",
"self",
".",
"_learningEnabled",
"proto",
".",
"prevValues",
"=",
"self",
".",
"_prevValues",
"self",
".",
"_encoder",
".",
"write",
"(",
"proto",
".",
"encoder",
")",
"proto",
".",
"hashToValueDict",
"=",
"[",
"{",
"\"hash\"",
":",
"h",
",",
"\"value\"",
":",
"v",
"}",
"for",
"h",
",",
"v",
"in",
"self",
".",
"_hashToValueDict",
".",
"items",
"(",
")",
"]",
"twoGramDicts",
"=",
"[",
"]",
"for",
"items",
"in",
"self",
".",
"_twoGramDicts",
":",
"twoGramArr",
"=",
"[",
"]",
"for",
"prev",
",",
"values",
"in",
"items",
".",
"iteritems",
"(",
")",
":",
"buckets",
"=",
"[",
"{",
"\"index\"",
":",
"index",
",",
"\"count\"",
":",
"count",
"}",
"for",
"index",
",",
"count",
"in",
"values",
".",
"iteritems",
"(",
")",
"]",
"if",
"prev",
"is",
"None",
":",
"prev",
"=",
"-",
"1",
"twoGramArr",
".",
"append",
"(",
"{",
"\"value\"",
":",
"prev",
",",
"\"buckets\"",
":",
"buckets",
"}",
")",
"twoGramDicts",
".",
"append",
"(",
"twoGramArr",
")",
"proto",
".",
"twoGramDicts",
"=",
"twoGramDicts"
] | :param proto: capnp TwoGramModelProto message builder | [
":",
"param",
"proto",
":",
"capnp",
"TwoGramModelProto",
"message",
"builder"
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/frameworks/opf/two_gram_model.py#L179-L204 | valid |
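A small standalone sketch of the None-to--1 sentinel convention shared by the read and write entries above; plain dicts stand in for the capnp structures.

import collections

twoGramDict = {None: {3: 2, 5: 1}, 7: {3: 4}}   # None marks "no previous value"

# write(): a None previous value is stored as -1 in the proto
serialized = [{"value": -1 if prev is None else prev,
               "buckets": [{"index": i, "count": c} for i, c in buckets.items()]}
              for prev, buckets in twoGramDict.items()]

# read(): -1 is mapped back to None on the way in
restored = {}
for entry in serialized:
  prev = None if entry["value"] == -1 else entry["value"]
  restored[prev] = collections.defaultdict(int)
  for b in entry["buckets"]:
    restored[prev][b["index"]] = b["count"]

assert {k: dict(v) for k, v in restored.items()} == twoGramDict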
numenta/nupic | src/nupic/frameworks/opf/htm_prediction_model.py | requireAnomalyModel | def requireAnomalyModel(func):
"""
Decorator for functions that require anomaly models.
"""
@wraps(func)
def _decorator(self, *args, **kwargs):
if not self.getInferenceType() == InferenceType.TemporalAnomaly:
raise RuntimeError("Method required a TemporalAnomaly model.")
if self._getAnomalyClassifier() is None:
raise RuntimeError("Model does not support this command. Model must"
"be an active anomalyDetector model.")
return func(self, *args, **kwargs)
return _decorator | python | def requireAnomalyModel(func):
"""
Decorator for functions that require anomaly models.
"""
@wraps(func)
def _decorator(self, *args, **kwargs):
if not self.getInferenceType() == InferenceType.TemporalAnomaly:
raise RuntimeError("Method required a TemporalAnomaly model.")
if self._getAnomalyClassifier() is None:
raise RuntimeError("Model does not support this command. Model must"
"be an active anomalyDetector model.")
return func(self, *args, **kwargs)
return _decorator | [
"def",
"requireAnomalyModel",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"_decorator",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"self",
".",
"getInferenceType",
"(",
")",
"==",
"InferenceType",
".",
"TemporalAnomaly",
":",
"raise",
"RuntimeError",
"(",
"\"Method required a TemporalAnomaly model.\"",
")",
"if",
"self",
".",
"_getAnomalyClassifier",
"(",
")",
"is",
"None",
":",
"raise",
"RuntimeError",
"(",
"\"Model does not support this command. Model must\"",
"\"be an active anomalyDetector model.\"",
")",
"return",
"func",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"_decorator"
] | Decorator for functions that require anomaly models. | [
"Decorator",
"for",
"functions",
"that",
"require",
"anomaly",
"models",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/frameworks/opf/htm_prediction_model.py#L70-L82 | valid |
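A minimal standalone sketch of the guard pattern used by this decorator. The class and flag below are illustrative stand-ins for the model and its InferenceType check, not nupic code.

from functools import wraps

def requireFlag(func):
  @wraps(func)
  def _decorator(self, *args, **kwargs):
    if not self.flag:   # stands in for the inference-type / classifier checks
      raise RuntimeError("Method requires the flag to be set.")
    return func(self, *args, **kwargs)
  return _decorator

class Model(object):
  def __init__(self, flag):
    self.flag = flag

  @requireFlag
  def doWork(self):
    return "ok"

print(Model(True).doWork())   # ok
try:
  Model(False).doWork()
except RuntimeError as e:
  print(e)                    # Method requires the flag to be set.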
numenta/nupic | src/nupic/frameworks/opf/htm_prediction_model.py | HTMPredictionModel.anomalyRemoveLabels | def anomalyRemoveLabels(self, start, end, labelFilter):
"""
Remove labels from the anomaly classifier within this model. Removes all
records if ``labelFilter==None``, otherwise only removes the labels equal to
``labelFilter``.
:param start: (int) index to start removing labels
:param end: (int) index to end removing labels
:param labelFilter: (string) If specified, only removes records that match
"""
self._getAnomalyClassifier().getSelf().removeLabels(start, end, labelFilter) | python | def anomalyRemoveLabels(self, start, end, labelFilter):
"""
Remove labels from the anomaly classifier within this model. Removes all
records if ``labelFilter==None``, otherwise only removes the labels equal to
``labelFilter``.
:param start: (int) index to start removing labels
:param end: (int) index to end removing labels
:param labelFilter: (string) If specified, only removes records that match
"""
self._getAnomalyClassifier().getSelf().removeLabels(start, end, labelFilter) | [
"def",
"anomalyRemoveLabels",
"(",
"self",
",",
"start",
",",
"end",
",",
"labelFilter",
")",
":",
"self",
".",
"_getAnomalyClassifier",
"(",
")",
".",
"getSelf",
"(",
")",
".",
"removeLabels",
"(",
"start",
",",
"end",
",",
"labelFilter",
")"
] | Remove labels from the anomaly classifier within this model. Removes all
records if ``labelFilter==None``, otherwise only removes the labels equal to
``labelFilter``.
:param start: (int) index to start removing labels
:param end: (int) index to end removing labels
:param labelFilter: (string) If specified, only removes records that match | [
"Remove",
"labels",
"from",
"the",
"anomaly",
"classifier",
"within",
"this",
"model",
".",
"Removes",
"all",
"records",
"if",
"labelFilter",
"==",
"None",
"otherwise",
"only",
"removes",
"the",
"labels",
"equal",
"to",
"labelFilter",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/frameworks/opf/htm_prediction_model.py#L374-L384 | valid |
numenta/nupic | src/nupic/frameworks/opf/htm_prediction_model.py | HTMPredictionModel.anomalyAddLabel | def anomalyAddLabel(self, start, end, labelName):
"""
    Add labels to the anomaly classifier within this model.
:param start: (int) index to start label
:param end: (int) index to end label
:param labelName: (string) name of label
"""
self._getAnomalyClassifier().getSelf().addLabel(start, end, labelName) | python | def anomalyAddLabel(self, start, end, labelName):
"""
    Add labels to the anomaly classifier within this model.
:param start: (int) index to start label
:param end: (int) index to end label
:param labelName: (string) name of label
"""
self._getAnomalyClassifier().getSelf().addLabel(start, end, labelName) | [
"def",
"anomalyAddLabel",
"(",
"self",
",",
"start",
",",
"end",
",",
"labelName",
")",
":",
"self",
".",
"_getAnomalyClassifier",
"(",
")",
".",
"getSelf",
"(",
")",
".",
"addLabel",
"(",
"start",
",",
"end",
",",
"labelName",
")"
] | Add labels to the anomaly classifier within this model.
:param start: (int) index to start label
:param end: (int) index to end label
:param labelName: (string) name of label | [
"Add",
"labels",
"from",
"the",
"anomaly",
"classifier",
"within",
"this",
"model",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/frameworks/opf/htm_prediction_model.py#L388-L396 | valid |
numenta/nupic | src/nupic/frameworks/opf/htm_prediction_model.py | HTMPredictionModel.anomalyGetLabels | def anomalyGetLabels(self, start, end):
"""
Get labels from the anomaly classifier within this model.
:param start: (int) index to start getting labels
:param end: (int) index to end getting labels
"""
return self._getAnomalyClassifier().getSelf().getLabels(start, end) | python | def anomalyGetLabels(self, start, end):
"""
Get labels from the anomaly classifier within this model.
:param start: (int) index to start getting labels
:param end: (int) index to end getting labels
"""
return self._getAnomalyClassifier().getSelf().getLabels(start, end) | [
"def",
"anomalyGetLabels",
"(",
"self",
",",
"start",
",",
"end",
")",
":",
"return",
"self",
".",
"_getAnomalyClassifier",
"(",
")",
".",
"getSelf",
"(",
")",
".",
"getLabels",
"(",
"start",
",",
"end",
")"
] | Get labels from the anomaly classifier within this model.
:param start: (int) index to start getting labels
:param end: (int) index to end getting labels | [
"Get",
"labels",
"from",
"the",
"anomaly",
"classifier",
"within",
"this",
"model",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/frameworks/opf/htm_prediction_model.py#L400-L407 | valid |
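A usage sketch tying together the three label entries above. Per the requireAnomalyModel guard, model must be a TemporalAnomaly HTMPredictionModel with an active anomaly classifier; such a model is assumed to already exist here, so this is illustrative rather than runnable on its own.

model.anomalyAddLabel(10, 20, "spike")       # label records 10..20
print(model.anomalyGetLabels(0, 50))         # inspect labels in a range
model.anomalyRemoveLabels(10, 20, "spike")   # remove only the matching labels
model.anomalyRemoveLabels(0, 50, None)       # labelFilter=None removes all labels in range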
numenta/nupic | src/nupic/frameworks/opf/htm_prediction_model.py | HTMPredictionModel._getSensorInputRecord | def _getSensorInputRecord(self, inputRecord):
"""
inputRecord - dict containing the input to the sensor
    Return a 'SensorInput' object, which contains the 'parsed'
representation of the input record
"""
sensor = self._getSensorRegion()
dataRow = copy.deepcopy(sensor.getSelf().getOutputValues('sourceOut'))
dataDict = copy.deepcopy(inputRecord)
inputRecordEncodings = sensor.getSelf().getOutputValues('sourceEncodings')
inputRecordCategory = int(sensor.getOutputData('categoryOut')[0])
resetOut = sensor.getOutputData('resetOut')[0]
return SensorInput(dataRow=dataRow,
dataDict=dataDict,
dataEncodings=inputRecordEncodings,
sequenceReset=resetOut,
category=inputRecordCategory) | python | def _getSensorInputRecord(self, inputRecord):
"""
inputRecord - dict containing the input to the sensor
    Return a 'SensorInput' object, which contains the 'parsed'
representation of the input record
"""
sensor = self._getSensorRegion()
dataRow = copy.deepcopy(sensor.getSelf().getOutputValues('sourceOut'))
dataDict = copy.deepcopy(inputRecord)
inputRecordEncodings = sensor.getSelf().getOutputValues('sourceEncodings')
inputRecordCategory = int(sensor.getOutputData('categoryOut')[0])
resetOut = sensor.getOutputData('resetOut')[0]
return SensorInput(dataRow=dataRow,
dataDict=dataDict,
dataEncodings=inputRecordEncodings,
sequenceReset=resetOut,
category=inputRecordCategory) | [
"def",
"_getSensorInputRecord",
"(",
"self",
",",
"inputRecord",
")",
":",
"sensor",
"=",
"self",
".",
"_getSensorRegion",
"(",
")",
"dataRow",
"=",
"copy",
".",
"deepcopy",
"(",
"sensor",
".",
"getSelf",
"(",
")",
".",
"getOutputValues",
"(",
"'sourceOut'",
")",
")",
"dataDict",
"=",
"copy",
".",
"deepcopy",
"(",
"inputRecord",
")",
"inputRecordEncodings",
"=",
"sensor",
".",
"getSelf",
"(",
")",
".",
"getOutputValues",
"(",
"'sourceEncodings'",
")",
"inputRecordCategory",
"=",
"int",
"(",
"sensor",
".",
"getOutputData",
"(",
"'categoryOut'",
")",
"[",
"0",
"]",
")",
"resetOut",
"=",
"sensor",
".",
"getOutputData",
"(",
"'resetOut'",
")",
"[",
"0",
"]",
"return",
"SensorInput",
"(",
"dataRow",
"=",
"dataRow",
",",
"dataDict",
"=",
"dataDict",
",",
"dataEncodings",
"=",
"inputRecordEncodings",
",",
"sequenceReset",
"=",
"resetOut",
",",
"category",
"=",
"inputRecordCategory",
")"
] | inputRecord - dict containing the input to the sensor
Return a 'SensorInput' object, which contains the 'parsed'
representation of the input record | [
"inputRecord",
"-",
"dict",
"containing",
"the",
"input",
"to",
"the",
"sensor"
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/frameworks/opf/htm_prediction_model.py#L478-L496 | valid |
numenta/nupic | src/nupic/frameworks/opf/htm_prediction_model.py | HTMPredictionModel._getClassifierInputRecord | def _getClassifierInputRecord(self, inputRecord):
"""
inputRecord - dict containing the input to the sensor
Return a 'ClassifierInput' object, which contains the mapped
    bucket index for the input record
"""
absoluteValue = None
bucketIdx = None
if self._predictedFieldName is not None and self._classifierInputEncoder is not None:
absoluteValue = inputRecord[self._predictedFieldName]
bucketIdx = self._classifierInputEncoder.getBucketIndices(absoluteValue)[0]
return ClassifierInput(dataRow=absoluteValue,
bucketIndex=bucketIdx) | python | def _getClassifierInputRecord(self, inputRecord):
"""
inputRecord - dict containing the input to the sensor
Return a 'ClassifierInput' object, which contains the mapped
    bucket index for the input record
"""
absoluteValue = None
bucketIdx = None
if self._predictedFieldName is not None and self._classifierInputEncoder is not None:
absoluteValue = inputRecord[self._predictedFieldName]
bucketIdx = self._classifierInputEncoder.getBucketIndices(absoluteValue)[0]
return ClassifierInput(dataRow=absoluteValue,
bucketIndex=bucketIdx) | [
"def",
"_getClassifierInputRecord",
"(",
"self",
",",
"inputRecord",
")",
":",
"absoluteValue",
"=",
"None",
"bucketIdx",
"=",
"None",
"if",
"self",
".",
"_predictedFieldName",
"is",
"not",
"None",
"and",
"self",
".",
"_classifierInputEncoder",
"is",
"not",
"None",
":",
"absoluteValue",
"=",
"inputRecord",
"[",
"self",
".",
"_predictedFieldName",
"]",
"bucketIdx",
"=",
"self",
".",
"_classifierInputEncoder",
".",
"getBucketIndices",
"(",
"absoluteValue",
")",
"[",
"0",
"]",
"return",
"ClassifierInput",
"(",
"dataRow",
"=",
"absoluteValue",
",",
"bucketIndex",
"=",
"bucketIdx",
")"
] | inputRecord - dict containing the input to the sensor
Return a 'ClassifierInput' object, which contains the mapped
bucket index for the input record | [
"inputRecord",
"-",
"dict",
"containing",
"the",
"input",
"to",
"the",
"sensor"
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/frameworks/opf/htm_prediction_model.py#L498-L513 | valid |
numenta/nupic | src/nupic/frameworks/opf/htm_prediction_model.py | HTMPredictionModel._anomalyCompute | def _anomalyCompute(self):
"""
Compute Anomaly score, if required
"""
inferenceType = self.getInferenceType()
inferences = {}
sp = self._getSPRegion()
score = None
if inferenceType == InferenceType.NontemporalAnomaly:
score = sp.getOutputData("anomalyScore")[0] #TODO move from SP to Anomaly ?
elif inferenceType == InferenceType.TemporalAnomaly:
tm = self._getTPRegion()
if sp is not None:
activeColumns = sp.getOutputData("bottomUpOut").nonzero()[0]
else:
sensor = self._getSensorRegion()
activeColumns = sensor.getOutputData('dataOut').nonzero()[0]
if not self._predictedFieldName in self._input:
raise ValueError(
"Expected predicted field '%s' in input row, but was not found!"
% self._predictedFieldName
)
# Calculate the anomaly score using the active columns
# and previous predicted columns.
score = tm.getOutputData("anomalyScore")[0]
# Calculate the classifier's output and use the result as the anomaly
# label. Stores as string of results.
# TODO: make labels work with non-SP models
if sp is not None:
self._getAnomalyClassifier().setParameter(
"activeColumnCount", len(activeColumns))
self._getAnomalyClassifier().prepareInputs()
self._getAnomalyClassifier().compute()
labels = self._getAnomalyClassifier().getSelf().getLabelResults()
inferences[InferenceElement.anomalyLabel] = "%s" % labels
inferences[InferenceElement.anomalyScore] = score
return inferences | python | def _anomalyCompute(self):
"""
Compute Anomaly score, if required
"""
inferenceType = self.getInferenceType()
inferences = {}
sp = self._getSPRegion()
score = None
if inferenceType == InferenceType.NontemporalAnomaly:
score = sp.getOutputData("anomalyScore")[0] #TODO move from SP to Anomaly ?
elif inferenceType == InferenceType.TemporalAnomaly:
tm = self._getTPRegion()
if sp is not None:
activeColumns = sp.getOutputData("bottomUpOut").nonzero()[0]
else:
sensor = self._getSensorRegion()
activeColumns = sensor.getOutputData('dataOut').nonzero()[0]
if not self._predictedFieldName in self._input:
raise ValueError(
"Expected predicted field '%s' in input row, but was not found!"
% self._predictedFieldName
)
# Calculate the anomaly score using the active columns
# and previous predicted columns.
score = tm.getOutputData("anomalyScore")[0]
# Calculate the classifier's output and use the result as the anomaly
# label. Stores as string of results.
# TODO: make labels work with non-SP models
if sp is not None:
self._getAnomalyClassifier().setParameter(
"activeColumnCount", len(activeColumns))
self._getAnomalyClassifier().prepareInputs()
self._getAnomalyClassifier().compute()
labels = self._getAnomalyClassifier().getSelf().getLabelResults()
inferences[InferenceElement.anomalyLabel] = "%s" % labels
inferences[InferenceElement.anomalyScore] = score
return inferences | [
"def",
"_anomalyCompute",
"(",
"self",
")",
":",
"inferenceType",
"=",
"self",
".",
"getInferenceType",
"(",
")",
"inferences",
"=",
"{",
"}",
"sp",
"=",
"self",
".",
"_getSPRegion",
"(",
")",
"score",
"=",
"None",
"if",
"inferenceType",
"==",
"InferenceType",
".",
"NontemporalAnomaly",
":",
"score",
"=",
"sp",
".",
"getOutputData",
"(",
"\"anomalyScore\"",
")",
"[",
"0",
"]",
"#TODO move from SP to Anomaly ?",
"elif",
"inferenceType",
"==",
"InferenceType",
".",
"TemporalAnomaly",
":",
"tm",
"=",
"self",
".",
"_getTPRegion",
"(",
")",
"if",
"sp",
"is",
"not",
"None",
":",
"activeColumns",
"=",
"sp",
".",
"getOutputData",
"(",
"\"bottomUpOut\"",
")",
".",
"nonzero",
"(",
")",
"[",
"0",
"]",
"else",
":",
"sensor",
"=",
"self",
".",
"_getSensorRegion",
"(",
")",
"activeColumns",
"=",
"sensor",
".",
"getOutputData",
"(",
"'dataOut'",
")",
".",
"nonzero",
"(",
")",
"[",
"0",
"]",
"if",
"not",
"self",
".",
"_predictedFieldName",
"in",
"self",
".",
"_input",
":",
"raise",
"ValueError",
"(",
"\"Expected predicted field '%s' in input row, but was not found!\"",
"%",
"self",
".",
"_predictedFieldName",
")",
"# Calculate the anomaly score using the active columns",
"# and previous predicted columns.",
"score",
"=",
"tm",
".",
"getOutputData",
"(",
"\"anomalyScore\"",
")",
"[",
"0",
"]",
"# Calculate the classifier's output and use the result as the anomaly",
"# label. Stores as string of results.",
"# TODO: make labels work with non-SP models",
"if",
"sp",
"is",
"not",
"None",
":",
"self",
".",
"_getAnomalyClassifier",
"(",
")",
".",
"setParameter",
"(",
"\"activeColumnCount\"",
",",
"len",
"(",
"activeColumns",
")",
")",
"self",
".",
"_getAnomalyClassifier",
"(",
")",
".",
"prepareInputs",
"(",
")",
"self",
".",
"_getAnomalyClassifier",
"(",
")",
".",
"compute",
"(",
")",
"labels",
"=",
"self",
".",
"_getAnomalyClassifier",
"(",
")",
".",
"getSelf",
"(",
")",
".",
"getLabelResults",
"(",
")",
"inferences",
"[",
"InferenceElement",
".",
"anomalyLabel",
"]",
"=",
"\"%s\"",
"%",
"labels",
"inferences",
"[",
"InferenceElement",
".",
"anomalyScore",
"]",
"=",
"score",
"return",
"inferences"
] | Compute Anomaly score, if required | [
"Compute",
"Anomaly",
"score",
"if",
"required"
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/frameworks/opf/htm_prediction_model.py#L665-L709 | valid |
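A standalone sketch of the active-column extraction step used in the entry above; a plain numpy vector stands in for the region's bottomUpOut output.

import numpy

bottomUpOut = numpy.array([0, 1, 0, 0, 1, 1, 0])
activeColumns = bottomUpOut.nonzero()[0]
print(activeColumns)   # [1 4 5] -- the indices handed to the anomaly classifier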
numenta/nupic | src/nupic/frameworks/opf/htm_prediction_model.py | HTMPredictionModel._handleSDRClassifierMultiStep | def _handleSDRClassifierMultiStep(self, patternNZ,
inputTSRecordIdx,
rawInput):
""" Handle the CLA Classifier compute logic when implementing multi-step
prediction. This is where the patternNZ is associated with one of the
other fields from the dataset 0 to N steps in the future. This method is
    used by each type of network (encoder only, SP only, SP + TM) to handle the
compute logic through the CLA Classifier. It fills in the inference dict with
the results of the compute.
Parameters:
-------------------------------------------------------------------
patternNZ: The input to the CLA Classifier as a list of active input indices
inputTSRecordIdx: The index of the record as computed from the timestamp
and aggregation interval. This normally increments by 1
each time unless there are missing records. If there is no
aggregation interval or timestamp in the data, this will be
None.
rawInput: The raw input to the sensor, as a dict.
"""
inferenceArgs = self.getInferenceArgs()
predictedFieldName = inferenceArgs.get('predictedField', None)
if predictedFieldName is None:
raise ValueError(
"No predicted field was enabled! Did you call enableInference()?"
)
self._predictedFieldName = predictedFieldName
classifier = self._getClassifierRegion()
if not self._hasCL or classifier is None:
# No classifier so return an empty dict for inferences.
return {}
sensor = self._getSensorRegion()
minLikelihoodThreshold = self._minLikelihoodThreshold
maxPredictionsPerStep = self._maxPredictionsPerStep
needLearning = self.isLearningEnabled()
inferences = {}
# Get the classifier input encoder, if we don't have it already
if self._classifierInputEncoder is None:
if predictedFieldName is None:
raise RuntimeError("This experiment description is missing "
"the 'predictedField' in its config, which is required "
"for multi-step prediction inference.")
encoderList = sensor.getSelf().encoder.getEncoderList()
self._numFields = len(encoderList)
# This is getting index of predicted field if being fed to CLA.
fieldNames = sensor.getSelf().encoder.getScalarNames()
if predictedFieldName in fieldNames:
self._predictedFieldIdx = fieldNames.index(predictedFieldName)
else:
# Predicted field was not fed into the network, only to the classifier
self._predictedFieldIdx = None
# In a multi-step model, the classifier input encoder is separate from
# the other encoders and always disabled from going into the bottom of
# the network.
if sensor.getSelf().disabledEncoder is not None:
encoderList = sensor.getSelf().disabledEncoder.getEncoderList()
else:
encoderList = []
if len(encoderList) >= 1:
fieldNames = sensor.getSelf().disabledEncoder.getScalarNames()
self._classifierInputEncoder = encoderList[fieldNames.index(
predictedFieldName)]
else:
# Legacy multi-step networks don't have a separate encoder for the
# classifier, so use the one that goes into the bottom of the network
encoderList = sensor.getSelf().encoder.getEncoderList()
self._classifierInputEncoder = encoderList[self._predictedFieldIdx]
# Get the actual value and the bucket index for this sample. The
# predicted field may not be enabled for input to the network, so we
# explicitly encode it outside of the sensor
# TODO: All this logic could be simpler if in the encoder itself
if not predictedFieldName in rawInput:
raise ValueError("Input row does not contain a value for the predicted "
"field configured for this model. Missing value for '%s'"
% predictedFieldName)
absoluteValue = rawInput[predictedFieldName]
bucketIdx = self._classifierInputEncoder.getBucketIndices(absoluteValue)[0]
# Convert the absolute values to deltas if necessary
# The bucket index should be handled correctly by the underlying delta encoder
if isinstance(self._classifierInputEncoder, DeltaEncoder):
# Make the delta before any values have been seen 0 so that we do not mess up the
# range for the adaptive scalar encoder.
if not hasattr(self,"_ms_prevVal"):
self._ms_prevVal = absoluteValue
prevValue = self._ms_prevVal
self._ms_prevVal = absoluteValue
actualValue = absoluteValue - prevValue
else:
actualValue = absoluteValue
if isinstance(actualValue, float) and math.isnan(actualValue):
actualValue = SENTINEL_VALUE_FOR_MISSING_DATA
# Pass this information to the classifier's custom compute method
# so that it can assign the current classification to possibly
# multiple patterns from the past and current, and also provide
# the expected classification for some time step(s) in the future.
classifier.setParameter('inferenceMode', True)
classifier.setParameter('learningMode', needLearning)
classificationIn = {'bucketIdx': bucketIdx,
'actValue': actualValue}
# Handle missing records
if inputTSRecordIdx is not None:
recordNum = inputTSRecordIdx
else:
recordNum = self.__numRunCalls
clResults = classifier.getSelf().customCompute(recordNum=recordNum,
patternNZ=patternNZ,
classification=classificationIn)
# ---------------------------------------------------------------
# Get the prediction for every step ahead learned by the classifier
predictionSteps = classifier.getParameter('steps')
predictionSteps = [int(x) for x in predictionSteps.split(',')]
# We will return the results in this dict. The top level keys
# are the step number, the values are the relative likelihoods for
# each classification value in that time step, represented as
# another dict where the keys are the classification values and
# the values are the relative likelihoods.
inferences[InferenceElement.multiStepPredictions] = dict()
inferences[InferenceElement.multiStepBestPredictions] = dict()
inferences[InferenceElement.multiStepBucketLikelihoods] = dict()
# ======================================================================
# Plug in the predictions for each requested time step.
for steps in predictionSteps:
# From the clResults, compute the predicted actual value. The
# SDRClassifier classifies the bucket index and returns a list of
# relative likelihoods for each bucket. Let's find the max one
# and then look up the actual value from that bucket index
likelihoodsVec = clResults[steps]
bucketValues = clResults['actualValues']
# Create a dict of value:likelihood pairs. We can't simply use
# dict(zip(bucketValues, likelihoodsVec)) because there might be
# duplicate bucketValues (this happens early on in the model when
# it doesn't have actual values for each bucket so it returns
# multiple buckets with the same default actual value).
likelihoodsDict = dict()
bestActValue = None
bestProb = None
for (actValue, prob) in zip(bucketValues, likelihoodsVec):
if actValue in likelihoodsDict:
likelihoodsDict[actValue] += prob
else:
likelihoodsDict[actValue] = prob
# Keep track of best
if bestProb is None or likelihoodsDict[actValue] > bestProb:
bestProb = likelihoodsDict[actValue]
bestActValue = actValue
# Remove entries with 0 likelihood or likelihood less than
# minLikelihoodThreshold, but don't leave an empty dict.
likelihoodsDict = HTMPredictionModel._removeUnlikelyPredictions(
likelihoodsDict, minLikelihoodThreshold, maxPredictionsPerStep)
# calculate likelihood for each bucket
bucketLikelihood = {}
for k in likelihoodsDict.keys():
bucketLikelihood[self._classifierInputEncoder.getBucketIndices(k)[0]] = (
likelihoodsDict[k])
# ---------------------------------------------------------------------
# If we have a delta encoder, we have to shift our predicted output value
# by the sum of the deltas
if isinstance(self._classifierInputEncoder, DeltaEncoder):
# Get the prediction history for this number of timesteps.
# The prediction history is a store of the previous best predicted values.
# This is used to get the final shift from the current absolute value.
if not hasattr(self, '_ms_predHistories'):
self._ms_predHistories = dict()
predHistories = self._ms_predHistories
if not steps in predHistories:
predHistories[steps] = deque()
predHistory = predHistories[steps]
# Find the sum of the deltas for the steps and use this to generate
# an offset from the current absolute value
sumDelta = sum(predHistory)
offsetDict = dict()
for (k, v) in likelihoodsDict.iteritems():
if k is not None:
# Reconstruct the absolute value based on the current actual value,
# the best predicted values from the previous iterations,
# and the current predicted delta
offsetDict[absoluteValue+float(k)+sumDelta] = v
# calculate likelihood for each bucket
bucketLikelihoodOffset = {}
for k in offsetDict.keys():
bucketLikelihoodOffset[self._classifierInputEncoder.getBucketIndices(k)[0]] = (
offsetDict[k])
# Push the current best delta to the history buffer for reconstructing the final delta
if bestActValue is not None:
predHistory.append(bestActValue)
# If we don't need any more values in the predictionHistory, pop off
# the earliest one.
if len(predHistory) >= steps:
predHistory.popleft()
# Provide the offsetDict as the return value
if len(offsetDict)>0:
inferences[InferenceElement.multiStepPredictions][steps] = offsetDict
inferences[InferenceElement.multiStepBucketLikelihoods][steps] = bucketLikelihoodOffset
else:
inferences[InferenceElement.multiStepPredictions][steps] = likelihoodsDict
inferences[InferenceElement.multiStepBucketLikelihoods][steps] = bucketLikelihood
if bestActValue is None:
inferences[InferenceElement.multiStepBestPredictions][steps] = None
else:
inferences[InferenceElement.multiStepBestPredictions][steps] = (
absoluteValue + sumDelta + bestActValue)
# ---------------------------------------------------------------------
# Normal case, no delta encoder. Just plug in all our multi-step predictions
# with likelihoods as well as our best prediction
else:
# The multiStepPredictions element holds the probabilities for each
# bucket
inferences[InferenceElement.multiStepPredictions][steps] = (
likelihoodsDict)
inferences[InferenceElement.multiStepBestPredictions][steps] = (
bestActValue)
inferences[InferenceElement.multiStepBucketLikelihoods][steps] = (
bucketLikelihood)
return inferences | python | def _handleSDRClassifierMultiStep(self, patternNZ,
inputTSRecordIdx,
rawInput):
""" Handle the CLA Classifier compute logic when implementing multi-step
prediction. This is where the patternNZ is associated with one of the
other fields from the dataset 0 to N steps in the future. This method is
    used by each type of network (encoder only, SP only, SP + TM) to handle the
compute logic through the CLA Classifier. It fills in the inference dict with
the results of the compute.
Parameters:
-------------------------------------------------------------------
patternNZ: The input to the CLA Classifier as a list of active input indices
inputTSRecordIdx: The index of the record as computed from the timestamp
and aggregation interval. This normally increments by 1
each time unless there are missing records. If there is no
aggregation interval or timestamp in the data, this will be
None.
rawInput: The raw input to the sensor, as a dict.
"""
inferenceArgs = self.getInferenceArgs()
predictedFieldName = inferenceArgs.get('predictedField', None)
if predictedFieldName is None:
raise ValueError(
"No predicted field was enabled! Did you call enableInference()?"
)
self._predictedFieldName = predictedFieldName
classifier = self._getClassifierRegion()
if not self._hasCL or classifier is None:
# No classifier so return an empty dict for inferences.
return {}
sensor = self._getSensorRegion()
minLikelihoodThreshold = self._minLikelihoodThreshold
maxPredictionsPerStep = self._maxPredictionsPerStep
needLearning = self.isLearningEnabled()
inferences = {}
# Get the classifier input encoder, if we don't have it already
if self._classifierInputEncoder is None:
if predictedFieldName is None:
raise RuntimeError("This experiment description is missing "
"the 'predictedField' in its config, which is required "
"for multi-step prediction inference.")
encoderList = sensor.getSelf().encoder.getEncoderList()
self._numFields = len(encoderList)
# This is getting index of predicted field if being fed to CLA.
fieldNames = sensor.getSelf().encoder.getScalarNames()
if predictedFieldName in fieldNames:
self._predictedFieldIdx = fieldNames.index(predictedFieldName)
else:
# Predicted field was not fed into the network, only to the classifier
self._predictedFieldIdx = None
# In a multi-step model, the classifier input encoder is separate from
# the other encoders and always disabled from going into the bottom of
# the network.
if sensor.getSelf().disabledEncoder is not None:
encoderList = sensor.getSelf().disabledEncoder.getEncoderList()
else:
encoderList = []
if len(encoderList) >= 1:
fieldNames = sensor.getSelf().disabledEncoder.getScalarNames()
self._classifierInputEncoder = encoderList[fieldNames.index(
predictedFieldName)]
else:
# Legacy multi-step networks don't have a separate encoder for the
# classifier, so use the one that goes into the bottom of the network
encoderList = sensor.getSelf().encoder.getEncoderList()
self._classifierInputEncoder = encoderList[self._predictedFieldIdx]
# Get the actual value and the bucket index for this sample. The
# predicted field may not be enabled for input to the network, so we
# explicitly encode it outside of the sensor
# TODO: All this logic could be simpler if in the encoder itself
if not predictedFieldName in rawInput:
raise ValueError("Input row does not contain a value for the predicted "
"field configured for this model. Missing value for '%s'"
% predictedFieldName)
absoluteValue = rawInput[predictedFieldName]
bucketIdx = self._classifierInputEncoder.getBucketIndices(absoluteValue)[0]
# Convert the absolute values to deltas if necessary
# The bucket index should be handled correctly by the underlying delta encoder
if isinstance(self._classifierInputEncoder, DeltaEncoder):
# Make the delta before any values have been seen 0 so that we do not mess up the
# range for the adaptive scalar encoder.
if not hasattr(self,"_ms_prevVal"):
self._ms_prevVal = absoluteValue
prevValue = self._ms_prevVal
self._ms_prevVal = absoluteValue
actualValue = absoluteValue - prevValue
else:
actualValue = absoluteValue
if isinstance(actualValue, float) and math.isnan(actualValue):
actualValue = SENTINEL_VALUE_FOR_MISSING_DATA
# Pass this information to the classifier's custom compute method
# so that it can assign the current classification to possibly
# multiple patterns from the past and current, and also provide
# the expected classification for some time step(s) in the future.
classifier.setParameter('inferenceMode', True)
classifier.setParameter('learningMode', needLearning)
classificationIn = {'bucketIdx': bucketIdx,
'actValue': actualValue}
# Handle missing records
if inputTSRecordIdx is not None:
recordNum = inputTSRecordIdx
else:
recordNum = self.__numRunCalls
clResults = classifier.getSelf().customCompute(recordNum=recordNum,
patternNZ=patternNZ,
classification=classificationIn)
# ---------------------------------------------------------------
# Get the prediction for every step ahead learned by the classifier
predictionSteps = classifier.getParameter('steps')
predictionSteps = [int(x) for x in predictionSteps.split(',')]
# We will return the results in this dict. The top level keys
# are the step number, the values are the relative likelihoods for
# each classification value in that time step, represented as
# another dict where the keys are the classification values and
# the values are the relative likelihoods.
inferences[InferenceElement.multiStepPredictions] = dict()
inferences[InferenceElement.multiStepBestPredictions] = dict()
inferences[InferenceElement.multiStepBucketLikelihoods] = dict()
# ======================================================================
# Plug in the predictions for each requested time step.
for steps in predictionSteps:
# From the clResults, compute the predicted actual value. The
# SDRClassifier classifies the bucket index and returns a list of
# relative likelihoods for each bucket. Let's find the max one
# and then look up the actual value from that bucket index
likelihoodsVec = clResults[steps]
bucketValues = clResults['actualValues']
# Create a dict of value:likelihood pairs. We can't simply use
# dict(zip(bucketValues, likelihoodsVec)) because there might be
# duplicate bucketValues (this happens early on in the model when
# it doesn't have actual values for each bucket so it returns
# multiple buckets with the same default actual value).
likelihoodsDict = dict()
bestActValue = None
bestProb = None
for (actValue, prob) in zip(bucketValues, likelihoodsVec):
if actValue in likelihoodsDict:
likelihoodsDict[actValue] += prob
else:
likelihoodsDict[actValue] = prob
# Keep track of best
if bestProb is None or likelihoodsDict[actValue] > bestProb:
bestProb = likelihoodsDict[actValue]
bestActValue = actValue
# Remove entries with 0 likelihood or likelihood less than
# minLikelihoodThreshold, but don't leave an empty dict.
likelihoodsDict = HTMPredictionModel._removeUnlikelyPredictions(
likelihoodsDict, minLikelihoodThreshold, maxPredictionsPerStep)
# calculate likelihood for each bucket
bucketLikelihood = {}
for k in likelihoodsDict.keys():
bucketLikelihood[self._classifierInputEncoder.getBucketIndices(k)[0]] = (
likelihoodsDict[k])
# ---------------------------------------------------------------------
# If we have a delta encoder, we have to shift our predicted output value
# by the sum of the deltas
if isinstance(self._classifierInputEncoder, DeltaEncoder):
# Get the prediction history for this number of timesteps.
# The prediction history is a store of the previous best predicted values.
# This is used to get the final shift from the current absolute value.
if not hasattr(self, '_ms_predHistories'):
self._ms_predHistories = dict()
predHistories = self._ms_predHistories
if not steps in predHistories:
predHistories[steps] = deque()
predHistory = predHistories[steps]
# Find the sum of the deltas for the steps and use this to generate
# an offset from the current absolute value
sumDelta = sum(predHistory)
offsetDict = dict()
for (k, v) in likelihoodsDict.iteritems():
if k is not None:
# Reconstruct the absolute value based on the current actual value,
# the best predicted values from the previous iterations,
# and the current predicted delta
offsetDict[absoluteValue+float(k)+sumDelta] = v
# calculate likelihood for each bucket
bucketLikelihoodOffset = {}
for k in offsetDict.keys():
bucketLikelihoodOffset[self._classifierInputEncoder.getBucketIndices(k)[0]] = (
offsetDict[k])
# Push the current best delta to the history buffer for reconstructing the final delta
if bestActValue is not None:
predHistory.append(bestActValue)
# If we don't need any more values in the predictionHistory, pop off
# the earliest one.
if len(predHistory) >= steps:
predHistory.popleft()
# Provide the offsetDict as the return value
if len(offsetDict)>0:
inferences[InferenceElement.multiStepPredictions][steps] = offsetDict
inferences[InferenceElement.multiStepBucketLikelihoods][steps] = bucketLikelihoodOffset
else:
inferences[InferenceElement.multiStepPredictions][steps] = likelihoodsDict
inferences[InferenceElement.multiStepBucketLikelihoods][steps] = bucketLikelihood
if bestActValue is None:
inferences[InferenceElement.multiStepBestPredictions][steps] = None
else:
inferences[InferenceElement.multiStepBestPredictions][steps] = (
absoluteValue + sumDelta + bestActValue)
# ---------------------------------------------------------------------
# Normal case, no delta encoder. Just plug in all our multi-step predictions
# with likelihoods as well as our best prediction
else:
# The multiStepPredictions element holds the probabilities for each
# bucket
inferences[InferenceElement.multiStepPredictions][steps] = (
likelihoodsDict)
inferences[InferenceElement.multiStepBestPredictions][steps] = (
bestActValue)
inferences[InferenceElement.multiStepBucketLikelihoods][steps] = (
bucketLikelihood)
return inferences | [
"def",
"_handleSDRClassifierMultiStep",
"(",
"self",
",",
"patternNZ",
",",
"inputTSRecordIdx",
",",
"rawInput",
")",
":",
"inferenceArgs",
"=",
"self",
".",
"getInferenceArgs",
"(",
")",
"predictedFieldName",
"=",
"inferenceArgs",
".",
"get",
"(",
"'predictedField'",
",",
"None",
")",
"if",
"predictedFieldName",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"No predicted field was enabled! Did you call enableInference()?\"",
")",
"self",
".",
"_predictedFieldName",
"=",
"predictedFieldName",
"classifier",
"=",
"self",
".",
"_getClassifierRegion",
"(",
")",
"if",
"not",
"self",
".",
"_hasCL",
"or",
"classifier",
"is",
"None",
":",
"# No classifier so return an empty dict for inferences.",
"return",
"{",
"}",
"sensor",
"=",
"self",
".",
"_getSensorRegion",
"(",
")",
"minLikelihoodThreshold",
"=",
"self",
".",
"_minLikelihoodThreshold",
"maxPredictionsPerStep",
"=",
"self",
".",
"_maxPredictionsPerStep",
"needLearning",
"=",
"self",
".",
"isLearningEnabled",
"(",
")",
"inferences",
"=",
"{",
"}",
"# Get the classifier input encoder, if we don't have it already",
"if",
"self",
".",
"_classifierInputEncoder",
"is",
"None",
":",
"if",
"predictedFieldName",
"is",
"None",
":",
"raise",
"RuntimeError",
"(",
"\"This experiment description is missing \"",
"\"the 'predictedField' in its config, which is required \"",
"\"for multi-step prediction inference.\"",
")",
"encoderList",
"=",
"sensor",
".",
"getSelf",
"(",
")",
".",
"encoder",
".",
"getEncoderList",
"(",
")",
"self",
".",
"_numFields",
"=",
"len",
"(",
"encoderList",
")",
"# This is getting index of predicted field if being fed to CLA.",
"fieldNames",
"=",
"sensor",
".",
"getSelf",
"(",
")",
".",
"encoder",
".",
"getScalarNames",
"(",
")",
"if",
"predictedFieldName",
"in",
"fieldNames",
":",
"self",
".",
"_predictedFieldIdx",
"=",
"fieldNames",
".",
"index",
"(",
"predictedFieldName",
")",
"else",
":",
"# Predicted field was not fed into the network, only to the classifier",
"self",
".",
"_predictedFieldIdx",
"=",
"None",
"# In a multi-step model, the classifier input encoder is separate from",
"# the other encoders and always disabled from going into the bottom of",
"# the network.",
"if",
"sensor",
".",
"getSelf",
"(",
")",
".",
"disabledEncoder",
"is",
"not",
"None",
":",
"encoderList",
"=",
"sensor",
".",
"getSelf",
"(",
")",
".",
"disabledEncoder",
".",
"getEncoderList",
"(",
")",
"else",
":",
"encoderList",
"=",
"[",
"]",
"if",
"len",
"(",
"encoderList",
")",
">=",
"1",
":",
"fieldNames",
"=",
"sensor",
".",
"getSelf",
"(",
")",
".",
"disabledEncoder",
".",
"getScalarNames",
"(",
")",
"self",
".",
"_classifierInputEncoder",
"=",
"encoderList",
"[",
"fieldNames",
".",
"index",
"(",
"predictedFieldName",
")",
"]",
"else",
":",
"# Legacy multi-step networks don't have a separate encoder for the",
"# classifier, so use the one that goes into the bottom of the network",
"encoderList",
"=",
"sensor",
".",
"getSelf",
"(",
")",
".",
"encoder",
".",
"getEncoderList",
"(",
")",
"self",
".",
"_classifierInputEncoder",
"=",
"encoderList",
"[",
"self",
".",
"_predictedFieldIdx",
"]",
"# Get the actual value and the bucket index for this sample. The",
"# predicted field may not be enabled for input to the network, so we",
"# explicitly encode it outside of the sensor",
"# TODO: All this logic could be simpler if in the encoder itself",
"if",
"not",
"predictedFieldName",
"in",
"rawInput",
":",
"raise",
"ValueError",
"(",
"\"Input row does not contain a value for the predicted \"",
"\"field configured for this model. Missing value for '%s'\"",
"%",
"predictedFieldName",
")",
"absoluteValue",
"=",
"rawInput",
"[",
"predictedFieldName",
"]",
"bucketIdx",
"=",
"self",
".",
"_classifierInputEncoder",
".",
"getBucketIndices",
"(",
"absoluteValue",
")",
"[",
"0",
"]",
"# Convert the absolute values to deltas if necessary",
"# The bucket index should be handled correctly by the underlying delta encoder",
"if",
"isinstance",
"(",
"self",
".",
"_classifierInputEncoder",
",",
"DeltaEncoder",
")",
":",
"# Make the delta before any values have been seen 0 so that we do not mess up the",
"# range for the adaptive scalar encoder.",
"if",
"not",
"hasattr",
"(",
"self",
",",
"\"_ms_prevVal\"",
")",
":",
"self",
".",
"_ms_prevVal",
"=",
"absoluteValue",
"prevValue",
"=",
"self",
".",
"_ms_prevVal",
"self",
".",
"_ms_prevVal",
"=",
"absoluteValue",
"actualValue",
"=",
"absoluteValue",
"-",
"prevValue",
"else",
":",
"actualValue",
"=",
"absoluteValue",
"if",
"isinstance",
"(",
"actualValue",
",",
"float",
")",
"and",
"math",
".",
"isnan",
"(",
"actualValue",
")",
":",
"actualValue",
"=",
"SENTINEL_VALUE_FOR_MISSING_DATA",
"# Pass this information to the classifier's custom compute method",
"# so that it can assign the current classification to possibly",
"# multiple patterns from the past and current, and also provide",
"# the expected classification for some time step(s) in the future.",
"classifier",
".",
"setParameter",
"(",
"'inferenceMode'",
",",
"True",
")",
"classifier",
".",
"setParameter",
"(",
"'learningMode'",
",",
"needLearning",
")",
"classificationIn",
"=",
"{",
"'bucketIdx'",
":",
"bucketIdx",
",",
"'actValue'",
":",
"actualValue",
"}",
"# Handle missing records",
"if",
"inputTSRecordIdx",
"is",
"not",
"None",
":",
"recordNum",
"=",
"inputTSRecordIdx",
"else",
":",
"recordNum",
"=",
"self",
".",
"__numRunCalls",
"clResults",
"=",
"classifier",
".",
"getSelf",
"(",
")",
".",
"customCompute",
"(",
"recordNum",
"=",
"recordNum",
",",
"patternNZ",
"=",
"patternNZ",
",",
"classification",
"=",
"classificationIn",
")",
"# ---------------------------------------------------------------",
"# Get the prediction for every step ahead learned by the classifier",
"predictionSteps",
"=",
"classifier",
".",
"getParameter",
"(",
"'steps'",
")",
"predictionSteps",
"=",
"[",
"int",
"(",
"x",
")",
"for",
"x",
"in",
"predictionSteps",
".",
"split",
"(",
"','",
")",
"]",
"# We will return the results in this dict. The top level keys",
"# are the step number, the values are the relative likelihoods for",
"# each classification value in that time step, represented as",
"# another dict where the keys are the classification values and",
"# the values are the relative likelihoods.",
"inferences",
"[",
"InferenceElement",
".",
"multiStepPredictions",
"]",
"=",
"dict",
"(",
")",
"inferences",
"[",
"InferenceElement",
".",
"multiStepBestPredictions",
"]",
"=",
"dict",
"(",
")",
"inferences",
"[",
"InferenceElement",
".",
"multiStepBucketLikelihoods",
"]",
"=",
"dict",
"(",
")",
"# ======================================================================",
"# Plug in the predictions for each requested time step.",
"for",
"steps",
"in",
"predictionSteps",
":",
"# From the clResults, compute the predicted actual value. The",
"# SDRClassifier classifies the bucket index and returns a list of",
"# relative likelihoods for each bucket. Let's find the max one",
"# and then look up the actual value from that bucket index",
"likelihoodsVec",
"=",
"clResults",
"[",
"steps",
"]",
"bucketValues",
"=",
"clResults",
"[",
"'actualValues'",
"]",
"# Create a dict of value:likelihood pairs. We can't simply use",
"# dict(zip(bucketValues, likelihoodsVec)) because there might be",
"# duplicate bucketValues (this happens early on in the model when",
"# it doesn't have actual values for each bucket so it returns",
"# multiple buckets with the same default actual value).",
"likelihoodsDict",
"=",
"dict",
"(",
")",
"bestActValue",
"=",
"None",
"bestProb",
"=",
"None",
"for",
"(",
"actValue",
",",
"prob",
")",
"in",
"zip",
"(",
"bucketValues",
",",
"likelihoodsVec",
")",
":",
"if",
"actValue",
"in",
"likelihoodsDict",
":",
"likelihoodsDict",
"[",
"actValue",
"]",
"+=",
"prob",
"else",
":",
"likelihoodsDict",
"[",
"actValue",
"]",
"=",
"prob",
"# Keep track of best",
"if",
"bestProb",
"is",
"None",
"or",
"likelihoodsDict",
"[",
"actValue",
"]",
">",
"bestProb",
":",
"bestProb",
"=",
"likelihoodsDict",
"[",
"actValue",
"]",
"bestActValue",
"=",
"actValue",
"# Remove entries with 0 likelihood or likelihood less than",
"# minLikelihoodThreshold, but don't leave an empty dict.",
"likelihoodsDict",
"=",
"HTMPredictionModel",
".",
"_removeUnlikelyPredictions",
"(",
"likelihoodsDict",
",",
"minLikelihoodThreshold",
",",
"maxPredictionsPerStep",
")",
"# calculate likelihood for each bucket",
"bucketLikelihood",
"=",
"{",
"}",
"for",
"k",
"in",
"likelihoodsDict",
".",
"keys",
"(",
")",
":",
"bucketLikelihood",
"[",
"self",
".",
"_classifierInputEncoder",
".",
"getBucketIndices",
"(",
"k",
")",
"[",
"0",
"]",
"]",
"=",
"(",
"likelihoodsDict",
"[",
"k",
"]",
")",
"# ---------------------------------------------------------------------",
"# If we have a delta encoder, we have to shift our predicted output value",
"# by the sum of the deltas",
"if",
"isinstance",
"(",
"self",
".",
"_classifierInputEncoder",
",",
"DeltaEncoder",
")",
":",
"# Get the prediction history for this number of timesteps.",
"# The prediction history is a store of the previous best predicted values.",
"# This is used to get the final shift from the current absolute value.",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'_ms_predHistories'",
")",
":",
"self",
".",
"_ms_predHistories",
"=",
"dict",
"(",
")",
"predHistories",
"=",
"self",
".",
"_ms_predHistories",
"if",
"not",
"steps",
"in",
"predHistories",
":",
"predHistories",
"[",
"steps",
"]",
"=",
"deque",
"(",
")",
"predHistory",
"=",
"predHistories",
"[",
"steps",
"]",
"# Find the sum of the deltas for the steps and use this to generate",
"# an offset from the current absolute value",
"sumDelta",
"=",
"sum",
"(",
"predHistory",
")",
"offsetDict",
"=",
"dict",
"(",
")",
"for",
"(",
"k",
",",
"v",
")",
"in",
"likelihoodsDict",
".",
"iteritems",
"(",
")",
":",
"if",
"k",
"is",
"not",
"None",
":",
"# Reconstruct the absolute value based on the current actual value,",
"# the best predicted values from the previous iterations,",
"# and the current predicted delta",
"offsetDict",
"[",
"absoluteValue",
"+",
"float",
"(",
"k",
")",
"+",
"sumDelta",
"]",
"=",
"v",
"# calculate likelihood for each bucket",
"bucketLikelihoodOffset",
"=",
"{",
"}",
"for",
"k",
"in",
"offsetDict",
".",
"keys",
"(",
")",
":",
"bucketLikelihoodOffset",
"[",
"self",
".",
"_classifierInputEncoder",
".",
"getBucketIndices",
"(",
"k",
")",
"[",
"0",
"]",
"]",
"=",
"(",
"offsetDict",
"[",
"k",
"]",
")",
"# Push the current best delta to the history buffer for reconstructing the final delta",
"if",
"bestActValue",
"is",
"not",
"None",
":",
"predHistory",
".",
"append",
"(",
"bestActValue",
")",
"# If we don't need any more values in the predictionHistory, pop off",
"# the earliest one.",
"if",
"len",
"(",
"predHistory",
")",
">=",
"steps",
":",
"predHistory",
".",
"popleft",
"(",
")",
"# Provide the offsetDict as the return value",
"if",
"len",
"(",
"offsetDict",
")",
">",
"0",
":",
"inferences",
"[",
"InferenceElement",
".",
"multiStepPredictions",
"]",
"[",
"steps",
"]",
"=",
"offsetDict",
"inferences",
"[",
"InferenceElement",
".",
"multiStepBucketLikelihoods",
"]",
"[",
"steps",
"]",
"=",
"bucketLikelihoodOffset",
"else",
":",
"inferences",
"[",
"InferenceElement",
".",
"multiStepPredictions",
"]",
"[",
"steps",
"]",
"=",
"likelihoodsDict",
"inferences",
"[",
"InferenceElement",
".",
"multiStepBucketLikelihoods",
"]",
"[",
"steps",
"]",
"=",
"bucketLikelihood",
"if",
"bestActValue",
"is",
"None",
":",
"inferences",
"[",
"InferenceElement",
".",
"multiStepBestPredictions",
"]",
"[",
"steps",
"]",
"=",
"None",
"else",
":",
"inferences",
"[",
"InferenceElement",
".",
"multiStepBestPredictions",
"]",
"[",
"steps",
"]",
"=",
"(",
"absoluteValue",
"+",
"sumDelta",
"+",
"bestActValue",
")",
"# ---------------------------------------------------------------------",
"# Normal case, no delta encoder. Just plug in all our multi-step predictions",
"# with likelihoods as well as our best prediction",
"else",
":",
"# The multiStepPredictions element holds the probabilities for each",
"# bucket",
"inferences",
"[",
"InferenceElement",
".",
"multiStepPredictions",
"]",
"[",
"steps",
"]",
"=",
"(",
"likelihoodsDict",
")",
"inferences",
"[",
"InferenceElement",
".",
"multiStepBestPredictions",
"]",
"[",
"steps",
"]",
"=",
"(",
"bestActValue",
")",
"inferences",
"[",
"InferenceElement",
".",
"multiStepBucketLikelihoods",
"]",
"[",
"steps",
"]",
"=",
"(",
"bucketLikelihood",
")",
"return",
"inferences"
] | Handle the CLA Classifier compute logic when implementing multi-step
prediction. This is where the patternNZ is associated with one of the
other fields from the dataset 0 to N steps in the future. This method is
used by each type of network (encoder only, SP only, SP + TM) to handle the
compute logic through the CLA Classifier. It fills in the inference dict with
the results of the compute.
Parameters:
-------------------------------------------------------------------
patternNZ: The input to the CLA Classifier as a list of active input indices
inputTSRecordIdx: The index of the record as computed from the timestamp
and aggregation interval. This normally increments by 1
each time unless there are missing records. If there is no
aggregation interval or timestamp in the data, this will be
None.
rawInput: The raw input to the sensor, as a dict. | [
"Handle",
"the",
"CLA",
"Classifier",
"compute",
"logic",
"when",
"implementing",
"multi",
"-",
"step",
"prediction",
".",
"This",
"is",
"where",
"the",
"patternNZ",
"is",
"associated",
"with",
"one",
"of",
"the",
"other",
"fields",
"from",
"the",
"dataset",
"0",
"to",
"N",
"steps",
"in",
"the",
"future",
".",
"This",
"method",
"is",
"used",
"by",
"each",
"type",
"of",
"network",
"(",
"encoder",
"only",
"SP",
"only",
"SP",
"+",
"TM",
")",
"to",
"handle",
"the",
"compute",
"logic",
"through",
"the",
"CLA",
"Classifier",
".",
"It",
"fills",
"in",
"the",
"inference",
"dict",
"with",
"the",
"results",
"of",
"the",
"compute",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/frameworks/opf/htm_prediction_model.py#L712-L957 | valid |
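The docstring above describes how the classifier's bucket likelihoods are turned into the multi-step inference dict. A minimal, self-contained sketch of one step the record documents — summing likelihoods for duplicate bucket actual values and tracking the best prediction — is shown below. This is an illustration written for this document, not the nupic implementation; the function name is invented.

# Illustrative sketch only -- not the nupic implementation. Duplicate bucket
# values can occur early in a model's life (several buckets share a default
# actual value), so their likelihoods are summed before the best prediction
# is chosen.
def mergeBucketLikelihoods(bucketValues, likelihoodsVec):
  """Return (likelihoodsDict, bestActValue) as described in the record."""
  likelihoodsDict = {}
  bestActValue, bestProb = None, None
  for actValue, prob in zip(bucketValues, likelihoodsVec):
    likelihoodsDict[actValue] = likelihoodsDict.get(actValue, 0.0) + prob
    if bestProb is None or likelihoodsDict[actValue] > bestProb:
      bestProb = likelihoodsDict[actValue]
      bestActValue = actValue
  return likelihoodsDict, bestActValue

# Two buckets sharing the default value 5.0 merge into one entry:
merged, best = mergeBucketLikelihoods([5.0, 5.0, 7.5], [0.25, 0.25, 0.4])
assert merged == {5.0: 0.5, 7.5: 0.4} and best == 5.0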
numenta/nupic | src/nupic/frameworks/opf/htm_prediction_model.py | HTMPredictionModel._removeUnlikelyPredictions | def _removeUnlikelyPredictions(cls, likelihoodsDict, minLikelihoodThreshold,
maxPredictionsPerStep):
"""Remove entries with 0 likelihood or likelihood less than
minLikelihoodThreshold, but don't leave an empty dict.
"""
maxVal = (None, None)
for (k, v) in likelihoodsDict.items():
if len(likelihoodsDict) <= 1:
break
if maxVal[0] is None or v >= maxVal[1]:
if maxVal[0] is not None and maxVal[1] < minLikelihoodThreshold:
del likelihoodsDict[maxVal[0]]
maxVal = (k, v)
elif v < minLikelihoodThreshold:
del likelihoodsDict[k]
# Limit the number of predictions to include.
likelihoodsDict = dict(sorted(likelihoodsDict.iteritems(),
key=itemgetter(1),
reverse=True)[:maxPredictionsPerStep])
return likelihoodsDict | python | def _removeUnlikelyPredictions(cls, likelihoodsDict, minLikelihoodThreshold,
maxPredictionsPerStep):
"""Remove entries with 0 likelihood or likelihood less than
minLikelihoodThreshold, but don't leave an empty dict.
"""
maxVal = (None, None)
for (k, v) in likelihoodsDict.items():
if len(likelihoodsDict) <= 1:
break
if maxVal[0] is None or v >= maxVal[1]:
if maxVal[0] is not None and maxVal[1] < minLikelihoodThreshold:
del likelihoodsDict[maxVal[0]]
maxVal = (k, v)
elif v < minLikelihoodThreshold:
del likelihoodsDict[k]
# Limit the number of predictions to include.
likelihoodsDict = dict(sorted(likelihoodsDict.iteritems(),
key=itemgetter(1),
reverse=True)[:maxPredictionsPerStep])
return likelihoodsDict | [
"def",
"_removeUnlikelyPredictions",
"(",
"cls",
",",
"likelihoodsDict",
",",
"minLikelihoodThreshold",
",",
"maxPredictionsPerStep",
")",
":",
"maxVal",
"=",
"(",
"None",
",",
"None",
")",
"for",
"(",
"k",
",",
"v",
")",
"in",
"likelihoodsDict",
".",
"items",
"(",
")",
":",
"if",
"len",
"(",
"likelihoodsDict",
")",
"<=",
"1",
":",
"break",
"if",
"maxVal",
"[",
"0",
"]",
"is",
"None",
"or",
"v",
">=",
"maxVal",
"[",
"1",
"]",
":",
"if",
"maxVal",
"[",
"0",
"]",
"is",
"not",
"None",
"and",
"maxVal",
"[",
"1",
"]",
"<",
"minLikelihoodThreshold",
":",
"del",
"likelihoodsDict",
"[",
"maxVal",
"[",
"0",
"]",
"]",
"maxVal",
"=",
"(",
"k",
",",
"v",
")",
"elif",
"v",
"<",
"minLikelihoodThreshold",
":",
"del",
"likelihoodsDict",
"[",
"k",
"]",
"# Limit the number of predictions to include.",
"likelihoodsDict",
"=",
"dict",
"(",
"sorted",
"(",
"likelihoodsDict",
".",
"iteritems",
"(",
")",
",",
"key",
"=",
"itemgetter",
"(",
"1",
")",
",",
"reverse",
"=",
"True",
")",
"[",
":",
"maxPredictionsPerStep",
"]",
")",
"return",
"likelihoodsDict"
] | Remove entries with 0 likelihood or likelihood less than
minLikelihoodThreshold, but don't leave an empty dict. | [
"Remove",
"entries",
"with",
"0",
"likelihood",
"or",
"likelihood",
"less",
"than",
"minLikelihoodThreshold",
"but",
"don",
"t",
"leave",
"an",
"empty",
"dict",
"."
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/frameworks/opf/htm_prediction_model.py#L961-L980 | valid |
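For contrast with the Python 2 record above (note the iteritems() call), here is a hedged Python 3 re-sketch of the same pruning behaviour: drop entries below the threshold without ever returning an empty dict, then cap the count. The function and variable names are my own, and this is not a drop-in replacement for the record's classmethod.

# Hedged Python 3 sketch of the pruning behaviour documented above.
def prunePredictions(likelihoodsDict, minLikelihoodThreshold,
                     maxPredictionsPerStep):
  ranked = sorted(likelihoodsDict.items(), key=lambda kv: kv[1], reverse=True)
  kept = [(k, v) for k, v in ranked if v >= minLikelihoodThreshold]
  if not kept:
    # Never leave an empty dict: keep the single most likely entry.
    kept = ranked[:1]
  return dict(kept[:maxPredictionsPerStep])

print(prunePredictions({1.0: 0.7, 2.0: 0.02, 3.0: 0.28}, 0.1, 2))
# -> {1.0: 0.7, 3.0: 0.28}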
numenta/nupic | src/nupic/frameworks/opf/htm_prediction_model.py | HTMPredictionModel.getRuntimeStats | def getRuntimeStats(self):
"""
Only returns data for a stat called ``numRunCalls``.
:return:
"""
ret = {"numRunCalls" : self.__numRunCalls}
#--------------------------------------------------
# Query temporal network stats
temporalStats = dict()
if self._hasTP:
for stat in self._netInfo.statsCollectors:
sdict = stat.getStats()
temporalStats.update(sdict)
ret[InferenceType.getLabel(InferenceType.TemporalNextStep)] = temporalStats
return ret | python | def getRuntimeStats(self):
"""
Only returns data for a stat called ``numRunCalls``.
:return:
"""
ret = {"numRunCalls" : self.__numRunCalls}
#--------------------------------------------------
# Query temporal network stats
temporalStats = dict()
if self._hasTP:
for stat in self._netInfo.statsCollectors:
sdict = stat.getStats()
temporalStats.update(sdict)
ret[InferenceType.getLabel(InferenceType.TemporalNextStep)] = temporalStats
return ret | [
"def",
"getRuntimeStats",
"(",
"self",
")",
":",
"ret",
"=",
"{",
"\"numRunCalls\"",
":",
"self",
".",
"__numRunCalls",
"}",
"#--------------------------------------------------",
"# Query temporal network stats",
"temporalStats",
"=",
"dict",
"(",
")",
"if",
"self",
".",
"_hasTP",
":",
"for",
"stat",
"in",
"self",
".",
"_netInfo",
".",
"statsCollectors",
":",
"sdict",
"=",
"stat",
".",
"getStats",
"(",
")",
"temporalStats",
".",
"update",
"(",
"sdict",
")",
"ret",
"[",
"InferenceType",
".",
"getLabel",
"(",
"InferenceType",
".",
"TemporalNextStep",
")",
"]",
"=",
"temporalStats",
"return",
"ret"
] | Only returns data for a stat called ``numRunCalls``.
:return: | [
"Only",
"returns",
"data",
"for",
"a",
"stat",
"called",
"numRunCalls",
".",
":",
"return",
":"
] | 5922fafffdccc8812e72b3324965ad2f7d4bbdad | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/frameworks/opf/htm_prediction_model.py#L983-L1001 | valid |
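To make the return shape concrete, here is a hypothetical example of the dict getRuntimeStats() produces. The temporal sub-dict key comes from InferenceType.getLabel(InferenceType.TemporalNextStep); the "TemporalNextStep" label text and the stat name inside it are assumptions for illustration, not values from the record.

# Hypothetical shape only: the key text and stat names below are assumed.
stats = {
  "numRunCalls": 1250,
  "TemporalNextStep": {            # assumed label for the temporal stats
    "nInfersSinceReset": 1250,     # invented stat name for illustration
  },
}
print(stats["numRunCalls"])        # -> 1250
for name, value in stats.get("TemporalNextStep", {}).items():
  print(name, "=", value)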