#!/usr/local/bin/python3
# avenir-python: Machine Learning
# Author: Pranab Ghosh
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You may
# obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
# Package imports
import os
import sys
import torch
from torch.utils.data import DataLoader
import random
import jprops
from random import randint
import optuna
sys.path.append(os.path.abspath("../lib"))
from util import *
from mlutil import *
# FeedForwardNetwork is provided by the avenir supervised learning code;
# the module path below is assumed
sys.path.append(os.path.abspath("../supv"))
from tnn import FeedForwardNetwork
"""
neural network hyper paramter tuning with ptuna
"""

def createTunerConfig(configFile):
    """
    create tuner config object
    """
    defValues = dict()
    defValues["train.num.layers"] = ([2,4], None)
    defValues["train.num.units"] = (None, "missing range of number of units")
    defValues["train.activation"] = ("relu", None)
    defValues["train.batch.normalize"] = (["true", "false"], None)
    defValues["train.dropout.prob"] = ([-0.1, 0.5], None)
    defValues["train.out.num.units"] = (None, "missing number of output units")
    defValues["train.out.activation"] = (None, "missing output activation")
    defValues["train.batch.size"] = ([16, 128], None)
    defValues["train.opt.learning.rate"] = ([.0001, .005], None)

    config = Configuration(configFile, defValues)
    return config

def showStudyResults(study):
    """
    shows study results
    """
    print("Number of finished trials: ", len(study.trials))
    print("Best trial:")
    trial = study.best_trial
    print("Value: ", trial.value)
    print("Params: ")
    for key, value in trial.params.items():
        print("  {}: {}".format(key, value))

def objective(trial, networkType, modelConfigFile, tunerConfigFile):
    """
    Optuna based hyperparameter tuning for neural network
    """
    tConfig = createTunerConfig(tunerConfigFile)

    # tuning parameter ranges from the tuner config
    nlayers = tConfig.getIntListConfig("train.num.layers")[0]
    nunits = tConfig.getIntListConfig("train.num.units")[0]
    act = tConfig.getStringConfig("train.activation")[0]
    dropOutRange = tConfig.getFloatListConfig("train.dropout.prob")[0]
    outNunits = tConfig.getIntConfig("train.out.num.units")[0]
    outAct = tConfig.getStringConfig("train.out.activation")[0]
    batchSizes = tConfig.getIntListConfig("train.batch.size")[0]
    learningRates = tConfig.getFloatListConfig("train.opt.learning.rate")[0]

    numLayers = trial.suggest_int("numLayers", nlayers[0], nlayers[1])

    # batch normalize on for all layers or none
    batchNormOptions = ["true", "false"]
    batchNorm = trial.suggest_categorical("batchNorm", batchNormOptions)

    layerConfig = ""
    maxUnits = nunits[1]
    sep = ":"
    for i in range(numLayers):
        if i < numLayers - 1:
            # hidden layer; successive layers never grow in width, and the
            # dropout probability is a float, so suggest_float is used
            nunit = trial.suggest_int("numUnits_l{}".format(i), nunits[0], maxUnits)
            dropOut = trial.suggest_float("dropOut_l{}".format(i), dropOutRange[0], dropOutRange[1])
            lconfig = [str(nunit), act, batchNorm, "true", "{:.3f}".format(dropOut)]
            lconfig = sep.join(lconfig) + ","
            maxUnits = nunit
        else:
            # output layer with fixed size and activation, no batch norm or dropout
            lconfig = [str(outNunits), outAct, "false", "false", "{:.3f}".format(-0.1)]
            lconfig = sep.join(lconfig)
        layerConfig = layerConfig + lconfig
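
    # layerConfig now holds one colon separated spec per layer, comma separated,
    # with the fields assembled above; an illustrative value:
    # "64:relu:true:true:0.350,32:relu:true:true:0.200,2:softmax:false:false:-0.100"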

    batchSize = trial.suggest_int("batchSize", batchSizes[0], batchSizes[1])
    learningRate = trial.suggest_float("learningRate", learningRates[0], learningRates[1])

    # train model with the sampled hyperparameters; numeric values are passed
    # as strings, matching how they would appear in a properties file
    nnModel = FeedForwardNetwork(modelConfigFile)
    nnModel.setConfigParam("train.layer.data", layerConfig)
    nnModel.setConfigParam("train.batch.size", str(batchSize))
    nnModel.setConfigParam("train.opt.learning.rate", str(learningRate))
    nnModel.buildModel()
    score = FeedForwardNetwork.batchTrain(nnModel)
    return score

if __name__ == "__main__":
    assert len(sys.argv) == 5, "requires 4 command line args"
    networkType = sys.argv[1]
    modelConfigFile = sys.argv[2]
    tunerConfigFile = sys.argv[3]
    numTrial = int(sys.argv[4])

    # optuna minimizes by default, so the score returned by the objective
    # is treated as a loss
    study = optuna.create_study()
    study.optimize(lambda trial: objective(trial, networkType, modelConfigFile, tunerConfigFile), n_trials=numTrial)
    showStudyResults(study)
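
# Example invocation (script and file names illustrative):
#   python3 nntuner.py ffn model.properties tuner.properties 50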