diff --git a/scripts/rnn.lua b/scripts/rnn.lua
index 5cd7649..b893d8d 100644
--- a/scripts/rnn.lua
+++ b/scripts/rnn.lua
@@ -293,4 +293,4 @@ function RNN:getPredictionTruth(batch)
 	-- for dropout
 	self.layer:training()
 	return predictionTruths
-end
\ No newline at end of file
+end
diff --git a/scripts/trainSynthetic.lua b/scripts/trainSynthetic.lua
index c682354..7b7d732 100644
--- a/scripts/trainSynthetic.lua
+++ b/scripts/trainSynthetic.lua
@@ -24,11 +24,12 @@ function run()
 		n_hidden = n_hidden,
 		n_questions = data.n_questions,
 		max_grad = 100,
-		max_steps = data.n_questions,
+		maxSteps = data.n_questions,
 		--modelDir = outputRoot .. '/models/result_c5_v0_98'
 	}
 
 	local name = "result_c" .. CONCEPT_NUM .. "_v" .. VERSION
+	lfs.mkdir(paths.dirname(outputRoot))
 	lfs.mkdir(outputRoot)
 	lfs.mkdir(outputRoot .. "models")
 
@@ -72,7 +73,7 @@ function trainMiniBatch(rnn, data, init_rate, decay_rate, mini_batch_size, blob_
 		miniErr = miniErr + err
 		miniTests = miniTests + tests
 		if done % mini_batch_size == 0 then
-			rnn:update(rate)
+			rnn:update(nil, rate)
 			rnn:zeroGrad()
 			print(i/#miniBatches, sumErr/numTests)
 			miniErr = 0
@@ -147,4 +148,4 @@ function semiSortedMiniBatches(dataset, mini_batch_size, trimToBatchSize)
 	return shuffledBatches
 end
 
-run()
\ No newline at end of file
+run()
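
Reviewer note (not part of the diff): the call-site change from rnn:update(rate) to rnn:update(nil, rate) implies that RNN:update now takes an additional leading argument before the learning rate, which this trainer does not use. The sketch below is a hypothetical illustration of such a signature, not the project's actual implementation; the parameter name `alpha`, the `params`/`gradParams` fields, and the plain-SGD body are all assumptions (Torch7 tensor semantics assumed).

	-- Hypothetical sketch of a two-argument update, assuming the first
	-- argument is an optional scaling factor that defaults to 1 when the
	-- caller passes nil, as trainSynthetic.lua now does.
	function RNN:update(alpha, rate)
		alpha = alpha or 1
		for i, param in ipairs(self.params) do
			-- plain SGD step: param = param - alpha * rate * grad
			param:add(-alpha * rate, self.gradParams[i])
		end
	end

Under this reading, rnn:update(nil, rate) in trainMiniBatch keeps the old behavior while leaving room for callers that want to pass the new first argument explicitly.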