Hi,
I have a CNN model in Torch, and I am trying to port it to Python using the lutorpy library. This is the original Torch model:
```lua
unpack = table.unpack
require 'nn'
require("component")
require('pl.stringx').import()
local file = require('pl.file')
local List = require('pl.List')

-- Parse nonlinearity
local nltbl = {
  ['tanh'] = nn.Tanh,
  ['relu'] = nn.ReLU,
  ['prelu'] = nn.PReLU,
}

local function create_common_model(seq)
  local model = seq
  local aadict = file.read(path.join(hashdir, "aa1.lst"))
  local aaxsize = #(aadict:splitlines())
  model:add(nn.LookupTable(aaxsize, embedsize))
  model:add(nn.Dropout(0.1))
  return model
end

-- Convolution Models
local function create_conv_base_model(seq)
  local model = seq
  model:add(nn.UpDim())
  local padding = math.floor(kernelsize/2)
  if padding > 0 then
    local concat = nn.ConcatTable()
    for j = 0, poolingsize-1 do
      -- add zeros to (left, right, top, bottom)
      concat:add(nn.SpatialZeroPadding(0, 0, padding-j, kernelsize-padding-1+j+poolingsize))
    end
    model:add(concat)
    model:add(nn.JoinTable(1))
  end
  -- nn.TemporalConvolution(inputFrameSize, outputFrameSize, kernelWidth, [dW] = 1)
  model:add(nn.TemporalConvolution(embedsize, hiddenunit, kernelsize))
  model:add(nonlinearity())
  if poolingsize > 1 then
    model:add(nn.TemporalMaxPooling(poolingsize))
  end
  model:add(nn.Dropout(0.3))
  -- end
  model:add(nn.TemporalZip())
  return model
end

local function create_conv_model()
  local model = nn.Sequential()
  create_common_model(model)
  create_conv_base_model(model)
  model:add(nn.TemporalConvolution(hiddenunit, nclass, 1))
  return model
end
```
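For reference, lutorpy exposes the Lua/Torch API in Python through require() and an underscore calling convention (obj._method(...) in place of Lua's obj:method(...)), and moves data with torch.fromNumpyArray() / tensor.asNumpyArray(). A minimal sketch of that calling pattern (the layer sizes here are placeholders, not taken from the model above):

```python
import numpy as np
import lutorpy as lua

require("nn")  # lutorpy provides require(); Lua globals such as torch and nn become visible in Python

# Lua: model = nn.Sequential(); model:add(nn.Linear(10, 2))
model = nn.Sequential()
model._add(nn.Linear(10, 2))   # Lua's ":" call becomes "._" in Python
model._add(nn.LogSoftMax())

x = torch.fromNumpyArray(np.random.rand(10))  # numpy array -> torch.DoubleTensor
y = model._forward(x)                         # Lua: model:forward(x)
print(y.asNumpyArray())                       # torch tensor -> numpy array
```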
and I translated it to this:
```python
import lutorpy as lua
# assumed imports for the code below (not in the original snippet): exists/join, Keras RMSprop, hyperopt STATUS_OK, hyperas choice
from os.path import exists, join
from keras.optimizers import RMSprop
from hyperopt import STATUS_OK
from hyperas.distributions import choice

require("nn")
# require("component")
# require('pl.stringx').import()
# local file = require('pl.file')
# local List = require('pl.List')
def reportAcc(acc, score, bestaccfile):
    print('Hyperas:valid accuracy:', acc, 'valid loss', score)
    if not exists(bestaccfile):
        current = float("inf")
    else:
        with open(bestaccfile) as f:
            current = float(f.readline().strip())
    if score < current:
        with open(bestaccfile, 'w') as f:
            f.write('%f\n' % score)
            f.write('%f\n' % acc)
def model(X_train, Y_train, X_test, Y_test):
    W_maxnorm = 3
    DROPOUT = {{choice([0.3, 0.1])}}
    poolingsize = 2
    embedsize = 15
    kernalsize = 5
    hiddenunit = 1024

    aaxsize = 0
    aadict = X_train
    print(aadict)
    aaxsize += len(aadict)

    # model = Sequential()
    model = nn.Sequential()
    model._add(nn.LookupTable(aaxsize, embedsize))
    model._add(nn.Dropout(0.1))

    # Convolution model
    model._add(nn.UpDim())
    padding = int(kernalsize / 2)
    if padding > 0:
        concat = nn.ConcatTable()
        for j in range(poolingsize):  # mirrors the Lua "for j = 0, poolingsize-1"
            concat._add(nn.SpatialZeroPadding(0, 0, padding - j, kernalsize - padding - 1 + j + poolingsize))
        model._add(concat)
        model._add(nn.JoinTable(1))
    model._add(nn.TemporalConvolution(embedsize, hiddenunit, kernalsize))
    # nonlinearity is not defined in this snippet; the Lua code picks it from nltbl (nn.Tanh / nn.ReLU / nn.PReLU)
    model._add(nonlinearity())
    if poolingsize > 1:
        model._add(nn.TemporalMaxPooling(poolingsize))
    model._add(nn.Dropout(0.3))
    model._add(nn.TemporalZip())
    # create_common_model(model)
    # create_conv_base_model(model)
    # nclass also comes from the original configuration (number of output classes)
    model._add(nn.TemporalConvolution(hiddenunit, nclass, 1))

    # NOTE: compile/fit/evaluate/to_json below are Keras Model methods, not lutorpy/nn methods
    myoptimizer = RMSprop(lr={{choice([0.01, 0.001, 0.0001])}}, rho=0.9, epsilon=1e-06)
    mylossfunc = 'binary_crossentropy'
    model.compile(loss=mylossfunc, optimizer=myoptimizer, metrics=['accuracy'])
    model.fit(X_train, Y_train, batch_size=100, nb_epoch=5, validation_split=0.1)

    score, acc = model.evaluate(X_test, Y_test)
    model_arch = 'MODEL_ARCH'
    bestaccfile = join('TOPDIR', model_arch, model_arch + '_hyperbestacc')
    reportAcc(acc, score, bestaccfile)
    return {'loss': score, 'status': STATUS_OK, 'model': (model.to_json(), myoptimizer, mylossfunc)}
```
Can I mix parts of the Torch model with a Python (Keras) fit like this?
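For comparison: compile()/fit()/evaluate() are methods of a Keras model and are not available on a lutorpy nn.Sequential, so one way to keep the Torch graph while still driving training from Python is a plain nn training loop through lutorpy. This is a rough sketch only, with a hypothetical toy model and random binary data standing in for X_train/Y_train:

```python
import numpy as np
import lutorpy as lua

require("nn")

# Hypothetical stand-ins, just to show the calling pattern
model = nn.Sequential()
model._add(nn.Linear(20, 1))
model._add(nn.Sigmoid())
criterion = nn.BCECriterion()  # binary cross-entropy computed on the Torch side

X = torch.fromNumpyArray(np.random.rand(100, 20))
Y = torch.fromNumpyArray(np.random.randint(0, 2, (100, 1)).astype(float))

learning_rate = 0.01
for epoch in range(5):
    model._zeroGradParameters()
    out = model._forward(X)            # Lua: model:forward(X)
    loss = criterion._forward(out, Y)  # Lua: criterion:forward(out, Y)
    grad = criterion._backward(out, Y)
    model._backward(X, grad)
    model._updateParameters(learning_rate)  # vanilla SGD step
    print('epoch', epoch, 'loss', loss)
```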