从 sae.dnn (deepnet) 转换为 mx.mlp (mxnet) 错误
Translate from sae.dnn (deepnet) to mx.mlp (mxnet) error
我正在尝试将代码从 deepnet 转换为 mxnet,但我不确定我做错了什么。我收到一条错误消息:
"Error in nn$W[[i -1]] %*% t(post)".
requires numeric/complex matrix/vector arguments
Calls: neural.predict -> nn.predict -> t
使用 deepnet 的代码(由 Johann C. Lotter 编写)是:
library('deepnet', quietly = T)
library('caret', quietly = T)
# Train a stacked-autoencoder deep network (deepnet::sae.dnn.train) and
# store the fitted model in the global `Models` list under key `model`.
# XY: matrix-like data; the last column is the target, the rest are features.
neural.train = function(model,XY)
{
XY <- as.matrix(XY)  # ensure matrix indexing below works even for data frames
X <- XY[,-ncol(XY)]  # features: every column except the last
Y <- XY[,ncol(XY)]   # raw target: the last column
Y <- ifelse(Y > 0,1,0)  # binarize the target: positive -> 1, otherwise 0
# `<<-` writes into the global Models list created by neural.init()
Models[[model]] <<- sae.dnn.train(X,Y,
hidden = c(30,30,30),  # three hidden layers of 30 units each
activationfun = "tanh",  # hidden-layer activation
learningrate = 0.5,
momentum = 0.5,
learningrate_scale = 1.0,  # no learning-rate decay between epochs
output = "sigm",  # sigmoid output for the 0/1 target
sae_output = "linear",  # output used during autoencoder pre-training
numepochs = 100,
batchsize = 100,
hidden_dropout = 0,
visible_dropout = 0)
}
neural.predict = function(model,X)
{
  # A bare vector is a single sample; nn.predict expects one row per
  # sample, so promote it to a 1-row matrix first.
  if (is.vector(X)) {
    X <- t(X)
  }
  fitted <- Models[[model]]
  nn.predict(fitted, X)
}
neural.save = function(name)
{
  # Persist the global model list to disk under the given file name.
  save(Models, file = name)
}
neural.init = function()
{
  # Fixed seed so training/sampling runs are reproducible; reset the
  # global model store to an empty list.
  set.seed(365)
  Models <<- list()
}
对于 mxnet 翻译,我将神经网络更改为:
library('mxnet', quietly = T)
# mxnet translation of neural.train (the failing attempt from the question).
# NOTE(review): mx.mlp's rounds argument is spelled `num.round`, not
# `num_round` — the misspelled name is swallowed by `...` and never reaches
# the trainer. TODO confirm against the installed mxnet version.
# NOTE(review): no `array.layout` is supplied; with one sample per row the
# data may be read transposed — the working version passes
# array.layout = "rowmajor".
# NOTE(review): the quoted traceback (neural.predict -> nn.predict) shows
# prediction still calls deepnet's nn.predict on this mxnet model; an
# mx.mlp model has no $W weight list, hence the matrix-multiply error.
neural.train = function(model,XY)
{
XY <- as.matrix(XY)
X <- XY[,-ncol(XY)]  # features: every column except the last
Y <- XY[,ncol(XY)]   # target: the last column
Y <- ifelse(Y > 0,1,0)  # binarize: positive -> 1, otherwise 0
Models[[model]] <<- mx.mlp(X,Y,
hidden_node = c(30,30,30),
activation = "relu",
momentum = 0.9,
learning.rate = 0.07,
out_activation = "softmax",
num_round = 100,  # NOTE(review): should be `num.round`
out_node = 2,
array.batch.size = 100)
}
我不明白我做错了什么..
请看下面的可运行代码。如果它在您的机器上出于某种原因不起作用,请检查您所使用的 mxnet 版本。我在 Mac 上使用的是 mxnet 0.10.1 版本。
既然你说想让代码尽可能接近原示例,我已把各参数改回初始值;如有需要,请随意修改。例如,0.5 的动量似乎太小了——通常使用 0.9 或更高的值;而 0.5 的学习率又太大了,一般学习率不会高于 0.1。
library('mxnet')
# mxnet counterpart of the deepnet trainer: fits a 3x30 MLP with mx.mlp and
# stores it in the global `Models` list under key `model`.
# Hyper-parameter values deliberately mirror the original deepnet example.
neural.train = function(model,XY)
{
XY <- as.matrix(XY)  # ensure matrix indexing works even for data frames
X <- XY[,-ncol(XY)]  # features: every column except the last
Y <- XY[,ncol(XY)]   # target: the last column
Y <- ifelse(Y > 0,1,0)  # binarize: positive -> 1, otherwise 0
Models[[model]] <<- mx.mlp(X,Y,
hidden_node = c(30,30,30),  # three hidden layers of 30 units each
activation = "tanh",
momentum = 0.5,
learning.rate = 0.5,
out_activation = "softmax",  # 2-way softmax output
num.round = 100,  # training rounds (note the dot, unlike the failing attempt)
out_node = 2,  # one output unit per class
array.batch.size = 100,
dropout = 0,
array.layout = "rowmajor")  # rows are samples; omitting this transposes the data
}
neural.predict = function(model,X)
{
  # A bare vector is a single sample: promote it to a 1-row matrix so the
  # row-major layout below stays consistent with training.
  if (is.vector(X)) {
    X <- t(X)
  }
  fitted <- Models[[model]]
  predict(fitted, X, array.layout = "rowmajor")
}
neural.save = function(name)
{
  # Persist the global model list to disk under the given file name.
  save(Models, file = name)
}
neural.init = function()
{
  # Fixed seed so training/sampling runs are reproducible; reset the
  # global model store to an empty list.
  set.seed(365)
  Models <<- list()
}
# Synthetic demo: two clusters of 50 points each in the (Var1, Var2) plane.
Var1 <- c(rnorm(50, 1, 0.5), rnorm(50, -0.6, 0.2))
Var2 <- c(rnorm(50, -0.8, 0.2), rnorm(50, 2, 1))
# Random 0/1 labels, unrelated to the features — the net has nothing to
# learn, so near-constant ~0.5 class probabilities in the output are expected.
Var3 <- sample(c(0,1), replace=T, size=100)
training.data <- matrix(c(Var1, Var2, Var3), nrow = 100, ncol = 3)  # last column = target
Var4 <- c(rnorm(50, 1, 0.5), rnorm(50, -0.6, 0.2))
Var5 <- c(rnorm(50, -0.8, 0.2), rnorm(50, 2, 1))
test.data <- matrix(c(Var4, Var5), nrow = 100, ncol = 2)  # features only, no target column
neural.init()
neural.train("mx_mlp_model", training.data)
neural.predict("mx_mlp_model", test.data)
执行此操作后,我得到以下输出:
> neural.predict("mx_mlp_model", test.data)
[,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8] [,9] [,10] [,11] [,12] [,13] [,14] [,15] [,16] [,17] [,18] [,19] [,20]
[1,] 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47
[2,] 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53
[,21] [,22] [,23] [,24] [,25] [,26] [,27] [,28] [,29] [,30] [,31] [,32] [,33] [,34] [,35] [,36] [,37] [,38] [,39]
[1,] 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47
[2,] 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53
[,40] [,41] [,42] [,43] [,44] [,45] [,46] [,47] [,48] [,49] [,50] [,51] [,52] [,53] [,54] [,55] [,56] [,57] [,58]
[1,] 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47
[2,] 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53
[,59] [,60] [,61] [,62] [,63] [,64] [,65] [,66] [,67] [,68] [,69] [,70] [,71] [,72] [,73] [,74] [,75] [,76] [,77]
[1,] 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47
[2,] 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53
[,78] [,79] [,80] [,81] [,82] [,83] [,84] [,85] [,86] [,87] [,88] [,89] [,90] [,91] [,92] [,93] [,94] [,95] [,96]
[1,] 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47
[2,] 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53
[,97] [,98] [,99] [,100]
[1,] 0.47 0.47 0.47 0.47
[2,] 0.53 0.53 0.53 0.53
希望对您有所帮助。
我正在尝试将代码从 deepnet 转换为 mxnet,但我不确定我做错了什么。我收到一条错误消息:
"Error in nn$W[[i -1]] %*% t(post)".
requires numeric/complex matrix/vector arguments
Calls: neural.predict -> nn.predict -> t
使用 deepnet 的代码(由 Johann C. Lotter 编写)是:
library('deepnet', quietly = T)
library('caret', quietly = T)
# Train a stacked-autoencoder deep network (deepnet::sae.dnn.train) and
# store the fitted model in the global `Models` list under key `model`.
# XY: matrix-like data; the last column is the target, the rest are features.
neural.train = function(model,XY)
{
XY <- as.matrix(XY)  # ensure matrix indexing below works even for data frames
X <- XY[,-ncol(XY)]  # features: every column except the last
Y <- XY[,ncol(XY)]   # raw target: the last column
Y <- ifelse(Y > 0,1,0)  # binarize the target: positive -> 1, otherwise 0
# `<<-` writes into the global Models list created by neural.init()
Models[[model]] <<- sae.dnn.train(X,Y,
hidden = c(30,30,30),  # three hidden layers of 30 units each
activationfun = "tanh",  # hidden-layer activation
learningrate = 0.5,
momentum = 0.5,
learningrate_scale = 1.0,  # no learning-rate decay between epochs
output = "sigm",  # sigmoid output for the 0/1 target
sae_output = "linear",  # output used during autoencoder pre-training
numepochs = 100,
batchsize = 100,
hidden_dropout = 0,
visible_dropout = 0)
}
neural.predict = function(model,X)
{
  # A bare vector is a single sample; nn.predict expects one row per
  # sample, so promote it to a 1-row matrix first.
  if (is.vector(X)) {
    X <- t(X)
  }
  fitted <- Models[[model]]
  nn.predict(fitted, X)
}
neural.save = function(name)
{
  # Persist the global model list to disk under the given file name.
  save(Models, file = name)
}
neural.init = function()
{
  # Fixed seed so training/sampling runs are reproducible; reset the
  # global model store to an empty list.
  set.seed(365)
  Models <<- list()
}
对于 mxnet 翻译,我将神经网络更改为:
library('mxnet', quietly = T)
# mxnet translation of neural.train (the failing attempt from the question).
# NOTE(review): mx.mlp's rounds argument is spelled `num.round`, not
# `num_round` — the misspelled name is swallowed by `...` and never reaches
# the trainer. TODO confirm against the installed mxnet version.
# NOTE(review): no `array.layout` is supplied; with one sample per row the
# data may be read transposed — the working version passes
# array.layout = "rowmajor".
# NOTE(review): the quoted traceback (neural.predict -> nn.predict) shows
# prediction still calls deepnet's nn.predict on this mxnet model; an
# mx.mlp model has no $W weight list, hence the matrix-multiply error.
neural.train = function(model,XY)
{
XY <- as.matrix(XY)
X <- XY[,-ncol(XY)]  # features: every column except the last
Y <- XY[,ncol(XY)]   # target: the last column
Y <- ifelse(Y > 0,1,0)  # binarize: positive -> 1, otherwise 0
Models[[model]] <<- mx.mlp(X,Y,
hidden_node = c(30,30,30),
activation = "relu",
momentum = 0.9,
learning.rate = 0.07,
out_activation = "softmax",
num_round = 100,  # NOTE(review): should be `num.round`
out_node = 2,
array.batch.size = 100)
}
我不明白我做错了什么..
请看下面的可运行代码。如果它在您的机器上出于某种原因不起作用,请检查您所使用的 mxnet 版本。我在 Mac 上使用的是 mxnet 0.10.1 版本。
既然你说想让代码尽可能接近原示例,我已把各参数改回初始值;如有需要,请随意修改。例如,0.5 的动量似乎太小了——通常使用 0.9 或更高的值;而 0.5 的学习率又太大了,一般学习率不会高于 0.1。
library('mxnet')
# mxnet counterpart of the deepnet trainer: fits a 3x30 MLP with mx.mlp and
# stores it in the global `Models` list under key `model`.
# Hyper-parameter values deliberately mirror the original deepnet example.
neural.train = function(model,XY)
{
XY <- as.matrix(XY)  # ensure matrix indexing works even for data frames
X <- XY[,-ncol(XY)]  # features: every column except the last
Y <- XY[,ncol(XY)]   # target: the last column
Y <- ifelse(Y > 0,1,0)  # binarize: positive -> 1, otherwise 0
Models[[model]] <<- mx.mlp(X,Y,
hidden_node = c(30,30,30),  # three hidden layers of 30 units each
activation = "tanh",
momentum = 0.5,
learning.rate = 0.5,
out_activation = "softmax",  # 2-way softmax output
num.round = 100,  # training rounds (note the dot, unlike the failing attempt)
out_node = 2,  # one output unit per class
array.batch.size = 100,
dropout = 0,
array.layout = "rowmajor")  # rows are samples; omitting this transposes the data
}
neural.predict = function(model,X)
{
  # A bare vector is a single sample: promote it to a 1-row matrix so the
  # row-major layout below stays consistent with training.
  if (is.vector(X)) {
    X <- t(X)
  }
  fitted <- Models[[model]]
  predict(fitted, X, array.layout = "rowmajor")
}
neural.save = function(name)
{
  # Persist the global model list to disk under the given file name.
  save(Models, file = name)
}
neural.init = function()
{
  # Fixed seed so training/sampling runs are reproducible; reset the
  # global model store to an empty list.
  set.seed(365)
  Models <<- list()
}
# Synthetic demo: two clusters of 50 points each in the (Var1, Var2) plane.
Var1 <- c(rnorm(50, 1, 0.5), rnorm(50, -0.6, 0.2))
Var2 <- c(rnorm(50, -0.8, 0.2), rnorm(50, 2, 1))
# Random 0/1 labels, unrelated to the features — the net has nothing to
# learn, so near-constant ~0.5 class probabilities in the output are expected.
Var3 <- sample(c(0,1), replace=T, size=100)
training.data <- matrix(c(Var1, Var2, Var3), nrow = 100, ncol = 3)  # last column = target
Var4 <- c(rnorm(50, 1, 0.5), rnorm(50, -0.6, 0.2))
Var5 <- c(rnorm(50, -0.8, 0.2), rnorm(50, 2, 1))
test.data <- matrix(c(Var4, Var5), nrow = 100, ncol = 2)  # features only, no target column
neural.init()
neural.train("mx_mlp_model", training.data)
neural.predict("mx_mlp_model", test.data)
执行此操作后,我得到以下输出:
> neural.predict("mx_mlp_model", test.data)
[,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8] [,9] [,10] [,11] [,12] [,13] [,14] [,15] [,16] [,17] [,18] [,19] [,20]
[1,] 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47
[2,] 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53
[,21] [,22] [,23] [,24] [,25] [,26] [,27] [,28] [,29] [,30] [,31] [,32] [,33] [,34] [,35] [,36] [,37] [,38] [,39]
[1,] 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47
[2,] 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53
[,40] [,41] [,42] [,43] [,44] [,45] [,46] [,47] [,48] [,49] [,50] [,51] [,52] [,53] [,54] [,55] [,56] [,57] [,58]
[1,] 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47
[2,] 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53
[,59] [,60] [,61] [,62] [,63] [,64] [,65] [,66] [,67] [,68] [,69] [,70] [,71] [,72] [,73] [,74] [,75] [,76] [,77]
[1,] 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47
[2,] 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53
[,78] [,79] [,80] [,81] [,82] [,83] [,84] [,85] [,86] [,87] [,88] [,89] [,90] [,91] [,92] [,93] [,94] [,95] [,96]
[1,] 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47 0.47
[2,] 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53 0.53
[,97] [,98] [,99] [,100]
[1,] 0.47 0.47 0.47 0.47
[2,] 0.53 0.53 0.53 0.53
希望对您有所帮助。