How to use NLopt in Julia with equality_constraint
I am struggling to adapt the Julia-specific tutorial on NLopt to my needs, and I would be grateful if someone could explain what I am doing wrong or failing to understand.

I wish to:
- minimise the value of some objective function myfunc(x), where
- x must lie in the unit hypercube (just 2 dimensions in the example below), and
- the elements of x must sum to 1.
In the example below I have made myfunc very simple: the square of the distance from x to [2.0, 0.0], so the obvious correct solution to the problem is x = [1.0, 0.0], for which myfunc(x) = 1.0.
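(Just to double-check that claim outside of NLopt, here is a throwaway sanity check, using a variable xExpected of my own, with the same objective expression as in the code below:)

origin = [2.0, 0.0]
xExpected = [1.0, 0.0]
all(0 .<= xExpected .<= 1)      # true: inside the unit hypercube
sum(xExpected) == 1             # true: satisfies the sum constraint
sum((xExpected .- origin).^2)   # 1.0, the expected minimum value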
I have also added println statements so that I can see what the solver is doing.
using NLopt

testNLopt = function()
    origin = [2.0, 0.0]
    n = length(origin)
    # Returns the square of the distance between x and "origin", and amends grad in-place
    myfunc = function(x::Vector{Float64}, grad::Vector{Float64})
        if length(grad) > 0
            grad = 2 .* (x .- origin)
        end
        xOut = sum((x .- origin).^2)
        println("myfunc: x = $x; myfunc(x) = $xOut; ∂myfunc/∂x = $grad")
        return(xOut)
    end
    # Constrain the sum of the x's to be 1...
    sumconstraint = function(x::Vector{Float64}, grad::Vector{Float64})
        if length(grad) > 0
            grad = ones(length(x))
        end
        xOut = sum(x) - 1
        println("sumconstraint: x = $x; constraint = $xOut; ∂constraint/∂x = $grad")
        return(xOut)
    end
    opt = Opt(:LD_SLSQP, n)
    lower_bounds!(opt, zeros(n))
    upper_bounds!(opt, ones(n))
    equality_constraint!(opt, sumconstraint, 0)
    #xtol_rel!(opt, 1e-4)
    xtol_abs!(opt, 1e-8)
    min_objective!(opt, myfunc)
    maxeval!(opt, 20)   # to ensure the code always terminates; remove this line once the code works correctly?
    optimize(opt, ones(n) ./ n)
end
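For completeness, this is how I call it; as I understand NLopt.jl, optimize returns a (minf, minx, ret) tuple, so:

(minf, minx, ret) = testNLopt()
println("got $minf at $minx (termination status $ret)")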
I have read the tutorial and the documentation here and here, but still can't figure out what's wrong. Worryingly, each time I run testNLopt I see different behaviour, as in this screenshot, including cases where the solver uselessly evaluates myfunc([NaN, NaN]) several times.