Scaled paraboloid and derivatives checking
I am surprised by the output of the check_partial_derivatives() method when it is applied to the problem shown in my previous question. When I add a call to that method:
from __future__ import print_function

import sys

from openmdao.api import IndepVarComp, Component, Problem, Group, ScipyOptimizer


class Paraboloid(Component):

    def __init__(self):
        super(Paraboloid, self).__init__()
        self.add_param('x', val=0.0)
        self.add_param('y', val=0.0)
        self.add_output('f_xy', val=0.0)

    def solve_nonlinear(self, params, unknowns, resids):
        x = params['x']
        y = params['y']
        #unknowns['f_xy'] = (x-3.0)**2 + x*y + (y+4.0)**2 - 3.0
        unknowns['f_xy'] = (1000.*x-3.)**2 + (1000.*x)*(0.01*y) + (0.01*y+4.)**2 - 3.

    def linearize(self, params, unknowns, resids):
        """ Jacobian for our paraboloid."""
        x = params['x']
        y = params['y']
        J = {}
        #J['f_xy', 'x'] = 2.0*x - 6.0 + y
        #J['f_xy', 'y'] = 2.0*y + 8.0 + x
        J['f_xy', 'x'] = 2000000.0*x - 6000.0 + 10.0*y
        J['f_xy', 'y'] = 0.0002*y + 0.08 + 10.0*x
        return J


if __name__ == "__main__":
    top = Problem()
    root = top.root = Group()
    #root.fd_options['force_fd'] = True
    root.add('p1', IndepVarComp('x', 3.0))
    root.add('p2', IndepVarComp('y', -4.0))
    root.add('p', Paraboloid())
    root.connect('p1.x', 'p.x')
    root.connect('p2.y', 'p.y')
    top.driver = ScipyOptimizer()
    top.driver.options['optimizer'] = 'SLSQP'
    top.driver.add_desvar('p1.x', lower=-1000, upper=1000, scaler=1000.)
    top.driver.add_desvar('p2.y', lower=-1000, upper=1000, scaler=0.001)
    top.driver.add_objective('p.f_xy')

    top.setup()
    top.check_partial_derivatives()  # added line
    top.run()

    print('\n')
    print('Minimum of %f found at (%f, %f)' % (top['p.f_xy'], top['p.x'], top['p.y']))
I get the following output:
Partial Derivatives Check
----------------
Component: 'p'
----------------
p: 'f_xy' wrt 'x'
Forward Magnitude : 6.000000e+03
Reverse Magnitude : 6.000000e+03
Fd Magnitude : 2.199400e+07
Absolute Error (Jfor - Jfd) : 2.200000e+07
Absolute Error (Jrev - Jfd) : 2.200000e+07
Absolute Error (Jfor - Jrev): 0.000000e+00
Relative Error (Jfor - Jfd) : 1.000273e+00
Relative Error (Jrev - Jfd) : 1.000273e+00
Relative Error (Jfor - Jrev): 0.000000e+00
Raw Forward Derivative (Jfor)
[[-6000.]]
Raw Reverse Derivative (Jrev)
[[-6000.]]
Raw FD Derivative (Jfor)
[[ 21994001.]]
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
p: 'f_xy' wrt 'y'
Forward Magnitude : 8.000000e-02
Reverse Magnitude : 8.000000e-02
Fd Magnitude : 2.200000e+07
Absolute Error (Jfor - Jfd) : 2.200000e+07
Absolute Error (Jrev - Jfd) : 2.200000e+07
Absolute Error (Jfor - Jrev): 0.000000e+00
Relative Error (Jfor - Jfd) : 1.000000e+00
Relative Error (Jrev - Jfd) : 1.000000e+00
Relative Error (Jfor - Jrev): 0.000000e+00
Raw Forward Derivative (Jfor)
[[ 0.08]]
Raw Reverse Derivative (Jrev)
[[ 0.08]]
Raw FD Derivative (Jfor)
[[ 22000000.08]]
Optimization terminated successfully. (Exit mode 0)
Current function value: [-27.33333333]
Iterations: 4
Function evaluations: 6
Gradient evaluations: 4
Optimization Complete
-----------------------------------
Minimum of -27.333333 found at (0.006667, -733.333333)
The optimization is correct (which all but proves the derivatives are right; the reported minimum can also be verified by hand, see the sketch below), but the check_partial_derivatives output does not show consistent results between the fd and the forward/reverse methods.
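For reference, a small sketch (independent of OpenMDAO) that recovers the reported minimum analytically; the substitution u = 1000*x, v = 0.01*y turns the scaled objective back into the original paraboloid:

import numpy as np

# Stationary point of (u-3)**2 + u*v + (v+4)**2 - 3: the gradient vanishes
# where 2*(u - 3) + v = 0 and u + 2*(v + 4) = 0, i.e. a 2x2 linear system.
u, v = np.linalg.solve([[2., 1.], [1., 2.]], [6., -8.])
x, y = u / 1000., v / 0.01          # undo the scaling
f_min = (u - 3.)**2 + u*v + (v + 4.)**2 - 3.
print(x, y, f_min)                  # 0.006667, -733.333..., -27.333... as reported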
Answer
So you have run into a limitation that has come up before: you cannot compute derivatives about a design point until you have run your model at that point. The finite-difference results are wrong because the model was never run: the stored value of f_xy was still its initial 0.0, so the differencing was done against the wrong baseline, which is where the huge ~2.2e7 magnitudes above come from (see the numerical reproduction below the corrected code). To verify your partials, you need to move check_partial_derivatives to after run. Also, I always comment out the optimizer when checking derivatives, so that the derivatives are checked about the initial point. When I do both of those things, I get good results (see the code below).
top = Problem()
root = top.root = Group()
#root.fd_options['force_fd'] = True
root.add('p1', IndepVarComp('x', 3.0))
root.add('p2', IndepVarComp('y', -4.0))
root.add('p', Paraboloid())
root.connect('p1.x', 'p.x')
root.connect('p2.y', 'p.y')
#top.driver = ScipyOptimizer()
#top.driver.options['optimizer'] = 'SLSQP'
#top.driver.add_desvar('p1.x', lower=-1000, upper=1000, scaler=1000.)
#top.driver.add_desvar('p2.y', lower=-1000, upper=1000, scaler=0.001)
#top.driver.add_objective('p.f_xy')
top.setup()
top.run()
top.check_partial_derivatives() # added line
print('\n')
print('Minimum of %f found at (%f, %f)' % (top['p.f_xy'], top['p.x'], top['p.y']))
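To see concretely where the bogus FD numbers in the question come from, here is a small standalone reproduction. It assumes the default FD step of 1e-6; before the model is run, the component's params still sit at their declared defaults (x = y = 0.0) and the stored output f_xy is still 0.0, so the difference is taken against the wrong baseline:

def f(x, y):
    return (1000.*x - 3.)**2 + (1000.*x)*(0.01*y) + (0.01*y + 4.)**2 - 3.

h = 1e-6  # assumed FD step

# Differencing against the stale stored value f_xy = 0.0 (model never run):
print((f(h, 0.) - 0.0) / h)         # 21994001.0   -> the reported Fd value wrt x
print((f(0., h) - 0.0) / h)         # 22000000.08  -> the reported Fd value wrt y

# Differencing against the correct baseline f(0, 0) = 22.0 instead:
print((f(h, 0.) - f(0., 0.)) / h)   # ~ -5999.0, the analytic partial is -6000.0
print((f(0., h) - f(0., 0.)) / h)   # ~ 0.08, matching the analytic partial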
There is a feature request on our GitHub for the ability to run check_partial_derivatives without running the model first. I think we could do that by just telling root to solve_nonlinear, ignoring the driver, so it may get added at some point (a rough sketch of that idea follows).
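A hypothetical sketch of that workaround, for illustration only; whether root.solve_nonlinear() can be called with no arguments like this is an assumption about the OpenMDAO 1.x Group API and may differ in your version:

top.setup()
top.root.solve_nonlinear()        # execute the model once, bypassing the driver
top.check_partial_derivatives()   # now differenced about a consistent point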