Generalized Minimal RESidual (GMRES) with an ILU preconditioner
I am trying to implement an ILU preconditioner in this GMRES code I wrote (to solve the linear system Ax = b). I am testing it on a simple 25x25 tridiagonal SPD matrix. As you can see, I compute the preconditioner with the spilu method. The code runs without errors, but the solution is clearly wrong: at the end of the code I print the norm of b and the norm of the product A*x, and they are not even close.
The code runs fine without the preconditioner and converges in 13 iterations for the same matrix.
This is the code:
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
'Size controller'
matrixSize = 25

'Building a tri-diagonal matrix'
def Atridiag(val_0, val_sup, val_inf, mSize):
    cen = np.ones((1, mSize))*val_0
    sup = np.ones((1, mSize-1))*val_sup
    inf = np.ones((1, mSize-1))*val_inf
    diag_cen = np.diagflat(cen, 0)
    diag_sup = np.diagflat(sup, 1)
    diag_inf = np.diagflat(inf, -1)
    return diag_cen + diag_sup + diag_inf

A = Atridiag(2, -1, -1, matrixSize)
A = sp.sparse.csc_matrix(A)

'Plot matrix sparsity'
plt.clf()
plt.spy(A, marker='.', markersize=2)
plt.show()

'b and x0 vectors (all ones)'
b = np.matrix(np.ones((matrixSize, 1)))
x = np.matrix(np.ones((matrixSize, 1)))

'Incomplete LU'
M = sp.sparse.linalg.dsolve.spilu(A)
M1 = lambda x: M.solve(x)
M2 = sp.sparse.linalg.LinearOperator((matrixSize, matrixSize), M1)

'Initial Data'
nmax_iter = 30
rstart = 2
tol = 1e-7
e = np.zeros((nmax_iter + 1, 1))
rr = 1

'Starting GMRES'
for rs in range(0, rstart+1):
    'first check on residual'
    if rr < tol:
        break
    else:
        r0 = (b - A.dot(x))
        betha = np.linalg.norm(r0)
        e[0] = betha
        H = np.zeros((nmax_iter + 1, nmax_iter))
        V = np.zeros((matrixSize, nmax_iter+1))
        V[:, 0:1] = r0/betha
    for k in range(1, nmax_iter+1):
        'Applying the Preconditioner'
        t = A.dot(V[:, k-1])
        V[:, k] = M2.matvec(t)
        'Gram-Schmidt orthogonalization'
        for j in range(k):
            H[j, k-1] = np.dot(V[:, k].T, V[:, j])
            V[:, k] = V[:, k] - (np.dot(H[j, k-1], V[:, j]))
        H[k, k-1] = np.linalg.norm(V[:, k])
        V[:, k] = V[:, k] / H[k, k-1]
        'QR Decomposition'
        n = k
        Q = np.zeros((n+1, n))
        R = np.zeros((n, n))
        R[0, 0] = np.linalg.norm(H[0:n+2, 0])
        Q[:, 0] = H[0:n+1, 0] / R[0, 0]
        for j in range(0, n+1):
            t = H[0:n+1, j-1]
            for i in range(0, j-1):
                R[i, j-1] = np.dot(Q[:, i], t)
                t = t - np.dot(R[i, j-1], Q[:, i])
            R[j-1, j-1] = np.linalg.norm(t)
            Q[:, j-1] = t / R[j-1, j-1]
        g = np.dot(Q.T, e[0:k+1])
        Z = np.dot(np.linalg.inv(R), g)
        Res = e[0:n] - np.dot(H[0:n, 0:n], Z[0:n])
        rr = np.linalg.norm(Res)
        'second check on residual'
        if rr < tol:
            break
    'Updating the solution'
    x = x + np.dot(V[:, 0:k], Z)

print(sp.linalg.norm(b))
print(sp.linalg.norm(np.dot(A.todense(), x)))
I really hope someone can figure this out!!
Maybe it's too late, but for future reference:
You forgot to apply the preconditioner when updating x:
x = x + M2.dot(np.dot(V[:, 0:k], Z))  # M2.matvec() works the same
See here.
With that fix, the algorithm converges in 1 iteration.
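For clarity, the fix goes at the end of the restart (rs) loop, replacing the original update (same variable names as the question; only this line changes):

    'Updating the solution'
    x = x + M2.dot(np.dot(V[:, 0:k], Z))   # was: x = x + np.dot(V[:, 0:k], Z)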
Other comments:
- You can build the operator directly:
M2 = sp.sparse.linalg.LinearOperator((matrixSize, matrixSize), M.solve)
- Finally, rather than comparing the norms of Ax and b, it is better to print the norm of their difference (the residual), which gives a more precise check:
print(sp.linalg.norm(b - np.dot(A.todense(), x)))
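As an extra sanity check (my addition, not part of the original answer), you can run the same 25x25 system through SciPy's built-in GMRES with the same spilu-based preconditioner and compare residuals:

import numpy as np
import scipy.sparse as sparse
import scipy.sparse.linalg as spla

n = 25
A = sparse.diags([-1.0, 2.0, -1.0], [-1, 0, 1], shape=(n, n), format='csc')
b = np.ones(n)

ilu = spla.spilu(A)                          # incomplete LU factorization of A
M = spla.LinearOperator((n, n), ilu.solve)   # preconditioner as a LinearOperator

x, info = spla.gmres(A, b, M=M, restart=30, maxiter=2)
print(info)                                  # 0 means converged
print(np.linalg.norm(b - A.dot(x)))          # true residual norm

If info comes back 0 and the residual norm is tiny, the spilu/LinearOperator setup itself is sound, which narrows any remaining discrepancy down to the hand-written iteration.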