Find Camera Center with OpenCV
I am trying to recover the camera center of a calibrated camera.
I have 4 measured 3D objectPoints and their corresponding image points, and I am trying to get the center (translation) back from the estimated pose, but the result is not acceptable.
Any advice on what accuracy I should expect from OpenCV here? Should I add more points?
These are the results I get:
TrueCenter in mm for XYZ
[[4680.]
[5180.]
[1621.]]
Center
[[-2508.791]
[ 6015.98 ]
[-1096.674]]
import numpy as np
import cv2
from scipy.linalg import inv

# Ground-truth camera center in world coordinates (mm).
TrueCameraCenter = np.array([4680., 5180., 1621.]).reshape(-1, 1)

# Measured 3D world points (mm) and their observed image points (px).
objectPoints = np.array(
    [[   0., 5783., 1970.],
     [   0., 5750., 1261.],
     [   0., 6412., 1968.],
     [1017., 9809., 1547.]], dtype=np.float32)
imagePoints = np.array(
    [[ 833.75, 1097.25],
     [ 798.  , 1592.25],
     [1323.  , 1133.5 ],
     [3425.5 , 1495.5 ]], dtype=np.float32)

# Intrinsic matrix from a previous calibration.
cameraMatrix = np.array(
    [[3115.104,   -7.3  , 2027.605],
     [   0.   , 3077.283, 1504.034],
     [   0.   ,    0.   ,    1.   ]])

# Estimate the pose (world -> camera) and reproject the points with it.
retval, rvec, tvec = cv2.solvePnP(objectPoints, imagePoints, cameraMatrix,
                                  None, None, None, False, cv2.SOLVEPNP_ITERATIVE)
R, jac = cv2.Rodrigues(rvec)
imagePoints2, jac = cv2.projectPoints(objectPoints, rvec, tvec, cameraMatrix, None)

# Camera center in world coordinates: C = -R^T t.
print('TrueCenter in mm for XYZ\n', TrueCameraCenter, '\nCenter\n', -inv(R).dot(tvec))
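One quick way to judge whether more (or better-placed) points are needed is to look at how well the recovered pose reprojects the measured points; imagePoints2 is already computed above but never used. The sketch below is my addition, assumes the snippet above has run, and also cross-checks the center by building P = K[R|t] and letting cv2.decomposeProjectionMatrix recover it independently:

# Per-point reprojection error of the estimated pose (assumes the snippet
# above has been executed, so imagePoints2, R, tvec etc. exist).
reproj_err = np.linalg.norm(imagePoints2.reshape(-1, 2) - imagePoints, axis=1)
print('Per-point reprojection error (px):', np.round(reproj_err, 2))
print('RMS reprojection error (px):', np.sqrt(np.mean(reproj_err**2)))

# Independent cross-check: build the 3x4 projection matrix P = K [R|t] and
# decompose it; transVect is the camera center in homogeneous coordinates.
P = cameraMatrix.dot(np.hstack((R, tvec)))
_, _, transVect, *_ = cv2.decomposeProjectionMatrix(P)
print('Center from decomposeProjectionMatrix:\n', (transVect[:3] / transVect[3]).flatten())

If the reprojection error is small while the center is still far off, the problem is the point configuration (too few points, or a near-degenerate arrangement), not the optimizer.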
I found an interesting presentation by Bill Wolfe on the pose-determination problem: Perspective View Of 3 Points.
So, using 4 coplanar points (with no 3 of them collinear) improved the solution:
import numpy as np
import cv2
from scipy.linalg import inv, norm

# Ground-truth camera center in world coordinates (mm).
TrueCameraCenter = np.array([4680., 5180., 1621.])

# Four coplanar world points (all at X = 0), no three of them collinear,
# and their observed image points (px).
objectPoints = np.array(
    [[0., 5783., 1970.],
     [0., 5750., 1261.],
     [0., 6412., 1968.],
     [0., 6449., 1288.]])
imagePoints = np.array(
    [[ 497.5 ,  674.75],
     [ 523.75, 1272.5 ],
     [1087.75,  696.75],
     [1120.  , 1212.5 ]])

# Intrinsics and distortion coefficients from calibration.
cameraMatrix = np.array(
    [[3189.096,    0.   , 2064.431],
     [   0.   , 3177.615, 1482.859],
     [   0.   ,    0.   ,    1.   ]])
dist_coefs = np.array([[0.232, -1.215, -0.002, 0.011, 1.268]])

# Estimate the pose and recover the camera center C = -R^T t.
retval, rvec, tvec = cv2.solvePnP(objectPoints, imagePoints, cameraMatrix, dist_coefs,
                                  None, None, False, cv2.SOLVEPNP_ITERATIVE)
R, _ = cv2.Rodrigues(rvec)
C = -inv(R).dot(tvec).flatten()

print('TrueCenter in mm for XYZ\n', TrueCameraCenter, '\nCenter\n', C.astype(int))
print('Distance:', int(norm(TrueCameraCenter - C)))
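Wolfe's slides illustrate why so few points can land on a wrong answer: the 3-point (P3P) problem generally admits up to four valid camera poses. As a minimal illustration of that ambiguity (my addition, reusing the arrays defined above; cv2.solveP3P is available in OpenCV 3.3+), one can feed only the first three correspondences to solveP3P and print the center implied by each candidate pose:

# With only 3 correspondences, OpenCV may return up to 4 candidate poses
# (assumes objectPoints, imagePoints, cameraMatrix, dist_coefs from above).
n, rvecs, tvecs = cv2.solveP3P(objectPoints[:3], imagePoints[:3],
                               cameraMatrix, dist_coefs, cv2.SOLVEPNP_P3P)
print('Number of P3P solutions:', n)
for i, (rv, tv) in enumerate(zip(rvecs, tvecs)):
    Ri, _ = cv2.Rodrigues(rv)
    Ci = -inv(Ri).dot(tv).flatten()
    print('Candidate center %d:' % i, Ci.astype(int))

Adding a fourth well-placed point, as in the snippet above, is what lets solvePnP pick the physically correct pose out of these candidates.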