Perspective Image Stitching
I have found very useful examples of image stitching, but my problem is with these kinds of images.
Here is one example image.
Here is the other image.
When I use the OpenCV stitcher, the resulting image keeps coming out smaller, like this one.
Is there any way to apply a transformation to the input images so that they come out like this?
Here is the code:
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include<opencv2/opencv.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/nonfree/nonfree.hpp>
#include <opencv2/stitching/stitcher.hpp>
#include<vector>
using namespace cv;
using namespace std;
cv::vector<cv::Mat> ImagesList;
string result_name ="/TopViewsHorizantale/1.bmp";
int main()
{
// Load the images
Mat image1= imread("current_00000.bmp" );
Mat image2= imread("current_00001.bmp" );
cv::resize(image1, image1, image2.size());
Mat gray_image1;
Mat gray_image2;
Mat Matrix = Mat(3,3,CV_32FC1);
// Convert to Grayscale
cvtColor( image1, gray_image1, CV_RGB2GRAY );
cvtColor( image2, gray_image2, CV_RGB2GRAY );
namedWindow("first image",WINDOW_AUTOSIZE);
namedWindow("second image",WINDOW_AUTOSIZE);
imshow("first image",image2);
imshow("second image",image1);
if( !gray_image1.data || !gray_image2.data )
{ std::cout<< " --(!) Error reading images " << std::endl; return -1; }
//-- Step 1: Detect the keypoints using SURF Detector
int minHessian = 400;
SurfFeatureDetector detector( minHessian );
std::vector< KeyPoint > keypoints_object, keypoints_scene;
detector.detect( gray_image1, keypoints_object );
detector.detect( gray_image2, keypoints_scene );
//-- Step 2: Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat descriptors_object, descriptors_scene;
extractor.compute( gray_image1, keypoints_object, descriptors_object );
extractor.compute( gray_image2, keypoints_scene, descriptors_scene );
//-- Step 3: Matching descriptor vectors using FLANN matcher
FlannBasedMatcher matcher;
std::vector< DMatch > matches;
matcher.match( descriptors_object, descriptors_scene, matches );
double max_dist = 0; double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for( int i = 0; i < descriptors_object.rows; i++ )
{ double dist = matches[i].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
printf("-- Max dist : %f \n", max_dist );
printf("-- Min dist : %f \n", min_dist );
//-- Use only "good" matches (i.e. whose distance is less than 3*min_dist )
std::vector< DMatch > good_matches;
for( int i = 0; i < descriptors_object.rows; i++ )
{ if( matches[i].distance < 3*min_dist )
{ good_matches.push_back( matches[i]); }
}
std::vector< Point2f > obj;
std::vector< Point2f > scene;
for( int i = 0; i < good_matches.size(); i++ )
{
//-- Get the keypoints from the good matches
obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
}
// Find the Homography Matrix
Mat H = findHomography( obj, scene, CV_RANSAC );
// Use the Homography Matrix to warp the images
cv::Mat result;
int N = image1.rows + image2.rows;
int M = image1.cols+image2.cols;
warpPerspective(image1,result,H,cv::Size(N,M));
cv::Mat half(result,cv::Rect(0,0,image2.rows,image2.cols));
result.copyTo(half);
namedWindow("Result",WINDOW_AUTOSIZE);
imshow( "Result", result);
imwrite(result_name, result);
waitKey(0);
return 0;
}
There are some more images at this link: https://www.dropbox.com/sh/ovzkqomxvzw8rww/AAB2DDCrCF6NlCFre7V1Gb6La?dl=0
Thank you very much.
Lafi
Problem: the output image is too large.
Original code:
int N = image1.rows + image2.rows;
int M = image1.cols+image2.cols;
warpPerspective(image1,result,H,cv::Size(N,M)); // Too big size.
cv::Mat half(result,cv::Rect(0,0,image2.rows,image2.cols));
result.copyTo(half);
namedWindow("Result",WINDOW_AUTOSIZE);
imshow( "Result", result);
The result image is allocated with as many rows as image1 and image2 combined. However, the output image should be the size of image1 plus image2 minus the size of the overlap region.
Another issue
Why are you warping image1? Compute H' (the inverse of H) and warp image2 with it instead. You should be registering image2 onto image1.
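For example, a minimal C++ sketch of that change, reusing obj, scene, image1 and image2 from the question's code (canvasSize is a placeholder for a canvas size worked out from the warped corners, as sketched further below):

// Estimate the homography in the image2 -> image1 direction (equivalent to H.inv())
cv::Mat H21 = cv::findHomography(scene, obj, CV_RANSAC);
cv::Mat result2;
// Warp image2 into image1's coordinate frame; image1 itself stays where it is
cv::warpPerspective(image2, result2, H21, canvasSize);
image1.copyTo(result2(cv::Rect(0, 0, image1.cols, image1.rows)));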
Also, study how warpPerspective works. It finds the ROI that image2 will be warped into. Then, for each pixel (x, y) in that ROI of result, it finds the corresponding location (x', y') in image2. Note that (x', y') can be real-valued, e.g. (4.5, 5.4). Some form of interpolation (probably bilinear) is then used to find the pixel value at (x, y) in the result image.
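To make the inverse-mapping idea concrete, here is a rough, simplified C++ sketch for a single-channel 8-bit image. It is only an illustration of the concept; in practice just call cv::warpPerspective, which also handles borders, other image types and other interpolation modes:

// H maps src -> dst, as in warpPerspective; we invert it to map dst pixels back into src
cv::Mat manualWarp(const cv::Mat& src, const cv::Mat& H, cv::Size dstSize)
{
    cv::Mat Hinv = H.inv();
    cv::Mat dst = cv::Mat::zeros(dstSize, CV_8UC1);
    for (int y = 0; y < dst.rows; ++y)
    {
        for (int x = 0; x < dst.cols; ++x)
        {
            // Destination pixel (x, y) in homogeneous coordinates
            cv::Mat p = (cv::Mat_<double>(3, 1) << x, y, 1.0);
            cv::Mat q = Hinv * p;
            double sx = q.at<double>(0) / q.at<double>(2);  // source x', may be fractional, e.g. 4.5
            double sy = q.at<double>(1) / q.at<double>(2);  // source y', e.g. 5.4
            if (sx < 0 || sy < 0 || sx >= src.cols - 1 || sy >= src.rows - 1)
                continue;                                   // falls outside the source image
            // Bilinear interpolation between the four surrounding source pixels
            int x0 = (int)sx, y0 = (int)sy;
            double fx = sx - x0, fy = sy - y0;
            double v = (1 - fx) * (1 - fy) * src.at<uchar>(y0,     x0)
                     +      fx  * (1 - fy) * src.at<uchar>(y0,     x0 + 1)
                     + (1 - fx) *      fy  * src.at<uchar>(y0 + 1, x0)
                     +      fx  *      fy  * src.at<uchar>(y0 + 1, x0 + 1);
            dst.at<uchar>(y, x) = (uchar)(v + 0.5);
        }
    }
    return dst;
}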
Next, how to find the size of the result matrix: don't use N and M. Use the matrix H' to warp the image corners and find where they end up.
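A C++ sketch of that corner-based sizing, assuming H21 is the image2 -> image1 homography from the sketch above and image1 is kept at the origin:

// The four corners of image2, in its own coordinate frame
std::vector<cv::Point2f> corners2(4), corners2Warped;
corners2[0] = cv::Point2f(0, 0);
corners2[1] = cv::Point2f((float)image2.cols, 0);
corners2[2] = cv::Point2f((float)image2.cols, (float)image2.rows);
corners2[3] = cv::Point2f(0, (float)image2.rows);
// Where those corners land in image1's frame (perspectiveTransform does the homogeneous divide)
cv::perspectiveTransform(corners2, corners2Warped, H21);
// Canvas = bounding box of image1 (fixed at the origin) and the warped image2 corners
cv::Rect box = cv::boundingRect(corners2Warped) | cv::Rect(0, 0, image1.cols, image1.rows);
cv::Size canvasSize(box.width, box.height);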
For transformation matrices, see this wiki and http://planning.cs.uiuc.edu/node99.html. Know the difference between rotation, translation, affine and perspective transformation matrices. Then read the OpenCV docs here.
You can also read my earlier answer. That answer shows the simple algebra used to find the crop region. You will need to adapt it to the four corners of both images. Note that pixels of the new image can also land at negative positions.
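And a sketch of dealing with corners that land at negative coordinates, assuming box and H21 from the sketches above: prepend a translation to the homography so everything is shifted into the visible canvas before warping.

// Shift by the amount the bounding box extends into negative coordinates
double tx = box.x < 0 ? -box.x : 0;
double ty = box.y < 0 ? -box.y : 0;
cv::Mat T = (cv::Mat_<double>(3, 3) << 1, 0, tx,
                                       0, 1, ty,
                                       0, 0, 1);
cv::Mat warped1, warped2, blended;
// image1 is only translated; image2 is translated and warped
cv::warpPerspective(image1, warped1, T, cv::Size(box.width, box.height));
cv::warpPerspective(image2, warped2, T * H21, cv::Size(box.width, box.height));
// Simple 50/50 blend of the two layers, as in the Java code below
cv::addWeighted(warped1, 0.5, warped2, 0.5, 0, blended);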
Sample code (in Java):
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import org.opencv.calib3d.Calib3d;
import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.DMatch;
import org.opencv.core.KeyPoint;
import org.opencv.core.Mat;
import org.opencv.core.MatOfDMatch;
import org.opencv.core.MatOfKeyPoint;
import org.opencv.core.MatOfPoint2f;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.core.Size;
import org.opencv.features2d.DescriptorExtractor;
import org.opencv.features2d.DescriptorMatcher;
import org.opencv.features2d.FeatureDetector;
import org.opencv.features2d.Features2d;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;
public class Driver {
public static void stitchImages() {
// Read as grayscale
Mat grayImage1 = Imgcodecs.imread("current_00000.bmp", 0);
Mat grayImage2 = Imgcodecs.imread("current_00001.bmp", 0);
if (grayImage1.dataAddr() == 0 || grayImage2.dataAddr() == 0) {
System.out.println("Images read unsuccessful.");
return;
}
// -- Step 1: Detect the keypoints using the AKAZE detector
MatOfKeyPoint keypoints1 = new MatOfKeyPoint();
MatOfKeyPoint keypoints2 = new MatOfKeyPoint();
FeatureDetector detector = FeatureDetector.create(FeatureDetector.AKAZE);
detector.detect(grayImage1, keypoints1);
detector.detect(grayImage2, keypoints2);
// -- Step 2: Calculate descriptors (feature vectors)
DescriptorExtractor extractor = DescriptorExtractor.create(DescriptorExtractor.AKAZE);
Mat descriptors1 = new Mat();
Mat descriptors2 = new Mat();
extractor.compute(grayImage1, keypoints1, descriptors1);
extractor.compute(grayImage2, keypoints2, descriptors2);
// -- Step 3: Match the keypoints (AKAZE descriptors are binary, so use Hamming distance)
DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE_HAMMING);
MatOfDMatch matches = new MatOfDMatch();
matcher.match(descriptors1, descriptors2, matches);
List<DMatch> myList = new LinkedList<>(matches.toList());
// Filter good matches
double min_dist = Double.MAX_VALUE;
Iterator<DMatch> itr = myList.iterator();
while (itr.hasNext()) {
DMatch element = itr.next();
min_dist = Math.min(element.distance, min_dist);
}
LinkedList<Point> img1GoodPointsList = new LinkedList<Point>();
LinkedList<Point> img2GoodPointsList = new LinkedList<Point>();
List<KeyPoint> keypoints1List = keypoints1.toList();
List<KeyPoint> keypoints2List = keypoints2.toList();
itr = myList.iterator();
while (itr.hasNext()) {
DMatch dMatch = itr.next();
// Keep matches within 5x the best distance; drop the rest
if (dMatch.distance <= 5 * min_dist) {
img1GoodPointsList.addLast(keypoints1List.get(dMatch.queryIdx).pt);
img2GoodPointsList.addLast(keypoints2List.get(dMatch.trainIdx).pt);
} else {
itr.remove();
}
}
matches.fromList(myList);
Mat outputMid = new Mat();
System.out.println("best matches size: " + matches.size());
Features2d.drawMatches(grayImage1, keypoints1, grayImage2, keypoints2, matches, outputMid);
Imgcodecs.imwrite("outputMid - A - A.jpg", outputMid);
MatOfPoint2f img1Locations = new MatOfPoint2f();
img1Locations.fromList(img1GoodPointsList);
MatOfPoint2f img2Locations = new MatOfPoint2f();
img2Locations.fromList(img2GoodPointsList);
// Find the homography matrix. Note: img2Locations is given first so we
// directly get the image2 -> image1 mapping (the inverse direction).
Mat hg = Calib3d.findHomography(img2Locations, img1Locations, Calib3d.RANSAC, 3);
System.out.println("hg is: " + hg.dump());
// Find the locations to which the four corners of image2 will warp.
Size img1Size = grayImage1.size();
Size img2Size = grayImage2.size();
System.out.println("Sizes are: " + img1Size + ", " + img2Size);
// Store location x,y,z for 4 corners
Mat img2Corners = new Mat(3, 4, CvType.CV_64FC1, new Scalar(0));
Mat img2CornersWarped = new Mat(3, 4, CvType.CV_64FC1);
img2Corners.put(0, 0, 0, img2Size.width, 0, img2Size.width); // x
img2Corners.put(1, 0, 0, 0, img2Size.height, img2Size.height); // y
img2Corners.put(2, 0, 1, 1, 1, 1); // z - all 1
System.out.println("Homography is \n" + hg.dump());
System.out.println("Corners matrix is \n" + img2Corners.dump());
Core.gemm(hg, img2Corners, 1, new Mat(), 0, img2CornersWarped);
System.out.println("img2CornersWarped: " + img2CornersWarped.dump());
// Find the new size to use: image1 stays at the origin, so its own extent
// gives the starting bounds; expand them with the warped corners of image2.
int minX = 0, minY = 0;
int maxX = (int) img1Size.width, maxY = (int) img1Size.height;
double[] xCoordinates = new double[4];
img2CornersWarped.get(0, 0, xCoordinates);
double[] yCoordinates = new double[4];
img2CornersWarped.get(1, 0, yCoordinates);
double[] zCoordinates = new double[4];
img2CornersWarped.get(2, 0, zCoordinates);
for (int c = 0; c < 4; c++) {
// Convert from homogeneous coordinates: divide x and y by z
int x = (int) (xCoordinates[c] / zCoordinates[c]);
int y = (int) (yCoordinates[c] / zCoordinates[c]);
minX = Math.min(x, minX);
maxX = Math.max(x, maxX);
minY = Math.min(y, minY);
maxY = Math.max(y, maxY);
}
int rows = (maxY - minY + 1);
int cols = (maxX - minX + 1);
// Warp to produce the final output
Mat output1 = new Mat(new Size(cols, rows), CvType.CV_8U, new Scalar(0));
Mat output2 = new Mat(new Size(cols, rows), CvType.CV_8U, new Scalar(0));
Imgproc.warpPerspective(grayImage1, output1, Mat.eye(new Size(3, 3), CvType.CV_32F), new Size(cols, rows));
Imgproc.warpPerspective(grayImage2, output2, hg, new Size(cols, rows));
Mat output = new Mat(new Size(cols, rows), CvType.CV_8U);
Core.addWeighted(output1, 0.5, output2, 0.5, 0, output);
Imgcodecs.imwrite("output.jpg", output);
}
public static void main(String[] args) {
System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
stitchImages();
}
}
Change of descriptor
I moved from SURF to AKAZE. With this I got perfect image registration.
Output image
This output uses less space, and the change of descriptor gives perfect registration.
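If you want to try the same descriptor switch on the question's C++ side, here is a minimal sketch, assuming OpenCV 3.x (cv::AKAZE does not exist in the 2.4.x nonfree build used in the question) and reusing gray_image1 and gray_image2 from the question's code:

#include <opencv2/features2d.hpp>

cv::Ptr<cv::AKAZE> akaze = cv::AKAZE::create();
std::vector<cv::KeyPoint> kp1, kp2;
cv::Mat desc1, desc2;
akaze->detectAndCompute(gray_image1, cv::noArray(), kp1, desc1);
akaze->detectAndCompute(gray_image2, cv::noArray(), kp2, desc2);

// AKAZE produces binary descriptors, so match with Hamming distance
// (FLANN's default KD-tree index expects float descriptors such as SURF's).
cv::BFMatcher matcher(cv::NORM_HAMMING);
std::vector<cv::DMatch> matches;
matcher.match(desc1, desc2, matches);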
P.S.: IMHO, coding is great, but the real treasure is the underlying knowledge/concepts.