This article is mainly based on "OpenCV SIFT and SURF features for seamless image stitching into a panorama". I made a few small modifications so that it also runs correctly on iOS.
Problem description
In Xcode 9, if you drag images or other files directly into the project, they may not be recognized. In that case, add them via Add Files to xxx instead. Note also that a source file mixing Objective-C with C++ (as the code below does) must use the .mm extension so that Xcode compiles it as Objective-C++.
Project directory structure
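A hedged sketch of the layout this code assumes (the project name PanoramaDemo and the file ViewController.mm are hypothetical; the framework and image names appear in the code below):

PanoramaDemo/
├── opencv2.framework      // OpenCV iOS framework (must ship the legacy and nonfree modules)
├── ViewController.mm      // Objective-C++ source holding the code below
├── test01.jpg             // left input image
└── test02.jpg             // right input image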
Main code
1. Stitching code
#include "opencv2.framework/Headers/opencv.hpp"
#include "opencv2.framework/Headers/legacy/legacy.hpp"
#include "opencv2.framework/Headers/nonfree/nonfree.hpp"
#include <vector>
#include <iostream>
using namespace std;
using namespace cv;
// Map a point from the source image to its position in the target image
// after the given 3x3 perspective transform (homogeneous coordinates)
Point2f getTransformPoint(const Point2f originalPoint, const Mat &transformMatrix) {
    Mat originalP, targetP;
    originalP = (Mat_<double>(3,1) << originalPoint.x, originalPoint.y, 1.0);
    targetP = transformMatrix * originalP;
    // Divide by the homogeneous coordinate w to get Cartesian x, y
    float x = targetP.at<double>(0,0) / targetP.at<double>(2,0);
    float y = targetP.at<double>(1,0) / targetP.at<double>(2,0);
    return Point2f(x, y);
}
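For reference, OpenCV already ships an equivalent helper, cv::perspectiveTransform, which applies the same multiply-and-divide to a batch of points. A minimal sketch (the point values and the assumption that homo is a 3x3 CV_64F homography are mine):

// Sketch: the same mapping using OpenCV's built-in perspectiveTransform.
// 'homo' is assumed to be a 3x3 CV_64F matrix as returned by findHomography.
std::vector<cv::Point2f> srcPts, dstPts;
srcPts.push_back(cv::Point2f(10.f, 20.f));      // example input point
cv::perspectiveTransform(srcPts, dstPts, homo); // dstPts[0] matches getTransformPoint(srcPts[0], homo)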
- (UIImage *)composeImage {
    NSString *path01 = [[NSBundle mainBundle] pathForResource:@"test01" ofType:@"jpg"];
    NSString *path02 = [[NSBundle mainBundle] pathForResource:@"test02" ofType:@"jpg"];
    // Bail out if either image is missing from the bundle
    if (path01 == nil || path02 == nil) {
        return [UIImage new];
    }
    Mat img01 = imread([path01 UTF8String]);
    Mat img02 = imread([path02 UTF8String]);
    // Bail out if either image failed to load
    if (!img01.data || !img02.data) {
        return [UIImage new];
    }
    // Convert to grayscale for feature detection (imread loads BGR)
    Mat img_h_01, img_h_02;
    cvtColor(img01, img_h_01, CV_BGR2GRAY);
    cvtColor(img02, img_h_02, CV_BGR2GRAY);
    // Detect SIFT keypoints
    SiftFeatureDetector siftDetector(800);
    vector<KeyPoint> keyPoint1, keyPoint2;
    siftDetector.detect(img_h_01, keyPoint1);
    siftDetector.detect(img_h_02, keyPoint2);
    // Compute descriptors for the keypoints, in preparation for matching
    SiftDescriptorExtractor siftDescriptor;
    Mat img_description_01, img_description_02;
    siftDescriptor.compute(img_h_01, keyPoint1, img_description_01);
    siftDescriptor.compute(img_h_02, keyPoint2, img_description_02);
    // Match descriptors and keep the best pairs
    FlannBasedMatcher matcher;
    vector<DMatch> matchePoints;
    matcher.match(img_description_01, img_description_02, matchePoints, Mat());
    sort(matchePoints.begin(), matchePoints.end()); // DMatch sorts by distance, best first
    // Keep the N best matches (guarding against fewer than 10 matches)
    vector<Point2f> imagePoints1, imagePoints2;
    for (int i = 0; i < 10 && i < (int)matchePoints.size(); i++) {
        imagePoints1.push_back(keyPoint1[matchePoints[i].queryIdx].pt);
        imagePoints2.push_back(keyPoint2[matchePoints[i].trainIdx].pt);
    }
    // Homography mapping img01 onto img02 (3x3), estimated with RANSAC
    Mat homo = findHomography(imagePoints1, imagePoints2, CV_RANSAC);
    // Shift the warped result right by img01.cols so it stays inside the canvas
    Mat adjustMat = (Mat_<double>(3,3) << 1.0, 0, img01.cols, 0, 1.0, 0, 0, 0, 1.0);
    Mat adjustHomo = adjustMat * homo;
    // Locate the strongest match in the source image and in the warped image;
    // these anchor the seam between the two images
    Point2f originalLinkPoint, targetLinkPoint, basedImagePoint;
    originalLinkPoint = keyPoint1[matchePoints[0].queryIdx].pt;
    targetLinkPoint = getTransformPoint(originalLinkPoint, adjustHomo);
    basedImagePoint = keyPoint2[matchePoints[0].trainIdx].pt;
    // Warp img01 into the panorama canvas (with 110 px of slack on the width)
    Mat imageTransform1;
    warpPerspective(img01, imageTransform1, adjustHomo, cv::Size(img02.cols + img01.cols + 110, img02.rows));
    // Blend the overlap region to the left of the strongest match point,
    // so the seam transitions smoothly instead of changing abruptly
    Mat image01OverLap, image02OverLap;
    image01OverLap = imageTransform1(cv::Rect(cv::Point(targetLinkPoint.x - basedImagePoint.x, 0),
                                              cv::Point(targetLinkPoint.x, img02.rows)));
    image02OverLap = img02(cv::Rect(0, 0, image01OverLap.cols, image01OverLap.rows));
    // Keep a copy of img01's overlap so the blend reads unmodified pixels
    Mat image01ROICOPY = image01OverLap.clone();
    for (int i = 0; i < image01OverLap.rows; i++) {
        for (int j = 0; j < image01OverLap.cols; j++) {
            // Linear weight that grows with the distance across the overlap
            double weight = (double)j / image01OverLap.cols;
            image01OverLap.at<Vec3b>(i,j)[0] = (1 - weight) * image01ROICOPY.at<Vec3b>(i,j)[0] + weight * image02OverLap.at<Vec3b>(i,j)[0];
            image01OverLap.at<Vec3b>(i,j)[1] = (1 - weight) * image01ROICOPY.at<Vec3b>(i,j)[1] + weight * image02OverLap.at<Vec3b>(i,j)[1];
            image01OverLap.at<Vec3b>(i,j)[2] = (1 - weight) * image01ROICOPY.at<Vec3b>(i,j)[2] + weight * image02OverLap.at<Vec3b>(i,j)[2];
        }
    }
    // Copy the non-overlapping part of img02 into the panorama
    Mat ROIMat = img02(cv::Rect(cv::Point(image01OverLap.cols, 0), cv::Point(img02.cols, img02.rows)));
    ROIMat.copyTo(Mat(imageTransform1, cv::Rect(targetLinkPoint.x, 0, ROIMat.cols, img02.rows)));
    // imread gave us BGR; swap to RGB so the DeviceRGB color space used below matches
    cvtColor(imageTransform1, imageTransform1, CV_BGR2RGB);
    return [self imageWithCVMat:imageTransform1];
}
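Taking a fixed top-10 works for the demo images but is brittle when matches are few or noisy. A common alternative (a sketch, not part of the original post) is the distance-threshold filter from the OpenCV feature-matching tutorial:

// Sketch: keep every match within 3x the best distance instead of a fixed top-10.
// Assumes matchePoints is non-empty and already sorted by distance, as above.
double minDist = matchePoints.front().distance;
vector<DMatch> goodMatches;
for (size_t i = 0; i < matchePoints.size(); i++) {
    if (matchePoints[i].distance <= 3 * minDist) { // 3x is a common heuristic; tune as needed
        goodMatches.push_back(matchePoints[i]);
    }
}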
二饲握、CVMat
轉(zhuǎn)UIImage
- (UIImage *)imageWithCVMat:(const cv::Mat &)cvMat
{
    NSData *data = [NSData dataWithBytes:cvMat.data length:cvMat.elemSize() * cvMat.total()];
    CGColorSpaceRef colorSpace;
    if (cvMat.elemSize() == 1) {
        colorSpace = CGColorSpaceCreateDeviceGray();
    } else {
        colorSpace = CGColorSpaceCreateDeviceRGB();
    }
    CGDataProviderRef provider = CGDataProviderCreateWithCFData((__bridge CFDataRef)data);
    // Creating CGImage from cv::Mat
    CGImageRef imageRef = CGImageCreate(cvMat.cols,                                     // width
                                        cvMat.rows,                                     // height
                                        8,                                              // bits per component
                                        8 * cvMat.elemSize(),                           // bits per pixel
                                        cvMat.step[0],                                  // bytes per row
                                        colorSpace,                                     // color space
                                        kCGImageAlphaNone | kCGBitmapByteOrderDefault,  // bitmap info
                                        provider,                                       // CGDataProviderRef
                                        NULL,                                           // decode
                                        false,                                          // should interpolate
                                        kCGRenderingIntentDefault                       // intent
                                        );
    UIImage *cvImage = [[UIImage alloc] initWithCGImage:imageRef];
    CGImageRelease(imageRef);
    CGDataProviderRelease(provider);
    CGColorSpaceRelease(colorSpace);
    return cvImage;
}
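Going the other way is often needed too. Below is a minimal sketch of UIImage to cv::Mat, modeled on the standard Core Graphics approach (the method name cvMatFromUIImage is my own, not from the original post):

// Sketch: UIImage -> cv::Mat (8-bit, 4-channel), the usual Core Graphics route.
- (cv::Mat)cvMatFromUIImage:(UIImage *)image
{
    CGColorSpaceRef colorSpace = CGImageGetColorSpace(image.CGImage);
    CGFloat cols = image.size.width;
    CGFloat rows = image.size.height;
    cv::Mat cvMat(rows, cols, CV_8UC4); // 4-channel destination buffer
    CGContextRef contextRef = CGBitmapContextCreate(cvMat.data,    // render target
                                                    cols, rows,
                                                    8,             // bits per component
                                                    cvMat.step[0], // bytes per row
                                                    colorSpace,
                                                    kCGImageAlphaNoneSkipLast | kCGBitmapByteOrderDefault);
    // Draw the UIImage into the Mat's backing buffer
    CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), image.CGImage);
    CGContextRelease(contextRef);
    return cvMat;
}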
三私杜、顯示合成的圖片
- (void)viewDidLoad {
    [super viewDidLoad];
    double start = [[NSDate date] timeIntervalSince1970] * 1000;
    NSLog(@"start time= %f ", start);
    UIImageView *img = [[UIImageView alloc] initWithFrame:self.view.bounds];
    img.contentMode = UIViewContentModeScaleAspectFit;
    img.image = [self composeImage];
    [self.view addSubview:img];
    double end = [[NSDate date] timeIntervalSince1970] * 1000;
    NSLog(@"end time= %f ", end);
    NSLog(@"use time = %f milliseconds ", end - start);
}
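As written, the stitch runs synchronously in viewDidLoad and blocks the main thread for the whole computation. A minimal sketch of moving it onto a background queue with GCD, replacing the img.image = [self composeImage]; line above (assuming the same composeImage method):

// Sketch: run the heavy OpenCV work off the main thread, then update the UI on it.
dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
    UIImage *stitched = [self composeImage]; // heavy OpenCV work, off the main thread
    dispatch_async(dispatch_get_main_queue(), ^{
        img.image = stitched; // UIKit calls must stay on the main thread
    });
});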