EmguCV的官方网站上的例子中,有SURF算法的实现,其实现时利用了GPU加速,看着比较复杂。此外,官网上例子的实现并没有做界面,看着不舒服,加载图片也不是很方便。因此,为了学习,我将官网上的例子进行了修改,去掉了GPU加速的部分,然后再做了显示界面,操作起来更友好些。
窗体上有两个PictureBox控件,一个用来显示待匹配的源图像,一个用来显示匹配的目标图像。然后相对应的有三个Button控件,第一个用来打开源图像,第二个用来打开目标图像,第三个用来匹配,当点击第三个Button控件实现匹配,匹配的图像显示在新的窗体上,新的窗体很简单,就一个窗体,图像我们使用窗体的Paint事件绘制在上面,第二个窗体如下:
其中button1实现的是打开源图像,代码如下:
/// <summary>
/// Opens a file dialog and loads the selected image into the source PictureBox.
/// Stores the path in curFileNameSrc and the bitmap in curBitmapSrc.
/// </summary>
private void buttonSrc_Click(object sender, EventArgs e)
{
    // "using" disposes the dialog's native handle (the original leaked it).
    using (OpenFileDialog opnDlg = new OpenFileDialog())
    {
        // BUG FIX: the original filter ended with "*png" (missing dot),
        // so PNG files were never shown by the dialog.
        opnDlg.Filter = "All Image files|*.bmp;*.gif;*.jpg;*.ico;*.png";
        // Set the title of the dialog.
        opnDlg.Title = "Open Src image files";
        opnDlg.ShowHelp = true;
        if (opnDlg.ShowDialog() == DialogResult.OK)
        {
            curFileNameSrc = opnDlg.FileName;
            try
            {
                curBitmapSrc = new Bitmap(curFileNameSrc);
                pictureBoxSrc.Image = curBitmapSrc;
            }
            catch (Exception ex)
            {
                // Report the actual failure instead of a generic typo'd message.
                MessageBox.Show("Failed to load image: " + ex.Message);
            }
        }
    }
}
Button2实现的是打开目标图像的功能,代码如下:
/// <summary>
/// Opens a file dialog and loads the selected image into the destination PictureBox.
/// Stores the path in curFileNameDst and the bitmap in curBitmapDst.
/// </summary>
private void buttonDst_Click(object sender, EventArgs e)
{
    // "using" disposes the dialog's native handle (the original leaked it).
    using (OpenFileDialog opnDlg = new OpenFileDialog())
    {
        // BUG FIX: the original filter ended with "*png" (missing dot),
        // so PNG files were never shown by the dialog.
        opnDlg.Filter = "All Image files|*.bmp;*.gif;*.jpg;*.ico;*.png";
        // Set the title of the dialog.
        opnDlg.Title = "Open Dst image files";
        opnDlg.ShowHelp = true;
        if (opnDlg.ShowDialog() == DialogResult.OK)
        {
            curFileNameDst = opnDlg.FileName;
            try
            {
                curBitmapDst = new Bitmap(curFileNameDst);
                pictureBoxDst.Image = curBitmapDst;
            }
            catch (Exception ex)
            {
                // Report the actual failure instead of a generic typo'd message.
                MessageBox.Show("Failed to load image: " + ex.Message);
            }
        }
    }
}
Button3用来实现匹配,代码如下:
/// <summary>
/// Runs SURF matching between the loaded source (model) and destination (observed)
/// images and shows the result in a modal Form2.
/// </summary>
private void buttonMatch_Click(object sender, EventArgs e)
{
    if (curBitmapDst != null && curBitmapSrc != null)
    {
        long matchTime;
        // SURF works on single-channel images, so convert both to grayscale.
        // "using" disposes the EmguCV wrappers (the original leaked all four).
        using (Image<Bgr, Byte> srcImg = new Image<Bgr, Byte>(curBitmapSrc))
        using (Image<Gray, Byte> srcGray = srcImg.Convert<Gray, Byte>())
        using (Image<Bgr, Byte> dstImg = new Image<Bgr, Byte>(curBitmapDst))
        using (Image<Gray, Byte> dstGray = dstImg.Convert<Gray, Byte>())
        {
            Matching match = new Matching();
            Image<Bgr, byte> result = match.Draw(srcGray, dstGray, out matchTime);
            Bitmap resultBmp = result.ToBitmap();
            // ShowDialog is modal, so the form (and its paint cycle) finishes
            // before we leave this scope; dispose the form afterwards.
            using (Form2 form = new Form2(resultBmp))
            {
                form.ShowDialog();
            }
        }
    }
    else
    {
        // This branch means the user hasn't loaded both images yet —
        // say so, rather than the original's misleading "programe error".
        MessageBox.Show("Please load both images before matching.");
    }
}
用于匹配的函数,我新建了一个名叫Matching的类,用来实现具体的匹配过程。在Button3中仅是对该类实例化,传入源图像和目标图像,调用Matching类的函数就可以了。新建的Matching类的代码如下:
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using Emgu.CV;
using Emgu.Util;
using Emgu.CV.Structure;
using Emgu.CV.Util;
using Emgu.CV.Features2D;
using System.Diagnostics;
using System.Drawing;

namespace test10
{
    /// <summary>
    /// CPU-only SURF feature matching (EmguCV 2.x API): detects keypoints in a
    /// model image and an observed image, matches descriptors with a brute-force
    /// KNN matcher, filters the matches, and optionally estimates a homography.
    /// </summary>
    class Matching
    {
        public Matching()
        {
        }

        /// <summary>
        /// Detects SURF features in both images and computes filtered matches.
        /// </summary>
        /// <param name="modelImage">Grayscale model (template) image.</param>
        /// <param name="observedImage">Grayscale scene image to search in.</param>
        /// <param name="matchTime">Elapsed milliseconds for observed-image detection
        /// plus matching; note the model-image extraction is NOT included in the
        /// timing because the stopwatch starts after it.</param>
        /// <param name="modelKeyPoints">Keypoints detected in the model image.</param>
        /// <param name="observedKeyPoints">Keypoints detected in the observed image.</param>
        /// <param name="indices">KNN match indices, one row per observed descriptor.</param>
        /// <param name="mask">255 where a match survived filtering, 0 otherwise.</param>
        /// <param name="homography">Model-to-observed homography, or null when fewer
        /// than 4 good correspondences remain.</param>
        public void FindMatch(Image<Gray, Byte> modelImage, Image<Gray, byte> observedImage, out long matchTime, out VectorOfKeyPoint modelKeyPoints, out VectorOfKeyPoint observedKeyPoints, out Matrix<int> indices, out Matrix<byte> mask, out HomographyMatrix homography)
        {
            int k = 2;                        // nearest neighbours per descriptor
            double uniquenessThreshold = 0.8; // Lowe-style ratio-test threshold
            SURFDetector surfCPU = new SURFDetector(500, false);
            Stopwatch watch;
            homography = null;

            // Extract features from the model (object) image.
            modelKeyPoints = new VectorOfKeyPoint();
            Matrix<float> modelDescriptors = surfCPU.DetectAndCompute(modelImage, null, modelKeyPoints);

            watch = Stopwatch.StartNew();

            // Extract features from the observed image.
            observedKeyPoints = new VectorOfKeyPoint();
            Matrix<float> observedDescriptors = surfCPU.DetectAndCompute(observedImage, null, observedKeyPoints);

            BruteForceMatcher<float> matcher = new BruteForceMatcher<float>(DistanceType.L2);
            matcher.Add(modelDescriptors);

            indices = new Matrix<int>(observedDescriptors.Rows, k);
            using (Matrix<float> dist = new Matrix<float>(observedDescriptors.Rows, k))
            {
                matcher.KnnMatch(observedDescriptors, indices, dist, k, null);
                // Start with every match accepted, then let the uniqueness
                // (ratio) test zero out ambiguous ones.
                mask = new Matrix<byte>(dist.Rows, 1);
                mask.SetValue(255);
                Features2DToolbox.VoteForUniqueness(dist, uniquenessThreshold, mask);
            }

            int nonZeroCount = CvInvoke.cvCountNonZero(mask);
            if (nonZeroCount >= 4)
            {
                nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                // A homography needs at least 4 point correspondences.
                if (nonZeroCount >= 4)
                    homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 2);
            }

            watch.Stop();
            matchTime = watch.ElapsedMilliseconds;
        }

        /// <summary>
        /// Runs <see cref="FindMatch"/> and renders the matched keypoints side by
        /// side; when a homography was found, also draws the projected outline of
        /// the model image in red on the result.
        /// </summary>
        public Image<Bgr, Byte> Draw(Image<Gray, Byte> modelImage, Image<Gray, byte> observedImage, out long matchTime)
        {
            HomographyMatrix homography;
            VectorOfKeyPoint modelKeyPoints;
            VectorOfKeyPoint observedKeyPoints;
            Matrix<int> indices;
            Matrix<byte> mask;
            FindMatch(modelImage, observedImage, out matchTime, out modelKeyPoints, out observedKeyPoints, out indices, out mask, out homography);

            // Draw the matched keypoints.
            Image<Bgr, Byte> result = Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints,
                indices, new Bgr(255, 255, 255), new Bgr(255, 255, 255), mask, Features2DToolbox.KeypointDrawType.DEFAULT);

            #region draw the projected region on the image
            if (homography != null)
            {
                // Project the four corners of the model image into the observed
                // image and connect them with a red polyline.
                Rectangle rect = modelImage.ROI;
                PointF[] pts = new PointF[] {
                    new PointF(rect.Left, rect.Bottom),
                    new PointF(rect.Right, rect.Bottom),
                    new PointF(rect.Right, rect.Top),
                    new PointF(rect.Left, rect.Top)};
                homography.ProjectPoints(pts);
                result.DrawPolyline(Array.ConvertAll<PointF, Point>(pts, Point.Round), true, new Bgr(Color.Red), 5);
            }
            #endregion

            return result;
        }
    }
}
在(窗体2)Form2中的代码如下:
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;

namespace test10
{
    /// <summary>
    /// Simple viewer form: draws the bitmap handed to the constructor
    /// in its Paint event handler.
    /// </summary>
    public partial class Form2 : Form
    {
        // Image supplied by the caller; may be null, in which case nothing is drawn.
        private Bitmap Img1;

        public Form2(Bitmap Img)
        {
            InitializeComponent();
            Img1 = Img;
        }

        private void Form2_Paint(object sender, PaintEventArgs e)
        {
            Graphics g = e.Graphics;
            if (Img1 != null)
            {
                // Draw at the image's native size, anchored at the top-left corner.
                g.DrawImage(Img1, 0, 0, Img1.Width, Img1.Height);
            }
        }
    }
}
好,这样就实现了这个例子,我们运行程序看看。
这是加载源图像和目标图像。