Browse Source

Added DAISY, LUCID, LATCH to XFeatures2D.

UWP10
Canming Huang 10 years ago
parent
commit
56f898c27c
  1. 17
      Emgu.CV.Contrib/XFeatures2D/BriefDescriptorExtractor.cs
  2. 16
      Emgu.CV.Contrib/XFeatures2D/CudaSURF.cs
  3. 93
      Emgu.CV.Contrib/XFeatures2D/DAISY.cs
  4. 16
      Emgu.CV.Contrib/XFeatures2D/Freak.cs
  5. 66
      Emgu.CV.Contrib/XFeatures2D/LATCH.cs
  6. 58
      Emgu.CV.Contrib/XFeatures2D/LUCID.cs
  7. 14
      Emgu.CV.Contrib/XFeatures2D/SIFT.cs
  8. 14
      Emgu.CV.Contrib/XFeatures2D/SURF.cs
  9. 20
      Emgu.CV.Contrib/XFeatures2D/StarDetector.cs
  10. 4
      Emgu.CV.Example/SURFFeature/DrawMatches.cs
  11. 4
      Emgu.CV.Example/TrafficSignRecognition/StopSignDetector.cs
  12. 8
      Emgu.CV.Extern/xfeatures2d/nonfree_c.cpp
  13. 8
      Emgu.CV.Extern/xfeatures2d/nonfree_c.h
  14. 24
      Emgu.CV.Extern/xfeatures2d/xfeatures2d_c.cpp
  15. 12
      Emgu.CV.Extern/xfeatures2d/xfeatures2d_c.h
  16. 6
      Emgu.CV.Test/AutoTestCuda.cs
  17. 70
      Emgu.CV.Test/AutoTestFeatures2d.cs
  18. 2
      Emgu.CV.Test/AutoTestVarious.cs
  19. 4
      Emgu.CV.Test/Class1.cs
  20. 2
      Emgu.CV/Features2D/Feature2D.cs

17
Emgu.CV.Contrib/XFeatures2D/BriefDescriptorExtractor.cs

@ -20,18 +20,13 @@ namespace Emgu.CV.XFeatures2D
/// </summary>
public class BriefDescriptorExtractor : Feature2D
{
static BriefDescriptorExtractor()
{
CvInvoke.CheckLibraryLoaded();
}
/// <summary>
/// Create a BRIEF descriptor extractor.
/// </summary>
/// <param name="descriptorSize">The size of descriptor. It can be equal 16, 32 or 64 bytes.</param>
public BriefDescriptorExtractor(int descriptorSize = 32)
{
_ptr = CvBriefDescriptorExtractorCreate(descriptorSize, ref _feature2D);
_ptr = ContribInvoke.cveBriefDescriptorExtractorCreate(descriptorSize, ref _feature2D);
}
/// <summary>
@ -40,14 +35,18 @@ namespace Emgu.CV.XFeatures2D
protected override void DisposeObject()
{
if (_ptr != IntPtr.Zero)
CvBriefDescriptorExtractorRelease(ref _ptr);
ContribInvoke.cveBriefDescriptorExtractorRelease(ref _ptr);
base.DisposeObject();
}
}
public static partial class ContribInvoke
{
[DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
internal extern static IntPtr CvBriefDescriptorExtractorCreate(int descriptorSize, ref IntPtr feature2D);
internal extern static IntPtr cveBriefDescriptorExtractorCreate(int descriptorSize, ref IntPtr feature2D);
[DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
internal extern static void CvBriefDescriptorExtractorRelease(ref IntPtr extractor);
internal extern static void cveBriefDescriptorExtractorRelease(ref IntPtr extractor);
}
}

16
Emgu.CV.Contrib/XFeatures2D/CudaSURFDetector.cs → Emgu.CV.Contrib/XFeatures2D/CudaSURF.cs

@ -18,19 +18,8 @@ namespace Emgu.CV.XFeatures2D
/// <summary>
/// A SURF detector using Cuda
/// </summary>
public class CudaSURFDetector : UnmanagedObject
public class CudaSURF : UnmanagedObject
{
/*
/// <summary>
/// Create a Cuda SURF detector using the specific parameters
/// </summary>
/// <param name="detector">The surf detector where the parameters will be borrow from</param>
/// <param name="featuresRatio">Max features = featuresRatio * img.size().area().</param>
public CudaSURFDetector(MCvSURFParams detector, float featuresRatio = 0.01f)
: this((float)detector.HessianThreshold, detector.NOctaves, detector.NOctaveLayers, (detector.Extended != 0), featuresRatio, (detector.Upright != 0))
{
}*/
/// <summary>
/// Create a Cuda SURF detector
/// </summary>
@ -40,7 +29,7 @@ namespace Emgu.CV.XFeatures2D
/// <param name="extended">True, if generate 128-len descriptors, false - 64-len descriptors.</param>
/// <param name="featuresRatio">Max features = featuresRatio * img.size().area().</param>
/// <param name="upright">If set to true, the orientation is not computed for the keypoints</param>
public CudaSURFDetector(
public CudaSURF(
float hessianThreshold = 100.0f,
int nOctaves = 4,
int nOctaveLayers = 2,
@ -113,7 +102,6 @@ namespace Emgu.CV.XFeatures2D
/// <returns>The image features founded on the keypoint location</returns>
public GpuMat ComputeDescriptorsRaw(GpuMat image, GpuMat mask, GpuMat keyPoints)
{
//GpuMat descriptors = new GpuMat(keyPoints.Size.Height, DescriptorSize, 1);
GpuMat descriptors = new GpuMat();
ContribInvoke.cudaSURFDetectorCompute(_ptr, image, mask, keyPoints, descriptors, true);
return descriptors;

93
Emgu.CV.Contrib/XFeatures2D/DAISY.cs

@ -0,0 +1,93 @@
//----------------------------------------------------------------------------
// Copyright (C) 2004-2015 by EMGU Corporation. All rights reserved.
//----------------------------------------------------------------------------
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Runtime.InteropServices;
using System.Text;
using Emgu.CV;
using Emgu.CV.CvEnum;
using Emgu.CV.Structure;
using Emgu.CV.Util;
using Emgu.Util;
using Emgu.CV.Features2D;
namespace Emgu.CV.XFeatures2D
{
/// <summary>
/// Daisy descriptor.
/// </summary>
public class DAISY : Feature2D
{
/// <summary>
/// Create DAISY descriptor extractor
/// </summary>
/// <param name="radius">Radius of the descriptor at the initial scale.</param>
/// <param name="qRadius">Amount of radial range division quantity.</param>
/// <param name="qTheta">Amount of angular range division quantity.</param>
/// <param name="qHist">Amount of gradient orientations range division quantity.</param>
/// <param name="norm">Descriptors normalization type.</param>
/// <param name="H">Optional 3x3 homography matrix used to warp the grid of daisy but sampling keypoints remains unwarped on image</param>
/// <param name="interpolation">Switch to disable interpolation for speed improvement at minor quality loss</param>
/// <param name="useOrientation">Sample patterns using keypoints orientation, disabled by default.</param>
public DAISY(float radius = 15, int qRadius = 3, int qTheta = 8,
int qHist = 8, NormalizationType norm = NormalizationType.None, IInputArray H = null,
bool interpolation = true, bool useOrientation = false)
{
// When no homography is supplied, pass an empty InputArray to the native layer.
using (InputArray iaH = H == null ? InputArray.GetEmpty() : H.GetInputArray())
_ptr = ContribInvoke.cveDAISYCreate(radius, qRadius, qTheta, qHist, norm, iaH, interpolation, useOrientation,
ref _feature2D);
}
/// <summary>
/// Normalization type
/// </summary>
// NOTE(review): the numeric values (100-103) appear chosen to mirror the native
// DAISY normalization constants — confirm against the OpenCV xfeatures2d headers.
public enum NormalizationType
{
/// <summary>
/// Will not do any normalization (default)
/// </summary>
None = 100,
/// <summary>
/// Histograms are normalized independently for L2 norm equal to 1.0
/// </summary>
Partial = 101,
/// <summary>
/// Descriptors are normalized for L2 norm equal to 1.0
/// </summary>
Full = 102,
/// <summary>
/// Descriptors are normalized for L2 norm equal to 1.0 but no individual one is bigger than 0.154 as in SIFT
/// </summary>
SIFT = 103
}
/// <summary>
/// Release all the unmanaged resources associated with DAISY
/// </summary>
protected override void DisposeObject()
{
if (_ptr != IntPtr.Zero)
{
ContribInvoke.cveDAISYRelease(ref _ptr);
}
base.DisposeObject();
}
}
public static partial class ContribInvoke
{
// Creates the native cv::xfeatures2d::DAISY; the underlying cv::Feature2D pointer is
// returned through the final ref parameter. Both bool parameters are marshaled via
// CvInvoke.BoolMarshalType to match the native C++ `bool`, consistent with the other
// extern declarations in this module (e.g. cveFreakCreate, cveLATCHCreate) — the
// default P/Invoke bool marshaling (4-byte BOOL) would not match the native signature.
[DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
internal extern static IntPtr cveDAISYCreate(
float radius, int qRadius, int qTheta,
int qHist, DAISY.NormalizationType norm, IntPtr H,
[MarshalAs(CvInvoke.BoolMarshalType)]
bool interpolation,
[MarshalAs(CvInvoke.BoolMarshalType)]
bool useOrientation,
ref IntPtr daisy);
// Releases the native DAISY object and resets the pointer.
[DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
internal extern static void cveDAISYRelease(ref IntPtr daisy);
}
}

16
Emgu.CV.Contrib/XFeatures2D/Freak.cs

@ -28,11 +28,6 @@ namespace Emgu.CV.XFeatures2D
/// </summary>
public class Freak : Feature2D
{
static Freak()
{
CvInvoke.CheckLibraryLoaded();
}
/// <summary>
/// Create a Freak descriptor extractor.
/// </summary>
@ -42,7 +37,7 @@ namespace Emgu.CV.XFeatures2D
/// <param name="nOctaves">Number of octaves covered by the detected keypoints.</param>
public Freak(bool orientationNormalized = true, bool scaleNormalized = true, float patternScale = 22.0f, int nOctaves = 4)
{
_ptr = CvFreakCreate(orientationNormalized, scaleNormalized, patternScale, nOctaves, ref _feature2D);
_ptr = ContribInvoke.cveFreakCreate(orientationNormalized, scaleNormalized, patternScale, nOctaves, ref _feature2D);
}
/// <summary>
@ -51,13 +46,16 @@ namespace Emgu.CV.XFeatures2D
protected override void DisposeObject()
{
if (_ptr != IntPtr.Zero)
CvFreakRelease(ref _ptr);
ContribInvoke.cveFreakRelease(ref _ptr);
base.DisposeObject();
}
}
public static partial class ContribInvoke
{
[DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
internal extern static IntPtr CvFreakCreate(
internal extern static IntPtr cveFreakCreate(
[MarshalAs(CvInvoke.BoolMarshalType)]
bool orientationNormalized,
[MarshalAs(CvInvoke.BoolMarshalType)]
@ -67,7 +65,7 @@ namespace Emgu.CV.XFeatures2D
ref IntPtr feature2D);
[DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
internal extern static void CvFreakRelease(ref IntPtr extractor);
internal extern static void cveFreakRelease(ref IntPtr extractor);
}
}

66
Emgu.CV.Contrib/XFeatures2D/LATCH.cs

@ -0,0 +1,66 @@
//----------------------------------------------------------------------------
// Copyright (C) 2004-2015 by EMGU Corporation. All rights reserved.
//----------------------------------------------------------------------------
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Runtime.InteropServices;
using System.Text;
using Emgu.CV;
using Emgu.CV.CvEnum;
using Emgu.CV.Structure;
using Emgu.CV.Util;
using Emgu.Util;
using Emgu.CV.Features2D;
namespace Emgu.CV.XFeatures2D
{
/// <summary>
/// Class for computing the LATCH descriptor.
/// If you find this code useful, please add a reference to the following paper in your work:
/// Gil Levi and Tal Hassner, "LATCH: Learned Arrangements of Three Patch Codes", arXiv preprint arXiv:1501.03719, 15 Jan. 2015
/// LATCH is a binary descriptor based on learned comparisons of triplets of image patches.
/// </summary>
public class LATCH : Feature2D
{
/// <summary>
/// Create LATCH descriptor extractor
/// </summary>
/// <param name="bytes">The size of the descriptor - can be 64, 32, 16, 8, 4, 2 or 1</param>
/// <param name="rotationInvariance">Whether or not the descriptor should compensate for orientation changes.</param>
/// <param name="halfSsdSize">The size of half of the mini-patches size. For example, if we would like to compare triplets of patches of size 7x7,
/// then the halfSsdSize should be (7-1)/2 = 3.</param>
public LATCH(int bytes = 32, bool rotationInvariance = true, int halfSsdSize = 3)
{
_ptr = ContribInvoke.cveLATCHCreate(bytes, rotationInvariance, halfSsdSize, ref _feature2D);
}
/// <summary>
/// Release all the unmanaged resources associated with LATCH
/// </summary>
protected override void DisposeObject()
{
if (_ptr != IntPtr.Zero)
{
ContribInvoke.cveLATCHRelease(ref _ptr);
}
base.DisposeObject();
}
}
public static partial class ContribInvoke
{
// Creates the native cv::xfeatures2d::LATCH; the underlying feature-2D pointer is
// returned through the final ref parameter. rotationInvariance is marshaled via
// CvInvoke.BoolMarshalType to match the native C++ `bool`.
[DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
internal extern static IntPtr cveLATCHCreate(
int bytes,
[MarshalAs(CvInvoke.BoolMarshalType)]
bool rotationInvariance,
int halfSsdSize,
ref IntPtr extractor);
// Releases the native LATCH object and resets the pointer.
[DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
internal extern static void cveLATCHRelease(ref IntPtr extractor);
}
}

58
Emgu.CV.Contrib/XFeatures2D/LUCID.cs

@ -0,0 +1,58 @@
//----------------------------------------------------------------------------
// Copyright (C) 2004-2015 by EMGU Corporation. All rights reserved.
//----------------------------------------------------------------------------
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Runtime.InteropServices;
using System.Text;
using Emgu.CV;
using Emgu.CV.CvEnum;
using Emgu.CV.Structure;
using Emgu.CV.Util;
using Emgu.Util;
using Emgu.CV.Features2D;
namespace Emgu.CV.XFeatures2D
{
/// <summary>
/// The locally uniform comparison image descriptor:
/// An image descriptor that can be computed very fast, while being
/// about as robust as, for example, SURF or BRIEF.
/// </summary>
public class LUCID : Feature2D
{
/// <summary>
/// Create a locally uniform comparison image descriptor.
/// </summary>
/// <param name="lucidKernel">Kernel for descriptor construction, where 1=3x3, 2=5x5, 3=7x7 and so forth</param>
/// <param name="blurKernel">Kernel for blurring image prior to descriptor construction, where 1=3x3, 2=5x5, 3=7x7 and so forth</param>
public LUCID(int lucidKernel = 1, int blurKernel = 2)
{
_ptr = ContribInvoke.cveLUCIDCreate(lucidKernel, blurKernel, ref _feature2D);
}
/// <summary>
/// Release all the unmanaged resources associated with LUCID
/// </summary>
protected override void DisposeObject()
{
if (_ptr != IntPtr.Zero)
{
ContribInvoke.cveLUCIDRelease(ref _ptr);
}
base.DisposeObject();
}
}
public static partial class ContribInvoke
{
// Creates the native cv::xfeatures2d::LUCID; the underlying feature-2D pointer is
// returned through the final ref parameter.
[DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
internal extern static IntPtr cveLUCIDCreate(int lucidKernel, int blurKernel, ref IntPtr feature2D);
// Releases the native LUCID object and resets the pointer.
[DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
internal extern static void cveLUCIDRelease(ref IntPtr extractor);
}
}

14
Emgu.CV.Contrib/XFeatures2D/SIFTDetector.cs → Emgu.CV.Contrib/XFeatures2D/SIFT.cs

@ -17,22 +17,22 @@ namespace Emgu.CV.XFeatures2D
/// <summary>
/// Wrapped SIFT detector
/// </summary>
public class SIFTDetector : Feature2D
public class SIFT : Feature2D
{
/// <summary>
/// Create a SIFTDetector using the specific values
/// Create a SIFT using the specific values
/// </summary>
/// <param name="nFeatures">The desired number of features. Use 0 for un-restricted number of features</param>
/// <param name="nOctaveLayers">The number of octave layers. Use 3 for default</param>
/// <param name="contrastThreshold">Contrast threshold. Use 0.04 as default</param>
/// <param name="edgeThreshold">Detector parameter. Use 10.0 as default</param>
/// <param name="sigma">Use 1.6 as default</param>
public SIFTDetector(
public SIFT(
int nFeatures = 0, int nOctaveLayers = 3,
double contrastThreshold = 0.04, double edgeThreshold = 10.0,
double sigma = 1.6)
{
_ptr = ContribInvoke.CvSIFTDetectorCreate(nFeatures, nOctaveLayers, contrastThreshold, edgeThreshold, sigma, ref _feature2D);
_ptr = ContribInvoke.cveSIFTCreate(nFeatures, nOctaveLayers, contrastThreshold, edgeThreshold, sigma, ref _feature2D);
}
/// <summary>
@ -41,7 +41,7 @@ namespace Emgu.CV.XFeatures2D
protected override void DisposeObject()
{
if (_ptr != IntPtr.Zero)
ContribInvoke.CvSIFTDetectorRelease(ref _ptr);
ContribInvoke.cveSIFTRelease(ref _ptr);
base.DisposeObject();
}
}
@ -49,12 +49,12 @@ namespace Emgu.CV.XFeatures2D
public static partial class ContribInvoke
{
[DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
internal extern static IntPtr CvSIFTDetectorCreate(
internal extern static IntPtr cveSIFTCreate(
int nFeatures, int nOctaveLayers,
double contrastThreshold, double edgeThreshold,
double sigma, ref IntPtr feature2D);
[DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
internal extern static void CvSIFTDetectorRelease(ref IntPtr detector);
internal extern static void cveSIFTRelease(ref IntPtr detector);
}
}

14
Emgu.CV.Contrib/XFeatures2D/SURFDetector.cs → Emgu.CV.Contrib/XFeatures2D/SURF.cs

@ -14,7 +14,7 @@ namespace Emgu.CV.XFeatures2D
/// <summary>
/// Class for extracting Speeded Up Robust Features from an image
/// </summary>
public class SURFDetector : Feature2D
public class SURF : Feature2D
{
/// <summary>
@ -40,9 +40,9 @@ namespace Emgu.CV.XFeatures2D
/// False means that detector computes orientation of each feature.
/// True means that the orientation is not computed (which is much, much faster).
/// For example, if you match images from a stereo pair, or do image stitching, the matched features likely have very similar angles, and you can speed up feature extraction by setting upright=true.</param>
public SURFDetector(double hessianThresh, int nOctaves = 4, int nOctaveLayers = 2, bool extended = true, bool upright = false)
public SURF(double hessianThresh, int nOctaves = 4, int nOctaveLayers = 2, bool extended = true, bool upright = false)
{
_ptr = ContribInvoke.CvSURFDetectorCreate(hessianThresh, nOctaves, nOctaveLayers, extended, upright, ref _feature2D);
_ptr = ContribInvoke.cveSURFCreate(hessianThresh, nOctaves, nOctaveLayers, extended, upright, ref _feature2D);
}
/// <summary>
@ -51,13 +51,13 @@ namespace Emgu.CV.XFeatures2D
protected override void DisposeObject()
{
if (_ptr != IntPtr.Zero)
ContribInvoke.CvSURFDetectorRelease(ref _ptr);
ContribInvoke.cveSURFRelease(ref _ptr);
base.DisposeObject();
}
}
/// <summary>
/// This class wraps the functional calls to the opencv_nonfree module
/// This class wraps the functional calls to the opencv contrib modules
/// </summary>
public static partial class ContribInvoke
{
@ -67,7 +67,7 @@ namespace Emgu.CV.XFeatures2D
}
[DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
internal extern static IntPtr CvSURFDetectorCreate(
internal extern static IntPtr cveSURFCreate(
double hessianThresh, int nOctaves, int nOctaveLayers,
[MarshalAs(CvInvoke.BoolMarshalType)]
bool extended,
@ -76,6 +76,6 @@ namespace Emgu.CV.XFeatures2D
ref IntPtr feature2D);
[DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
internal extern static void CvSURFDetectorRelease(ref IntPtr detector);
internal extern static void cveSURFRelease(ref IntPtr detector);
}
}

20
Emgu.CV.Contrib/XFeatures2D/StarDetector.cs

@ -16,11 +16,6 @@ namespace Emgu.CV.XFeatures2D
/// </summary>
public class StarDetector : Feature2D
{
static StarDetector()
{
CvInvoke.CheckLibraryLoaded();
}
/// <summary>
/// Create a star detector with the specific parameters
/// </summary>
@ -45,7 +40,7 @@ namespace Emgu.CV.XFeatures2D
/// </param>
public StarDetector(int maxSize = 45, int responseThreshold = 30, int lineThresholdProjected = 10, int lineThresholdBinarized = 8, int suppressNonmaxSize = 5)
{
_ptr = CvStarDetectorCreate(maxSize, responseThreshold, lineThresholdProjected, lineThresholdBinarized, suppressNonmaxSize, ref _feature2D);
_ptr = ContribInvoke.cveStarDetectorCreate(maxSize, responseThreshold, lineThresholdProjected, lineThresholdBinarized, suppressNonmaxSize, ref _feature2D);
}
/// <summary>
@ -54,15 +49,20 @@ namespace Emgu.CV.XFeatures2D
protected override void DisposeObject()
{
if (_ptr != IntPtr.Zero)
CvStarDetectorRelease(ref _ptr);
ContribInvoke.cveStarDetectorRelease(ref _ptr);
base.DisposeObject();
}
[DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
internal extern static IntPtr CvStarDetectorCreate(int maxSize, int responseThreshold, int lineThresholdProjected, int lineThresholdBinarized, int suppressNonmaxSize, ref IntPtr feature2D);
}
public static partial class ContribInvoke
{
[DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
internal extern static void CvStarDetectorRelease(ref IntPtr detector);
internal extern static IntPtr cveStarDetectorCreate(int maxSize, int responseThreshold, int lineThresholdProjected, int lineThresholdBinarized, int suppressNonmaxSize, ref IntPtr feature2D);
[DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
internal extern static void cveStarDetectorRelease(ref IntPtr detector);
}
}

4
Emgu.CV.Example/SURFFeature/DrawMatches.cs

@ -35,7 +35,7 @@ namespace SURFFeatureExample
#if !IOS
if ( CudaInvoke.HasCuda)
{
CudaSURFDetector surfCuda = new CudaSURFDetector((float) hessianThresh);
CudaSURF surfCuda = new CudaSURF((float) hessianThresh);
using (GpuMat gpuModelImage = new GpuMat(modelImage))
//extract features from the object image
using (GpuMat gpuModelKeyPoints = surfCuda.DetectKeyPointsRaw(gpuModelImage, null))
@ -79,7 +79,7 @@ namespace SURFFeatureExample
using (UMat uModelImage = modelImage.ToUMat(AccessType.Read))
using (UMat uObservedImage = observedImage.ToUMat(AccessType.Read))
{
SURFDetector surfCPU = new SURFDetector(hessianThresh);
SURF surfCPU = new SURF(hessianThresh);
//extract features from the object image
UMat modelDescriptors = new UMat();
surfCPU.DetectAndCompute(uModelImage, null, modelKeyPoints, modelDescriptors, false);

4
Emgu.CV.Example/TrafficSignRecognition/StopSignDetector.cs

@ -24,13 +24,13 @@ namespace TrafficSignRecognition
private Mat _modelDescriptors;
private BFMatcher _modelDescriptorMatcher;
//private Features2DTracker<float> _tracker;
private SURFDetector _detector;
private SURF _detector;
private VectorOfPoint _octagon;
public StopSignDetector(IInputArray stopSignModel)
{
_detector = new SURFDetector(500);
_detector = new SURF(500);
using (Mat redMask = new Mat())
{
GetRedPixelMask(stopSignModel, redMask);

8
Emgu.CV.Extern/xfeatures2d/nonfree_c.cpp

@ -7,7 +7,7 @@
#include "nonfree_c.h"
//SIFTDetector
cv::xfeatures2d::SIFT* CvSIFTDetectorCreate(
cv::xfeatures2d::SIFT* cveSIFTCreate(
int nFeatures, int nOctaveLayers,
double contrastThreshold, double edgeThreshold,
double sigma, cv::Feature2D** feature2D)
@ -19,7 +19,7 @@ cv::xfeatures2d::SIFT* CvSIFTDetectorCreate(
return siftPtr.get();
}
void CvSIFTDetectorRelease(cv::xfeatures2d::SIFT** detector)
void cveSIFTRelease(cv::xfeatures2d::SIFT** detector)
{
delete *detector;
*detector = 0;
@ -65,7 +65,7 @@ void CvSIFTDetectorComputeDescriptors(cv::SIFT* detector, IplImage* image, std::
}*/
//SURFDetector
cv::xfeatures2d::SURF* CvSURFDetectorCreate(double hessianThresh, int nOctaves, int nOctaveLayers, bool extended, bool upright, cv::Feature2D** feature2D)
cv::xfeatures2d::SURF* cveSURFCreate(double hessianThresh, int nOctaves, int nOctaveLayers, bool extended, bool upright, cv::Feature2D** feature2D)
{
cv::Ptr<cv::xfeatures2d::SURF> surfPtr = cv::xfeatures2d::SURF::create(hessianThresh, nOctaves, nOctaveLayers, extended, upright);
surfPtr.addref();
@ -74,7 +74,7 @@ cv::xfeatures2d::SURF* CvSURFDetectorCreate(double hessianThresh, int nOctaves,
return surfPtr.get();
}
void CvSURFDetectorRelease(cv::xfeatures2d::SURF** detector)
void cveSURFRelease(cv::xfeatures2d::SURF** detector)
{
delete *detector;
*detector = 0;

8
Emgu.CV.Extern/xfeatures2d/nonfree_c.h

@ -13,15 +13,15 @@
#include "opencv2/xfeatures2d.hpp"
//SIFTDetector
CVAPI(cv::xfeatures2d::SIFT*) CvSIFTDetectorCreate(
CVAPI(cv::xfeatures2d::SIFT*) cveSIFTCreate(
int nFeatures, int nOctaveLayers,
double contrastThreshold, double edgeThreshold,
double sigma, cv::Feature2D** feature2D);
CVAPI(void) CvSIFTDetectorRelease(cv::xfeatures2d::SIFT** detector);
CVAPI(void) cveSIFTRelease(cv::xfeatures2d::SIFT** detector);
//SURFDetector
CVAPI(cv::xfeatures2d::SURF*) CvSURFDetectorCreate(double hessianThresh, int nOctaves, int nOctaveLayers, bool extended, bool upright, cv::Feature2D** feature2D);
CVAPI(void) CvSURFDetectorRelease(cv::xfeatures2d::SURF** detector);
CVAPI(cv::xfeatures2d::SURF*) cveSURFCreate(double hessianThresh, int nOctaves, int nOctaveLayers, bool extended, bool upright, cv::Feature2D** feature2D);
CVAPI(void) cveSURFRelease(cv::xfeatures2d::SURF** detector);
/*
//----------------------------------------------------------------------------

24
Emgu.CV.Extern/xfeatures2d/xfeatures2d_c.cpp

@ -7,7 +7,7 @@
#include "xfeatures2d_c.h"
//StarDetector
cv::xfeatures2d::StarDetector* CvStarDetectorCreate(int maxSize, int responseThreshold, int lineThresholdProjected, int lineThresholdBinarized, int suppressNonmaxSize, cv::Feature2D** feature2D)
cv::xfeatures2d::StarDetector* cveStarDetectorCreate(int maxSize, int responseThreshold, int lineThresholdProjected, int lineThresholdBinarized, int suppressNonmaxSize, cv::Feature2D** feature2D)
{
cv::Ptr<cv::xfeatures2d::StarDetector> detectorPtr = cv::xfeatures2d::StarDetector::create(maxSize, responseThreshold, lineThresholdProjected, lineThresholdBinarized, suppressNonmaxSize);
detectorPtr.addref();
@ -15,7 +15,7 @@ cv::xfeatures2d::StarDetector* CvStarDetectorCreate(int maxSize, int responseThr
return detectorPtr.get();
}
void CvStarDetectorRelease(cv::xfeatures2d::StarDetector** detector)
void cveStarDetectorRelease(cv::xfeatures2d::StarDetector** detector)
{
delete *detector;
*detector = 0;
@ -118,11 +118,11 @@ void cveLUCIDRelease(cv::xfeatures2d::LUCID** lucid)
}
//LATCH
cv::xfeatures2d::LATCH* cveLATCHCreate(int bytes, bool rotationInvariance, int halfSsdSize, cv::DescriptorExtractor** extractor)
cv::xfeatures2d::LATCH* cveLATCHCreate(int bytes, bool rotationInvariance, int halfSsdSize, cv::Feature2D** extractor)
{
cv::Ptr<cv::xfeatures2d::LATCH> latchPtr = cv::xfeatures2d::LATCH::create(bytes, rotationInvariance, halfSsdSize);
latchPtr.addref();
*extractor = dynamic_cast<cv::DescriptorExtractor*>(latchPtr.get());
*extractor = dynamic_cast<cv::Feature2D*>(latchPtr.get());
return latchPtr.get();
}
void cveLATCHRelease(cv::xfeatures2d::LATCH** latch)
@ -130,3 +130,19 @@ void cveLATCHRelease(cv::xfeatures2d::LATCH** latch)
delete *latch;
*latch = 0;
}
//DAISY
cv::xfeatures2d::DAISY* cveDAISYCreate(float radius, int qRadius, int qTheta,
int qHist, int norm, cv::_InputArray* H,
bool interpolation, bool useOrientation, cv::Feature2D** extractor)
{
cv::Ptr<cv::xfeatures2d::DAISY> daisyPtr = cv::xfeatures2d::DAISY::create(radius, qRadius, qTheta, qHist, norm, H ? *H : (cv::_InputArray) cv::noArray(), interpolation, useOrientation);
daisyPtr.addref();
*extractor = dynamic_cast<cv::Feature2D*>(daisyPtr.get());
return daisyPtr.get();
}
void cveDAISYRelease(cv::xfeatures2d::DAISY** daisy)
{
delete* daisy;
*daisy = 0;
}

12
Emgu.CV.Extern/xfeatures2d/xfeatures2d_c.h

@ -13,8 +13,8 @@
#include "vectors_c.h"
//StarDetector
CVAPI(cv::xfeatures2d::StarDetector*) CvStarDetectorCreate(int maxSize, int responseThreshold, int lineThresholdProjected, int lineThresholdBinarized, int suppressNonmaxSize, cv::Feature2D** feature2D);
CVAPI(void) CvStarDetectorRelease(cv::xfeatures2d::StarDetector** detector);
CVAPI(cv::xfeatures2d::StarDetector*) cveStarDetectorCreate(int maxSize, int responseThreshold, int lineThresholdProjected, int lineThresholdBinarized, int suppressNonmaxSize, cv::Feature2D** feature2D);
CVAPI(void) cveStarDetectorRelease(cv::xfeatures2d::StarDetector** detector);
/*
//GridAdaptedFeatureDetector
@ -45,13 +45,13 @@ CVAPI(void) cveLUCIDRelease(cv::xfeatures2d::LUCID** lucid);
//LATCH
CVAPI(cv::xfeatures2d::LATCH*) cveLATCHCreate(int bytes, bool rotationInvariance, int halfSsdSize, cv::DescriptorExtractor** extractor);
CVAPI(cv::xfeatures2d::LATCH*) cveLATCHCreate(int bytes, bool rotationInvariance, int halfSsdSize, cv::Feature2D** extractor);
CVAPI(void) cveLATCHRelease(cv::xfeatures2d::LATCH** lucid);
//DAISY
CVAPI(cv::xfeatures2d::DAISY*) cveDAISYCreate(float radius = 15, int q_radius = 3, int q_theta = 8,
int q_hist = 8, int norm, cv::_InputArray* H,
bool interpolation, bool use_orientation = false, cv::DescriptorExtractor** extractor);
CVAPI(cv::xfeatures2d::DAISY*) cveDAISYCreate(float radius, int qRadius, int qTheta,
int qHist, int norm, cv::_InputArray* H,
bool interpolation, bool useOrientation, cv::Feature2D** extractor);
CVAPI(void) cveDAISYRelease(cv::xfeatures2d::DAISY** daisy);
#endif

6
Emgu.CV.Test/AutoTestCuda.cs

@ -1,8 +1,8 @@
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
// Copyright (C) 2004-2015 by EMGU Corporation. All rights reserved.
//----------------------------------------------------------------------------
using System;
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Drawing;
@ -524,7 +524,7 @@ namespace Emgu.CV.Test
EmguAssert.IsTrue(gpuMat.ToMat().Equals(image.Mat));
CudaSURFDetector cudaSurf = new CudaSURFDetector(100.0f, 2, 4, false, 0.01f, false);
CudaSURF cudaSurf = new CudaSURF(100.0f, 2, 4, false, 0.01f, false);
GpuMat cudaKpts = cudaSurf.DetectKeyPointsRaw(gpuMat, null);
VectorOfKeyPoint kpts = new VectorOfKeyPoint();
cudaSurf.DownloadKeypoints(cudaKpts, kpts);

70
Emgu.CV.Test/AutoTestFeatures2d.cs

@ -37,14 +37,40 @@ namespace Emgu.CV.Test
[Test]
public void TestBrisk()
{
Brisk detector = new Brisk(30, 3, 1.0f);
Brisk detector = new Brisk();
EmguAssert.IsTrue(TestFeature2DTracker(detector, detector), "Unable to find homography matrix");
}
#endif
[Test]
public void TestDAISY()
{
SURF surf = new SURF(300);
DAISY daisy = new DAISY();
EmguAssert.IsTrue(TestFeature2DTracker(surf, daisy), "Unable to find homography matrix");
}
[Test]
public void TestLATCH()
{
SURF surf = new SURF(300);
LATCH latch = new LATCH();
EmguAssert.IsTrue(TestFeature2DTracker(surf, latch), "Unable to find homography matrix");
}
/*
[Test]
public void TestLUCID()
{
SURF surf = new SURF(300);
LUCID lucid = new LUCID();
EmguAssert.IsTrue(TestFeature2DTracker(surf, lucid ), "Unable to find homography matrix");
}*/
[Test]
public void TestSIFT()
{
SIFTDetector detector = new SIFTDetector();
SIFT detector = new SIFT();
EmguAssert.IsTrue(TestFeature2DTracker(detector, detector), "Unable to find homography matrix");
}
@ -53,7 +79,7 @@ namespace Emgu.CV.Test
public void TestDense()
{
DenseFeatureDetector detector = new DenseFeatureDetector(1.0f, 1, 0.1f, 6, 0, true, false);
SIFTDetector extractor = new SIFTDetector();
SIFT extractor = new SIFT();
EmguAssert.IsTrue(TestFeature2DTracker(detector, extractor), "Unable to find homography matrix");
}*/
@ -61,7 +87,7 @@ namespace Emgu.CV.Test
[Test]
public void TestSURF()
{
SURFDetector detector = new SURFDetector(500);
SURF detector = new SURF(500);
//ParamDef[] parameters = detector.GetParams();
EmguAssert.IsTrue(TestFeature2DTracker(detector, detector), "Unable to find homography matrix");
}
@ -69,7 +95,7 @@ namespace Emgu.CV.Test
[Test]
public void TestSURFBlankImage()
{
SURFDetector detector = new SURFDetector(500);
SURF detector = new SURF(500);
Image<Gray, Byte> img = new Image<Gray, byte>(1024, 900);
VectorOfKeyPoint vp = new VectorOfKeyPoint();
Mat descriptors = new Mat();
@ -81,8 +107,8 @@ namespace Emgu.CV.Test
{
StarDetector keyPointDetector = new StarDetector();
//SURFDetector descriptorGenerator = new SURFDetector(500, false);
SIFTDetector descriptorGenerator = new SIFTDetector();
//SURF descriptorGenerator = new SURF(500, false);
SIFT descriptorGenerator = new SIFT();
//ParamDef[] parameters = keyPointDetector.GetParams();
TestFeature2DTracker(keyPointDetector, descriptorGenerator);
}
@ -91,7 +117,7 @@ namespace Emgu.CV.Test
public void TestGFTTDetector()
{
GFTTDetector keyPointDetector = new GFTTDetector(1000, 0.01, 1, 3, false, 0.04);
SIFTDetector descriptorGenerator = new SIFTDetector();
SIFT descriptorGenerator = new SIFT();
//ParamDef[] parameters = keyPointDetector.GetParams();
TestFeature2DTracker(keyPointDetector, descriptorGenerator);
}
@ -101,7 +127,7 @@ namespace Emgu.CV.Test
public void TestDenseFeatureDetector()
{
DenseFeatureDetector keyPointDetector = new DenseFeatureDetector(1, 1, 0.1f, 6, 0, true, false);
SIFTDetector descriptorGenerator = new SIFTDetector();
SIFT descriptorGenerator = new SIFT();
TestFeature2DTracker(keyPointDetector, descriptorGenerator);
}*/
@ -112,8 +138,8 @@ namespace Emgu.CV.Test
LDetector keyPointDetector = new LDetector();
keyPointDetector.Init();
//SURFDetector descriptorGenerator = new SURFDetector(500, false);
SIFTDetector descriptorGenerator = new SIFTDetector(4, 3, -1, SIFTDetector.AngleMode.AVERAGE_ANGLE, 0.04 / 3 / 2.0, 10.0, 3.0, true, true);
//SURF descriptorGenerator = new SURF(500, false);
SIFT descriptorGenerator = new SIFT(4, 3, -1, SIFT.AngleMode.AVERAGE_ANGLE, 0.04 / 3 / 2.0, 10.0, 3.0, true, true);
TestFeature2DTracker(keyPointDetector, descriptorGenerator);
}*/
@ -123,7 +149,7 @@ namespace Emgu.CV.Test
public void TestMSER()
{
MSERDetector keyPointDetector = new MSERDetector();
SIFTDetector descriptorGenerator = new SIFTDetector();
SIFT descriptorGenerator = new SIFT();
//ParamDef[] parameters = keyPointDetector.GetParams();
TestFeature2DTracker(keyPointDetector, descriptorGenerator);
}
@ -183,7 +209,7 @@ namespace Emgu.CV.Test
feature2D = keyPointDetector as Feature2D;
}
Image<Gray, Byte> modelImage = EmguAssert.LoadImage<Gray, byte>("box.png");
Mat modelImage = EmguAssert.LoadMat("box.png");
//Image<Gray, Byte> modelImage = new Image<Gray, byte>("stop.jpg");
//modelImage = modelImage.Resize(400, 400, true);
@ -234,9 +260,9 @@ namespace Emgu.CV.Test
#endregion
//Merge the object image and the observed image into one big image for display
Image<Gray, Byte> res = modelImage.ConcateVertical(observedImage);
Image<Gray, Byte> res = modelImage.ToImage<Gray, Byte>().ConcateVertical(observedImage);
Rectangle rect = modelImage.ROI;
Rectangle rect = new Rectangle(Point.Empty, modelImage.Size);
PointF[] pts = new PointF[] {
new PointF(rect.Left, rect.Bottom),
new PointF(rect.Right, rect.Bottom),
@ -339,9 +365,9 @@ namespace Emgu.CV.Test
Image<Bgr, byte> box = EmguAssert.LoadImage<Bgr, byte>("box.png");
Image<Gray, byte> gray = box.Convert<Gray, Byte>();
SURFDetector surf = new SURFDetector(400);
SURF surf = new SURF(400);
OpponentColorDescriptorExtractor opponentSurf = new OpponentColorDescriptorExtractor(surf);
SIFTDetector sift = new SIFTDetector();
SIFT sift = new SIFT();
OpponentColorDescriptorExtractor opponentSift = new OpponentColorDescriptorExtractor(sift);
//using (Util.VectorOfKeyPoint kpts = surf.DetectKeyPointsRaw(gray, null))
using (Util.VectorOfKeyPoint kpts = new VectorOfKeyPoint() )
@ -377,7 +403,7 @@ namespace Emgu.CV.Test
{
//Trace.WriteLine("Size of MCvSURFParams: " + Marshal.SizeOf(typeof(MCvSURFParams)));
Image<Gray, byte> box = EmguAssert.LoadImage<Gray, byte>("box.png");
SURFDetector detector = new SURFDetector(400);
SURF detector = new SURF(400);
Stopwatch watch = Stopwatch.StartNew();
VectorOfKeyPoint vp1 = new VectorOfKeyPoint();
@ -439,7 +465,7 @@ namespace Emgu.CV.Test
public void TestGridAdaptedFeatureDetectorRepeatedRun()
{
Image<Gray, byte> box = EmguAssert.LoadImage<Gray, byte>("box.png");
SURFDetector surfdetector = new SURFDetector(400);
SURF surfdetector = new SURF(400);
GridAdaptedFeatureDetector detector = new GridAdaptedFeatureDetector(surfdetector, 1000, 2, 2);
VectorOfKeyPoint kpts1 = new VectorOfKeyPoint();
@ -454,7 +480,7 @@ namespace Emgu.CV.Test
public void TestSURFDetectorRepeatedRun()
{
Image<Gray, byte> box = EmguAssert.LoadImage<Gray, byte>("box.png");
SURFDetector detector = new SURFDetector(400);
SURF detector = new SURF(400);
Image<Gray, Byte> boxInScene = EmguAssert.LoadImage<Gray, byte>("box_in_scene.png");
ImageFeature<float>[] features1 = detector.DetectAndCompute(box, null);
Features2DTracker<float> tracker = new Features2DTracker<float>(features1);
@ -492,7 +518,7 @@ namespace Emgu.CV.Test
public void TestSelfMatch()
{
Image<Gray, byte> box = EmguAssert.LoadImage<Gray, byte>("box.png");
SURFDetector surfDetector = new SURFDetector(300);
SURF surfDetector = new SURF(300);
ImageFeature<float>[] features1 = surfDetector.DetectAndCompute(box, null);
Features2DTracker<float> tracker = new Features2DTracker<float>(features1);
HomographyMatrix m = tracker.Detect(features1, 0.8);
@ -535,7 +561,7 @@ namespace Emgu.CV.Test
public void TestBOWKmeansTrainer()
{
Image<Gray, byte> box = EmguAssert.LoadImage<Gray, byte>("box.png");
SURFDetector detector = new SURFDetector(500);
SURF detector = new SURF(500);
VectorOfKeyPoint kpts = new VectorOfKeyPoint();
Mat descriptors = new Mat();
detector.DetectAndCompute(box, null, kpts, descriptors, false);

2
Emgu.CV.Test/AutoTestVarious.cs

@ -2129,7 +2129,7 @@ namespace Emgu.CV.Test
using (Image<Gray, Byte> gray = image.Convert<Gray, byte>())
using (RTreeClassifier<Bgr> classifier = new RTreeClassifier<Bgr>())
{
SURFDetector surf = new SURFDetector(300);
SURF surf = new SURF(300);
MKeyPoint[] keypoints = surf.Detect(gray, null);
Point[] points = Array.ConvertAll<MKeyPoint, Point>(keypoints, delegate(MKeyPoint kp) {
return Point.Round(kp.Point); });

4
Emgu.CV.Test/Class1.cs

@ -598,7 +598,7 @@ namespace Emgu.CV.Test
using (Capture capture = new Capture())
using (GaussianMotionFilter motionFilter = new GaussianMotionFilter())
//using (Features2D.FastDetector detector = new Features2D.FastDetector(10, true))
using (Features2D.SURFDetector detector = new Features2D.SURFDetector(500, false))
using (Features2D.SURF detector = new Features2D.SURF(500, false))
//using (Features2D.ORBDetector detector = new Features2D.ORBDetector(500))
using (OnePassStabilizer stabilizer = new OnePassStabilizer(capture))
{
@ -648,7 +648,7 @@ namespace Emgu.CV.Test
using (Capture capture = new Capture("tree.avi"))
using (GaussianMotionFilter motionFilter = new GaussianMotionFilter(15, -1.0f))
//using (Features2D.FastDetector detector = new Features2D.FastDetector(10, true))
//using (Features2D.SURFDetector detector = new Features2D.SURFDetector(500, false))
//using (Features2D.SURF detector = new Features2D.SURF(500, false))
//using (Features2D.ORBDetector detector = new Features2D.ORBDetector(500))
using (CaptureFrameSource frameSource = new CaptureFrameSource(capture))
using (TwoPassStabilizer stabilizer = new TwoPassStabilizer(frameSource))

2
Emgu.CV/Features2D/Feature2D.cs

@ -151,8 +151,6 @@ namespace Emgu.CV.Features2D
return Feature2DInvoke.CvFeature2DGetDescriptorSize(_feature2D);
}
}
}
internal partial class Feature2DInvoke

Loading…
Cancel
Save