
Prepend PInvoke functions with "cve"

Branch: pull/768/merge
Author: Canming Huang (1 year ago)
Commit: 04a6fcbfec
Changed files (14):

1. Emgu.CV.Extern/calib3d/calib3d_c.h (2 changed lines)
2. Emgu.CV.Extern/dataLogger.cpp (18 changed lines)
3. Emgu.CV.Extern/dataLogger.h (8 changed lines)
4. Emgu.CV.Extern/features2d/features2d_c.h (20 changed lines)
5. Emgu.CV.Extern/features2d/keypointDetectors.cpp (20 changed lines)
6. Emgu.CV.Extern/ml/ml.cpp (8 changed lines)
7. Emgu.CV.Extern/ml/ml_c.h (6 changed lines)
8. Emgu.CV.Extern/video/video_c.cpp (2 changed lines)
9. Emgu.CV/Features2D/Feature2D.cs (20 changed lines)
10. Emgu.CV/Features2D/Features2DToolbox.cs (24 changed lines)
11. Emgu.CV/Ml/MlInvoke.cs (16 changed lines)
12. Emgu.CV/Ml/StatModel.cs (6 changed lines)
13. Emgu.CV/Util/CvInvokeUtil.cs (8 changed lines)
14. Emgu.CV/Util/DataLogger.cs (8 changed lines)
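For context: the managed layer binds to the native exports by entry-point name, so every rename in the C/C++ files below has to be mirrored in the matching DllImport declaration, otherwise the call fails at runtime with an EntryPointNotFoundException. A minimal C# sketch of the pattern; the library name "cvextern" and the Cdecl calling convention are the usual Emgu.CV values but are stated here as assumptions (the real declarations use CvInvoke.ExternLibrary and CvInvoke.CvCallingConvention, as the diffs show):

using System;
using System.Runtime.InteropServices;

internal static class RenameSketch
{
    // Before this commit the native export was "DataLoggerCreate";
    // after it, the export (and therefore the entry point) is "cveDataLoggerCreate".
    // "cvextern" and Cdecl are assumptions standing in for CvInvoke.ExternLibrary / CvInvoke.CvCallingConvention.
    [DllImport("cvextern", CallingConvention = CallingConvention.Cdecl, EntryPoint = "cveDataLoggerCreate")]
    internal static extern IntPtr cveDataLoggerCreate(int logLevel, int loggerId);
}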

Emgu.CV.Extern/calib3d/calib3d_c.h (2 changed lines)

@@ -45,7 +45,7 @@ CVAPI(void) cveStereoMatcherCompute(cv::StereoMatcher* disparitySolver, cv::_In
 CVAPI(void) cveStereoMatcherRelease(cv::Ptr<cv::StereoMatcher>** sharedPtr);
 //2D Tracker
-CVAPI(bool) getHomographyMatrixFromMatchedFeatures(std::vector<cv::KeyPoint>* model, std::vector<cv::KeyPoint>* observed, std::vector< std::vector< cv::DMatch > >* matches, cv::Mat* mask, double randsacThreshold, cv::Mat* homography);
+CVAPI(bool) cveGetHomographyMatrixFromMatchedFeatures(std::vector<cv::KeyPoint>* model, std::vector<cv::KeyPoint>* observed, std::vector< std::vector< cv::DMatch > >* matches, cv::Mat* mask, double randsacThreshold, cv::Mat* homography);
 //Find circles grid
 CVAPI(bool) cveFindCirclesGrid(cv::_InputArray* image, CvSize* patternSize, cv::_OutputArray* centers, int flags, cv::Feature2D* blobDetector);
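The renamed cveGetHomographyMatrixFromMatchedFeatures export is reached from managed code through Features2DToolbox (see the Features2DToolbox.cs changes further down). A minimal usage sketch, assuming the key points, matches and mask were produced by an earlier detect-and-match step; the helper itself is hypothetical:

using Emgu.CV;
using Emgu.CV.Features2D;
using Emgu.CV.Util;

static class HomographySketch
{
    // Hypothetical helper: inputs are assumed to come from a prior feature
    // detection and descriptor matching step (see the Feature2D example below).
    public static Mat Estimate(
        VectorOfKeyPoint modelKeyPoints,
        VectorOfKeyPoint observedKeyPoints,
        VectorOfVectorOfDMatch matches,
        Mat mask)
    {
        // Wraps the native cveGetHomographyMatrixFromMatchedFeatures export;
        // returns null when no homography can be estimated from the masked matches.
        return Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(
            modelKeyPoints, observedKeyPoints, matches, mask, 2);
    }
}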

Emgu.CV.Extern/dataLogger.cpp (18 changed lines)

@@ -8,16 +8,26 @@
 using namespace emgu;
-DataLogger* DataLoggerCreate(int logLevel, int loggerId) { return new DataLogger(logLevel, loggerId); }
+DataLogger* cveDataLoggerCreate(int logLevel, int loggerId)
+{
+return new DataLogger(logLevel, loggerId);
+}
-void DataLoggerRelease(DataLogger** logger) { if (*logger) { delete *logger; *logger = 0; } }
+void cveDataLoggerRelease(DataLogger** logger)
+{
+if (*logger)
+{
+delete *logger;
+*logger = 0;
+}
+}
-void DataLoggerRegisterCallback(DataLogger* logger, DataCallback dataCallback )
+void cveDataLoggerRegisterCallback(DataLogger* logger, DataCallback dataCallback )
 {
 logger->callback = dataCallback;
 }
-void DataLoggerLog(DataLogger* logger, void* data, int logLevel)
+void cveDataLoggerLog(DataLogger* logger, void* data, int logLevel)
 {
 logger->log(data, logLevel);
 }

Emgu.CV.Extern/dataLogger.h (8 changed lines)

@@ -62,12 +62,12 @@ namespace emgu {
 };
 /* DataLogger */
-CVAPI(emgu::DataLogger*) DataLoggerCreate(int logLevel, int loggerId);
+CVAPI(emgu::DataLogger*) cveDataLoggerCreate(int logLevel, int loggerId);
-CVAPI(void) DataLoggerRelease(emgu::DataLogger** logger);
+CVAPI(void) cveDataLoggerRelease(emgu::DataLogger** logger);
-CVAPI(void) DataLoggerRegisterCallback(emgu::DataLogger* logger, emgu::DataCallback messageCallback );
+CVAPI(void) cveDataLoggerRegisterCallback(emgu::DataLogger* logger, emgu::DataCallback messageCallback );
-CVAPI(void) DataLoggerLog(emgu::DataLogger* logger, void* data, int logLevel);
+CVAPI(void) cveDataLoggerLog(emgu::DataLogger* logger, void* data, int logLevel);
 #endif

Emgu.CV.Extern/features2d/features2d_c.h (20 changed lines)

@@ -94,7 +94,7 @@ CVAPI(cv::SimpleBlobDetector::Params*) cveSimpleBlobDetectorParamsCreate();
 CVAPI(void) cveSimpleBlobDetectorParamsRelease(cv::SimpleBlobDetector::Params** params);
 // Draw keypoints.
-CVAPI(void) drawKeypoints(
+CVAPI(void) cveDrawKeypoints(
 cv::_InputArray* image,
 const std::vector<cv::KeyPoint>* keypoints,
 cv::_InputOutputArray* outImage,
@@ -102,7 +102,7 @@ CVAPI(void) drawKeypoints(
 int flags);
 // Draws matches of keypoints from two images on output image.
-CVAPI(void) drawMatchedFeatures1(
+CVAPI(void) cveDrawMatchedFeatures1(
 cv::_InputArray* img1,
 const std::vector<cv::KeyPoint>* keypoints1,
 cv::_InputArray* img2,
@@ -114,7 +114,7 @@ CVAPI(void) drawMatchedFeatures1(
 std::vector< unsigned char >* matchesMask,
 int flags);
-CVAPI(void) drawMatchedFeatures2(
+CVAPI(void) cveDrawMatchedFeatures2(
 cv::_InputArray* img1,
 const std::vector<cv::KeyPoint>* keypoints1,
 cv::_InputArray* img2,
@@ -126,7 +126,7 @@ CVAPI(void) drawMatchedFeatures2(
 std::vector< std::vector< unsigned char > >* matchesMask,
 int flags);
-CVAPI(void) drawMatchedFeatures3(
+CVAPI(void) cveDrawMatchedFeatures3(
 cv::_InputArray* img1, const std::vector<cv::KeyPoint>* keypoints1,
 cv::_InputArray* img2, const std::vector<cv::KeyPoint>* keypoints2,
 std::vector< std::vector< cv::DMatch > >* matches,
@@ -199,14 +199,14 @@ CVAPI(cv::FlannBasedMatcher*) cveFlannBasedMatcherCreate(cv::flann::IndexParams*
 CVAPI(void) cveFlannBasedMatcherRelease(cv::FlannBasedMatcher** matcher);
 //2D Tracker
-CVAPI(int) voteForSizeAndOrientation(std::vector<cv::KeyPoint>* modelKeyPoints, std::vector<cv::KeyPoint>* observedKeyPoints, std::vector< std::vector< cv::DMatch > >* matches, cv::Mat* mask, double scaleIncrement, int rotationBins);
+CVAPI(int) cveVoteForSizeAndOrientation(std::vector<cv::KeyPoint>* modelKeyPoints, std::vector<cv::KeyPoint>* observedKeyPoints, std::vector< std::vector< cv::DMatch > >* matches, cv::Mat* mask, double scaleIncrement, int rotationBins);
 //Feature2D
-CVAPI(void) CvFeature2DDetectAndCompute(cv::Feature2D* feature2D, cv::_InputArray* image, cv::_InputArray* mask, std::vector<cv::KeyPoint>* keypoints, cv::_OutputArray* descriptors, bool useProvidedKeyPoints);
-CVAPI(void) CvFeature2DDetect(cv::Feature2D* feature2D, cv::_InputArray* image, std::vector<cv::KeyPoint>* keypoints, cv::_InputArray* mask);
-CVAPI(void) CvFeature2DCompute(cv::Feature2D* feature2D, cv::_InputArray* image, std::vector<cv::KeyPoint>* keypoints, cv::_OutputArray* descriptors);
-CVAPI(int) CvFeature2DGetDescriptorSize(cv::Feature2D* feature2D);
-CVAPI(cv::Algorithm*) CvFeature2DGetAlgorithm(cv::Feature2D* feature2D);
+CVAPI(void) cveFeature2DDetectAndCompute(cv::Feature2D* feature2D, cv::_InputArray* image, cv::_InputArray* mask, std::vector<cv::KeyPoint>* keypoints, cv::_OutputArray* descriptors, bool useProvidedKeyPoints);
+CVAPI(void) cveFeature2DDetect(cv::Feature2D* feature2D, cv::_InputArray* image, std::vector<cv::KeyPoint>* keypoints, cv::_InputArray* mask);
+CVAPI(void) cveFeature2DCompute(cv::Feature2D* feature2D, cv::_InputArray* image, std::vector<cv::KeyPoint>* keypoints, cv::_OutputArray* descriptors);
+CVAPI(int) cveFeature2DGetDescriptorSize(cv::Feature2D* feature2D);
+CVAPI(cv::Algorithm*) cveFeature2DGetAlgorithm(cv::Feature2D* feature2D);
 //BowKMeansTrainer
 CVAPI(cv::BOWKMeansTrainer*) cveBOWKMeansTrainerCreate(int clusterCount, const CvTermCriteria* termcrit, int attempts, int flags);

Emgu.CV.Extern/features2d/keypointDetectors.cpp (20 changed lines)

@@ -183,7 +183,7 @@ void cveSimpleBlobDetectorParamsRelease(cv::SimpleBlobDetector::Params** params)
 }
 // Draw keypoints.
-void drawKeypoints(
+void cveDrawKeypoints(
 cv::_InputArray* image,
 const std::vector<cv::KeyPoint>* keypoints,
 cv::_InputOutputArray* outImage,
@@ -198,7 +198,7 @@ void drawKeypoints(
 }
 // Draws matches of keypoints from two images on output image.
-void drawMatchedFeatures1(
+void cveDrawMatchedFeatures1(
 cv::_InputArray* img1,
 const std::vector<cv::KeyPoint>* keypoints1,
 cv::_InputArray* img2,
@@ -232,7 +232,7 @@ void drawMatchedFeatures1(
 #endif
 }
-void drawMatchedFeatures2(
+void cveDrawMatchedFeatures2(
 cv::_InputArray* img1,
 const std::vector<cv::KeyPoint>* keypoints1,
 cv::_InputArray* img2,
@@ -273,7 +273,7 @@ void drawMatchedFeatures2(
 #endif
 }
-void drawMatchedFeatures3(
+void cveDrawMatchedFeatures3(
 cv::_InputArray* img1, const std::vector<cv::KeyPoint>* keypoints1,
 cv::_InputArray* img2, const std::vector<cv::KeyPoint>* keypoints2,
 std::vector< std::vector< cv::DMatch > >* matches,
@@ -497,7 +497,7 @@ void cveFlannBasedMatcherRelease(cv::FlannBasedMatcher** matcher)
 }
 //2D tracker
-int voteForSizeAndOrientation(std::vector<cv::KeyPoint>* modelKeyPoints, std::vector<cv::KeyPoint>* observedKeyPoints, std::vector< std::vector< cv::DMatch > >* matches, cv::Mat* mask, double scaleIncrement, int rotationBins)
+int cveVoteForSizeAndOrientation(std::vector<cv::KeyPoint>* modelKeyPoints, std::vector<cv::KeyPoint>* observedKeyPoints, std::vector< std::vector< cv::DMatch > >* matches, cv::Mat* mask, double scaleIncrement, int rotationBins)
 {
 #ifdef HAVE_OPENCV_FEATURES2D
 CV_Assert(!modelKeyPoints->empty());
@@ -572,7 +572,7 @@ int voteForSizeAndOrientation(std::vector<cv::KeyPoint>* modelKeyPoints, std::ve
 }
 //Feature2D
-void CvFeature2DDetectAndCompute(cv::Feature2D* feature2D, cv::_InputArray* image, cv::_InputArray* mask, std::vector<cv::KeyPoint>* keypoints, cv::_OutputArray* descriptors, bool useProvidedKeyPoints)
+void cveFeature2DDetectAndCompute(cv::Feature2D* feature2D, cv::_InputArray* image, cv::_InputArray* mask, std::vector<cv::KeyPoint>* keypoints, cv::_OutputArray* descriptors, bool useProvidedKeyPoints)
 {
 #ifdef HAVE_OPENCV_FEATURES2D
 feature2D->detectAndCompute(*image, mask ? *mask : (cv::InputArray) cv::noArray(), *keypoints, *descriptors, useProvidedKeyPoints);
@@ -580,7 +580,7 @@ void CvFeature2DDetectAndCompute(cv::Feature2D* feature2D, cv::_InputArray* imag
 throw_no_features2d();
 #endif
 }
-void CvFeature2DDetect(cv::Feature2D* feature2D, cv::_InputArray* image, std::vector<cv::KeyPoint>* keypoints, cv::_InputArray* mask)
+void cveFeature2DDetect(cv::Feature2D* feature2D, cv::_InputArray* image, std::vector<cv::KeyPoint>* keypoints, cv::_InputArray* mask)
 {
 #ifdef HAVE_OPENCV_FEATURES2D
 feature2D->detect(*image, *keypoints, mask ? *mask : (cv::InputArray) cv::noArray());
@@ -588,7 +588,7 @@ void CvFeature2DDetect(cv::Feature2D* feature2D, cv::_InputArray* image, std::ve
 throw_no_features2d();
 #endif
 }
-void CvFeature2DCompute(cv::Feature2D* feature2D, cv::_InputArray* image, std::vector<cv::KeyPoint>* keypoints, cv::_OutputArray* descriptors)
+void cveFeature2DCompute(cv::Feature2D* feature2D, cv::_InputArray* image, std::vector<cv::KeyPoint>* keypoints, cv::_OutputArray* descriptors)
 {
 #ifdef HAVE_OPENCV_FEATURES2D
 feature2D->compute(*image, *keypoints, *descriptors);
@@ -596,7 +596,7 @@ void CvFeature2DCompute(cv::Feature2D* feature2D, cv::_InputArray* image, std::v
 throw_no_features2d();
 #endif
 }
-int CvFeature2DGetDescriptorSize(cv::Feature2D* feature2D)
+int cveFeature2DGetDescriptorSize(cv::Feature2D* feature2D)
 {
 #ifdef HAVE_OPENCV_FEATURES2D
 return feature2D->descriptorSize();
@@ -604,7 +604,7 @@ int CvFeature2DGetDescriptorSize(cv::Feature2D* feature2D)
 throw_no_features2d();
 #endif
 }
-cv::Algorithm* CvFeature2DGetAlgorithm(cv::Feature2D* feature2D)
+cv::Algorithm* cveFeature2DGetAlgorithm(cv::Feature2D* feature2D)
 {
 #ifdef HAVE_OPENCV_FEATURES2D
 return dynamic_cast<cv::Algorithm*>(feature2D);

Emgu.CV.Extern/ml/ml.cpp (8 changed lines)

@@ -7,7 +7,7 @@
 #include "ml_c.h"
-bool StatModelTrain(cv::ml::StatModel* model, cv::_InputArray* samples, int layout, cv::_InputArray* responses)
+bool cveStatModelTrain(cv::ml::StatModel* model, cv::_InputArray* samples, int layout, cv::_InputArray* responses)
 {
 #ifdef HAVE_OPENCV_ML
 return model->train(*samples, layout, *responses);
@@ -15,7 +15,7 @@ bool StatModelTrain(cv::ml::StatModel* model, cv::_InputArray* samples, int layo
 throw_no_ml();
 #endif
 }
-bool StatModelTrainWithData(cv::ml::StatModel* model, cv::ml::TrainData* data, int flags)
+bool cveStatModelTrainWithData(cv::ml::StatModel* model, cv::ml::TrainData* data, int flags)
 {
 #ifdef HAVE_OPENCV_ML
 cv::Ptr<cv::ml::TrainData> p(data, [](cv::ml::TrainData*) {});
@@ -24,10 +24,10 @@ bool StatModelTrainWithData(cv::ml::StatModel* model, cv::ml::TrainData* data, i
 throw_no_ml();
 #endif
 }
-float StatModelPredict(cv::ml::StatModel* model, cv::_InputArray* samples, cv::_OutputArray* results, int flags)
+float cveStatModelPredict(cv::ml::StatModel* model, cv::_InputArray* samples, cv::_OutputArray* results, int flags)
 {
 #ifdef HAVE_OPENCV_ML
-return model->predict(*samples, results ? *results : (cv::OutputArray) cv::noArray(), flags);
+return model->predict(*samples, results ? *results : static_cast<cv::OutputArray>(cv::noArray()), flags);
 #else
 throw_no_ml();
 #endif

Emgu.CV.Extern/ml/ml_c.h (6 changed lines)

@@ -41,9 +41,9 @@ namespace cv {
 //StatModel
-CVAPI(bool) StatModelTrain(cv::ml::StatModel* model, cv::_InputArray* samples, int layout, cv::_InputArray* responses );
-CVAPI(bool) StatModelTrainWithData(cv::ml::StatModel* model, cv::ml::TrainData* data, int flags);
-CVAPI(float) StatModelPredict(cv::ml::StatModel* model, cv::_InputArray* samples, cv::_OutputArray* results, int flags);
+CVAPI(bool) cveStatModelTrain(cv::ml::StatModel* model, cv::_InputArray* samples, int layout, cv::_InputArray* responses );
+CVAPI(bool) cveStatModelTrainWithData(cv::ml::StatModel* model, cv::ml::TrainData* data, int flags);
+CVAPI(float) cveStatModelPredict(cv::ml::StatModel* model, cv::_InputArray* samples, cv::_OutputArray* results, int flags);
 CVAPI(cv::ml::TrainData*) cveTrainDataCreate(
 cv::_InputArray* samples, int layout, cv::_InputArray* responses,

Emgu.CV.Extern/video/video_c.cpp (2 changed lines)

@@ -265,7 +265,7 @@ double cveFindTransformECC(
 *templateImage, *inputImage,
 *warpMatrix, motionType,
 *criteria,
-inputMask ? *inputMask : (cv::InputArray) cv::noArray());
+inputMask ? *inputMask : static_cast<cv::InputArray>(cv::noArray()));
 #else
 throw_no_video();
 #endif

Emgu.CV/Features2D/Feature2D.cs (20 changed lines)

@@ -48,7 +48,7 @@ namespace Emgu.CV.Features2D
 if (_feature2D == IntPtr.Zero)
 return IntPtr.Zero;
-return Features2DInvoke.CvFeature2DGetAlgorithm(_feature2D);
+return Features2DInvoke.cveFeature2DGetAlgorithm(_feature2D);
 }
 }
@@ -65,7 +65,7 @@ namespace Emgu.CV.Features2D
 using (InputArray iaImage = image.GetInputArray())
 using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
 using (OutputArray oaDescriptors = descriptors.GetOutputArray())
-Features2DInvoke.CvFeature2DDetectAndCompute(_ptr, iaImage, iaMask, keyPoints, oaDescriptors, useProvidedKeyPoints);
+Features2DInvoke.cveFeature2DDetectAndCompute(_ptr, iaImage, iaMask, keyPoints, oaDescriptors, useProvidedKeyPoints);
 }
 /// <summary>
@@ -88,7 +88,7 @@ namespace Emgu.CV.Features2D
 {
 using (InputArray iaImage = image.GetInputArray())
 using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
-Features2DInvoke.CvFeature2DDetect(_feature2D, iaImage, keypoints.Ptr, iaMask);
+Features2DInvoke.cveFeature2DDetect(_feature2D, iaImage, keypoints.Ptr, iaMask);
 }
 /// <summary>
@@ -116,7 +116,7 @@ namespace Emgu.CV.Features2D
 {
 using (InputArray iaImage = image.GetInputArray())
 using (OutputArray oaDescriptors = descriptors.GetOutputArray())
-Features2DInvoke.CvFeature2DCompute(_feature2D, iaImage, keyPoints.Ptr, oaDescriptors);
+Features2DInvoke.cveFeature2DCompute(_feature2D, iaImage, keyPoints.Ptr, oaDescriptors);
 }
 /// <summary>
@@ -129,7 +129,7 @@ namespace Emgu.CV.Features2D
 {
 if (_feature2D == IntPtr.Zero)
 return 0;
-return Features2DInvoke.CvFeature2DGetDescriptorSize(_feature2D);
+return Features2DInvoke.cveFeature2DGetDescriptorSize(_feature2D);
 }
 }
 }
@@ -138,10 +138,10 @@ namespace Emgu.CV.Features2D
 {
 [DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
-internal static extern IntPtr CvFeature2DGetAlgorithm(IntPtr detector);
+internal static extern IntPtr cveFeature2DGetAlgorithm(IntPtr detector);
 [DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
-internal static extern void CvFeature2DDetectAndCompute(
+internal static extern void cveFeature2DDetectAndCompute(
 IntPtr feature2D,
 IntPtr image,
 IntPtr mask,
@@ -151,17 +151,17 @@ namespace Emgu.CV.Features2D
 bool useProvidedKeyPoints);
 [DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
-internal static extern void CvFeature2DDetect(
+internal static extern void cveFeature2DDetect(
 IntPtr detector,
 IntPtr image,
 IntPtr keypoints,
 IntPtr mask);
 [DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
-internal static extern void CvFeature2DCompute(IntPtr extractor, IntPtr image, IntPtr keypoints, IntPtr descriptors);
+internal static extern void cveFeature2DCompute(IntPtr extractor, IntPtr image, IntPtr keypoints, IntPtr descriptors);
 [DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
-internal static extern int CvFeature2DGetDescriptorSize(IntPtr extractor);
+internal static extern int cveFeature2DGetDescriptorSize(IntPtr extractor);
 }
 }
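These are internal renames inside Features2DInvoke; the public Feature2D surface (DetectAndCompute, Detect, Compute, DescriptorSize) is unchanged. A minimal sketch of the call path that now ends in cveFeature2DDetectAndCompute, assuming the ORBDetector class with its default parameters as the concrete Feature2D (any detector derived from Feature2D would do):

using Emgu.CV;
using Emgu.CV.Features2D;
using Emgu.CV.Util;

static class DetectSketch
{
    public static void Run(Mat image)
    {
        using (ORBDetector orb = new ORBDetector())            // concrete Feature2D; default parameters assumed
        using (VectorOfKeyPoint keyPoints = new VectorOfKeyPoint())
        using (Mat descriptors = new Mat())
        {
            // Ends up in Features2DInvoke.cveFeature2DDetectAndCompute after this commit;
            // a null mask maps to an empty InputArray on the native side.
            orb.DetectAndCompute(image, null, keyPoints, descriptors, false);
        }
    }
}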

Emgu.CV/Features2D/Features2DToolbox.cs (24 changed lines)

@@ -38,7 +38,7 @@ namespace Emgu.CV.Features2D
 MCvScalar c = color.MCvScalar;
 using (InputArray iaImage = image.GetInputArray())
 using (InputOutputArray ioaOutImage = outImage.GetInputOutputArray())
-Features2DInvoke.drawKeypoints(iaImage, keypoints, ioaOutImage, ref c, type);
+Features2DInvoke.cveDrawKeypoints(iaImage, keypoints, ioaOutImage, ref c, type);
 }
 /// <summary>
@@ -69,7 +69,7 @@ namespace Emgu.CV.Features2D
 using (InputArray iaModelImage = modelImage.GetInputArray())
 using (InputArray iaObservedImage = observedImage.GetInputArray())
 using (InputOutputArray ioaResult = result.GetInputOutputArray())
-Features2DInvoke.drawMatchedFeatures2(
+Features2DInvoke.cveDrawMatchedFeatures2(
 iaObservedImage,
 observedKeyPoints,
 iaModelImage,
@@ -110,7 +110,7 @@ namespace Emgu.CV.Features2D
 using (InputArray iaModelImage = modelImage.GetInputArray())
 using (InputArray iaObservedImage = observedImage.GetInputArray())
 using (InputOutputArray ioaResult = result.GetInputOutputArray())
-Features2DInvoke.drawMatchedFeatures1(iaObservedImage, observedKeyPoints, iaModelImage,
+Features2DInvoke.cveDrawMatchedFeatures1(iaObservedImage, observedKeyPoints, iaModelImage,
 modelKeypoints, matches, ioaResult, ref matchColor, ref singlePointColor, mask, flags);
 }
@@ -143,7 +143,7 @@ namespace Emgu.CV.Features2D
 using (InputArray iaObservedImage = observedImage.GetInputArray())
 using (InputOutputArray ioaResult = result.GetInputOutputArray())
 using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
-Features2DInvoke.drawMatchedFeatures3(iaObservedImage, observedKeyPoints, iaModelImage,
+Features2DInvoke.cveDrawMatchedFeatures3(iaObservedImage, observedKeyPoints, iaModelImage,
 modelKeypoints, matches, ioaResult, ref matchColor, ref singlePointColor, iaMask, flags);
 }
@@ -189,7 +189,7 @@ namespace Emgu.CV.Features2D
 double scaleIncrement,
 int rotationBins)
 {
-return Features2DInvoke.voteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, matches, mask, scaleIncrement,
+return Features2DInvoke.cveVoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, matches, mask, scaleIncrement,
 rotationBins);
 }
@@ -217,7 +217,7 @@ namespace Emgu.CV.Features2D
 double ransacReprojThreshold)
 {
 Mat homography = new Mat();
-bool found = Features2DInvoke.getHomographyMatrixFromMatchedFeatures(model, observed, matches, mask,
+bool found = Features2DInvoke.cveGetHomographyMatrixFromMatchedFeatures(model, observed, matches, mask,
 ransacReprojThreshold, homography);
 if (found)
 {
@@ -263,13 +263,13 @@ namespace Emgu.CV.Features2D
 {
 [DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
 [return: MarshalAs(CvInvoke.BoolMarshalType)]
-internal static extern bool getHomographyMatrixFromMatchedFeatures(IntPtr model, IntPtr observed, IntPtr indices, IntPtr mask, double ransacReprojThreshold, IntPtr homography);
+internal static extern bool cveGetHomographyMatrixFromMatchedFeatures(IntPtr model, IntPtr observed, IntPtr indices, IntPtr mask, double ransacReprojThreshold, IntPtr homography);
 [DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
-internal static extern int voteForSizeAndOrientation(IntPtr modelKeyPoints, IntPtr observedKeyPoints, IntPtr indices, IntPtr mask, double scaleIncrement, int rotationBins);
+internal static extern int cveVoteForSizeAndOrientation(IntPtr modelKeyPoints, IntPtr observedKeyPoints, IntPtr indices, IntPtr mask, double scaleIncrement, int rotationBins);
 [DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
-internal static extern void drawMatchedFeatures1(
+internal static extern void cveDrawMatchedFeatures1(
 IntPtr img1,
 IntPtr keypoints1,
 IntPtr img2,
@@ -282,7 +282,7 @@
 Features2D.Features2DToolbox.KeypointDrawType flags);
 [DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
-internal static extern void drawMatchedFeatures2(
+internal static extern void cveDrawMatchedFeatures2(
 IntPtr img1,
 IntPtr keypoints1,
 IntPtr img2,
@@ -295,7 +295,7 @@
 Features2D.Features2DToolbox.KeypointDrawType flags);
 [DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
-internal static extern void drawMatchedFeatures3(
+internal static extern void cveDrawMatchedFeatures3(
 IntPtr img1,
 IntPtr keypoints1,
 IntPtr img2,
@@ -308,7 +308,7 @@
 Features2D.Features2DToolbox.KeypointDrawType flags);
 [DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
-internal static extern void drawKeypoints(
+internal static extern void cveDrawKeypoints(
 IntPtr image,
 IntPtr vectorOfKeypoints,
 IntPtr outImage,

Emgu.CV/Ml/MlInvoke.cs (16 changed lines)

@@ -25,14 +25,14 @@ namespace Emgu.CV.ML
 #region CvStatModel
 [DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
 [return: MarshalAs(CvInvoke.BoolMarshalType)]
-internal static extern bool StatModelTrain(IntPtr model, IntPtr samples, DataLayoutType layout, IntPtr responses);
+internal static extern bool cveStatModelTrain(IntPtr model, IntPtr samples, DataLayoutType layout, IntPtr responses);
 [DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
 [return: MarshalAs(CvInvoke.BoolMarshalType)]
-internal static extern bool StatModelTrainWithData(IntPtr model, IntPtr data, int flags);
+internal static extern bool cveStatModelTrainWithData(IntPtr model, IntPtr data, int flags);
 [DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
-internal static extern float StatModelPredict(IntPtr model, IntPtr samples, IntPtr results, int flags);
+internal static extern float cveStatModelPredict(IntPtr model, IntPtr samples, IntPtr results, int flags);
 #endregion
 [DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
@@ -45,7 +45,7 @@ namespace Emgu.CV.ML
 [DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
 internal static extern void cveTrainDataRelease(ref IntPtr sharedPtr);
-#region CvNormalBayesClassifier
+#region NormalBayesClassifier
 [DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
 internal static extern IntPtr cveNormalBayesClassifierDefaultCreate(ref IntPtr statModel, ref IntPtr algorithm, ref IntPtr sharedPtr);
@@ -102,7 +102,7 @@ namespace Emgu.CV.ML
 public static extern float CvNormalBayesClassifierPredict(IntPtr model, IntPtr samples, IntPtr results);*/
 #endregion
-#region CvKNearest
+#region KNearest
 [DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
 internal static extern IntPtr cveKNearestCreate(
 ref IntPtr statModel,
@@ -123,7 +123,7 @@ namespace Emgu.CV.ML
 #endregion
-#region CvEM
+#region EM
 /*
 [DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
 internal static extern IntPtr cveEmParamsCreate(int nclusters, MlEnum.EmCovarianMatrixType covMatType, ref MCvTermCriteria termcrit);
@@ -205,7 +205,7 @@ namespace Emgu.CV.ML
 #endregion
-#region CvSVM
+#region SVM
 [DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
 internal static extern IntPtr cveSVMDefaultCreate(ref IntPtr statModel, ref IntPtr algorithm, ref IntPtr sharedPtr);
@@ -431,7 +431,7 @@ namespace Emgu.CV.ML
 #endregion
-#region CvSVM
+#region SVM
 [DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
 internal static extern IntPtr cveSVMSGDDefaultCreate(ref IntPtr statModel, ref IntPtr algorithm, ref IntPtr sharedPtr);

Emgu.CV/Ml/StatModel.cs (6 changed lines)

@@ -38,7 +38,7 @@ namespace Emgu.CV.ML
 using (InputArray iaSamples = samples.GetInputArray())
 using (InputArray iaResponses = responses.GetInputArray())
 {
-return MlInvoke.StatModelTrain(model.StatModelPtr, iaSamples, layoutType, iaResponses);
+return MlInvoke.cveStatModelTrain(model.StatModelPtr, iaSamples, layoutType, iaResponses);
 }
 }
@@ -51,7 +51,7 @@ namespace Emgu.CV.ML
 /// <returns>True if the training is successful.</returns>
 public static bool Train(this IStatModel model, TrainData trainData, int flags = 0)
 {
-return MlInvoke.StatModelTrainWithData(model.StatModelPtr, trainData, flags);
+return MlInvoke.cveStatModelTrainWithData(model.StatModelPtr, trainData, flags);
 }
 /// <summary>
@@ -67,7 +67,7 @@ namespace Emgu.CV.ML
 using (InputArray iaSamples = samples.GetInputArray())
 using (OutputArray oaResults = results == null ? OutputArray.GetEmpty() : results.GetOutputArray())
 {
-return MlInvoke.StatModelPredict(model.StatModelPtr, iaSamples, oaResults, flags);
+return MlInvoke.cveStatModelPredict(model.StatModelPtr, iaSamples, oaResults, flags);
 }
 }
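The StatModel extension methods keep their public signatures; only the P/Invoke targets change underneath. A minimal train/predict sketch, assuming Emgu's SVM class with its default parameters and sample/response matrices prepared elsewhere:

using Emgu.CV;
using Emgu.CV.ML;

static class StatModelSketch
{
    // samples: one row per training sample; responses: one label per row.
    // Element types (typically CV_32F samples, CV_32S labels) are assumed to be set up by the caller.
    public static float TrainAndPredict(Mat samples, Mat responses, Mat query)
    {
        using (SVM svm = new SVM())
        {
            // Routed through MlInvoke.cveStatModelTrain after this commit.
            svm.Train(samples, Emgu.CV.ML.MlEnum.DataLayoutType.RowSample, responses);
            // Routed through MlInvoke.cveStatModelPredict.
            return svm.Predict(query);
        }
    }
}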

Emgu.CV/Util/CvInvokeUtil.cs (8 changed lines)

@@ -13,18 +13,18 @@ namespace Emgu.CV
 public static partial class CvInvoke
 {
 [DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
-internal static extern IntPtr DataLoggerCreate(int logLevel, int loggerId);
+internal static extern IntPtr cveDataLoggerCreate(int logLevel, int loggerId);
 [DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
-internal static extern void DataLoggerRelease(ref IntPtr logger);
+internal static extern void cveDataLoggerRelease(ref IntPtr logger);
 [DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
-internal static extern void DataLoggerRegisterCallback(
+internal static extern void cveDataLoggerRegisterCallback(
 IntPtr logger,
 Util.DataLoggerHelper.DataCallback messageCallback);
 [DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
-internal static extern void DataLoggerLog(
+internal static extern void cveDataLoggerLog(
 IntPtr logger,
 IntPtr data,
 int logLevel);

Emgu.CV/Util/DataLogger.cs (8 changed lines)

@@ -28,8 +28,8 @@ namespace Emgu.CV.Util
 DataLoggerHelper.TotalLoggerCount++;
 }
-_ptr = CvInvoke.DataLoggerCreate(logLevel, _loggerId);
-CvInvoke.DataLoggerRegisterCallback(_ptr, DataLoggerHelper.Handler);
+_ptr = CvInvoke.cveDataLoggerCreate(logLevel, _loggerId);
+CvInvoke.cveDataLoggerRegisterCallback(_ptr, DataLoggerHelper.Handler);
 DataLoggerHelper.OnDataReceived += this.HelperDataHandler;
 }
@@ -53,7 +53,7 @@ namespace Emgu.CV.Util
 /// <param name="logLevel">The logLevel. The Log function only logs when the <paramref name="logLevel"/> is greater or equals to the DataLogger's logLevel</param>
 public void Log(IntPtr data, int logLevel)
 {
-CvInvoke.DataLoggerLog(_ptr, data, logLevel);
+CvInvoke.cveDataLoggerLog(_ptr, data, logLevel);
 }
 /// <summary>
@@ -62,7 +62,7 @@ namespace Emgu.CV.Util
 protected override void DisposeObject()
 {
 if (_ptr != IntPtr.Zero)
-CvInvoke.DataLoggerRelease(ref _ptr);
+CvInvoke.cveDataLoggerRelease(ref _ptr);
 DataLoggerHelper.OnDataReceived -= this.HelperDataHandler;
 }
 }
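The managed DataLogger wrapper keeps its public API; only the CvInvoke entry points it calls are renamed. A minimal usage sketch of the class touched above (the single-argument constructor taking a log level is an assumption read off the diff, and the log-level values are illustrative):

using System;
using Emgu.CV.Util;

static class DataLoggerSketch
{
    public static void Run(IntPtr nativeData)
    {
        // Construction goes through CvInvoke.cveDataLoggerCreate after this commit;
        // the constructor argument is assumed to be the logger's log level.
        using (DataLogger logger = new DataLogger(1))
        {
            // Forwards to the renamed native export cveDataLoggerLog; per the doc comment
            // above, only messages whose logLevel is >= the logger's level are logged.
            logger.Log(nativeData, 1);
        }
        // Dispose releases the native logger via cveDataLoggerRelease.
    }
}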
