Browse Source

updates to opencv 3.2

pull/19/head
Canming Huang 9 years ago
parent
commit
59d84bb91b
  1. 6
      Emgu.CV.Contrib/Aruco/GridBoard.cs
  2. 8
      Emgu.CV.Contrib/Dnn/Blob.cs
  3. 154
      Emgu.CV.Contrib/XPhoto/XPhotoInvoke.cs
  4. 56
      Emgu.CV.Extern/aruco/aruco_c.cpp
  5. 2
      Emgu.CV.Extern/aruco/aruco_c.h
  6. 4
      Emgu.CV.Extern/dnn/dnn_c.cpp
  7. 2
      Emgu.CV.Extern/dnn/dnn_c.h
  8. 44
      Emgu.CV.Extern/tesseract/libtesseract/CMakeLists.txt
  9. 45
      Emgu.CV.Extern/xphoto/xphoto_c.cpp
  10. 16
      Emgu.CV.Extern/xphoto/xphoto_c.h
  11. 15
      Emgu.CV/PInvoke/CvEnum.cs
  12. 2
      opencv
  13. 2
      opencv_extra

6
Emgu.CV.Contrib/Aruco/GridBoard.cs

@ -42,9 +42,9 @@ namespace Emgu.CV.Aruco
/// <param name="markerSeparation">separation between two markers (same unit than markerLength)</param>
/// <param name="dictionary">dictionary of markers indicating the type of markers. The first markersX*markersY markers in the dictionary are used.</param>
public GridBoard(int markersX, int markersY, float markerLength, float markerSeparation,
Dictionary dictionary)
Dictionary dictionary, int firstMarker = 0)
{
_ptr = ArucoInvoke.cveArucoGridBoardCreate(markersX, markersY, markerLength, markerSeparation, dictionary, ref _boardPtr);
_ptr = ArucoInvoke.cveArucoGridBoardCreate(markersX, markersY, markerLength, markerSeparation, dictionary, firstMarker, ref _boardPtr);
}
/// <summary>
@ -138,7 +138,7 @@ namespace Emgu.CV.Aruco
[DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
internal static extern IntPtr cveArucoGridBoardCreate(
int markersX, int markersY, float markerLength, float markerSeparation,
IntPtr dictionary, ref IntPtr boardPtr);
IntPtr dictionary, int firstMarker, ref IntPtr boardPtr);
[DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
internal static extern void cveArucoGridBoardRelease(ref IntPtr gridBoard);

8
Emgu.CV.Contrib/Dnn/Blob.cs

@ -29,11 +29,11 @@ namespace Emgu.CV.Dnn
/// Constructs 4-dimensional blob (so-called batch) from image or array of images.
/// </summary>
/// <param name="image">2-dimensional multi-channel or 3-dimensional single-channel image (or array of images)</param>
/// <param name="dstCn">specify size of second axis of output blob</param>
public Blob(IInputArray image, int dstCn = -1)
public Blob(IInputArray image)
{
using (InputArray iaImage = image.GetInputArray())
_ptr = DnnInvoke.cveDnnBlobCreateFromInputArray(iaImage, dstCn);
_ptr = DnnInvoke.cveDnnBlobCreateFromInputArray(iaImage);
}
/// <summary>
@ -105,7 +105,7 @@ namespace Emgu.CV.Dnn
public static partial class DnnInvoke
{
[DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
internal static extern IntPtr cveDnnBlobCreateFromInputArray(IntPtr image, int dstCn);
internal static extern IntPtr cveDnnBlobCreateFromInputArray(IntPtr image);
[DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
internal static extern void cveDnnBlobMatRef(IntPtr blob, IntPtr outMat);
[DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]

154
Emgu.CV.Contrib/XPhoto/XPhotoInvoke.cs

@ -2,97 +2,93 @@
// Copyright (C) 2004-2016 by EMGU Corporation. All rights reserved.
//----------------------------------------------------------------------------
using System;
using System;
using System.Runtime.InteropServices;
using System.Drawing;
using System.Runtime.CompilerServices;
using Emgu.Util;
namespace Emgu.CV.XPhoto
{
/// <summary>
/// Class that contains entry points for the XPhoto module.
/// </summary>
public static partial class XPhotoInvoke
{
static XPhotoInvoke()
{
CvInvoke.CheckLibraryLoaded();
}
public abstract class WhiteBalancer : UnmanagedObject
{
protected IntPtr _whiteBalancerPtr;
/// <summary>
/// The function implements different algorithm of automatic white balance, i.e. it tries to map image’s white color to perceptual white (this can be violated due to specific illumination or camera settings).
/// </summary>
/// <param name="src">The source.</param>
/// <param name="dst">The DST.</param>
/// <param name="algorithmType">Type of the algorithm to use. Use SIMPLE to perform smart histogram adjustments (ignoring 4% pixels with minimal and maximal values) for each channel.</param>
/// <param name="inputMin">Minimum value in the input image</param>
/// <param name="inputMax">Maximum value in the input image</param>
/// <param name="outputMin">Minimum value in the output image</param>
/// <param name="outputMax">Maximum value in the output image</param>
public static void BalanceWhite(Mat src, Mat dst, CvEnum.WhiteBalanceMethod algorithmType, float inputMin = 0f, float inputMax = 255f, float outputMin = 0f, float outputMax = 255f)
{
cveBalanceWhite(src, dst, algorithmType, inputMin, inputMax, outputMin, outputMax);
}
public void BalanceWhite(IInputArray src, IOutputArray dst)
{
using (InputArray iaSrc = src.GetInputArray())
using (OutputArray oaDst = dst.GetOutputArray())
XPhotoInvoke.cveWhiteBalancerBalanceWhite(_whiteBalancerPtr, iaSrc, oaDst);
}
}
[DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
private static extern void cveBalanceWhite(IntPtr src, IntPtr dst, CvEnum.WhiteBalanceMethod algorithmType, float inputMin, float inputMax, float outputMin, float outputMax);
/// <summary>
/// Class that contains entry points for the XPhoto module.
/// </summary>
public static partial class XPhotoInvoke
{
static XPhotoInvoke()
{
CvInvoke.CheckLibraryLoaded();
}
/// <summary>
/// Implements a simple grayworld white balance algorithm.
/// The function autowbGrayworld scales the values of pixels based on a gray-world assumption which states that the average of all channels should result in a gray image.
/// This function adds a modification which thresholds pixels based on their saturation value and only uses pixels below the provided threshold in finding average pixel values.
/// </summary>
/// <param name="src">Input array.</param>
/// <param name="dst">Output array of the same size and type as src.</param>
/// <param name="thresh">Maximum saturation for a pixel to be included in the gray-world assumption.</param>
public static void AutowbGrayworld(IInputArray src, IOutputArray dst, float thresh = 0.5f)
{
using (InputArray iaSrc = src.GetInputArray())
using (OutputArray oaDst = dst.GetOutputArray())
cveAutowbGrayworld(iaSrc, oaDst, thresh);
}
[DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
internal static extern void cveWhiteBalancerBalanceWhite(IntPtr whiteBalancer, IntPtr src, IntPtr dst);
[DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
private static extern void cveAutowbGrayworld(IntPtr src, IntPtr dst, float thresh);
[DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
internal static extern IntPtr cveSimpleWBCreate(ref IntPtr whiteBalancer);
[DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
internal static extern void cveSimpleWBRelease(ref IntPtr whiteBalancer);
/// <summary>
/// The function implements simple dct-based denoising, link: http://www.ipol.im/pub/art/2011/ys-dct/.
/// </summary>
/// <param name="src">Source image</param>
/// <param name="dst">Destination image</param>
/// <param name="sigma">Expected noise standard deviation</param>
/// <param name="psize">Size of block side where dct is computed</param>
public static void DctDenoising(Mat src, Mat dst, double sigma, int psize = 16)
{
cveDctDenoising(src, dst, sigma, psize);
}
[DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
internal static extern IntPtr cveGrayworldWBCreate(ref IntPtr whiteBalancer);
[DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
internal static extern void cveGrayworldWBRelease(ref IntPtr whiteBalancer);
[DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
private static extern void cveDctDenoising(IntPtr src, IntPtr dst, double sigma, int psize);
[DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
internal static extern IntPtr cveLearningBasedWBCreate(ref IntPtr whiteBalancer);
[DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
internal static extern void cveLearningBasedWBRelease(ref IntPtr whiteBalancer);
/// <summary>
/// Inpaint type
/// </summary>
public enum InpaintType
{
/// <summary>
/// Shift map
/// </summary>
Shiftmap = 0
}
/// <summary>
/// The function implements simple dct-based denoising, link: http://www.ipol.im/pub/art/2011/ys-dct/.
/// </summary>
/// <param name="src">Source image</param>
/// <param name="dst">Destination image</param>
/// <param name="sigma">Expected noise standard deviation</param>
/// <param name="psize">Size of block side where dct is computed</param>
public static void DctDenoising(Mat src, Mat dst, double sigma, int psize = 16)
{
cveDctDenoising(src, dst, sigma, psize);
}
/// <summary>
/// The function implements different single-image inpainting algorithms
/// </summary>
/// <param name="src">source image, it could be of any type and any number of channels from 1 to 4. In case of 3- and 4-channels images the function expect them in CIELab colorspace or similar one, where first color component shows intensity, while second and third shows colors. Nonetheless you can try any colorspaces.</param>
/// <param name="mask">mask (CV_8UC1), where non-zero pixels indicate valid image area, while zero pixels indicate area to be inpainted</param>
/// <param name="dst">destination image</param>
/// <param name="algorithmType">algorithm type</param>
public static void Inpaint(Mat src, Mat mask, Mat dst, InpaintType algorithmType)
{
cveXInpaint(src, mask, dst, algorithmType);
}
[DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
private static extern void cveDctDenoising(IntPtr src, IntPtr dst, double sigma, int psize);
[DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
private static extern void cveXInpaint(IntPtr src, IntPtr mask, IntPtr dst, InpaintType algorithmType);
}
/// <summary>
/// Inpaint type
/// </summary>
public enum InpaintType
{
/// <summary>
/// Shift map
/// </summary>
Shiftmap = 0
}
/// <summary>
/// The function implements different single-image inpainting algorithms
/// </summary>
/// <param name="src">source image, it could be of any type and any number of channels from 1 to 4. In case of 3- and 4-channels images the function expect them in CIELab colorspace or similar one, where first color component shows intensity, while second and third shows colors. Nonetheless you can try any colorspaces.</param>
/// <param name="mask">mask (CV_8UC1), where non-zero pixels indicate valid image area, while zero pixels indicate area to be inpainted</param>
/// <param name="dst">destination image</param>
/// <param name="algorithmType">algorithm type</param>
public static void Inpaint(Mat src, Mat mask, Mat dst, InpaintType algorithmType)
{
cveXInpaint(src, mask, dst, algorithmType);
}
[DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
private static extern void cveXInpaint(IntPtr src, IntPtr mask, IntPtr dst, InpaintType algorithmType);
}
}

56
Emgu.CV.Extern/aruco/aruco_c.cpp

@ -8,12 +8,14 @@
cv::aruco::Dictionary const* cveArucoGetPredefinedDictionary(int name)
{
return &cv::aruco::getPredefinedDictionary(static_cast<cv::aruco::PREDEFINED_DICTIONARY_NAME>(name));
return cv::aruco::getPredefinedDictionary(static_cast<cv::aruco::PREDEFINED_DICTIONARY_NAME>(name)).get();
}
void cveArucoDrawMarker(cv::aruco::Dictionary* dictionary, int id, int sidePixels, cv::_OutputArray* img, int borderBits)
{
cv::aruco::drawMarker(*dictionary, id, sidePixels, *img, borderBits);
cv::Ptr<cv::aruco::Dictionary> arucoDict = cv::makePtr<cv::aruco::Dictionary>(dictionary);
arucoDict.addref();
cv::aruco::drawMarker(arucoDict, id, sidePixels, *img, borderBits);
}
void cveArucoDrawAxis(cv::_InputOutputArray* image, cv::_InputArray* cameraMatrix, cv::_InputArray* distCoeffs, cv::_InputArray* rvec, cv::_InputArray* tvec, float length)
@ -26,7 +28,11 @@ void cveArucoDetectMarkers(
cv::_OutputArray* ids, cv::aruco::DetectorParameters* parameters,
cv::_OutputArray* rejectedImgPoints)
{
cv::aruco::detectMarkers(*image, *dictionary, *corners, *ids, *parameters, rejectedImgPoints ? *rejectedImgPoints : (cv::OutputArrayOfArrays) cv::noArray());
cv::Ptr<cv::aruco::Dictionary> arucoDict = cv::makePtr<cv::aruco::Dictionary>(dictionary);
arucoDict.addref();
cv::Ptr<cv::aruco::DetectorParameters> arucoParam = parameters;
arucoParam.addref();
cv::aruco::detectMarkers(*image, arucoDict, *corners, *ids, arucoParam, rejectedImgPoints ? *rejectedImgPoints : (cv::OutputArrayOfArrays) cv::noArray());
}
void cveArucoEstimatePoseSingleMarkers(cv::_InputArray* corners, float markerLength,
@ -38,13 +44,15 @@ void cveArucoEstimatePoseSingleMarkers(cv::_InputArray* corners, float markerLen
cv::aruco::GridBoard* cveArucoGridBoardCreate(
int markersX, int markersY, float markerLength, float markerSeparation,
cv::aruco::Dictionary* dictionary, cv::aruco::Board** boardPtr)
cv::aruco::Dictionary* dictionary, int firstMarker, cv::aruco::Board** boardPtr)
{
cv::aruco::GridBoard gridBoard = cv::aruco::GridBoard::create(markersX, markersY, markerLength, markerSeparation, *dictionary);
cv::Ptr<cv::aruco::GridBoard> ptr = cv::makePtr<cv::aruco::GridBoard>(gridBoard);
ptr.addref();
*boardPtr = dynamic_cast<cv::aruco::Board*>(ptr.get());
return ptr.get();
cv::Ptr<cv::aruco::Dictionary> arucoDict = cv::makePtr<cv::aruco::Dictionary>(dictionary);
arucoDict.addref();
cv::Ptr<cv::aruco::GridBoard> ptr = cv::aruco::GridBoard::create(markersX, markersY, markerLength, markerSeparation, arucoDict, firstMarker);
ptr.addref();
*boardPtr = dynamic_cast<cv::aruco::Board*>(ptr.get());
return ptr.get();
}
void cveArucoGridBoardDraw(cv::aruco::GridBoard* gridBoard, CvSize* outSize, cv::_OutputArray* img, int marginSize, int borderBits)
@ -62,11 +70,13 @@ cv::aruco::CharucoBoard* cveCharucoBoardCreate(
int squaresX, int squaresY, float squareLength, float markerLength,
cv::aruco::Dictionary* dictionary, cv::aruco::Board** boardPtr)
{
cv::aruco::CharucoBoard charucoBoard = cv::aruco::CharucoBoard::create(squaresX, squaresY, squareLength, markerLength, *dictionary);
cv::Ptr<cv::aruco::CharucoBoard> ptr = cv::makePtr<cv::aruco::CharucoBoard>(charucoBoard);
ptr.addref();
*boardPtr = dynamic_cast<cv::aruco::Board*>(ptr.get());
return ptr.get();
cv::Ptr<cv::aruco::Dictionary> dictPtr = cv::makePtr<cv::aruco::Dictionary>( dictionary );
dictPtr.addref();
cv::Ptr<cv::aruco::CharucoBoard> ptr = cv::aruco::CharucoBoard::create(squaresX, squaresY, squareLength, markerLength, dictPtr);
ptr.addref();
*boardPtr = dynamic_cast<cv::aruco::Board*>(ptr.get());
return ptr.get();
}
void cveCharucoBoardDraw(cv::aruco::CharucoBoard* charucoBoard, CvSize* outSize, cv::_OutputArray* img, int marginSize, int borderBits)
{
@ -87,13 +97,22 @@ void cveArucoRefineDetectedMarkers(
float minRepDistance, float errorCorrectionRate, bool checkAllOrders,
cv::_OutputArray* recoveredIdxs, cv::aruco::DetectorParameters* parameters)
{
cv::Ptr<cv::aruco::Board> boardPtr = board;
boardPtr.addref();
cv::Ptr<cv::aruco::DetectorParameters> detectorParametersPtr = cv::aruco::DetectorParameters::create();
if (parameters)
{
detectorParametersPtr = parameters;
detectorParametersPtr.addref();
}
cv::aruco::refineDetectedMarkers(
*image, *board, *detectedCorners, *detectedIds, *rejectedCorners,
*image, boardPtr, *detectedCorners, *detectedIds, *rejectedCorners,
cameraMatrix ? *cameraMatrix : static_cast<cv::InputArray>(cv::noArray()),
distCoeffs ? *distCoeffs : static_cast<cv::InputArray>(cv::noArray()),
minRepDistance, errorCorrectionRate, checkAllOrders,
recoveredIdxs ? *recoveredIdxs : static_cast<cv::OutputArray>(cv::noArray()),
parameters ? *parameters : cv::aruco::DetectorParameters());
detectorParametersPtr);
}
void cveArucoDrawDetectedMarkers(
@ -109,7 +128,10 @@ double cveArucoCalibrateCameraAruco(
cv::_OutputArray* rvecs, cv::_OutputArray* tvecs, int flags,
CvTermCriteria* criteria)
{
return cv::aruco::calibrateCameraAruco(*corners, *ids, *counter, *board, *imageSize,
cv::Ptr<cv::aruco::Board> boardPtr = board;
boardPtr.addref();
return cv::aruco::calibrateCameraAruco(*corners, *ids, *counter, boardPtr, *imageSize,
*cameraMatrix, *distCoeffs, rvecs ? *rvecs : (cv::OutputArrayOfArrays) cv::noArray(),
tvecs ? *tvecs : (cv::OutputArrayOfArrays) cv::noArray(), flags, *criteria);
}

2
Emgu.CV.Extern/aruco/aruco_c.h

@ -30,7 +30,7 @@ CVAPI(void) cveArucoEstimatePoseSingleMarkers(cv::_InputArray* corners, float ma
CVAPI(cv::aruco::GridBoard*) cveArucoGridBoardCreate(
int markersX, int markersY, float markerLength, float markerSeparation,
cv::aruco::Dictionary* dictionary, cv::aruco::Board** boardPtr);
cv::aruco::Dictionary* dictionary, int firstMarker, cv::aruco::Board** boardPtr);
CVAPI(void) cveArucoGridBoardDraw(cv::aruco::GridBoard* gridBoard, CvSize* outSize, cv::_OutputArray* img, int marginSize, int borderBits);

4
Emgu.CV.Extern/dnn/dnn_c.cpp

@ -52,9 +52,9 @@ void cveDnnNetRelease(cv::dnn::Net** net)
}
cv::dnn::Blob* cveDnnBlobCreateFromInputArray(cv::_InputArray* image, int dstCn)
cv::dnn::Blob* cveDnnBlobCreateFromInputArray(cv::_InputArray* image)
{
return new cv::dnn::Blob(*image, dstCn);
return new cv::dnn::Blob(*image);
}
void cveDnnBlobMatRef(cv::dnn::Blob* blob, cv::Mat* outMat)
{

2
Emgu.CV.Extern/dnn/dnn_c.h

@ -23,7 +23,7 @@ CVAPI(cv::dnn::Blob*) cveDnnNetGetBlob(cv::dnn::Net* net, cv::String* outputName
CVAPI(void) cveDnnNetForward(cv::dnn::Net* net);
CVAPI(void) cveDnnNetRelease(cv::dnn::Net** net);
CVAPI(cv::dnn::Blob*) cveDnnBlobCreateFromInputArray(cv::_InputArray* image, int dstCn);
CVAPI(cv::dnn::Blob*) cveDnnBlobCreateFromInputArray(cv::_InputArray* image);
CVAPI(void) cveDnnBlobMatRef(cv::dnn::Blob* blob, cv::Mat* outMat);
CVAPI(void) cveDnnBlobRelease(cv::dnn::Blob** blob);
CVAPI(int) cveDnnBlobDims(cv::dnn::Blob* blob);

44
Emgu.CV.Extern/tesseract/libtesseract/CMakeLists.txt

@ -253,6 +253,46 @@ IF (NOT MSVC)
ADD_DEPENDENCIES(${PROJECT_NAME} tesseract_cutil tesseract_ccstruct tesseract_ccutil tesseract_viewer tesseract_opencl tesseract_dict tesseract_classify libleptonica tesseract_wordrec)
ENDIF()
#arch
PROJECT(tesseract_arch)
file(GLOB tesseract_arch_srcs "${TESSERACT_DIR}/arch/*.cpp")
file(GLOB tesseract_arch_hdrs "${TESSERACT_DIR}/arch/*.h")
source_group("Src" FILES ${tesseract_arch_srcs})
source_group("Include" FILES ${tesseract_arch_hdrs})
add_definitions(-DUSE_STD_NAMESPACE -DHAVE_LIBLEPT)
IF(MSVC)
LIST(APPEND tesseract_arch_srcs "${PROJECT_SOURCE_DIR}/${TESSERACT_DIR}/vs2010/port/gettimeofday.cpp")
add_definitions(-D__MSW32__ -W0)
include_directories("${TESSERACT_DIR}/vs2010/include" "${TESSERACT_DIR}/vs2010/port")
ENDIF()
add_library(${PROJECT_NAME} STATIC ${tesseract_arch_srcs} ${tesseract_arch_hdrs})
set_target_properties(${PROJECT_NAME} PROPERTIES FOLDER "tesseract")
IF(DEFINED EMGUCV_PLATFORM_TOOLSET)
set_target_properties(${PROJECT_NAME} PROPERTIES PLATFORM_TOOLSET ${EMGUCV_PLATFORM_TOOLSET})
ENDIF()
#lstm
PROJECT(tesseract_lstm)
file(GLOB tesseract_lstm_srcs "${TESSERACT_DIR}/lstm/*.cpp")
file(GLOB tesseract_lstm_hdrs "${TESSERACT_DIR}/lstm/*.h")
source_group("Src" FILES ${tesseract_lstm_srcs})
source_group("Include" FILES ${tesseract_lstm_hdrs})
add_definitions(-DUSE_STD_NAMESPACE -DHAVE_LIBLEPT)
IF(MSVC)
LIST(APPEND tesseract_lstm_srcs "${PROJECT_SOURCE_DIR}/${TESSERACT_DIR}/vs2010/port/gettimeofday.cpp")
add_definitions(-D__MSW32__ -W0)
include_directories("${TESSERACT_DIR}/vs2010/include" "${TESSERACT_DIR}/vs2010/port")
ENDIF()
include_directories("${TESSERACT_DIR}/arch")
add_library(${PROJECT_NAME} STATIC ${tesseract_lstm_srcs} ${tesseract_lstm_hdrs})
set_target_properties(${PROJECT_NAME} PROPERTIES FOLDER "tesseract")
IF(DEFINED EMGUCV_PLATFORM_TOOLSET)
set_target_properties(${PROJECT_NAME} PROPERTIES PLATFORM_TOOLSET ${EMGUCV_PLATFORM_TOOLSET})
ENDIF()
IF (NOT MSVC)
ADD_DEPENDENCIES(${PROJECT_NAME} tesseract_arch)
ENDIF()
#cube
PROJECT(tesseract_cube)
file(GLOB tesseract_cube_srcs "${TESSERACT_DIR}/cube/*.cpp")
@ -286,14 +326,14 @@ IF(MSVC)
add_definitions(-D__MSW32__ -W0)
include_directories("${TESSERACT_DIR}/vs2010/include" "${TESSERACT_DIR}/vs2010/port")
ENDIF()
include_directories("${TESSERACT_DIR}/cutil" "${TESSERACT_DIR}/ccstruct" "${TESSERACT_DIR}/ccutil" "${TESSERACT_DIR}/viewer" "${TESSERACT_DIR}/opencl" "${TESSERACT_DIR}/dict" "${TESSERACT_DIR}/classify" "${LEPTONICA_DIR}" "${TESSERACT_DIR}/neural_networks/runtime" "${TESSERACT_DIR}/wordrec" "${TESSERACT_DIR}/textord" "${TESSERACT_DIR}/cube")
include_directories("${TESSERACT_DIR}/cutil" "${TESSERACT_DIR}/ccstruct" "${TESSERACT_DIR}/ccutil" "${TESSERACT_DIR}/viewer" "${TESSERACT_DIR}/opencl" "${TESSERACT_DIR}/dict" "${TESSERACT_DIR}/classify" "${LEPTONICA_DIR}" "${TESSERACT_DIR}/neural_networks/runtime" "${TESSERACT_DIR}/wordrec" "${TESSERACT_DIR}/textord" "${TESSERACT_DIR}/cube" "${TESSERACT_DIR}/lstm")
add_library(${PROJECT_NAME} STATIC ${tesseract_ccmain_srcs} ${tesseract_ccmain_hdrs})
set_target_properties(${PROJECT_NAME} PROPERTIES FOLDER "tesseract")
IF(DEFINED EMGUCV_PLATFORM_TOOLSET)
set_target_properties(${PROJECT_NAME} PROPERTIES PLATFORM_TOOLSET ${EMGUCV_PLATFORM_TOOLSET})
ENDIF()
IF (NOT MSVC)
ADD_DEPENDENCIES(${PROJECT_NAME} tesseract_cutil tesseract_ccstruct tesseract_ccutil tesseract_viewer tesseract_opencl tesseract_dict tesseract_classify libleptonica tesseract_neural_networks tesseract_wordrec tesseract_textord tesseract_cube)
ADD_DEPENDENCIES(${PROJECT_NAME} tesseract_cutil tesseract_ccstruct tesseract_ccutil tesseract_viewer tesseract_opencl tesseract_dict tesseract_classify libleptonica tesseract_neural_networks tesseract_wordrec tesseract_textord tesseract_cube tesseract_lstm)
ENDIF()
#api

45
Emgu.CV.Extern/xphoto/xphoto_c.cpp

@ -6,16 +6,49 @@
#include "xphoto_c.h"
void cveBalanceWhite(const cv::Mat* src, cv::Mat* dst, const int algorithmType,
const float inputMin, const float inputMax,
const float outputMin, const float outputMax)
void cveWhiteBalancerBalanceWhite(cv::xphoto::WhiteBalancer* whiteBalancer, cv::_InputArray* src, cv::_OutputArray* dst)
{
cv::xphoto::balanceWhite(*src, *dst, algorithmType, inputMin, inputMax, outputMin, outputMax);
whiteBalancer->balanceWhite(*src, *dst);
}
void cveAutowbGrayworld(cv::_InputArray* src, cv::_OutputArray* dst, float thresh)
cv::xphoto::SimpleWB* cveSimpleWBCreate(cv::xphoto::WhiteBalancer** whiteBalancer)
{
cv::xphoto::autowbGrayworld(*src, *dst, thresh);
cv::Ptr<cv::xphoto::SimpleWB> ptr = cv::xphoto::createSimpleWB();
ptr.addref();
*whiteBalancer = dynamic_cast<cv::xphoto::WhiteBalancer*>(ptr.get());
return ptr.get();
}
void cveSimpleWBRelease(cv::xphoto::SimpleWB** whiteBalancer)
{
delete *whiteBalancer;
*whiteBalancer = 0;
}
cv::xphoto::GrayworldWB* cveGrayworldWBCreate(cv::xphoto::WhiteBalancer** whiteBalancer)
{
cv::Ptr<cv::xphoto::GrayworldWB> ptr = cv::xphoto::createGrayworldWB();
ptr.addref();
*whiteBalancer = dynamic_cast<cv::xphoto::WhiteBalancer*>(ptr.get());
return ptr.get();
}
void cveGrayworldWBRelease(cv::xphoto::GrayworldWB** whiteBalancer)
{
delete *whiteBalancer;
*whiteBalancer = 0;
}
cv::xphoto::LearningBasedWB* cveLearningBasedWBCreate(cv::xphoto::WhiteBalancer** whiteBalancer)
{
cv::Ptr<cv::xphoto::LearningBasedWB> ptr = cv::xphoto::createLearningBasedWB();
ptr.addref();
*whiteBalancer = dynamic_cast<cv::xphoto::WhiteBalancer*>(ptr.get());
return ptr.get();
}
void cveLearningBasedWBRelease(cv::xphoto::LearningBasedWB** whiteBalancer)
{
delete *whiteBalancer;
*whiteBalancer = 0;
}
void cveDctDenoising(const cv::Mat* src, cv::Mat* dst, const double sigma, const int psize)

16
Emgu.CV.Extern/xphoto/xphoto_c.h

@ -8,15 +8,21 @@
#ifndef EMGU_XPHOTO_C_H
#define EMGU_XPHOTO_C_H
//#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/core/core_c.h"
#include "opencv2/xphoto.hpp"
CVAPI(void) cveBalanceWhite(const cv::Mat* src, cv::Mat* dst, const int algorithmType,
const float inputMin, const float inputMax,
const float outputMin, const float outputMax);
CVAPI(void) cveWhiteBalancerBalanceWhite(cv::xphoto::WhiteBalancer* whiteBalancer, cv::_InputArray* src, cv::_OutputArray* dst);
CVAPI(cv::xphoto::SimpleWB*) cveSimpleWBCreate(cv::xphoto::WhiteBalancer** whiteBalancer);
CVAPI(void) cveSimpleWBRelease(cv::xphoto::SimpleWB** whiteBalancer);
CVAPI(cv::xphoto::GrayworldWB*) cveGrayworldWBCreate(cv::xphoto::WhiteBalancer** whiteBalancer);
CVAPI(void) cveGrayworldWBRelease(cv::xphoto::GrayworldWB** whiteBalancer);
CVAPI(cv::xphoto::LearningBasedWB*) cveLearningBasedWBCreate(cv::xphoto::WhiteBalancer** whiteBalancer);
CVAPI(void) cveLearningBasedWBRelease(cv::xphoto::LearningBasedWB** whiteBalancer);
CVAPI(void) cveAutowbGrayworld(cv::_InputArray* src, cv::_OutputArray* dst, float thresh);
CVAPI(void) cveDctDenoising(const cv::Mat* src, cv::Mat* dst, const double sigma, const int psize);

15
Emgu.CV/PInvoke/CvEnum.cs

@ -3764,21 +3764,6 @@ namespace Emgu.CV.CvEnum
UPnP = 4
}
/// <summary>
/// White balance algorithms
/// </summary>
public enum WhiteBalanceMethod
{
/// <summary>
/// Simple
/// </summary>
Simple = 0,
/// <summary>
/// Grayworld
/// </summary>
Grayworld = 1
}
/// <summary>
/// Seamless clone method
/// </summary>

2
opencv

@ -1 +1 @@
Subproject commit 6334b9dd1d37ddf8cd3a32ade92c9085e31a6c5b
Subproject commit 9834041d9ac91260a569d34b43c42c51054a9f8b

2
opencv_extra

@ -1 +1 @@
Subproject commit 8f3ed4a204dd4c55a2c3f4c73b13f26cbe24f317
Subproject commit c41d35c0be5feeb884e7da57c201461cb9877863
Loading…
Cancel
Save