
Documentation update.

pull/481/head
Canming Huang, 4 years ago
Commit 252e24dea8
10 changed files:
  1. Emgu.CV.Contrib/XImgproc/EdgeDrawing.cs (33 changes)
  2. Emgu.CV.Cuda/CudaDeviceInfo.cs (1 change)
  3. Emgu.CV.Cuda/Optflow/NvidiaOpticalFlow_1_0.cs (7 changes)
  4. Emgu.CV.Models/CascadeFaceAndEyeDetector.cs (46 changes)
  5. Emgu.CV.Models/Dnn/FaceAndLandmarkDetector.cs (1 change)
  6. Emgu.CV.Models/Dnn/SceneTextDetector.cs (2 changes)
  7. Emgu.CV.Models/Dnn/VehicleLicensePlateDetector.cs (1 change)
  8. Emgu.CV.Models/PedestrianDetector.cs (38 changes)
  9. Emgu.CV.Models/TesseractModel.cs (20 changes)
  10. Emgu.CV/PInvoke/CvInvokeCore.cs (10 changes)

Emgu.CV.Contrib/XImgproc/EdgeDrawing.cs (33 changes)

@@ -13,7 +13,9 @@ using Emgu.CV.Util;
namespace Emgu.CV.XImgproc
{
/// <summary>
/// Class implementing the ED (EdgeDrawing) algorithm, which detects edges and can then extract lines and ellipses.
/// </summary>
public class EdgeDrawing : SharedPtrObject, IAlgorithm
{
private IntPtr _algorithm;
@@ -27,6 +29,9 @@ namespace Emgu.CV.XImgproc
}
/// <summary>
/// Create a new Edge Drawing object using default parameters.
/// </summary>
public EdgeDrawing()
{
_ptr = XImgprocInvoke.cveEdgeDrawingCreate(ref _algorithm, ref _sharedPtr);
@@ -43,34 +48,54 @@ namespace Emgu.CV.XImgproc
}
}
/// <summary>
/// Detects edges and prepares them to detect lines and ellipses.
/// </summary>
/// <param name="src">Input image</param>
public void DetectEdges(IInputArray src)
{
using (InputArray iaSrc = src.GetInputArray())
XImgprocInvoke.cveEdgeDrawingDetectEdges(_ptr, iaSrc);
}
/// <summary>
/// Get the edge image
/// </summary>
/// <param name="dst">The output edge image</param>
public void GetEdgeImage(IOutputArray dst)
{
using (OutputArray oaDst = dst.GetOutputArray())
XImgprocInvoke.cveEdgeDrawingGetEdgeImage(_ptr, oaDst);
}
/// <summary>
/// Get the gradient image
/// </summary>
/// <param name="dst">The output gradient image</param>
public void GetGradientImage(IOutputArray dst)
{
using (OutputArray oaDst = dst.GetOutputArray())
XImgprocInvoke.cveEdgeDrawingGetGradientImage(_ptr, oaDst);
}
/// <summary>
/// Detects lines.
/// </summary>
/// <param name="dst">Output Vec&lt;4f&gt; containing the start point and end point of each detected line.</param>
public void DetectLines(IOutputArray dst)
{
using (OutputArray oaDst = dst.GetOutputArray())
XImgprocInvoke.cveEdgeDrawingGetGradientImage(_ptr, oaDst);
XImgprocInvoke.cveEdgeDrawingDetectLines(_ptr, oaDst);
}
public void DetectDetectEllipses(IOutputArray dst)
/// <summary>
/// Detects circles and ellipses.
/// </summary>
/// <param name="dst">Output Vec&lt;6d&gt; containing the center point and perimeter for circles.</param>
public void DetectEllipses(IOutputArray dst)
{
using (OutputArray oaDst = dst.GetOutputArray())
XImgprocInvoke.cveEdgeDrawingGetGradientImage(_ptr, oaDst);
XImgprocInvoke.cveEdgeDrawingDetectEllipses(_ptr, oaDst);
}
}
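
For context, the EdgeDrawing methods documented in this file are normally used in sequence: detect edges first, then query the edge/gradient images or run the line and ellipse detectors. A minimal usage sketch, not part of the commit itself; the image path and variable names are illustrative:

using Emgu.CV;
using Emgu.CV.CvEnum;
using Emgu.CV.XImgproc;

// Run the ED (EdgeDrawing) pipeline on a grayscale image.
using (Mat gray = CvInvoke.Imread("building.png", ImreadModes.Grayscale))
using (EdgeDrawing ed = new EdgeDrawing())
using (Mat edges = new Mat())
using (Mat lines = new Mat())
using (Mat ellipses = new Mat())
{
    ed.DetectEdges(gray);        // must run before the getters/detectors below
    ed.GetEdgeImage(edges);      // binary edge map
    ed.DetectLines(lines);       // one Vec<4f> (start and end point) per detected line
    ed.DetectEllipses(ellipses); // one Vec<6d> (center point and perimeter) per circle/ellipse
}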

Emgu.CV.Cuda/CudaDeviceInfo.cs (1 change)

@@ -263,7 +263,6 @@ namespace Emgu.CV.Cuda
/// <summary>
/// Print short cuda device info
/// </summary>
/// <param name="device"></param>
/// <param name="device">cuda device id</param>
[DllImport(CvInvoke.ExternCudaLibrary, CallingConvention = CvInvoke.CvCallingConvention, EntryPoint = "cudaPrintShortCudaDeviceInfo")]
public static extern void PrintShortCudaDeviceInfo(int device);

Emgu.CV.Cuda/Optflow/NvidiaOpticalFlow_1_0.cs (7 changes)

@@ -82,6 +82,13 @@ namespace Emgu.CV.Cuda
ref _sharedPtr);
}
/// <summary>
/// The NVIDIA optical flow hardware generates flow vectors at granularity gridSize, which can be queried via the getGridSize() function. The UpSampler() helper function converts the hardware-generated flow vectors to a dense representation (1 flow vector for each pixel) using the nearest neighbour upsampling method.
/// </summary>
/// <param name="flow">Buffer of type CV_16FC2 containing flow vectors generated by calc().</param>
/// <param name="imageSize">Size of the input image in pixels for which these flow vectors were generated.</param>
/// <param name="gridSize">Granularity of the optical flow vectors returned by calc() function.</param>
/// <param name="upsampledFlow">Buffer of type CV_32FC2, containing upsampled flow vectors, each flow vector for 1 pixel, in the pitch-linear layout.</param>
public void UpSampler(
IInputArray flow,
Size imageSize,

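As a sketch of how the documented parameters fit together (a fragment only: it assumes an initialized NvidiaOpticalFlow_1_0 instance "nvof" and a CV_16FC2 flow buffer "hwFlow" already produced by its Calc step, and the image size and grid size values shown are hypothetical):

// Convert hardware flow vectors (one per gridSize x gridSize block) into a
// dense CV_32FC2 flow field with one vector per pixel.
System.Drawing.Size imageSize = new System.Drawing.Size(640, 480); // size of the frames given to Calc
int gridSize = 4;                                                  // hypothetical; query the hardware grid size instead
using (Mat denseFlow = new Mat())                                  // receives the upsampled flow
{
    nvof.UpSampler(hwFlow, imageSize, gridSize, denseFlow);
}
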
Emgu.CV.Models/CascadeFaceAndEyeDetector.cs (46 changes)

@@ -16,14 +16,24 @@ using Emgu.Util;
namespace Emgu.CV.Models
{
/// <summary>
/// Face and eye detector using HaarCascade.
/// </summary>
public class CascadeFaceAndEyeDetector : DisposableObject, IProcessAndRenderModel
{
private CascadeClassifier _faceCascadeClassifier = null;
private CascadeClassifier _eyeCascadeClassifier = null;
public static void Detect(
IInputArray image, CascadeClassifier face, CascadeClassifier eye,
List<Rectangle> faces, List<Rectangle> eyes)
/// <summary>
/// Detect the face and eye regions from the input image
/// </summary>
/// <param name="image">The input image.</param>
/// <param name="faces">The regions of the detected faces.</param>
/// <param name="eyes">The regions of the detected eyes.</param>
public void Detect(
IInputArray image,
List<Rectangle> faces,
List<Rectangle> eyes)
{
using (Mat gray = new Mat())
{
@@ -35,7 +45,7 @@ namespace Emgu.CV.Models
//Detect the faces from the gray scale image and store the locations as rectangle
//The first dimension is the channel
//The second dimension is the index of the rectangle in the specific channel
Rectangle[] facesDetected = face.DetectMultiScale(
Rectangle[] facesDetected = _faceCascadeClassifier.DetectMultiScale(
gray,
1.1,
10,
@@ -48,7 +58,7 @@ namespace Emgu.CV.Models
//Get the region of interest on the faces
using (Mat faceRegion = new Mat(gray, f))
{
Rectangle[] eyesDetected = eye.DetectMultiScale(
Rectangle[] eyesDetected = _eyeCascadeClassifier.DetectMultiScale(
faceRegion,
1.1,
10,
@@ -63,7 +73,6 @@ namespace Emgu.CV.Models
}
}
}
}
@@ -71,6 +80,7 @@ namespace Emgu.CV.Models
/// Download and initialize the face and eye cascade classifier detection model
/// </summary>
/// <param name="onDownloadProgressChanged">Callback method invoked during download</param>
/// <param name="initOptions">Initialization options. None supported at the moment, any value passed will be ignored.</param>
/// <returns>Async task</returns>
public async Task Init(DownloadProgressChangedEventHandler onDownloadProgressChanged = null, Object initOptions = null)
{
@@ -91,13 +101,19 @@ namespace Emgu.CV.Models
}
}
/// <summary>
/// Process the input image and render into the output image
/// </summary>
/// <param name="imageIn">The input image</param>
/// <param name="imageOut">The output image. It can be the same as imageIn, in which case we will render directly into the input image</param>
/// <returns>The messages that we want to display.</returns>
public string ProcessAndRender(IInputArray imageIn, IInputOutputArray imageOut)
{
List<Rectangle> faces = new List<Rectangle>();
List<Rectangle> eyes = new List<Rectangle>();
Stopwatch watch = Stopwatch.StartNew();
Detect(imageIn, _faceCascadeClassifier, _eyeCascadeClassifier, faces, eyes);
Detect(imageIn, faces, eyes);
watch.Stop();
if (imageOut != imageIn)
@@ -120,12 +136,10 @@ namespace Emgu.CV.Models
}
/// <summary>
/// Clear and reset the model. Requires the Init function to be called again before calling ProcessAndRender.
/// </summary>
public void Clear()
{
DisposeObject();
}
protected override void DisposeObject()
{
if (_faceCascadeClassifier != null)
{
@@ -139,5 +153,13 @@ namespace Emgu.CV.Models
_eyeCascadeClassifier = null;
}
}
/// <summary>
/// Release the memory associated with this face and eye detector
/// </summary>
protected override void DisposeObject()
{
Clear();
}
}
}
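
With Detect now an instance method that uses the model's own classifiers, the typical life cycle is Init, then Detect or ProcessAndRender, then Clear. A brief sketch, not part of the commit; the file name and variable names are illustrative and an async context is assumed:

using System.Collections.Generic;
using System.Drawing;
using Emgu.CV;
using Emgu.CV.Models;

CascadeFaceAndEyeDetector detector = new CascadeFaceAndEyeDetector();
await detector.Init();                    // downloads/loads the cascade files

using (Mat image = CvInvoke.Imread("faces.jpg"))
{
    List<Rectangle> faces = new List<Rectangle>();
    List<Rectangle> eyes = new List<Rectangle>();
    detector.Detect(image, faces, eyes);  // fills the lists with detected regions

    // Or draw the detections directly into the image and get a status message:
    string msg = detector.ProcessAndRender(image, image);
}

detector.Clear();                         // Init must be called again before further use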

Emgu.CV.Models/Dnn/FaceAndLandmarkDetector.cs (1 change)

@@ -77,6 +77,7 @@ namespace Emgu.CV.Models
/// Download and initialize the DNN face and facemark detector
/// </summary>
/// <param name="onDownloadProgressChanged">Callback when download progress has been changed</param>
/// <param name="initOptions">Initialization options. None supported at the moment, any value passed will be ignored.</param>
/// <returns>Async task</returns>
public async Task Init(
DownloadProgressChangedEventHandler onDownloadProgressChanged = null,

Emgu.CV.Models/Dnn/SceneTextDetector.cs (2 changes)

@@ -45,6 +45,7 @@ namespace Emgu.CV.Models
/// Download and initialize the vehicle detector, the license plate detector and OCR.
/// </summary>
/// <param name="onDownloadProgressChanged">Callback when download progress has been changed</param>
/// <param name="initOptions">Initialization options. None supported at the moment, any value passed will be ignored.</param>
/// <returns>Async task</returns>
public async Task Init(
System.Net.DownloadProgressChangedEventHandler onDownloadProgressChanged = null,
@@ -100,7 +101,6 @@ namespace Emgu.CV.Models
}
}
private async Task InitTextRecognizer(System.Net.DownloadProgressChangedEventHandler onDownloadProgressChanged = null)
{
if (_ocr == null)

Emgu.CV.Models/Dnn/VehicleLicensePlateDetector.cs (1 change)

@@ -224,6 +224,7 @@ namespace Emgu.CV.Models
/// Download and initialize the vehicle detector, the license plate detector and OCR.
/// </summary>
/// <param name="onDownloadProgressChanged">Callback when download progress has been changed</param>
/// <param name="initOptions">Initialization options. A string in the format "backend;target" that represents the DNN backend and target.</param>
/// <returns>Async task</returns>
public async Task Init(
System.Net.DownloadProgressChangedEventHandler onDownloadProgressChanged = null,

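A short illustration of the "backend;target" option string described above. The specific backend and target names shown are assumptions rather than a documented list, so check the implementation for the accepted values; an async context is assumed:

VehicleLicensePlateDetector detector = new VehicleLicensePlateDetector();
await detector.Init(
    null,            // no download-progress callback
    "OpenCV;Cpu");   // hypothetical "backend;target" pair selecting the DNN backend and target
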
Emgu.CV.Models/PedestrianDetector.cs (38 changes)

@@ -17,6 +17,9 @@ using Emgu.Util;
namespace Emgu.CV.Models
{
/// <summary>
/// Pedestrian detector
/// </summary>
public class PedestrianDetector : DisposableObject, IProcessAndRenderModel
{
private CudaHOG _hogCuda;
@@ -26,11 +29,6 @@ namespace Emgu.CV.Models
/// Clear and reset the model. Requires the Init function to be called again before calling ProcessAndRender.
/// </summary>
public void Clear()
{
DisposeObject();
}
protected override void DisposeObject()
{
if (_hog != null)
{
@@ -45,12 +43,20 @@ namespace Emgu.CV.Models
}
}
/// <summary>
/// Release the memory associated with this pedestrian detector
/// </summary>
protected override void DisposeObject()
{
Clear();
}
/// <summary>
/// Find pedestrians in the image
/// </summary>
/// <param name="image">The image</param>
/// <returns>The regions where pedestrians are detected</returns>
public static Rectangle[] Find(IInputArray image, HOGDescriptor hog, CudaHOG hogCuda = null)
public Rectangle[] Find(IInputArray image)
{
Rectangle[] regions;
@@ -58,21 +64,21 @@ namespace Emgu.CV.Models
{
//if the input array is a GpuMat
//check if there is a compatible Cuda device to run pedestrian detection
if (iaImage.Kind == InputArray.Type.CudaGpuMat && hogCuda != null)
if (iaImage.Kind == InputArray.Type.CudaGpuMat && _hogCuda != null)
{
//this is the Cuda version
using (GpuMat cudaBgra = new GpuMat())
using (VectorOfRect vr = new VectorOfRect())
{
CudaInvoke.CvtColor(image, cudaBgra, ColorConversion.Bgr2Bgra);
hogCuda.DetectMultiScale(cudaBgra, vr);
_hogCuda.DetectMultiScale(cudaBgra, vr);
regions = vr.ToArray();
}
}
else
{
//this is the CPU/OpenCL version
MCvObjectDetection[] results = hog.DetectMultiScale(image);
MCvObjectDetection[] results = _hog.DetectMultiScale(image);
regions = new Rectangle[results.Length];
for (int i = 0; i < results.Length; i++)
regions[i] = results[i].Rect;
@@ -82,6 +88,12 @@ namespace Emgu.CV.Models
}
}
/// <summary>
/// Initialize the pedestrian detection model
/// </summary>
/// <param name="onDownloadProgressChanged">Callback method invoked during download</param>
/// <param name="initOptions">Initialization options. None supported at the moment, any value passed will be ignored.</param>
/// <returns>Async task</returns>
public async Task Init(DownloadProgressChangedEventHandler onDownloadProgressChanged = null, Object initOptions = null)
{
_hog = new HOGDescriptor();
@@ -99,10 +111,16 @@ namespace Emgu.CV.Models
}
/// <summary>
/// Process the input image and render into the output image
/// </summary>
/// <param name="imageIn">The input image</param>
/// <param name="imageOut">The output image. It can be the same as imageIn, in which case we will render directly into the input image</param>
/// <returns>The messages that we want to display.</returns>
public string ProcessAndRender(IInputArray imageIn, IInputOutputArray imageOut)
{
Stopwatch watch = Stopwatch.StartNew();
Rectangle[] pedestrians = Find(imageIn, _hog, _hogCuda);
Rectangle[] pedestrians = Find(imageIn);
watch.Stop();
if (imageOut != imageIn)

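Since Find is now an instance method that chooses between the CUDA and CPU/OpenCL HOG paths internally, a caller only needs the model itself. A minimal sketch, not part of the commit; the file name and variable names are illustrative and an async context is assumed:

using System.Drawing;
using Emgu.CV;
using Emgu.CV.Models;
using Emgu.CV.Structure;

PedestrianDetector detector = new PedestrianDetector();
await detector.Init();                            // creates the HOG descriptors (and CUDA HOG when available)

using (Mat frame = CvInvoke.Imread("street.png"))
{
    Rectangle[] people = detector.Find(frame);    // a Mat input takes the CPU/OpenCL path
    foreach (Rectangle r in people)
        CvInvoke.Rectangle(frame, r, new MCvScalar(0, 0, 255), 2);  // draw each detection
}
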
Emgu.CV.Models/TesseractModel.cs (20 changes)

@@ -20,6 +20,9 @@ using System.Diagnostics;
namespace Emgu.CV.Models
{
/// <summary>
/// Tesseract OCR model.
/// </summary>
public class TesseractModel : DisposableObject, IProcessAndRenderModel
{
private String _modelFolderName = "tessdata";
@@ -59,11 +62,21 @@ namespace Emgu.CV.Models
_ocr = null;
}
}
/// <summary>
/// Release all the unmanaged memory associated with this Tesseract OCR model.
/// </summary>
protected override void DisposeObject()
{
Clear();
}
/// <summary>
/// Process the input image and render into the output image
/// </summary>
/// <param name="imageIn">The input image</param>
/// <param name="imageOut">The output image. It can be the same as imageIn, in which case we will render directly into the input image</param>
/// <returns>The messages that we want to display.</returns>
public string ProcessAndRender(IInputArray imageIn, IInputOutputArray imageOut)
{
Stopwatch watch = Stopwatch.StartNew();
@@ -90,11 +103,16 @@ namespace Emgu.CV.Models
}
/// <summary>
/// Initialize the Tesseract OCR model
/// </summary>
/// <param name="onDownloadProgressChanged">Callback method invoked during download</param>
/// <param name="initOptions">Initialization options. None supported at the moment, any value passed will be ignored.</param>
/// <returns>Async task</returns>
public async Task Init(System.Net.DownloadProgressChangedEventHandler onDownloadProgressChanged = null, Object initOptions = null)
{
await InitTesseract("eng", OcrEngineMode.TesseractOnly, onDownloadProgressChanged);
}
}
}

Emgu.CV/PInvoke/CvInvokeCore.cs (10 changes)

@@ -3052,6 +3052,13 @@ namespace Emgu.CV
private static extern int cveGetNumberOfCPUs();
#endif
/// <summary>
/// Replace OpenCV parallel_for backend.
/// </summary>
/// <param name="backendName">The name of the backend.</param>
/// <param name="propagateNumThreads">If true, the number of threads of the current environment will be passed to the new backend.</param>
/// <returns>True if the backend is set</returns>
/// <remarks>This call is not thread-safe. Consider calling this function from main() before any other OpenCV processing functions (and before any other threads are created).</remarks>
public static bool SetParallelForBackend(String backendName, bool propagateNumThreads = true)
{
using (CvString csBackendName = new CvString(backendName))
@@ -3062,6 +3069,9 @@ namespace Emgu.CV
[return: MarshalAs(CvInvoke.BoolMarshalType)]
private static extern bool cveSetParallelForBackend(IntPtr backendName, bool propagateNumThreads);
/// <summary>
/// Get a list of the available parallel backends.
/// </summary>
public static String[] AvailableParallelBackends
{
get

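To tie the two additions together, a small sketch that lists the available parallel backends and then switches to one of them at startup, as the remarks advise. The backend name "openmp" is only an example and may not be available in every build:

using System;
using Emgu.CV;

// List the parallel_for backends discoverable by this OpenCV build.
foreach (String backend in CvInvoke.AvailableParallelBackends)
    Console.WriteLine(backend);

// Switch backend before any other OpenCV processing (the call is not thread-safe).
bool switched = CvInvoke.SetParallelForBackend("openmp", propagateNumThreads: true);
Console.WriteLine(switched ? "parallel_for backend set" : "backend not available");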