//----------------------------------------------------------------------------
// Copyright (C) 2004-2021 by EMGU Corporation. All rights reserved.
//----------------------------------------------------------------------------
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Drawing;
using System.IO;
using Emgu.CV.CvEnum;
using System.Runtime.InteropServices;
using System.Runtime.Serialization;
using Emgu.CV.Reflection;
using Emgu.CV.Structure;
using Emgu.CV.Util;
using Emgu.Util;
namespace Emgu.CV
{
///
/// An Image is a wrapper to IplImage of OpenCV.
///
/// Color type of this image (either Gray, Bgr, Bgra, Hsv, Hls, Lab, Luv, Xyz, Ycc, Rgb or Rbga)
/// Depth of this image (either Byte, SByte, Single, double, UInt16, Int16 or Int32)
[Serializable]
public partial class Image<TColor, TDepth>
: CvArray<TDepth>, IEquatable<Image<TColor, TDepth>>, IInputOutputArray
where TColor : struct, IColor
where TDepth : new()
{
private ImageDataReleaseMode _imageDataReleaseMode;
private TDepth[,,] _array;
///
/// The dimension of color
///
private static readonly int _numberOfChannels = new TColor().Dimension;
#region constructors
///
/// Create an empty Image
///
protected Image()
{
}
///
/// Create image from the specific multi-dimensional data, where the 1st dimension is # of rows (height), the 2nd dimension is # cols (width) and the 3rd dimension is the channel
///
/// The multi-dimensional data where the 1st dimension is # of rows (height), the 2nd dimension is # cols (width) and the 3rd dimension is the channel
public Image(TDepth[,,] data)
{
Data = data;
}
///
/// Create an Image from unmanaged data.
///
/// The width of the image
/// The height of the image
/// Size of aligned image row in bytes
/// Pointer to aligned image data, where each row should be 4-aligned
/// The caller is responsible for allocating and freeing the block of memory specified by the scan0 parameter, however, the memory should not be released until the related Image is released.
public Image(int width, int height, int stride, IntPtr scan0)
{
MapDataToImage(width, height, stride, scan0);
}
///
/// Let this Image object use the specific Image data.
///
/// The width of the image
/// The height of the image
/// The data stride (bytes per row)
/// The origin of the data
protected void MapDataToImage(int width, int height, int stride, IntPtr scan0)
{
_ptr = CvInvoke.cvCreateImageHeader(new Size(width, height), CvDepth, NumberOfChannels);
_imageDataReleaseMode = ImageDataReleaseMode.ReleaseHeaderOnly;
GC.AddMemoryPressure(StructSize.MIplImage);
MIplImage iplImage = MIplImage;
iplImage.ImageData = scan0;
iplImage.WidthStep = stride;
Marshal.StructureToPtr(iplImage, _ptr, false);
}
///
/// Allocate the image from the image header.
///
/// This should be only a header to the image. When the image is disposed, the cvReleaseImageHeader will be called on the pointer.
internal Image(IntPtr ptr)
{
_ptr = ptr;
}
///
/// Read image from a file
///
/// the name of the file that contains the image
public Image(String fileName)
{
if (!File.Exists(fileName))
throw new ArgumentException(String.Format("File {0} does not exist", fileName));
try
{
using (Mat m = CvInvoke.Imread(fileName, CvEnum.ImreadModes.AnyColor | CvEnum.ImreadModes.AnyDepth))
{
if (m.IsEmpty)
throw new NullReferenceException(String.Format("Unable to load image from file \"{0}\".", fileName));
LoadImageFromMat(m);
}
}
catch (TypeInitializationException)
{
//possibly an Exception in CvInvoke's static constructor; rethrow, preserving the stack trace.
throw;
}
catch (Exception e)
{
throw new ArgumentException(String.Format("Unable to decode file: {0}", fileName), e);
}
}
///
/// Create a blank Image of the specified width, height and color.
///
/// The width of the image
/// The height of the image
/// The initial color of the image
public Image(int width, int height, TColor value)
: this(width, height)
{
//int n1 = MIplImage.nSize;
SetValue(value);
//int n2 = MIplImage.nSize;
//int nDiff = n2 - n1;
}
///
/// Create a blank Image of the specified width and height.
///
/// The width of the image
/// The height of the image
public Image(int width, int height)
{
AllocateData(height, width, NumberOfChannels);
}
///
/// Create a blank Image of the specific size
///
/// The size of the image
public Image(Size size)
: this(size.Width, size.Height)
{
}
///
/// Get or Set the data for this matrix. The Get function has O(1) complexity. The Set function makes a copy of the data
///
///
/// If the image contains Byte and width is not a multiple of 4, the second dimension of the array might be larger than the Width of this image.
/// This is necessary since the length of a row needs to be 4-aligned for OpenCV optimization.
/// The Set function always makes a copy of the specified value. If the image contains Byte and width is not a multiple of 4, the second dimension of the array created might be larger than the Width of this image.
///
public TDepth[,,] Data
{
get
{
return _array;
}
set
{
Debug.Assert(value != null, "Data cannot be set to null");
Debug.Assert(value.GetLength(2) == NumberOfChannels, "The number of channels must match");
AllocateData(value.GetLength(0), value.GetLength(1), NumberOfChannels);
int rows = value.GetLength(0);
int valueRowLength = value.GetLength(1) * value.GetLength(2);
int arrayRowLength = _array.GetLength(1) * _array.GetLength(2);
for (int i = 0; i < rows; i++)
Array.Copy(value, i * valueRowLength, _array, i * arrayRowLength, valueRowLength);
}
}
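// Example (a minimal sketch): create an image from a managed array and read a pixel back
// through Data. The 300x400 size and the channel values are arbitrary.
//
// Byte[,,] raw = new Byte[300, 400, 3]; // rows x cols x channels (Bgr)
// Image<Bgr, Byte> img = new Image<Bgr, Byte>(raw);
// Byte blue = img.Data[0, 0, 0]; // channel 0 of a Bgr image is blue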
///
/// Re-allocate data for the array
///
/// The number of rows
/// The number of columns
/// The number of channels of this image
protected override void AllocateData(int rows, int cols, int numberOfChannels)
{
DisposeObject();
Debug.Assert(!_dataHandle.IsAllocated, "Handle should be free");
_ptr = CvInvoke.cvCreateImageHeader(new Size(cols, rows), CvDepth, numberOfChannels);
_imageDataReleaseMode = ImageDataReleaseMode.ReleaseHeaderOnly;
GC.AddMemoryPressure(StructSize.MIplImage);
Debug.Assert(MIplImage.Align == 4, "Only 4 align is supported at this moment");
if (typeof(TDepth) == typeof(Byte) && (cols & 3) != 0 && (numberOfChannels & 3) != 0)
{ //if the managed data isn't 4 aligned, make it so
_array = new TDepth[rows, (cols & (~3)) + 4, numberOfChannels];
}
else
{
_array = new TDepth[rows, cols, numberOfChannels];
}
_dataHandle = GCHandle.Alloc(_array, GCHandleType.Pinned);
//int n1 = MIplImage.nSize;
CvInvoke.cvSetData(_ptr, _dataHandle.AddrOfPinnedObject(), _array.GetLength(1) * _array.GetLength(2) * SizeOfElement);
//int n2 = MIplImage.nSize;
//int nDiff = n2 - n1;
}
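// Example of the 4-alignment rule above (a sketch): a 3-channel Byte image that is 317 pixels
// wide gets a managed array padded to (317 & ~3) + 4 = 320 columns, so each row of Data
// occupies 320 * 3 = 960 bytes (a multiple of 4) while Width stays 317.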
///
/// Create a multi-channel image from multiple gray scale images
///
/// The image channels to be merged into a single image
public Image(Image<Gray, TDepth>[] channels)
{
Debug.Assert(NumberOfChannels == channels.Length);
AllocateData(channels[0].Height, channels[0].Width, NumberOfChannels);
if (NumberOfChannels == 1)
{
//if this image only has a single channel
CvInvoke.cvCopy(channels[0].Ptr, Ptr, IntPtr.Zero);
}
else
{
using (VectorOfMat mv = new VectorOfMat())
{
for (int i = 0; i < channels.Length; i++)
{
mv.Push(channels[i].Mat);
}
CvInvoke.Merge(mv, this);
}
}
}
#endregion
#region Implement ISerializable interface
///
/// Constructor used to deserialize runtime serialized object
///
/// The serialization info
/// The streaming context
public Image(SerializationInfo info, StreamingContext context)
{
DeserializeObjectData(info, context);
ROI = (Rectangle)info.GetValue("Roi", typeof(Rectangle));
}
///
/// A function used for runtime serialization of the object
///
/// Serialization info
/// streaming context
public override void GetObjectData(SerializationInfo info, StreamingContext context)
{
if (IsROISet)
{
Rectangle roi = ROI;
ROI = Rectangle.Empty;
base.GetObjectData(info, context);
ROI = roi;
info.AddValue("Roi", roi);
}
else
{
base.GetObjectData(info, context);
info.AddValue("Roi", ROI);
}
}
#endregion
#region Image Properties
///
/// The IplImage structure
///
public MIplImage MIplImage
{
get
{
return (MIplImage)Marshal.PtrToStructure(Ptr, typeof(MIplImage));
}
}
///
/// Get or Set the region of interest for this image. To clear the ROI, set it to System.Drawing.Rectangle.Empty
///
public Rectangle ROI
{
set
{
if (value.Equals(Rectangle.Empty))
{
//reset the image ROI
CvInvoke.cvResetImageROI(Ptr);
}
else
{ //set the image ROI to the specific value
CvInvoke.cvSetImageROI(Ptr, value);
}
if (_cvMat != null)
{
_cvMat.Dispose();
_cvMat = null;
}
_cvMat = CvInvoke.CvArrToMat(Ptr);
}
get
{
//return the image ROI
return CvInvoke.cvGetImageROI(Ptr);
}
}
///
/// Get the number of channels for this image
///
public override int NumberOfChannels
{
get
{
return _numberOfChannels;
}
}
///
/// Get the underneath managed array
///
public override Array ManagedArray
{
get { return _array; }
set
{
TDepth[,,] data = value as TDepth[,,];
if (data == null)
throw new InvalidCastException(String.Format("Cannot convert ManagedArray to type of {0}[,,].", typeof(TDepth).ToString()));
Data = data;
}
}
///
/// Get the equivalent opencv depth type for this image
///
public static CvEnum.IplDepth CvDepth
{
get
{
Type typeOfDepth = typeof(TDepth);
if (typeOfDepth == typeof(Single))
return CvEnum.IplDepth.IplDepth32F;
else if (typeOfDepth == typeof(Byte))
return CvEnum.IplDepth.IplDepth_8U;
else if (typeOfDepth == typeof(Double))
return CvEnum.IplDepth.IplDepth64F;
else if (typeOfDepth == typeof(SByte))
return Emgu.CV.CvEnum.IplDepth.IplDepth_8S;
else if (typeOfDepth == typeof(UInt16))
return Emgu.CV.CvEnum.IplDepth.IplDepth16U;
else if (typeOfDepth == typeof(Int16))
return Emgu.CV.CvEnum.IplDepth.IplDepth16S;
else if (typeOfDepth == typeof(Int32))
return Emgu.CV.CvEnum.IplDepth.IplDepth32S;
else
throw new NotImplementedException("Unsupported image depth");
}
}
///
/// Indicates if the region of interest has been set
///
public bool IsROISet
{
get
{
return Marshal.ReadIntPtr(Ptr, ImageConstants.RoiOffset) != IntPtr.Zero;
}
}
///
/// Get the average value on this image
///
/// The average color of the image
public TColor GetAverage()
{
return GetAverage(null);
}
///
/// Get the average value on this image, using the specific mask
///
/// The mask for finding the average value
/// The average color of the masked area
public TColor GetAverage(Image<Gray, Byte> mask)
{
TColor res = new TColor();
res.MCvScalar = CvInvoke.Mean(this, mask);
return res;
}
/// Get the sum for each color channel
/// The sum for each color channel
public TColor GetSum()
{
TColor res = new TColor();
res.MCvScalar = CvInvoke.Sum(this);
return res;
}
#endregion
#region Coping and Filling
///
/// Set every pixel of the image to the specific color
///
/// The color to be set
public void SetValue(TColor color)
{
SetValue(color.MCvScalar);
}
///
/// Set every pixel of the image to the specific color, using a mask
///
/// The color to be set
/// The mask for setting color
public void SetValue(TColor color, Image<Gray, Byte> mask)
{
SetValue(color.MCvScalar, mask);
}
///
/// Copy the masked area of this image to destination
///
/// the destination to copy to
/// the mask for the copy operation
public void Copy(Image<TColor, TDepth> dest, Image<Gray, Byte> mask)
{
CvInvoke.cvCopy(Ptr, dest.Ptr, mask == null ? IntPtr.Zero : mask.Ptr);
}
///
/// Make a copy of the image using a mask, if ROI is set, only copy the ROI
///
/// the mask for copying
/// A copy of the image
public Image<TColor, TDepth> Copy(Image<Gray, Byte> mask)
{
Image<TColor, TDepth> res = new Image<TColor, TDepth>(Size);
Copy(res, mask);
return res;
}
///
/// Make a copy of the specific ROI (Region of Interest) from the image
///
/// The roi to be copied
/// The region of interest
public Image<TColor, TDepth> Copy(Rectangle roi)
{
/*
Rectangle currentRoi = ROI; //cache the current roi
Image<TColor, TDepth> res = new Image<TColor, TDepth>(roi.Size);
ROI = roi;
CvInvoke.cvCopy(Ptr, res.Ptr, IntPtr.Zero);
ROI = currentRoi; //reset the roi
return res;*/
using (Image<TColor, TDepth> subrect = GetSubRect(roi))
{
return subrect.Copy();
}
}
///
/// Get a copy of the boxed region of the image
///
/// The boxed region of the image
/// A copy of the boxed region of the image
public Image<TColor, TDepth> Copy(RotatedRect box)
{
PointF[] srcCorners = box.GetVertices();
PointF[] destCorners = new PointF[] {
new PointF(0, box.Size.Height - 1),
new PointF(0, 0),
new PointF(box.Size.Width - 1, 0),
new PointF(box.Size.Width - 1, box.Size.Height - 1)};
using (Mat rot = CvInvoke.GetAffineTransform(srcCorners, destCorners))
{
Image<TColor, TDepth> res = new Image<TColor, TDepth>((int)box.Size.Width, (int)box.Size.Height);
CvInvoke.WarpAffine(this, res, rot, res.Size);
return res;
}
}
/// Make a copy of the image, if ROI is set, only copy the ROI
/// A copy of the image
public Image<TColor, TDepth> Copy()
{
return Copy(null);
}
///
/// Create an image of the same size
///
/// The initial pixel in the image equals zero
/// The image of the same size
public Image<TColor, TDepth> CopyBlank()
{
return new Image<TColor, TDepth>(Size);
}
///
/// Make a clone of the current image. All image data as well as the COI and ROI are cloned
///
/// A clone of the current image. All image data as well as the COI and ROI are cloned
public Image<TColor, TDepth> Clone()
{
int coi = CvInvoke.cvGetImageCOI(Ptr); //get the COI for current image
Rectangle roi = ROI; //get the ROI for current image
CvInvoke.cvSetImageCOI(Ptr, 0); //clear COI for current image
ROI = Rectangle.Empty; // clear ROI for current image
#region create a clone of the current image with the same COI and ROI
Image<TColor, TDepth> res = Copy();
CvInvoke.cvSetImageCOI(res.Ptr, coi);
res.ROI = roi;
#endregion
CvInvoke.cvSetImageCOI(Ptr, coi); //reset the COI for the current image
ROI = roi; // reset the ROI for the current image
return res;
}
///
/// Get a subimage which image data is shared with the current image.
///
/// The rectangle area of the sub-image
/// A subimage which image data is shared with the current image
public Image<TColor, TDepth> GetSubRect(Rectangle rect)
{
Image<TColor, TDepth> subRect = new Image<TColor, TDepth>();
subRect._array = _array;
GC.AddMemoryPressure(StructSize.MIplImage); //This pressure will be released once the result image is disposed.
subRect._ptr = CvInvoke.cvGetImageSubRect(_ptr, ref rect);
return subRect;
}
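// Example (a minimal sketch): work on a shared 100x100 sub-rectangle without copying.
// The coordinates are arbitrary; disposing the sub-image does not free the parent's data.
//
// using (Image<Bgr, Byte> img = new Image<Bgr, Byte>(640, 480))
// using (Image<Bgr, Byte> patch = img.GetSubRect(new Rectangle(10, 10, 100, 100)))
// {
//     patch.SetValue(new Bgr(0, 0, 255)); // writes through to img
// }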
#endregion
#region Drawing functions
/// Draw a Rectangle of the specific color and thickness
/// The rectangle to be drawn
/// The color of the rectangle
/// If thickness is less than 1, the rectangle is filled up
/// Line type
/// Number of fractional bits in the center coordinates and radius value
public virtual void Draw(Rectangle rect, TColor color, int thickness = 1, CvEnum.LineType lineType = CvEnum.LineType.EightConnected, int shift = 0)
{
CvInvoke.Rectangle(this, rect, color.MCvScalar, thickness, lineType, shift);
}
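// Example (a minimal sketch): draw a filled red 50x30 box; all values are arbitrary.
//
// using (Image<Bgr, Byte> canvas = new Image<Bgr, Byte>(200, 200, new Bgr(255, 255, 255)))
// {
//     canvas.Draw(new Rectangle(20, 20, 50, 30), new Bgr(0, 0, 255), -1); // thickness < 1 fills
// }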
/// Draw a 2D Cross using the specific color and thickness
/// The 2D Cross to be drawn
/// The color of the cross
/// Must be > 0
public void Draw(Cross2DF cross, TColor color, int thickness)
{
Debug.Assert(thickness > 0, "Thickness should be > 0");
if (thickness > 0)
{
Draw(cross.Horizontal, color, thickness);
Draw(cross.Vertical, color, thickness);
}
}
/// Draw a line segment using the specific color and thickness
/// The line segment to be drawn
/// The color of the line segment
/// The thickness of the line segment
/// Line type
/// Number of fractional bits in the center coordinates and radius value
public virtual void Draw(LineSegment2DF line, TColor color, int thickness, CvEnum.LineType lineType = CvEnum.LineType.EightConnected, int shift = 0)
{
Debug.Assert(thickness > 0, "Thickness should be > 0");
if (thickness > 0)
CvInvoke.Line(
this,
Point.Round(line.P1),
Point.Round(line.P2),
color.MCvScalar,
thickness,
lineType,
shift);
}
/// Draw a line segment using the specific color and thickness
/// The line segment to be drawn
/// The color of the line segment
/// The thickness of the line segment
/// Line type
/// Number of fractional bits in the center coordinates and radius value
public virtual void Draw(LineSegment2D line, TColor color, int thickness, CvEnum.LineType lineType = CvEnum.LineType.EightConnected, int shift = 0)
{
Debug.Assert(thickness > 0, "Thickness should be > 0");
if (thickness > 0)
CvInvoke.Line(
this,
line.P1,
line.P2,
color.MCvScalar,
thickness,
lineType,
shift);
}
/// Draw a convex polygon using the specific color and thickness
/// The convex polygon to be drawn
/// The color of the triangle
/// If thickness is less than 1, the triangle is filled up
public virtual void Draw(IConvexPolygonF polygon, TColor color, int thickness)
{
PointF[] polygonVertices = polygon.GetVertices();
Point[] vertices = new Point[polygonVertices.Length];
for (int i = 0; i < polygonVertices.Length; i++)
vertices[i] = Point.Round(polygonVertices[i]);
if (thickness > 0)
DrawPolyline(vertices, true, color, thickness);
else
{
FillConvexPoly(vertices, color);
}
}
///
/// Fill the convex polygon with the specific color
///
/// The array of points that define the convex polygon
/// The color to fill the polygon with
/// Line type
/// Number of fractional bits in the center coordinates and radius value
public void FillConvexPoly(Point[] pts, TColor color, Emgu.CV.CvEnum.LineType lineType = CvEnum.LineType.EightConnected, int shift = 0)
{
using (VectorOfPoint vp = new VectorOfPoint(pts))
CvInvoke.FillConvexPoly(this, vp, color.MCvScalar, lineType, shift);
}
///
/// Draw the polyline defined by the array of 2D points
///
/// A polyline defined by its point
/// if true, the last line segment is defined by the last point of the array and the first point of the array
/// the color used for drawing
/// the thickness of the line
/// Line type
/// Number of fractional bits in the center coordinates and radius value
public void DrawPolyline(Point[] pts, bool isClosed, TColor color, int thickness = 1, CvEnum.LineType lineType = CvEnum.LineType.EightConnected, int shift = 0)
{
DrawPolyline(new Point[][] { pts }, isClosed, color, thickness, lineType, shift);
}
///
/// Draw the polylines defined by the array of array of 2D points
///
/// An array of polylines each represented by an array of points
/// if true, the last line segment is defined by the last point of the array and the first point of the array
/// the color used for drawing
/// the thickness of the line
/// Line type
/// Number of fractional bits in the center coordinates and radius value
public void DrawPolyline(Point[][] pts, bool isClosed, TColor color, int thickness = 1, CvEnum.LineType lineType = CvEnum.LineType.EightConnected, int shift = 0)
{
if (thickness > 0)
{
using (VectorOfVectorOfPoint vvp = new VectorOfVectorOfPoint(pts))
{
CvInvoke.Polylines(this, vvp, isClosed, color.MCvScalar, thickness, lineType, shift);
}
}
}
/// Draw a Circle of the specific color and thickness
/// The circle to be drawn
/// The color of the circle
/// If thickness is less than 1, the circle is filled up
/// Line type
/// Number of fractional bits in the center coordinates and radius value
public virtual void Draw(CircleF circle, TColor color, int thickness = 1, CvEnum.LineType lineType = CvEnum.LineType.EightConnected, int shift = 0)
{
CvInvoke.Circle(
this,
Point.Round(circle.Center),
(int)circle.Radius,
color.MCvScalar,
(thickness <= 0) ? -1 : thickness,
lineType,
shift);
}
/// Draw an Ellipse of the specific color and thickness
/// The ellipse to be drawn
/// The color of the ellipse
/// If thickness is less than 1, the ellipse is filled up
/// Line type
/// Number of fractional bits in the center coordinates and radius value
public void Draw(Ellipse ellipse, TColor color, int thickness = 1, CvEnum.LineType lineType = CvEnum.LineType.EightConnected, int shift = 0)
{
CvInvoke.Ellipse(this, ellipse.RotatedRect, color.MCvScalar, thickness, lineType, shift);
}
///
/// Draw the text using the specific font on the image
///
/// The text message to be drawn
/// Font type.
/// Font scale factor that is multiplied by the font-specific base size.
/// The location of the bottom left corner of the font
/// The color of the text
/// Thickness of the lines used to draw a text.
/// Line type
/// When true, the image data origin is at the bottom-left corner. Otherwise, it is at the top-left corner.
public virtual void Draw(String message, Point bottomLeft, CvEnum.FontFace fontFace, double fontScale, TColor color, int thickness = 1, CvEnum.LineType lineType = CvEnum.LineType.EightConnected, bool bottomLeftOrigin = false)
{
CvInvoke.PutText(this,
message,
bottomLeft,
fontFace,
fontScale,
color.MCvScalar,
thickness,
lineType,
bottomLeftOrigin);
}
///
/// Draws contour outlines in the image if thickness>=0 or fills area bounded by the contours if thickness<0
///
/// All the input contours. Each contour is stored as a point vector.
/// Parameter indicating a contour to draw. If it is negative, all the contours are drawn.
/// Color of the contours
/// Maximal level for drawn contours. If 0, only contour is drawn. If 1, the contour and all contours after it on the same level are drawn. If 2, all contours after and all contours one level below the contours are drawn, etc. If the value is negative, the function does not draw the contours following after contour but draws child contours of contour up to abs(maxLevel)-1 level.
/// Thickness of lines the contours are drawn with. If it is negative the contour interiors are drawn
/// Type of the contour segments
/// Optional information about hierarchy. It is only needed if you want to draw only some of the contours
/// Shift all the point coordinates by the specified value. It is useful in case if the contours retrieved in some image ROI and then the ROI offset needs to be taken into account during the rendering.
public void Draw(
IInputArrayOfArrays contours,
int contourIdx,
TColor color,
int thickness = 1,
CvEnum.LineType lineType = CvEnum.LineType.EightConnected,
IInputArray hierarchy = null,
int maxLevel = int.MaxValue,
Point offset = new Point())
{
CvInvoke.DrawContours(
this,
contours,
contourIdx,
color.MCvScalar,
thickness,
lineType,
hierarchy,
maxLevel,
offset);
}
///
/// Draws contour outlines in the image if thickness>=0 or fills area bounded by the contours if thickness<0
///
/// The input contour stored as a point vector.
/// Color of the contours
/// Thickness of lines the contours are drawn with. If it is negative the contour interiors are drawn
/// Type of the contour segments
/// Shift all the point coordinates by the specified value. It is useful in case if the contours retrieved in some image ROI and then the ROI offset needs to be taken into account during the rendering.
public void Draw(
Point[] contours,
TColor color,
int thickness = 1,
CvEnum.LineType lineType = CvEnum.LineType.EightConnected,
Point offset = new Point())
{
using (VectorOfPoint vp = new VectorOfPoint(contours))
using (VectorOfVectorOfPoint vvp = new VectorOfVectorOfPoint())
{
vvp.Push(vp);
Draw(vvp, 0, color, thickness, lineType, null, int.MaxValue);
}
}
#endregion
#region Hough line and circles
///
/// Apply Probabilistic Hough transform to find line segments.
/// The current image must be a binary image (e.g. the edges as a result of the Canny edge detector)
///
/// Distance resolution in pixel-related units.
/// Angle resolution measured in radians
/// A line is returned by the function if the corresponding accumulator value is greater than threshold
/// Minimum width of a line
/// Maximum allowed gap between segments lying on the same line to treat them as a single line
/// The line segments detected for each of the channels
public LineSegment2D[][] HoughLinesBinary(double rhoResolution, double thetaResolution, int threshold, double minLineWidth, double gapBetweenLines)
{
return this.ForEachDuplicateChannel(
delegate (IInputArray img, int channel)
{
//use the per-channel image supplied by the delegate rather than the (possibly multi-channel) source
return CvInvoke.HoughLinesP(img, rhoResolution, thetaResolution, threshold, minLineWidth, gapBetweenLines);
});
}
///
/// Apply Canny Edge Detector follows by Probabilistic Hough transform to find line segments in the image
///
/// The threshold to find initial segments of strong edges
/// The threshold used for edge Linking
/// Distance resolution in pixel-related units.
/// Angle resolution measured in radians
/// A line is returned by the function if the corresponding accumulator value is greater than threshold
/// Minimum width of a line
/// Maximum allowed gap between segments lying on the same line to treat them as a single line
/// The line segments detected for each of the channels
public LineSegment2D[][] HoughLines(double cannyThreshold, double cannyThresholdLinking, double rhoResolution, double thetaResolution, int threshold, double minLineWidth, double gapBetweenLines)
{
using (Image<Gray, Byte> canny = Canny(cannyThreshold, cannyThresholdLinking))
{
return canny.HoughLinesBinary(
rhoResolution,
thetaResolution,
threshold,
minLineWidth,
gapBetweenLines);
}
}
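// Example (a minimal sketch): detect line segments on a gray image. The thresholds below
// are arbitrary starting points, not recommended values; the file name is hypothetical.
//
// using (Image<Gray, Byte> gray = new Image<Gray, Byte>("building.jpg"))
// {
//     LineSegment2D[] lines = gray.HoughLines(120, 60, 1, Math.PI / 180, 50, 30, 10)[0];
// }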
///
/// First apply Canny Edge Detector on the current image,
/// then apply Hough transform to find circles
///
/// The higher threshold of the two passed to Canny edge detector (the lower one will be twice smaller).
/// Accumulator threshold at the center detection stage. The smaller it is, the more false circles may be detected. Circles, corresponding to the larger accumulator values, will be returned first
/// Resolution of the accumulator used to detect centers of the circles. For example, if it is 1, the accumulator will have the same resolution as the input image, if it is 2 - accumulator will have twice smaller width and height, etc
/// Minimal radius of the circles to search for
/// Maximal radius of the circles to search for
/// Minimum distance between centers of the detected circles. If the parameter is too small, multiple neighbor circles may be falsely detected in addition to a true one. If it is too large, some circles may be missed
/// The circle detected for each of the channels
public CircleF[][] HoughCircles(TColor cannyThreshold, TColor accumulatorThreshold, double dp, double minDist, int minRadius = 0, int maxRadius = 0)
{
double[] cannyThresh = cannyThreshold.MCvScalar.ToArray();
double[] accumulatorThresh = accumulatorThreshold.MCvScalar.ToArray();
return this.ForEachDuplicateChannel(
delegate (IInputArray img, int channel)
{
return CvInvoke.HoughCircles(img, CvEnum.HoughModes.Gradient, dp, minDist, cannyThresh[channel], accumulatorThresh[channel], minRadius, maxRadius);
});
}
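// Example (a minimal sketch): find circles on a gray image; parameter values are arbitrary
// and the file name is hypothetical.
//
// using (Image<Gray, Byte> gray = new Image<Gray, Byte>("coins.png"))
// {
//     CircleF[] circles = gray.HoughCircles(new Gray(180), new Gray(50), 2.0, 20.0, 5, 50)[0];
// }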
#endregion
#region Indexer
///
/// Get or Set the specific channel of the current image.
/// For Get operation, a copy of the specific channel is returned.
/// For Set operation, the specific channel is copied to this image.
///
/// The channel to get from the current image, zero based index
/// The specific channel of the current image
public Image<Gray, TDepth> this[int channel]
{
get
{
Image<Gray, TDepth> imageChannel = new Image<Gray, TDepth>(Size);
CvInvoke.MixChannels(this, imageChannel, new int[] { channel, 0 });
return imageChannel;
}
set
{
CvInvoke.MixChannels(value, this, new int[] { 0, channel });
}
}
///
/// Get or Set the color in the specified row (y direction) and column (x direction)
///
/// The zero-based row (y direction) of the pixel
/// The zero-based column (x direction) of the pixel
/// The color at the specified row and column
public TColor this[int row, int column]
{
get
{
TColor res = new TColor();
res.MCvScalar = CvInvoke.cvGet2D(Ptr, row, column);
return res;
}
set
{
CvInvoke.cvSet2D(Ptr, row, column, value.MCvScalar);
}
}
///
/// Get or Set the color at the specified location
///
/// the location of the pixel
/// the color at the specified location
public TColor this[Point location]
{
get
{
return this[location.Y, location.X];
}
set
{
this[location.Y, location.X] = value;
}
}
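// Example (a minimal sketch): per-pixel access through the indexers is convenient but slow;
// prefer the Data array for bulk work. Coordinates and colors below are arbitrary.
//
// using (Image<Bgr, Byte> img = new Image<Bgr, Byte>(100, 100))
// {
//     img[10, 20] = new Bgr(255, 0, 0);   // row 10, column 20
//     Bgr c = img[new Point(20, 10)];     // same pixel, addressed by (x, y)
// }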
#endregion
#region utilities
///
/// Return parameters based on ROI
///
/// The pointer to the IplImage
/// The address of the start of the image bytes, taking the ROI into account
/// The number of elements in a row: ROI.Width * ColorType.Dimension
/// The number of bytes in a row, taking the ROI into account
/// The number of rows, taking the ROI into account
/// The width step required to jump to the next row
protected static void RoiParam(IntPtr ptr, out Int64 start, out int rows, out int elementCount, out int byteWidth, out int widthStep)
{
MIplImage ipl = (MIplImage)Marshal.PtrToStructure(ptr, typeof(MIplImage));
start = ipl.ImageData.ToInt64();
widthStep = ipl.WidthStep;
if (ipl.Roi != IntPtr.Zero)
{
Rectangle rec = CvInvoke.cvGetImageROI(ptr);
elementCount = rec.Width * ipl.NChannels;
byteWidth = ((int)ipl.Depth >> 3) * elementCount;
start += rec.Y * widthStep
+ ((int)ipl.Depth >> 3) * rec.X;
rows = rec.Height;
}
else
{
byteWidth = widthStep;
elementCount = ipl.Width * ipl.NChannels;
rows = ipl.Height;
}
}
///
/// Apply convertor and compute result for each channel of the image.
///
///
/// For a single channel image, apply the converter directly.
/// For a multiple channel image, set the COI for the specific channel before applying the converter
///
/// The return type
/// The converter such that accept the IntPtr of a single channel IplImage, and image channel index which returning result of type R
/// An array which contains result for each channel
private TResult[] ForEachChannel<TResult>(Func<IntPtr, int, TResult> conv)
{
TResult[] res = new TResult[NumberOfChannels];
if (NumberOfChannels == 1)
res[0] = conv(Ptr, 0);
else
{
for (int i = 0; i < NumberOfChannels; i++)
{
CvInvoke.cvSetImageCOI(Ptr, i + 1);
res[i] = conv(Ptr, i);
}
CvInvoke.cvSetImageCOI(Ptr, 0);
}
return res;
}
///
/// If the image has only one channel, apply the action directly on this image and the destination,
/// otherwise, copy each channel of this image to a temporary one, apply the action on it and another temporary image, and copy the resulting image back to the destination
///
/// The type of the depth of the destination image
/// The function which accepts the source IInputArray, the destination IOutputArray and the index of the channel as input
/// The destination image
private void ForEachDuplicateChannel<TOtherDepth>(Action<IInputArray, IOutputArray, int> act, Image<TColor, TOtherDepth> dest)
where TOtherDepth : new()
{
if (NumberOfChannels == 1)
act(this, dest, 0);
else
{
using (Mat tmp1 = new Mat())
using (Mat tmp2 = new Mat())
{
for (int i = 0; i < NumberOfChannels; i++)
{
CvInvoke.ExtractChannel(this, tmp1, i);
act(tmp1, tmp2, i);
CvInvoke.InsertChannel(tmp2, dest, i);
}
}
}
}
#endregion
#region Gradient, Edges and Features
///
/// Calculates the image derivative by convolving the image with the appropriate kernel
/// The Sobel operators combine Gaussian smoothing and differentiation so the result is more or less robust to the noise. Most often, the function is called with (xorder=1, yorder=0, aperture_size=3) or (xorder=0, yorder=1, aperture_size=3) to calculate first x- or y- image derivative.
///
/// Order of the derivative x
/// Order of the derivative y
/// Size of the extended Sobel kernel, must be 1, 3, 5 or 7. In all cases except 1, an aperture_size x aperture_size separable kernel will be used to calculate the derivative.
/// The result of the sobel edge detector
[ExposableMethod(Exposable = true, Category = "Gradients, Edges")]
public Image<TColor, Single> Sobel(int xorder, int yorder, int apertureSize)
{
Image<TColor, Single> res = new Image<TColor, Single>(Size);
CvInvoke.Sobel(this, res, CvInvoke.GetDepthType(typeof(Single)), xorder, yorder, apertureSize, 1.0, 0.0, CvEnum.BorderType.Default);
return res;
}
///
/// Calculates Laplacian of the source image by summing second x- and y- derivatives calculated using Sobel operator.
/// Specifying aperture_size=1 gives the fastest variant that is equal to convolving the image with the following kernel:
///
/// |0 1 0|
/// |1 -4 1|
/// |0 1 0|
///
/// Aperture size
/// The Laplacian of the image
[ExposableMethod(Exposable = true, Category = "Gradients, Edges")]
public Image<TColor, Single> Laplace(int apertureSize)
{
Image<TColor, Single> res = new Image<TColor, Single>(Size);
CvInvoke.Laplacian(this, res, CvInvoke.GetDepthType(typeof(Single)), apertureSize, 1.0, 0.0, CvEnum.BorderType.Default);
return res;
}
/// Find the edges on this image and mark them in the returned image.
/// The threshold to find initial segments of strong edges
/// The threshold used for edge linking
/// The edges found by the Canny edge detector
[ExposableMethod(Exposable = true, Category = "Gradients, Edges")]
public Image<Gray, Byte> Canny(double thresh, double threshLinking)
{
return Canny(thresh, threshLinking, 3, false);
}
/// Find the edges on this image and mark them in the returned image.
/// The threshold to find initial segments of strong edges
/// The threshold used for edge linking
/// The aperture size, use 3 for default
/// a flag, indicating whether a more accurate norm should be used to calculate the image gradient magnitude ( L2gradient=true ), or whether the default norm is enough ( L2gradient=false ).
/// The edges found by the Canny edge detector
public Image<Gray, Byte> Canny(double thresh, double threshLinking, int apertureSize, bool l2Gradient)
{
Image<Gray, Byte> res = new Image<Gray, Byte>(Size);
CvInvoke.Canny(this, res, thresh, threshLinking, apertureSize, l2Gradient);
return res;
}
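// Example (a minimal sketch): Canny edge map of a gray image; thresholds are arbitrary and
// the file names are hypothetical.
//
// using (Image<Gray, Byte> gray = new Image<Gray, Byte>("scene.jpg"))
// using (Image<Gray, Byte> edges = gray.Canny(100, 50))
// {
//     edges.Save("edges.png");
// }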
///
/// Iterates to find the sub-pixel accurate location of corners, or radial saddle points
///
/// Coordinates of the input corners, the values will be modified by this function call
/// Half sizes of the search window. For example, if win=(5,5) then 5*2+1 x 5*2+1 = 11 x 11 search window is used
/// Half size of the dead region in the middle of the search zone over which the summation in formulae below is not done. It is used sometimes to avoid possible singularities of the autocorrelation matrix. The value of (-1,-1) indicates that there is no such size
/// Criteria for termination of the iterative process of corner refinement. That is, the process of corner position refinement stops either after certain number of iteration or when a required accuracy is achieved. The criteria may specify either of or both the maximum number of iteration and the required accuracy
/// Refined corner coordinates
public void FindCornerSubPix(
PointF[][] corners,
Size win,
Size zeroZone,
MCvTermCriteria criteria)
{
this.ForEachDuplicateChannel(delegate (IInputArray img, int channel)
{
using (VectorOfPointF vec = new VectorOfPointF())
{
vec.Push(corners[channel]);
CvInvoke.CornerSubPix(
img,
vec,
win,
zeroZone,
criteria);
Array.Copy(vec.ToArray(), corners[channel], corners[channel].Length);
}
});
}
#endregion
#region Matching
///
/// The function slides through image, compares overlapped patches of size wxh with templ using the specified method and return the comparison results
///
/// Searched template; must be not greater than the source image and the same data type as the image
/// Specifies the way the template must be compared with image regions
/// The comparison result: width = this.Width - template.Width + 1; height = this.Height - template.Height + 1
public Image<Gray, Single> MatchTemplate(Image<TColor, TDepth> template, CvEnum.TemplateMatchingType method)
{
Image<Gray, Single> res = new Image<Gray, Single>(Width - template.Width + 1, Height - template.Height + 1);
CvInvoke.MatchTemplate(this, template, res, method);
return res;
}
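// Example (a minimal sketch): locate the best match of a template; file names are hypothetical.
//
// using (Image<Bgr, Byte> scene = new Image<Bgr, Byte>("scene.jpg"))
// using (Image<Bgr, Byte> templ = new Image<Bgr, Byte>("patch.jpg"))
// using (Image<Gray, Single> scores = scene.MatchTemplate(templ, CvEnum.TemplateMatchingType.CcoeffNormed))
// {
//     double[] minVal, maxVal;
//     Point[] minLoc, maxLoc;
//     scores.MinMax(out minVal, out maxVal, out minLoc, out maxLoc);
//     Point best = maxLoc[0]; // top-left corner of the best match
// }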
#endregion
#region Logic
#region And Methods
/// Perform an elementwise AND operation with another image and return the result
/// The second image for the AND operation
/// The result of the AND operation
public Image<TColor, TDepth> And(Image<TColor, TDepth> img2)
{
Image<TColor, TDepth> res = new Image<TColor, TDepth>(Size);
CvInvoke.BitwiseAnd(this, img2, res, null);
return res;
}
///
/// Perform an elementwise AND operation with another image, using a mask, and return the result
///
/// The second image for the AND operation
/// The mask for the AND operation
/// The result of the AND operation
public Image<TColor, TDepth> And(Image<TColor, TDepth> img2, Image<Gray, Byte> mask)
{
Image<TColor, TDepth> res = new Image<TColor, TDepth>(Size);
CvInvoke.BitwiseAnd(this, img2, res, mask);
return res;
}
/// Perform a binary AND operation with some color
/// The color for the AND operation
/// The result of the AND operation
public Image<TColor, TDepth> And(TColor val)
{
return And(val, null);
}
/// Perform a binary AND operation with some color using a mask
/// The color for the AND operation
/// The mask for the AND operation
/// The result of the AND operation
public Image<TColor, TDepth> And(TColor val, Image<Gray, Byte> mask)
{
Image<TColor, TDepth> res = new Image<TColor, TDepth>(Size);
using (ScalarArray ia = new ScalarArray(val.MCvScalar))
{
CvInvoke.BitwiseAnd(this, ia, res, mask);
}
return res;
}
#endregion
#region Or Methods
/// Perform an elementwise OR operation with another image and return the result
/// The second image for the OR operation
/// The result of the OR operation
public Image<TColor, TDepth> Or(Image<TColor, TDepth> img2)
{
return Or(img2, null);
}
/// Perform an elementwise OR operation with another image, using a mask, and return the result
/// The second image for the OR operation
/// The mask for the OR operation
/// The result of the OR operation
public Image<TColor, TDepth> Or(Image<TColor, TDepth> img2, Image<Gray, Byte> mask)
{
Image<TColor, TDepth> res = CopyBlank();
CvInvoke.BitwiseOr(this, img2, res, mask);
return res;
}
/// Perform an elementwise OR operation with some color
/// The value for the OR operation
/// The result of the OR operation
[ExposableMethod(Exposable = true, Category = "Logic")]
public Image<TColor, TDepth> Or(TColor val)
{
return Or(val, null);
}
/// Perform an elementwise OR operation with some color using a mask
/// The color for the OR operation
/// The mask for the OR operation
/// The result of the OR operation
public Image<TColor, TDepth> Or(TColor val, Image<Gray, Byte> mask)
{
Image<TColor, TDepth> res = CopyBlank();
using (ScalarArray ia = new ScalarArray(val.MCvScalar))
{
CvInvoke.BitwiseOr(this, ia, res, mask);
}
return res;
}
#endregion
#region Xor Methods
/// Perform an elementwise XOR operation with another image and return the result
/// The second image for the XOR operation
/// The result of the XOR operation
public Image<TColor, TDepth> Xor(Image<TColor, TDepth> img2)
{
return Xor(img2, null);
}
///
/// Perform an elementwise XOR operation with another image, using a mask, and return the result
///
/// The second image for the XOR operation
/// The mask for the XOR operation
/// The result of the XOR operation
public Image<TColor, TDepth> Xor(Image<TColor, TDepth> img2, Image<Gray, Byte> mask)
{
Image<TColor, TDepth> res = CopyBlank();
CvInvoke.BitwiseXor(this, img2, res, mask);
return res;
}
///
/// Perform a binary XOR operation with some color
///
/// The value for the XOR operation
/// The result of the XOR operation
[ExposableMethod(Exposable = true, Category = "Logic")]
public Image<TColor, TDepth> Xor(TColor val)
{
return Xor(val, null);
}
///
/// Perform a binary XOR operation with some color using a mask
///
/// The color for the XOR operation
/// The mask for the XOR operation
/// The result of the XOR operation
public Image<TColor, TDepth> Xor(TColor val, Image<Gray, Byte> mask)
{
Image<TColor, TDepth> res = CopyBlank();
using (ScalarArray ia = new ScalarArray(val.MCvScalar))
{
CvInvoke.BitwiseXor(this, ia, res, mask);
}
return res;
}
#endregion
///
/// Compute the complement image
///
/// The complement image
public Image<TColor, TDepth> Not()
{
Image<TColor, TDepth> res = CopyBlank();
CvInvoke.BitwiseNot(this, res, null);
return res;
}
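// Example (a minimal sketch): combine masks with the bitwise helpers; values are arbitrary.
//
// using (Image<Gray, Byte> a = new Image<Gray, Byte>(64, 64, new Gray(255)))
// using (Image<Gray, Byte> b = a.Not())             // all zeros
// using (Image<Gray, Byte> c = a.Xor(b))            // all 255
// using (Image<Gray, Byte> d = a.And(new Gray(15))) // 255 & 15 == 15
// { }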
#endregion
#region Comparison
/// Find the elementwise maximum value
/// The second image for the Max operation
/// An image where each pixel is the maximum of this image and the parameter image
public Image<TColor, TDepth> Max(Image<TColor, TDepth> img2)
{
Image<TColor, TDepth> res = CopyBlank();
CvInvoke.Max(this, img2, res);
return res;
}
/// Find the elementwise maximum value
/// The value to compare with
/// An image where each pixel is the maximum of this image and the given value
public Image<TColor, TDepth> Max(double value)
{
Image<TColor, TDepth> res = CopyBlank();
using (ScalarArray ia = new ScalarArray(value))
{
CvInvoke.Max(this, ia, res);
}
return res;
}
/// Find the elementwise minimum value
/// The second image for the Min operation
/// An image where each pixel is the minimum of this image and the parameter image
public Image<TColor, TDepth> Min(Image<TColor, TDepth> img2)
{
Image<TColor, TDepth> res = CopyBlank();
CvInvoke.Min(this, img2, res);
return res;
}
/// Find the elementwise minimum value
/// The value to compare with
/// An image where each pixel is the minimum of this image and the given value
public Image<TColor, TDepth> Min(double value)
{
Image<TColor, TDepth> res = CopyBlank();
using (ScalarArray ia = new ScalarArray(value))
{
CvInvoke.Min(this, ia, res);
}
return res;
}
/// Checks that image elements lie between two scalars
/// The inclusive lower limit of color value
/// The inclusive upper limit of color value
/// res[i,j] = 255 if lower <= this[i,j] <= higher, 0 otherwise
[ExposableMethod(Exposable = true, Category = "Logic")]
public Image<Gray, Byte> InRange(TColor lower, TColor higher)
{
Image<Gray, Byte> res = new Image<Gray, Byte>(Size);
using (ScalarArray ialower = new ScalarArray(lower.MCvScalar))
using (ScalarArray iaupper = new ScalarArray(higher.MCvScalar))
CvInvoke.InRange(this, ialower, iaupper, res);
return res;
}
/// Checks that image elements lie between values defined by two images of same size and type
/// The inclusive lower limit of color value
/// The inclusive upper limit of color value
/// res[i,j] = 255 if lower[i,j] <= this[i,j] <= higher[i,j], 0 otherwise
public Image<Gray, Byte> InRange(Image<TColor, TDepth> lower, Image<TColor, TDepth> higher)
{
Image<Gray, Byte> res = new Image<Gray, Byte>(Size);
CvInvoke.InRange(this, lower, higher, res);
return res;
}
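// Example (a minimal sketch): threshold a Bgr image to a mask of "mostly red" pixels.
// The channel bounds are arbitrary and the file name is hypothetical.
//
// using (Image<Bgr, Byte> img = new Image<Bgr, Byte>("fruit.jpg"))
// using (Image<Gray, Byte> mask = img.InRange(new Bgr(0, 0, 128), new Bgr(80, 80, 255)))
// { }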
///
/// Compare the current image with img2 and returns the comparison mask
///
/// The other image to compare with
/// The comparison type
/// The result of the comparison as a mask
public Image<TColor, Byte> Cmp(Image<TColor, TDepth> img2, CvEnum.CmpType cmpType)
{
Size size = Size;
Image<TColor, Byte> res = new Image<TColor, Byte>(size);
if (NumberOfChannels == 1)
{
CvInvoke.Compare(this, img2, res, cmpType);
}
else
{
using (Image<Gray, TDepth> src1 = new Image<Gray, TDepth>(size))
using (Image<Gray, TDepth> src2 = new Image<Gray, TDepth>(size))
using (Image<Gray, Byte> dest = new Image<Gray, Byte>(size))
for (int i = 0; i < NumberOfChannels; i++)
{
CvInvoke.cvSetImageCOI(Ptr, i + 1);
CvInvoke.cvSetImageCOI(img2.Ptr, i + 1);
CvInvoke.cvCopy(Ptr, src1.Ptr, IntPtr.Zero);
CvInvoke.cvCopy(img2.Ptr, src2.Ptr, IntPtr.Zero);
CvInvoke.Compare(src1, src2, dest, cmpType);
CvInvoke.cvSetImageCOI(res.Ptr, i + 1);
CvInvoke.cvCopy(dest.Ptr, res.Ptr, IntPtr.Zero);
}
CvInvoke.cvSetImageCOI(Ptr, 0);
CvInvoke.cvSetImageCOI(img2.Ptr, 0);
CvInvoke.cvSetImageCOI(res.Ptr, 0);
}
return res;
}
///
/// Compare the current image with value and returns the comparison mask
///
/// The value to compare with
/// The comparison type
/// The result of the comparison as a mask
[ExposableMethod(Exposable = true, Category = "Logic")]
public Image<TColor, Byte> Cmp(double value, CvEnum.CmpType comparisonType)
{
Size size = Size;
Image<TColor, Byte> res = new Image<TColor, Byte>(size);
using (ScalarArray ia = new ScalarArray(value))
{
if (NumberOfChannels == 1)
{
CvInvoke.Compare(this, ia, res, comparisonType);
}
else
{
this.ForEachDuplicateChannel(
delegate (IInputArray img1, IOutputArray img2, int channel)
{
CvInvoke.Compare(img1, ia, img2, comparisonType);
},
res);
}
}
return res;
}
///
/// Compare two images, returns true if all of the pixels are equal, false otherwise
///
/// The other image to compare with
/// true if all of the pixels of the two images are equal, false otherwise
public bool Equals(Image<TColor, TDepth> img2)
{
//true if the references are equal
if (Object.ReferenceEquals(this, img2)) return true;
//false if sizes are not equal
if (!Size.Equals(img2.Size)) return false;
using (Image<TColor, TDepth> neqMask = new Image<TColor, TDepth>(Size))
{
CvInvoke.BitwiseXor(this, img2, neqMask, null);
if (NumberOfChannels == 1)
return CvInvoke.CountNonZero(neqMask) == 0;
else
{
IntPtr singleChannel = Marshal.AllocHGlobal(StructSize.MCvMat);
try
{
CvInvoke.cvReshape(neqMask, singleChannel, 1, 0);
using (Mat m = CvInvoke.CvArrToMat(singleChannel))
{
return CvInvoke.CountNonZero(m) == 0;
}
}
finally
{
Marshal.FreeHGlobal(singleChannel);
}
}
}
}
#endregion
#region Segmentation
///
/// Use grabcut to perform background foreground segmentation.
///
/// The initial rectangle region for the foreground
/// The number of iterations to run GrabCut
/// The background foreground mask where 2 indicates background and 3 indicates foreground
public Image<Gray, Byte> GrabCut(Rectangle rect, int iteration)
{
Image<Gray, Byte> mask = new Image<Gray, Byte>(Size);
using (Matrix<Double> bgdModel = new Matrix<Double>(1, 13 * 5))
using (Matrix<Double> fgdModel = new Matrix<Double>(1, 13 * 5))
{
CvInvoke.GrabCut(this, mask, rect, bgdModel, fgdModel, 0, Emgu.CV.CvEnum.GrabcutInitType.InitWithRect);
CvInvoke.GrabCut(this, mask, rect, bgdModel, fgdModel, iteration, Emgu.CV.CvEnum.GrabcutInitType.Eval);
}
return mask;
}
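// Example (a minimal sketch): segment the foreground inside a user-chosen rectangle, then
// keep only pixels GrabCut marked as (probable) foreground. In the standard OpenCV mask
// encoding, values 1 and 3 are foreground, so testing the lowest bit selects them.
//
// using (Image<Bgr, Byte> img = new Image<Bgr, Byte>("portrait.jpg")) // hypothetical file
// using (Image<Gray, Byte> mask = img.GrabCut(new Rectangle(50, 50, 200, 300), 2))
// using (Image<Gray, Byte> fg = mask.And(new Gray(1)).Mul(255)) // {0,2} -> 0, {1,3} -> 255
// { }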
#endregion
#region Arithmetic
#region Subtraction methods
/// Elementwise subtract another image from the current image
/// The second image to be subtracted from the current image
/// The result of elementwise subtracting img2 from the current image
public Image<TColor, TDepth> Sub(Image<TColor, TDepth> img2)
{
Image<TColor, TDepth> res = CopyBlank();
CvInvoke.Subtract(this, img2, res, null, CvInvoke.GetDepthType(typeof(TDepth)));
return res;
}
/// Elementwise subtract another image from the current image, using a mask
/// The image to be subtracted from the current image
/// The mask for the subtract operation
/// The result of elementwise subtracting img2 from the current image, using the specific mask
public Image<TColor, TDepth> Sub(Image<TColor, TDepth> img2, Image<Gray, Byte> mask)
{
Image<TColor, TDepth> res = CopyBlank();
CvInvoke.Subtract(this, img2, res, mask, CvInvoke.GetDepthType(typeof(TDepth)));
return res;
}
/// Elementwise subtract a color from the current image
/// The color value to be subtracted from the current image
/// The result of elementwise subtracting color 'val' from the current image
[ExposableMethod(Exposable = true, Category = "Math")]
public Image<TColor, TDepth> Sub(TColor val)
{
Image<TColor, TDepth> res = CopyBlank();
using (ScalarArray ia = new ScalarArray(val.MCvScalar))
{
CvInvoke.Subtract(this, ia, res, null, CvInvoke.GetDepthType(typeof(TDepth)));
}
return res;
}
///
/// result = val - this
///
/// the value from which this image is subtracted
/// val - this
[ExposableMethod(Exposable = true, Category = "Math")]
public Image<TColor, TDepth> SubR(TColor val)
{
return SubR(val, null);
}
///
/// result = val - this, using a mask
///
/// The value from which this image is subtracted
/// The mask for subtraction
/// val - this, with mask
public Image<TColor, TDepth> SubR(TColor val, Image<Gray, Byte> mask)
{
Image<TColor, TDepth> res = CopyBlank();
using (ScalarArray ia = new ScalarArray(val.MCvScalar))
{
CvInvoke.Subtract(ia, this, res, mask, CvInvoke.GetDepthType(typeof(TDepth)));
}
return res;
}
#endregion
#region Addition methods
/// Elementwise add another image with the current image
/// The image to be added to the current image
/// The result of elementwise adding img2 to the current image
public Image<TColor, TDepth> Add(Image<TColor, TDepth> img2)
{
return Add(img2, null);
}
/// Elementwise add with the current image, using a mask
/// The image to be added to the current image
/// The mask for the add operation
/// The result of elementwise adding img2 to the current image, using the specific mask
public Image<TColor, TDepth> Add(Image<TColor, TDepth> img2, Image<Gray, Byte> mask)
{
Image<TColor, TDepth> res = CopyBlank();
CvInvoke.Add(this, img2, res, mask, CvInvoke.GetDepthType(typeof(TDepth)));
return res;
}
/// Elementwise add a color to the current image
/// The color value to be added to the current image
/// The result of elementwise adding color from the current image
[ExposableMethod(Exposable = true, Category = "Math")]
public Image<TColor, TDepth> Add(TColor val)
{
Image<TColor, TDepth> res = CopyBlank();
using (ScalarArray ia = new ScalarArray(val.MCvScalar))
{
CvInvoke.Add(this, ia, res, null, CvInvoke.GetDepthType(typeof(TDepth)));
}
return res;
}
#endregion
#region Multiplication methods
/// Elementwise multiply another image with the current image and the given scale
/// The image to be elementwise multiplied to the current image
/// The scale to be multiplied
/// this .* img2 * scale
public Image<TColor, TDepth> Mul(Image<TColor, TDepth> img2, double scale)
{
Image<TColor, TDepth> res = CopyBlank();
CvInvoke.Multiply(this, img2, res, scale, CvInvoke.GetDepthType(typeof(TDepth)));
return res;
}
/// Elementwise multiply with the current image
/// The image to be elementwise multiplied to the current image
/// this .* img2
public Image<TColor, TDepth> Mul(Image<TColor, TDepth> img2)
{
return Mul(img2, 1.0);
}
/// Elementwise multiply the current image with the given scale
/// The scale to be multiplied
/// The scaled image
[ExposableMethod(Exposable = true, Category = "Math")]
public Image<TColor, TDepth> Mul(double scale)
{
Image<TColor, TDepth> res = CopyBlank();
CvInvoke.cvConvertScale(Ptr, res.Ptr, scale, 0.0);
return res;
}
#endregion
///
/// Accumulate to the current image using the specific mask
///
/// The image to be added to the current image
/// the mask
public void Accumulate(Image<TColor, TDepth> img2, Image<Gray, Byte> mask)
{
CvInvoke.Accumulate(img2, this, mask);
}
///
/// Accumulate to the current image using the specific mask
///
/// The image to be added to the current image
public void Accumulate(Image<TColor, TDepth> img2)
{
CvInvoke.Accumulate(img2, this, null);
}
///
/// Return the weighted sum such that: res = this * alpha + img2 * beta + gamma
///
/// img2 in: res = this * alpha + img2 * beta + gamma
/// alpha in: res = this * alpha + img2 * beta + gamma
/// beta in: res = this * alpha + img2 * beta + gamma
/// gamma in: res = this * alpha + img2 * beta + gamma
/// this * alpha + img2 * beta + gamma
public Image<TColor, TDepth> AddWeighted(Image<TColor, TDepth> img2, double alpha, double beta, double gamma)
{
Image<TColor, TDepth> res = CopyBlank();
CvInvoke.AddWeighted(this, alpha, img2, beta, gamma, res, CvInvoke.GetDepthType(typeof(TDepth)));
return res;
}
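// Example (a minimal sketch): 70/30 cross-fade of two equally sized images; the file names
// are hypothetical.
//
// using (Image<Bgr, Byte> a = new Image<Bgr, Byte>("a.jpg"))
// using (Image<Bgr, Byte> b = new Image<Bgr, Byte>("b.jpg"))
// using (Image<Bgr, Byte> blend = a.AddWeighted(b, 0.7, 0.3, 0))
// { }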
///
/// Update Running Average. this = (1-alpha)*this + alpha*img
///
/// Input image, 1- or 3-channel, Byte or Single (each channel of multi-channel image is processed independently).
/// the weight of the input image
public void AccumulateWeighted(Image<TColor, TDepth> img, double alpha)
{
AccumulateWeighted(img, alpha, null);
}
///
/// Update Running Average. this = (1-alpha)*this + alpha*img, using the mask
///
/// Input image, 1- or 3-channel, Byte or Single (each channel of multi-channel image is processed independently).
/// The weight of the input image
/// The mask for the running average
public void AccumulateWeighted(Image<TColor, TDepth> img, double alpha, Image<Gray, Byte> mask)
{
CvInvoke.AccumulateWeighted(img, this, alpha, mask);
}
///
/// Computes the absolute difference between this image and the other image
///
/// The other image to compute the absolute difference with
/// The image that contains the absolute difference values
public Image<TColor, TDepth> AbsDiff(Image<TColor, TDepth> img2)
{
Image<TColor, TDepth> res = CopyBlank();
CvInvoke.AbsDiff(this, img2, res);
return res;
}
///
/// Computes the absolute difference between this image and the specific color
///
/// The color to compute the absolute difference with
/// The image that contains the absolute difference values
[ExposableMethod(Exposable = true, Category = "Math")]
public Image<TColor, TDepth> AbsDiff(TColor color)
{
Image<TColor, TDepth> res = new Image<TColor, TDepth>(Size);
using (ScalarArray ia = new ScalarArray(color.MCvScalar))
{
CvInvoke.AbsDiff(this, ia, res);
}
return res;
}
#endregion
#region Math Functions
///
/// Raises every element of input array to p
/// dst(I)=src(I)^p, if p is integer
/// dst(I)=abs(src(I))^p, otherwise
///
/// The exponent of power
/// The power image
[ExposableMethod(Exposable = true, Category = "Math")]
public Image<TColor, TDepth> Pow(double power)
{
Image<TColor, TDepth> res = CopyBlank();
CvInvoke.Pow(this, power, res);
return res;
}
///
/// Calculates exponent of every element of input array:
/// dst(I)=exp(src(I))
///
/// Maximum relative error is ~7e-6. Currently, the function converts denormalized values to zeros on output.
/// The exponent image
[ExposableMethod(Exposable = true, Category = "Math")]
public Image<TColor, TDepth> Exp()
{
Image<TColor, TDepth> res = CopyBlank();
CvInvoke.Exp(this, res);
return res;
}
///
/// Calculates natural logarithm of absolute value of every element of input array
///
/// Natural logarithm of absolute value of every element of input array
[ExposableMethod(Exposable = true, Category = "Math")]
public Image<TColor, TDepth> Log()
{
Image<TColor, TDepth> res = CopyBlank();
CvInvoke.Log(this, res);
return res;
}
#endregion
#region Sampling, Interpolation and Geometrical Transforms
/*
/// Sample the pixel values on the specific line segment
/// The line to obtain samples
///The values on the (Eight-connected) line
public TDepth[,] Sample(LineSegment2D line)
{
return Sample(line, Emgu.CV.CvEnum.Connectivity.EightConnected);
}
///
/// Sample the pixel values on the specific line segment
///
/// The line to obtain samples
/// The sampling type
/// The values on the line, the first dimension is the index of the point, the second dimension is the index of color channel
public TDepth[,] Sample(LineSegment2D line, CvEnum.Connectivity type)
{
int size = type == Emgu.CV.CvEnum.Connectivity.EightConnected ?
Math.Max(Math.Abs(line.P2.X - line.P1.X), Math.Abs(line.P2.Y - line.P1.Y))
: Math.Abs(line.P2.X - line.P1.X) + Math.Abs(line.P2.Y - line.P1.Y);
TDepth[,] data = new TDepth[size, NumberOfChannels];
GCHandle handle = GCHandle.Alloc(data, GCHandleType.Pinned);
Point p1 = line.P1;
Point p2 = line.P2;
CvInvoke.cvSampleLine(
Ptr,
ref p1,
ref p2,
handle.AddrOfPinnedObject(),
type);
handle.Free();
return data;
}*/
///
/// Scale the image to the specific size
///
/// The width of the returned image.
/// The height of the returned image.
/// The type of interpolation
/// The resized image
[ExposableMethod(Exposable = true)]
public Image<TColor, TDepth> Resize(int width, int height, CvEnum.Inter interpolationType)
{
Image<TColor, TDepth> imgScale = new Image<TColor, TDepth>(width, height);
CvInvoke.Resize(this, imgScale, new Size(width, height), 0, 0, interpolationType);
return imgScale;
}
///
/// Scale the image to the specific size
///
/// The width of the returned image.
/// The height of the returned image.
/// The type of interpolation
/// if true, the scale is preserved and the resulting image has the maximum width (height) possible that is <= width (height); if false, this function is equivalent to Resize(int width, int height)
/// The resized image
public Image<TColor, TDepth> Resize(int width, int height, CvEnum.Inter interpolationType, bool preserveScale)
{
return preserveScale ?
Resize(Math.Min((double)width / Width, (double)height / Height), interpolationType)
: Resize(width, height, interpolationType);
}
///
/// Scale the image to the specific size: width *= scale; height *= scale
///
/// The scale to resize
/// The type of interpolation
/// The scaled image
[ExposableMethod(Exposable = true)]
public Image<TColor, TDepth> Resize(double scale, CvEnum.Inter interpolationType)
{
return Resize(
(int)(Width * scale),
(int)(Height * scale),
interpolationType);
}
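// Example (a minimal sketch): downscale by half, then fit into a 640x480 box keeping the
// aspect ratio. Sizes are arbitrary and the file name is hypothetical.
//
// using (Image<Bgr, Byte> img = new Image<Bgr, Byte>("large.jpg"))
// using (Image<Bgr, Byte> half = img.Resize(0.5, CvEnum.Inter.Linear))
// using (Image<Bgr, Byte> fit = img.Resize(640, 480, CvEnum.Inter.Area, true))
// { }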
///
/// Rotate the image the specified angle cropping the result to the original size
///
/// The angle of rotation in degrees.
/// The color with which to fill the background
/// The image rotates by the specific angle
public Image<TColor, TDepth> Rotate(double angle, TColor background)
{
return Rotate(angle, background, true);
}
///
/// Transforms source image using the specified matrix
///
/// 2x3 transformation matrix
/// Interpolation type
/// Warp type
/// Pixel extrapolation method
/// A value used to fill outliers
/// The result of the transformation
public Image<TColor, TDepth> WarpAffine(Mat mapMatrix, CvEnum.Inter interpolationType, CvEnum.Warp warpType, CvEnum.BorderType borderMode, TColor backgroundColor)
{
return WarpAffine(mapMatrix, Width, Height, interpolationType, warpType, borderMode, backgroundColor);
}
///
/// Transforms source image using the specified matrix
///
/// 2x3 transformation matrix
/// The width of the resulting image
/// the height of the resulting image
/// Interpolation type
/// Warp type
/// Pixel extrapolation method
/// A value used to fill outliers
/// The result of the transformation
public Image<TColor, TDepth> WarpAffine(Mat mapMatrix, int width, int height, CvEnum.Inter interpolationType, CvEnum.Warp warpType, CvEnum.BorderType borderMode, TColor backgroundColor)
{
Image<TColor, TDepth> res = new Image<TColor, TDepth>(width, height);
CvInvoke.WarpAffine(this, res, mapMatrix, res.Size, interpolationType, warpType, borderMode, backgroundColor.MCvScalar);
return res;
}
///
/// Transforms source image using the specified matrix
///
/// 3x3 transformation matrix
/// Interpolation type
/// Warp type
/// Pixel extrapolation method
/// A value used to fill outliers
/// The depth type of the map matrix, should be either float or double
/// The result of the transformation
public Image<TColor, TDepth> WarpPerspective<TMapDepth>(Matrix<TMapDepth> mapMatrix, CvEnum.Inter interpolationType, CvEnum.Warp warpType, CvEnum.BorderType borderMode, TColor backgroundColor)
where TMapDepth : new()
{
return WarpPerspective(mapMatrix, Width, Height, interpolationType, warpType, borderMode, backgroundColor);
}
///
/// Transforms source image using the specified matrix
///
/// 3x3 transformation matrix
/// The width of the resulting image
/// the height of the resulting image
/// Interpolation type
/// Warp type
/// Border type
/// A value used to fill outliers
/// The depth type of the map matrix, should be either float or double
/// The result of the transformation
public Image<TColor, TDepth> WarpPerspective<TMapDepth>(
Matrix<TMapDepth> mapMatrix,
int width, int height,
CvEnum.Inter interpolationType,
CvEnum.Warp warpType,
CvEnum.BorderType borderType,
TColor backgroundColor)
where TMapDepth : new()
{
Image<TColor, TDepth> res = new Image<TColor, TDepth>(width, height);
CvInvoke.WarpPerspective(this, res, mapMatrix, res.Size, interpolationType, warpType, borderType, backgroundColor.MCvScalar);
return res;
}
///
/// Rotate this image the specified angle
///
/// The angle of rotation in degrees.
/// The color with which to fill the background
/// If set to true the image is cropped to its original size, possibly losing corner information. If set to false the result image has a different size than the original and all rotation information is preserved
/// The rotated image
[ExposableMethod(Exposable = true, Category = "Transform")]
public Image<TColor, TDepth> Rotate(double angle, TColor background, bool crop)
{
Size size = Size;
PointF center = new PointF(size.Width * 0.5f, size.Height * 0.5f);
return Rotate(angle, center, CvEnum.Inter.Cubic, background, crop);
}
///
/// Rotate this image the specified angle
///
/// The angle of rotation in degrees. Positive means clockwise.
/// The color with which to fill the background
/// If set to true the image is cropped to its original size, possibly losing corner information. If set to false the result image has a different size than the original and all rotation information is preserved
/// The center of rotation
/// The interpolation method
/// The rotated image
public Image<TColor, TDepth> Rotate(double angle, PointF center, CvEnum.Inter interpolationMethod, TColor background, bool crop)
{
if (crop)
{
using (Mat rotationMatrix = new Mat())
{
CvInvoke.GetRotationMatrix2D(center, -angle, 1, rotationMatrix);
return WarpAffine(rotationMatrix, interpolationMethod, Emgu.CV.CvEnum.Warp.FillOutliers,
CvEnum.BorderType.Constant, background);
}
}
else
{
Size dstImgSize;
using (Mat rotationMatrix = RotationMatrix2D.CreateRotationMatrix(center, -angle, Size, out dstImgSize))
{
return WarpAffine(rotationMatrix, dstImgSize.Width, dstImgSize.Height, interpolationMethod, Emgu.CV.CvEnum.Warp.FillOutliers, CvEnum.BorderType.Constant, background);
}
}
}
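// Usage sketch (illustrative only): rotate 45 degrees without cropping, so the
// result grows to hold the whole rotated frame. "src" is an assumed
// Image<Bgr, byte>.
//
// using (Image<Bgr, byte> rotated = src.Rotate(45, new Bgr(255, 255, 255), false))
// {
//     // rotated.Size is larger than src.Size; the new corners are filled white
// }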
/// <summary>
/// Convert the image to log polar, simulating the human foveal vision
/// </summary>
/// <param name="center">The transformation center, where the output precision is maximal</param>
/// <param name="magnitude">Magnitude scale parameter</param>
/// <param name="interpolationType">Interpolation type</param>
/// <param name="warpType">Warp type</param>
/// <returns>The converted image</returns>
[ExposableMethod(Exposable = true, Category = "Transform")]
public Image<TColor, TDepth> LogPolar(
PointF center,
double magnitude,
CvEnum.Inter interpolationType = CvEnum.Inter.Linear,
CvEnum.Warp warpType = CvEnum.Warp.FillOutliers)
{
Image<TColor, TDepth> imgPolar = CopyBlank();
CvInvoke.LogPolar(this, imgPolar, center, magnitude, interpolationType, warpType);
return imgPolar;
}
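// Usage sketch (illustrative only): log-polar view around the image center.
// The magnitude of 40 is just an assumed starting value to tune per image;
// "gray" is an assumed Image<Gray, byte>.
//
// Image<Gray, byte> polar = gray.LogPolar(
//     new PointF(gray.Width * 0.5f, gray.Height * 0.5f), 40);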
#endregion
#region Image color and depth conversion
/// <summary>Convert the current image to the specified color and depth</summary>
/// <typeparam name="TOtherColor">The type of color to be converted to</typeparam>
/// <typeparam name="TOtherDepth">The type of pixel depth to be converted to</typeparam>
/// <returns>Image of the specified color and depth</returns>
[ExposableMethod(
Exposable = true,
Category = "Conversion",
GenericParametersOptions = new Type[] {
typeof(Bgr), typeof(Bgra), typeof(Gray), typeof(Hsv), typeof(Hls), typeof(Lab), typeof(Luv), typeof(Xyz), typeof(Ycc),
typeof(Single), typeof(Byte), typeof(Double)},
GenericParametersOptionSizes = new int[] { 9, 3 }
)]
public Image<TOtherColor, TOtherDepth> Convert<TOtherColor, TOtherDepth>()
where TOtherColor : struct, IColor
where TOtherDepth : new()
{
Image<TOtherColor, TOtherDepth> res = new Image<TOtherColor, TOtherDepth>(Size);
res.ConvertFrom(this);
return res;
}
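// Usage sketch (illustrative only): color and depth conversion in one call.
// "bgr" is an assumed Image<Bgr, byte>.
//
// Image<Gray, float> grayFloat = bgr.Convert<Gray, float>();
// Image<Hsv, byte> hsv = bgr.Convert<Hsv, byte>();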
/// <summary>
/// Convert the source image to the current image. If the sizes are different, the current image will be a resized version of the srcImage.
/// </summary>
/// <typeparam name="TSrcColor">The color type of the source image</typeparam>
/// <typeparam name="TSrcDepth">The color depth of the source image</typeparam>
/// <param name="srcImage">The source image</param>
public void ConvertFrom<TSrcColor, TSrcDepth>(Image<TSrcColor, TSrcDepth> srcImage)
where TSrcColor : struct, IColor
where TSrcDepth : new()
{
if (!Size.Equals(srcImage.Size))
{ //the size of the source image does not match the size of the current image: resize first
using (Image<TSrcColor, TSrcDepth> tmp = new Image<TSrcColor, TSrcDepth>(this.Size))
{
CvInvoke.Resize(srcImage, tmp, this.Size);
ConvertFrom(tmp);
return;
}
}
if (typeof(TColor) == typeof(TSrcColor))
{
#region same color
if (typeof(TDepth) == typeof(TSrcDepth))
{ //same depth
srcImage.Mat.CopyTo(this);
}
else
{ //different depth
if (typeof(TDepth) == typeof(Byte) && typeof(TSrcDepth) != typeof(Byte))
{
//the target depth is Byte but the source is not: rescale the source value range into [0, 255] when it does not fit
double[] minVal, maxVal;
Point[] minLoc, maxLoc;
srcImage.MinMax(out minVal, out maxVal, out minLoc, out maxLoc);
double min = minVal[0];
double max = maxVal[0];
for (int i = 1; i < minVal.Length; i++)
{
min = Math.Min(min, minVal[i]);
max = Math.Max(max, maxVal[i]);
}
double scale = 1.0, shift = 0.0;
if (max > 255.0 || min < 0)
{
scale = max.Equals(min) ? 0.0 : 255.0 / (max - min);
shift = scale.Equals(0.0) ? min : -min * scale;
}
CvInvoke.ConvertScaleAbs(srcImage, this, scale, shift);
}
else
{
srcImage.Mat.ConvertTo(this, this.Mat.Depth, 1.0, 0.0);
}
}
#endregion
}
else
{
#region different color
if (typeof(TDepth) == typeof(TSrcDepth))
{ //same depth
CvInvoke.CvtColor(srcImage, this, typeof(TSrcColor), typeof(TColor));
}
else
{ //different depth
if (typeof(TSrcDepth) == typeof(Byte))
{ //Do color conversion first, then depth conversion
using (Image<TColor, TSrcDepth> tmp = srcImage.Convert<TColor, TSrcDepth>())
{
this.ConvertFrom(tmp);
}
}
else
{ //Do depth conversion first, then color conversion
using (Image<TSrcColor, TDepth> tmp = srcImage.Convert<TSrcColor, TDepth>())
{
CvInvoke.CvtColor(tmp, this, typeof(TSrcColor), typeof(TColor));
}
}
}
#endregion
}
}
/// <summary>
/// Convert the source image to the current image. If the sizes are different, the current image will be a resized version of the srcImage.
/// </summary>
/// <param name="srcImage">The source image</param>
public void ConvertFrom(IInputArray srcImage)
{
using (InputArray iaSrcImage = srcImage.GetInputArray())
{
Size srcImageSize = iaSrcImage.GetSize();
if (!Size.Equals(srcImageSize))
{
//the size of the source image does not match the size of the current image: resize first
using (Mat tmp = new Mat())
{
CvInvoke.Resize(srcImage, tmp, this.Size);
ConvertFrom(tmp);
return;
}
}
int srcImageNumberOfChannels = iaSrcImage.GetChannels();
if (NumberOfChannels == srcImageNumberOfChannels)
{
#region same color
DepthType srcImageDepth = iaSrcImage.GetDepth();
if (CvInvoke.GetDepthType(typeof(TDepth)) == srcImageDepth)
{
//same depth
iaSrcImage.CopyTo(this);
}
else
{ //different depth
if (typeof(TDepth) == typeof(Byte) && srcImageDepth != DepthType.Cv8U)
{
//the target depth is Byte but the source is not 8-bit unsigned: rescale the source value range into [0, 255] when it does not fit
double[] minVal, maxVal;
Point[] minLoc, maxLoc;
CvInvoke.MinMax(srcImage, out minVal, out maxVal, out minLoc, out maxLoc);
double min = minVal[0];
double max = maxVal[0];
for (int i = 1; i < minVal.Length; i++)
{
min = Math.Min(min, minVal[i]);
max = Math.Max(max, maxVal[i]);
}
double scale = 1.0, shift = 0.0;
if (max > 255.0 || min < 0)
{
scale = max.Equals(min) ? 0.0 : 255.0 / (max - min);
shift = scale.Equals(0.0) ? min : -min * scale;
}
CvInvoke.ConvertScaleAbs(srcImage, this, scale, shift);
}
else
{
using (Mat srcMat = iaSrcImage.GetMat())
{
srcMat.ConvertTo(this, this.Mat.Depth, 1.0, 0.0);
}
}
}
#endregion
}
else
{
if (!(srcImageNumberOfChannels == 1 || srcImageNumberOfChannels == 3 || srcImageNumberOfChannels == 4))
throw new Exception("Color conversion not supported");
Type srcColorType =
srcImageNumberOfChannels == 1
? typeof(Gray)
: srcImageNumberOfChannels == 3
? typeof(Bgr)
: typeof(Bgra);
#region different color
DepthType srcImageDepth = iaSrcImage.GetDepth();
if (CvInvoke.GetDepthType(typeof(TDepth)) == srcImageDepth)
{
//same depth
CvInvoke.CvtColor(srcImage, this, srcColorType, typeof(TColor));
}
else
{
//different depth: convert the depth first, then the color
using (Mat tmp = new Mat())
using (Mat srcMat = iaSrcImage.GetMat())
{
srcMat.ConvertTo(tmp, CvInvoke.GetDepthType(typeof(TDepth)));
CvInvoke.CvtColor(tmp, this, srcColorType, typeof(TColor));
}
}
#endregion
}
}
}
/// <summary>Convert the current image to the specified depth; at the same time scale and shift the values of the pixels</summary>
/// <param name="scale">The value to be multiplied with the pixel</param>
/// <param name="shift">The value to be added to the pixel</param>
/// <typeparam name="TOtherDepth">The type of depth to convert to</typeparam>
/// <returns>Image of the specified depth, val = val * scale + shift</returns>
public Image<TColor, TOtherDepth> ConvertScale<TOtherDepth>(double scale, double shift)
where TOtherDepth : new()
{
Image<TColor, TOtherDepth> res = new Image<TColor, TOtherDepth>(Width, Height);
if (typeof(TOtherDepth) == typeof(Byte))
CvInvoke.ConvertScaleAbs(this, res, scale, shift);
else
CvInvoke.cvConvertScale(this, res, scale, shift);
return res;
}
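// Usage sketch (illustrative only): map a float image with values in [0, 1]
// into a displayable byte image in [0, 255]. "floatImg" is an assumed
// Image<Gray, float>.
//
// Image<Gray, byte> displayable = floatImg.ConvertScale<byte>(255.0, 0.0);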
#endregion
#region Pyramids
/// <summary>
/// Performs the downsampling step of Gaussian pyramid decomposition.
/// First it convolves this image with the specified filter and then downsamples the image
/// by rejecting even rows and columns.
/// </summary>
/// <returns>The down-sampled image</returns>
[ExposableMethod(Exposable = true, Category = "Pyramids")]
public Image<TColor, TDepth> PyrDown()
{
Image<TColor, TDepth> res = new Image<TColor, TDepth>(Width >> 1, Height >> 1);
CvInvoke.PyrDown(this, res, CvEnum.BorderType.Default);
return res;
}
/// <summary>
/// Performs the up-sampling step of Gaussian pyramid decomposition.
/// First it up-samples this image by injecting even zero rows and columns and then convolves the
/// result with the specified filter multiplied by 4 for interpolation.
/// So the resulting image is four times larger than the source image.
/// </summary>
/// <returns>The up-sampled image</returns>
[ExposableMethod(Exposable = true, Category = "Pyramids")]
public Image<TColor, TDepth> PyrUp()
{
Image<TColor, TDepth> res = new Image<TColor, TDepth>(Width << 1, Height << 1);
CvInvoke.PyrUp(this, res, CvEnum.BorderType.Default);
return res;
}
/// <summary>
/// Compute the image pyramid
/// </summary>
/// <param name="maxLevel">The number of levels for the pyramid; level 0 refers to the current image, level n is computed by calling the PyrDown() function on level n-1</param>
/// <returns>The image pyramid</returns>
public Image<TColor, TDepth>[] BuildPyramid(int maxLevel)
{
Debug.Assert(maxLevel >= 0, "The pyramid should have at least maxLevel of 0");
Image<TColor, TDepth>[] pyr = new Image<TColor, TDepth>[maxLevel + 1];
pyr[0] = this;
for (int i = 1; i <= maxLevel; i++)
pyr[i] = pyr[i - 1].PyrDown();
return pyr;
}
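// Usage sketch (illustrative only): build a 4-image pyramid (the original plus
// three halvings). Note that level 0 is this image itself, not a copy.
// "gray" is an assumed Image<Gray, byte>.
//
// Image<Gray, byte>[] pyramid = gray.BuildPyramid(3);
// for (int i = 0; i < pyramid.Length; i++)
//     Console.WriteLine("level {0}: {1}x{2}", i, pyramid[i].Width, pyramid[i].Height);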
#endregion
#region Special Image Transforms
/// <summary>Use inpaint to recover the intensity of the pixels whose locations are defined by <paramref name="mask"/> on this image</summary>
/// <param name="mask">The inpainting mask. Non-zero pixels indicate the area that needs to be inpainted</param>
/// <param name="radius">The radius of the circular neighborhood of each inpainted point that is considered by the algorithm</param>
/// <returns>The inpainted image</returns>
public Image<TColor, TDepth> InPaint(Image<Gray, Byte> mask, double radius)
{
Image<TColor, TDepth> res = CopyBlank();
CvInvoke.Inpaint(this, mask, res, radius, CvEnum.InpaintType.Telea);
return res;
}
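// Usage sketch (illustrative only): remove defects marked in a mask.
// "damaged" is an assumed Image<Bgr, byte>; "defectMask" an assumed
// Image<Gray, byte> of the same size whose non-zero pixels cover the defects.
//
// using (Image<Bgr, byte> repaired = damaged.InPaint(defectMask, 3.0))
// {
//     repaired.Save("repaired.png");
// }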
#endregion
#region Morphological Operations
/// <summary>
/// Perform advanced morphological transformations using erosion and dilation as basic operations.
/// </summary>
/// <param name="operation">Type of morphological operation</param>
/// <param name="kernel">Structuring element</param>
/// <param name="anchor">Anchor position within the kernel. Negative values mean that the anchor is at the kernel center.</param>
/// <param name="iterations">Number of times erosion and dilation are applied</param>
/// <param name="borderType">Border type</param>
/// <param name="borderValue">Border value</param>
/// <returns>The result of the morphological operation</returns>
public Image<TColor, TDepth> MorphologyEx(CvEnum.MorphOp operation, IInputArray kernel, Point anchor, int iterations, CvEnum.BorderType borderType, MCvScalar borderValue)
{
Image<TColor, TDepth> res = CopyBlank();
CvInvoke.MorphologyEx(
this, res, operation,
kernel, anchor, iterations, borderType, borderValue);
return res;
}
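// Usage sketch (illustrative only): morphological opening with a 5x5 elliptic
// kernel to remove small speckles. "binary" is an assumed Image<Gray, byte>.
//
// using (Mat kernel = CvInvoke.GetStructuringElement(
//     CvEnum.ElementShape.Ellipse, new Size(5, 5), new Point(-1, -1)))
// {
//     Image<Gray, byte> opened = binary.MorphologyEx(
//         CvEnum.MorphOp.Open, kernel, new Point(-1, -1), 1,
//         CvEnum.BorderType.Constant, CvInvoke.MorphologyDefaultBorderValue);
// }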
/// <summary>
/// Perform inplace advanced morphological transformations using erosion and dilation as basic operations.
/// </summary>
/// <param name="operation">Type of morphological operation</param>
/// <param name="kernel">Structuring element</param>
/// <param name="anchor">Anchor position within the kernel. Negative values mean that the anchor is at the kernel center.</param>
/// <param name="iterations">Number of times erosion and dilation are applied</param>
/// <param name="borderType">Border type</param>
/// <param name="borderValue">Border value</param>
public void _MorphologyEx(CvEnum.MorphOp operation, IInputArray kernel, Point anchor, int iterations, CvEnum.BorderType borderType, MCvScalar borderValue)
{
CvInvoke.MorphologyEx(
this, this, operation,
kernel, anchor, iterations, borderType, borderValue);
}
/// <summary>
/// Erodes this image using a 3x3 rectangular structuring element.
/// Erosion is applied several (iterations) times
/// </summary>
/// <param name="iterations">The number of erode iterations</param>
/// <returns>The eroded image</returns>
public Image<TColor, TDepth> Erode(int iterations)
{
Image<TColor, TDepth> res = CopyBlank();
CvInvoke.Erode(this, res, null, new Point(-1, -1), iterations, CvEnum.BorderType.Constant, CvInvoke.MorphologyDefaultBorderValue);
return res;
}
/// <summary>
/// Dilates this image using a 3x3 rectangular structuring element.
/// Dilation is applied several (iterations) times
/// </summary>
/// <param name="iterations">The number of dilate iterations</param>
/// <returns>The dilated image</returns>
public Image<TColor, TDepth> Dilate(int iterations)
{
Image<TColor, TDepth> res = CopyBlank();
CvInvoke.Dilate(this, res, null, new Point(-1, -1), iterations, CvEnum.BorderType.Constant, CvInvoke.MorphologyDefaultBorderValue);
return res;
}
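// Usage sketch (illustrative only): morphological opening by hand, eroding
// then dilating a binary mask. "mask" is an assumed Image<Gray, byte>.
//
// using (Image<Gray, byte> eroded = mask.Erode(2))
// using (Image<Gray, byte> opened = eroded.Dilate(2))
// {
//     // white specks smaller than the effective structuring element are gone
// }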
/// <summary>
/// Erodes this image inplace using a 3x3 rectangular structuring element.
/// Erosion is applied several (iterations) times
/// </summary>
/// <param name="iterations">The number of erode iterations</param>
[ExposableMethod(Exposable = true, Category = "Morphology")]
public void _Erode(int iterations)
{
CvInvoke.Erode(this, this, null, new Point(-1, -1), iterations, CvEnum.BorderType.Constant, CvInvoke.MorphologyDefaultBorderValue);
}
/// <summary>
/// Dilates this image inplace using a 3x3 rectangular structuring element.
/// Dilation is applied several (iterations) times
/// </summary>
/// <param name="iterations">The number of dilate iterations</param>
[ExposableMethod(Exposable = true, Category = "Morphology")]
public void _Dilate(int iterations)
{
CvInvoke.Dilate(this, this, null, new Point(-1, -1), iterations, CvEnum.BorderType.Constant, CvInvoke.MorphologyDefaultBorderValue);
}
#endregion
#region generic operations
/// <summary>
/// Perform a generic action on each element of the image
/// </summary>
/// <param name="action">The action to be applied to each element of the image</param>
public void Action(Action<TDepth> action)
{
int cols1 = Width * new TColor().Dimension;
int step1;
IntPtr start;
Size roiSize;
CvInvoke.cvGetRawData(Ptr, out start, out step1, out roiSize);
Int64 data1 = start.ToInt64();
int width1 = SizeOfElement * cols1;
using (PinnedArray<TDepth> row1 = new PinnedArray<TDepth>(cols1))
for (int row = 0; row < Height; row++, data1 += step1)
{
CvToolbox.Memcpy(row1.AddrOfPinnedObject(), new IntPtr(data1), width1);
foreach (TDepth v in row1.Array)
action(v);
}
}
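// Usage sketch (illustrative only): accumulate the sum of every channel value
// in the image. "grayImg" is an assumed Image<Gray, byte>.
//
// double sum = 0;
// grayImg.Action((byte v) => sum += v);
// Console.WriteLine("sum of all pixel values: {0}", sum);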
/// <summary>
/// Perform a generic operation based on the elements of the two images
/// </summary>
/// <typeparam name="TOtherDepth">The depth of the second image</typeparam>
/// <param name="img2">The second image to perform action on</param>
/// <param name="action">An action such that the first parameter is a single channel of a pixel from the first image, and the second parameter is the corresponding channel of the corresponding pixel from the second image</param>
public void Action<TOtherDepth>(Image<TColor, TOtherDepth> img2, Action<TDepth, TOtherDepth> action)
where TOtherDepth : new()
{
Debug.Assert(Size.Equals(img2.Size));
Int64 data1;
int height1, cols1, width1, step1;
RoiParam(Ptr, out data1, out height1, out cols1, out width1, out step1);
Int64 data2;
int height2, cols2, width2, step2;
RoiParam(img2.Ptr, out data2, out height2, out cols2, out width2, out step2);
TDepth[] row1 = new TDepth[cols1];
TOtherDepth[] row2 = new TOtherDepth[cols1];
GCHandle handle1 = GCHandle.Alloc(row1, GCHandleType.Pinned);
GCHandle handle2 = GCHandle.Alloc(row2, GCHandleType.Pinned);
for (int row = 0; row < height1; row++, data1 += step1, data2 += step2)
{
CvToolbox.Memcpy(handle1.AddrOfPinnedObject(), (IntPtr)data1, width1);
CvToolbox.Memcpy(handle2.AddrOfPinnedObject(), (IntPtr)data2, width2);
for (int col = 0; col < cols1; col++)
action(row1[col], row2[col]);
}
handle1.Free();
handle2.Free();
}
/// <summary>
/// Compute the element of a new image based on the value as well as the x and y positions of each pixel on the image
/// </summary>
/// <param name="converter">The function to be applied to the image pixels; its arguments are the pixel channel value, the row, and the column</param>
/// <typeparam name="TOtherDepth">The depth type to convert the image to</typeparam>
/// <returns>The result image</returns>
public Image<TColor, TOtherDepth> Convert<TOtherDepth>(Func<TDepth, int, int, TOtherDepth> converter)
where TOtherDepth : new()
{
Image<TColor, TOtherDepth> res = new Image<TColor, TOtherDepth>(Width, Height);
int nchannel = MIplImage.NChannels;
Int64 data1;
int height1, cols1, width1, step1;
RoiParam(Ptr, out data1, out height1, out cols1, out width1, out step1);
Int64 data2;
int height2, cols2, width2, step2;
RoiParam(res.Ptr, out data2, out height2, out cols2, out width2, out step2);
TDepth[] row1 = new TDepth[cols1];
TOtherDepth[] row2 = new TOtherDepth[cols1];
GCHandle handle1 = GCHandle.Alloc(row1, GCHandleType.Pinned);
GCHandle handle2 = GCHandle.Alloc(row2, GCHandleType.Pinned);
for (int row = 0; row < height1; row++, data1 += step1, data2 += step2)
{
CvToolbox.Memcpy(handle1.AddrOfPinnedObject(), (IntPtr)data1, width1);
for (int col = 0; col < cols1; col++)
row2[col] = converter(row1[col], row, col / nchannel);
CvToolbox.Memcpy((IntPtr)data2, handle2.AddrOfPinnedObject(), width2);
}
handle1.Free();
handle2.Free();
return res;
}
/// <summary>Compute the element of the new image based on the element of this image</summary>
/// <typeparam name="TOtherDepth">The depth type of the result image</typeparam>
/// <param name="converter">The function to be applied to the image pixels</param>
/// <returns>The result image</returns>
public Image<TColor, TOtherDepth> Convert<TOtherDepth>(Func<TDepth, TOtherDepth> converter)
where TOtherDepth : new()
{
Image<TColor, TOtherDepth> res = new Image<TColor, TOtherDepth>(Size);
Int64 data1;
int height1, cols1, width1, step1;
RoiParam(Ptr, out data1, out height1, out cols1, out width1, out step1);
Int64 data2;
int height2, cols2, width2, step2;
RoiParam(res.Ptr, out data2, out height2, out cols2, out width2, out step2);
TDepth[] row1 = new TDepth[cols1];
TOtherDepth[] row2 = new TOtherDepth[cols1];
GCHandle handle1 = GCHandle.Alloc(row1, GCHandleType.Pinned);
GCHandle handle2 = GCHandle.Alloc(row2, GCHandleType.Pinned);
for (int row = 0; row < height1; row++, data1 += step1, data2 += step2)
{
CvToolbox.Memcpy(handle1.AddrOfPinnedObject(), (IntPtr)data1, width1);
for (int col = 0; col < cols1; col++)
row2[col] = converter(row1[col]);
CvToolbox.Memcpy((IntPtr)data2, handle2.AddrOfPinnedObject(), width2);
}
handle1.Free();
handle2.Free();
return res;
}
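// Usage sketch (illustrative only): per-pixel thresholding through the generic
// converter. This is slower than CvInvoke.Threshold, but shows how arbitrary
// per-element logic can be expressed. "gray" is an assumed Image<Gray, byte>.
//
// Image<Gray, byte> binary = gray.Convert<byte>((byte v) => (byte)(v > 128 ? 255 : 0));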
/// <summary>Compute the element of the new image based on the elements of the two images</summary>
/// <typeparam name="TDepth2">The depth type of img2</typeparam>
/// <typeparam name="TDepth3">The depth type of the result image</typeparam>
/// <param name="img2">The second image</param>
/// <param name="converter">The function to be applied to the image pixels</param>
/// <returns>The result image</returns>
public Image<TColor, TDepth3> Convert<TDepth2, TDepth3>(Image<TColor, TDepth2>