Getting Started with OpenCvSharp 3

16 Mar 2016

CPOL

3 min read

Sample code to get started quickly with OpenCvSharp 3.

Introduction

I am new to image processing, but not to programming. I uploaded this project as a demo-code resource for other newcomers who want to get up and running quickly and get a feel for the OpenCvSharp3 coding API. This is an article on converting C++ code into OpenCvSharp3 code. You can experiment with the code parameters and consult the C++ documentation to learn what each API does.

I considered the C# wrapper classes Emgu and OpenCV.NET... but I felt OpenCvSharp3 was the easiest and best-supported platform.

The demo code is split into (independent regions), which makes the intent of the code easy to follow. I had to compile the sample code from https://github.com/shimat/opencvsharp/releases at least a few times before I got a clean build.

Background

OpenCV is a popular C++ computer vision library. It processes image pixels to find features of interest. However, C++ is an unmanaged-code platform and feels a bit clumsy compared to C#. So I gave up programming in C++ in favor of C#.

Using the Code

To run the demo, create a new console application and copy the images and program files into it. Include one of the program files (program, program2, or program3) and exclude the other program files from the project build. Then uncomment (where applicable) one of the code regions, and compile and execute that selection. Change the API parameters as desired, then recompile and run. Note that you will need the Visual Studio NuGet package manager to install the OpenCvSharp3 library; the base package includes the OpenCV C++ DLLs.
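
Before working through the full listing below, a minimal sketch like the following (my own addition, not part of the download; it assumes the same ../../Images/lena.jpg path the demo uses) can confirm that the NuGet package and the native OpenCV DLLs are wired up correctly:

    using OpenCvSharp;

    class SanityCheck
    {
        static void Main()
        {
            // Load an image and show it; a failure here usually means the
            // native OpenCV DLLs were not copied to the output folder.
            using (Mat img = Cv2.ImRead("../../Images/lena.jpg", ImreadModes.Color))
            using (new Window("sanity check", img))
            {
                Cv2.WaitKey();
            }
        }
    }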

    /*
     * More Samples are available at: https://github.com/shimat/opencvsharp/releases
     * Class NameSpace listing is at: http://shimat.github.io/opencvsharp/
    */

    using System;
    using System.Collections.Generic;
    using System.Windows.Forms;
    using OpenCvSharp;
    using OpenCvSharp.ML;

    class Program
    {
        static void Main()
        {
    #region Canny Edge Detection

            //Mat source = new Mat("../../Images/lena.jpg", ImreadModes.Color);
            //Mat grayFiltered = new Mat();
            //Mat ClearEdge = new Mat();
            //Mat filtered = new Mat();
            //Mat d = createADiamond();
            //Mat x = createAXShape();
            //Mat Canny = new Mat();

            //Cv2.Canny(source, Canny, 32, 192);
            //binImage(source, out filtered);

            //// A discussion on erode and dilate is at:
            //// https://docs.opencv.ac.cn/2.4/doc/tutorials/imgproc/erosion_dilatation/erosion_dilatation.html

            //Cv2.Erode(filtered, ClearEdge, x);
            //Cv2.Dilate(ClearEdge, ClearEdge, x);
            //Cv2.Erode(ClearEdge, ClearEdge, d);
            //Cv2.Dilate(ClearEdge, ClearEdge, d);

            //grayFiltered = Mat.FromImageData(filtered.ToBytes(), ImreadModes.GrayScale);

            //new Window("source image", source);
            //new Window("CannyEdge image", Canny);
            //new Window("filtered image", filtered);
            //new Window("SharpEdge image", ClearEdge);
            //new Window("grayFiltered image", grayFiltered);
            //Cv2.WaitKey();

    #endregion
    

    #region Sobel Edge Detection

            //Mat dst = new Mat();
            //Mat grad_x = new Mat(), grad_y = new Mat();
            //Mat abs_grad_x = new Mat(), abs_grad_y = new Mat();
            //Mat src_gray = new Mat("../../Images/lena.jpg", ImreadModes.GrayScale);

            //// try to reduce image noises.
            //Cv2.GaussianBlur(src_gray, src_gray, new Size(3, 3), 1.5);

            //// Gradient X, ddepth is set to CV_16S to avoid overflow
            //Cv2.Sobel(src_gray, grad_x, MatType.CV_16S, 1, 0, 3);
            //Cv2.ConvertScaleAbs(grad_x, abs_grad_x);

            //// Gradient Y
            //Cv2.Sobel(src_gray, grad_y, MatType.CV_16S, 0, 1, 3);
            //Cv2.ConvertScaleAbs(grad_y, abs_grad_y);

            //// Total Gradient (approximate)
            //Cv2.AddWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 0, dst);

            //Cv2.ImShow("Sobel Edge", dst);

            //Cv2.WaitKey(0);

    #endregion
    

    #region Camcorder Capture

            //VideoCapture capture;
            //Mat frame;
 
            //capture = new VideoCapture();
            //frame = new Mat();
            //capture.Open(0);
 
            //// Read the video stream
            //{
            //    Cv2.NamedWindow("Video", WindowMode.AutoSize);
            //    while (true)
            //    {
            //        if (capture.Read(frame))
            //        {
            //            Cv2.ImShow("Video", frame);
 
            //            // press a key to end execution
            //            int c = Cv2.WaitKey(10);
            //            if (c != -1) { break; } // Assuming image has focus
            //        }
            //    }
            //}

    #endregion 
    
 
    #region Assigning pixel values

            //Mat src = new Mat("../../Images/lena.jpg", ImreadModes.Color);
 
            //// place a green box at the upper left hand corner
            //for (int i = 10; i < 99; i++)
            //    for (int j = 10; j < 99; j++)
            //        src.Set<Vec3b>(i, j, new Vec3b(0, 255, 0));
 
            //using (new Window("dot image", src2))
            //{
            //    MessageBox.Show("Depth: " + src.Depth());
            //    MessageBox.Show("Channels: " + src.Channels());
            //    Cv2.WaitKey();
            //}

    #endregion
    
 
    #region Erode/Dilate Morphing

            //Mat src, dst;
            //src = Cv2.ImRead("../../Images/lena.jpg", ImreadModes.Color);
            //dst = new Mat();
 
            // Create a structuring element
            //int erosion_size = 6;
            //Mat element = Cv2.GetStructuringElement(MorphShapes.Cross,
            //      new Size(2 * erosion_size + 1, 2 * erosion_size + 1),
            //      new Point(erosion_size, erosion_size));
 
            // Apply erosion "OR" dilation on the image
            //Cv2.Erode(src, dst, element);
            //Cv2.Dilate(src, dst, element);
 
            //using (new Window("Display window", src))
            //using (new Window("Result window", dst))
            //    Cv2.WaitKey(0);

    #endregion
 
 
    #region draw a line

            //// Create black empty images
            //Mat image = Mat.Zeros(400, 400, MatType.CV_8UC3);
 
            //// Draw a line 
            //Cv2.Line(image, new Point(15, 20), new Point(375, 375),
            //    new Scalar(255, 128, 0), 2);
            //using (new Window("Image", image))
            //    Cv2.WaitKey(0);

    #endregion
 
 
    #region draw a circle

            //// Create black empty images
            //Mat image = Mat.Zeros(400, 400, MatType.CV_8UC3);
 
            //// Draw a circle
            //Cv2.Circle(image, new Point(200, 200), 100, new Scalar(255, 128, 0), 2);
            //using (new Window("Image", image))
            //    Cv2.WaitKey(0);

    #endregion
 
 
    #region draw a Polygon

            //Mat src = new Mat(new Size(400, 400), MatType.CV_8U, new Scalar(1));
            //src.SetTo(Scalar.Black);

            //List<List<Point>> ListOfListOfPoint = new List<List<Point>>();
            //List<Point> points = new List<Point>();

            //points.Add(new Point(100, 100));
            //points.Add(new Point(100, 300));
            //points.Add(new Point(300, 300));
            //points.Add(new Point(300, 100));
            //ListOfListOfPoint.Add(points);

            //src.FillPoly(ListOfListOfPoint, Scalar.White);
            //Cv2.ImShow("Square", src);
            //Cv2.WaitKey();

    #endregion
 
 
    #region draw a text string

            //// Create black empty images
            //Mat src = Mat.Zeros( 400, 400, MatType.CV_8UC3 );
   
            //Cv2.PutText(src, "Hi all...", new Point(50,100),
            //    HersheyFonts.HersheySimplex, 1, new Scalar(0,200,200), 4);
            //using (new Window("Image", src))
            //    Cv2.WaitKey(0);

    #endregion
 
 
    #region weighed filter
 
            //Mat src = new Mat("../../Images/lena.jpg", ImreadModes.GrayScale);
            //Mat kernel = new Mat(3, 3, MatType.CV_32F, new Scalar(0));
            //Mat dst = new Mat();
 
            //kernel.Set<float>(1, 1,  5.0f);
            //kernel.Set<float>(0, 1, -1.0f);
            //kernel.Set<float>(2, 1, -1.0f);
            //kernel.Set<float>(1, 0, -1.0f);
            //kernel.Set<float>(1, 2, -1.0f);
 
            //Cv2.Filter2D(src, dst, MatType.CV_32F, kernel);
            //using (new Window("src image", src))
            //using (new Window("dst image", dst))
            //{
            //    Cv2.WaitKey();
            //}

    #endregion


    #region find circles in image

            //Mat train = new Mat("../../Images/cartoon-train.png", ImreadModes.GrayScale);
            //CircleSegment[] circles;
            //Mat dst = new Mat();

            //Cv2.GaussianBlur(train, dst, new Size(5, 5), 1.5, 1.5);

            //// Note, the minimum distance between concentric circles is 25. Otherwise
            //// false circles are detected as a result of the circle's thickness.
            //circles = Cv2.HoughCircles(dst, HoughMethods.Gradient, 1, 25, 75, 60, 5, 200);

            //for (int i = 0; i < circles.Length; i++)
            //{
            //    Cv2.Circle(dst, circles[i].Center, (int)circles[i].Radius, new Scalar(0), 2);
            //}

            //using (new Window("Circles", dst))
            //{
            //    Cv2.WaitKey();
            //}

    #endregion


    #region Get corners on image

            //Mat src = new Mat("../../Images/building.jpg", ImreadModes.GrayScale);
 
            //// Show Edges
            //Mat edges = getEdges(src, 50);
            //new Window("Edges", edges);

            //// Corner detection
            //// Get All Processing Images
            //Mat cross = createACross();
            //Mat diamond = createADiamond();
            //Mat square = createASquare();
            //Mat x = createAXShape();
            //Mat dst = new Mat();

            //// Dilate with a cross
            //Cv2.Dilate(src, dst, cross);

            //// Erode with a diamond
            //Cv2.Erode(dst, dst, diamond);

            //Mat dst2 = new Mat();

            //// Dilate with a X
            //Cv2.Dilate(src, dst2, x);

            //// Erode with a square
            //Cv2.Erode(dst2, dst2, square);

            //// Corners are obtained by differencing the two closed images
            //Cv2.Absdiff(dst, dst2, dst);
            //applyThreshold(dst, 45);

            //// The following code identifies the found corners by
            //// drawing circles on the src image.
            //IDTheCorners(dst, src);
            //new Window("Corner on Image", src);
            //Cv2.WaitKey();

    #endregion


    #region Machine Learning

            // Translated from C++ article:
            // https://docs.opencv.ac.cn/3.1.0/d1/d73/tutorial_introduction_to_svm.html

            // Data for visual representation 
            int width = 512, height = 512;
            Mat image = Mat.Zeros(height, width, MatType.CV_8UC3);

            // Set up training data
            int[] labels = new int[] { 1, -1, -1, -1 };
            float[,] trainingData = new float[,] { { 501, 10 }, { 255, 10 }, { 501, 255 }, { 10, 501 } };
            Mat trainingDataMat = new Mat(4, 2, MatType.CV_32FC1, trainingData);
            Mat labelsMat = new Mat(4, 1, MatType.CV_32SC1, labels);

            // Train the SVM
            SVM svm = SVM.Create();
            svm.Type = SVM.Types.CSvc;
            svm.KernelType = SVM.KernelTypes.Linear;
            svm.TermCriteria = new TermCriteria(CriteriaType.MaxIter, 100, 1e-6);
            svm.Train(trainingDataMat, SampleTypes.RowSample, labelsMat);

            // Show the decision regions given by the SVM
            Vec3b green = new Vec3b(0, 255, 0), blue = new Vec3b(255, 0, 0);
            for (int i = 0; i < image.Rows; ++i)
                for (int j = 0; j < image.Cols; ++j)
                {
                    Mat sampleMat = new Mat(1, 2, MatType.CV_32F, new float[] { j, i });
                    float response = svm.Predict(sampleMat);
                    if (response == 1)
                        image.Set<Vec3b>(i, j, green);
                    else if (response == -1)
                        image.Set<Vec3b>(i, j, blue);
                }

            // Show the training data
            int thickness = -1;
            Cv2.Circle(image, new Point(501, 10), 5, Scalar.Black, thickness);
            Cv2.Circle(image, new Point(255, 10), 5, Scalar.White, thickness);
            Cv2.Circle(image, new Point(501, 255), 5, Scalar.White, thickness);
            Cv2.Circle(image, new Point(10, 501), 5, Scalar.White, thickness);

            // Show support vectors
            thickness = 2;
            Mat sv = svm.GetSupportVectors();
            for (int i = 0; i < sv.Rows; ++i)
            {
                unsafe
                {
                    float* v = (float*)sv.Ptr(i);
                    Cv2.Circle(image, new Point((int)v[0], (int)v[1]), 6, Scalar.Gray, thickness);
                    Console.WriteLine("{0:d}, {1:d}", (int)v[0], (int)v[1]);
                }
            }
            Cv2.ImWrite("result.png", image);

            // save the image
            Cv2.ImShow("SVM Simple Example", image); // show it to the user
            Cv2.WaitKey(0);

    #endregion


    #region Feature SURF flann

            //// Taken from the C++ example:
            //// https://docs.opencv.ac.cn/3.1.0/d5/d6f/tutorial_feature_flann_matcher.html
            //// As of the writing of this routine,
            //// SIFT and SURF are non-free code that has been moved to the
            //// contrib repository and linked into the xfeatures2d library.

            //// So, you will get the runtime error:
            //// Unable to find an entry point named 'xfeatures2d_SURF_create'
            //// in DLL 'OpenCvSharpExtern'.

            //// See these two links for a method of installing the contrib library;
            //// it's a bit tricky but doable:
            //// https://github.com/shimat/opencvsharp/issues/146
            //// https://github.com/shimat/opencvsharp/issues/180


            //Mat img_1 = Cv2.ImRead("../../Images/icons.png", ImreadModes.GrayScale);
            //Mat img_2 = Cv2.ImRead("../../Images/subIcons.png", ImreadModes.GrayScale);

            ////-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors 
            //int minHessian = 400;
            //SURF detector = SURF.Create(minHessian);

            //KeyPoint[] keypoints_1, keypoints_2;
            //Mat descriptors_1 = new Mat(), descriptors_2 = new Mat();

            //detector.DetectAndCompute(img_1, new Mat(), out keypoints_1, descriptors_1);
            //detector.DetectAndCompute(img_2, new Mat(), out keypoints_2, descriptors_2);

            ////-- Step 2: Matching descriptor vectors using FLANN matcher 
            //FlannBasedMatcher matcher = new FlannBasedMatcher();
            //DMatch[] matches;

            //matches = matcher.Match(descriptors_1, descriptors_2);
            //double max_dist = 0; double min_dist = 100;

            ////-- Quick calculation of max and min distances between keypoints 
            //for (int i = 0; i < descriptors_1.Rows; i++)
            //{
            //    double dist = matches[i].Distance;
            //    if (dist < min_dist) min_dist = dist;
            //    if (dist > max_dist) max_dist = dist;
            //}
            //Console.WriteLine("-- Max dist : %f", max_dist);
            //Console.WriteLine("-- Min dist : %f", min_dist);

            ////-- Draw only "good" matches (i.e. whose distance is less than 2*min_dist,
            ////-- or a small arbitrary value ( 0.02 ) in the event that min_dist is very 
            ////-- small)  //-- PS.- radiusMatch can also be used here.
            //List<DMatch> good_matches = new List<DMatch>();
            //for (int i = 0; i < descriptors_1.Rows; i++)
            //{
            //    if (matches[i].Distance <= Math.Max(2 * min_dist, 0.02))
            //    {
            //        good_matches.Add(matches[i]);
            //    }
            //}

            ////-- Draw only "good" matches
            //Mat img_matches = new Mat();
            //Cv2.DrawMatches(img_1, keypoints_1, img_2, keypoints_2,
            //    good_matches, img_matches, Scalar.All(-1), Scalar.All(-1),
            //    new List<byte>(), DrawMatchesFlags.NotDrawSinglePoints);

            ////-- Show detected matches  imshow( "Good Matches", img_matches );
            //for (int i = 0; i < (int)good_matches.Count; i++)
            //{
            //    Console.WriteLine("-- Good Match [%d] Keypoint 1: %d  -- Keypoint 2: %d", i,
            //        good_matches[i].QueryIdx, good_matches[i].TrainIdx);
            //}

            //Cv2.WaitKey(0);

    #endregion

        }

        /***********************************************************************
        ************************** SUPPORT ROUTINES  ***************************
        ***********************************************************************/

        #region Support code for Get Corners on image region

        static Mat getEdges(Mat image, int threshold)
        {
            // Get the gradient image
            Mat result = new Mat();
            Cv2.MorphologyEx(image, result, MorphTypes.Gradient, new Mat());
            applyThreshold(result, threshold);

            return result;
        }

        static Mat createACross()
        {
            Mat cross = new Mat(5, 5, MatType.CV_8U, new Scalar(0));
        
            // creating the cross-shaped structuring element
            for (int i = 0; i < 5; i++)
            {
                cross.Set<byte>(2, i, 1);
                cross.Set<byte>(i, 2, 1);
            }

            return cross;
        }

        static Mat createADiamond()
        {
            Mat diamond = new Mat(5, 5, MatType.CV_8U, new Scalar(1));
        
            // Creating the diamond-shaped structuring element
            diamond.Set<byte>(0, 0, 0);
            diamond.Set<byte>(1, 0, 0);
            diamond.Set<byte>(3, 0, 0);
            diamond.Set<byte>(4, 0, 0);
            diamond.Set<byte>(0, 1, 0);
            diamond.Set<byte>(4, 1, 0);
            diamond.Set<byte>(0, 3, 0);
            diamond.Set<byte>(4, 3, 0);
            diamond.Set<byte>(4, 4, 0);
            diamond.Set<byte>(0, 4, 0);
            diamond.Set<byte>(1, 4, 0);
            diamond.Set<byte>(3, 4, 0);

            return diamond;
        }

        static Mat createASquare()
        {
            Mat Square = new Mat(5, 5, MatType.CV_8U, new Scalar(1));

            return Square;
        }

        static Mat createAXShape()
        {
            Mat x = new Mat(5, 5, MatType.CV_8U, new Scalar(0));
        
            // Creating the x-shaped structuring element
            for (int i = 0; i < 5; i++)
            {
                x.Set<byte>(i, i, 1);
                x.Set<byte>(4 - i, i, 1);
            }

            return x;
        }

        static void applyThreshold(Mat result, int threshold)
        {
            Cv2.Threshold(result, result, threshold, 255, ThresholdTypes.Binary);
        }

        static void IDTheCorners(Mat binary, Mat image)
        {
            for (int r = 0; r < binary.Rows; r++)
                for (int c = 0; c < binary.Cols; c++)
                    if (binary.At<byte>(r, c) != 0)
                        Cv2.Circle(image, c, r, 5, new Scalar(255));
        }

        #endregion

    
        #region nearest palette color averaging

        /// <summary>
        /// 
        /// </summary>
        /// <param name="src">input image</param>
        /// <param name="dst">output image</param>
        /// <param name="binSize">color channel size.
        /// i.e., Number of Red shades = floor(255/binSize),
        /// where binSize is between 1 and 128.</param>
        static void binImage(Mat src, out Mat dst, int binSize = 51)
        {
            dst = src.Clone();
            if (binSize <= 0 || binSize > 255) return;

            for (int r = 0; r<src.Rows; r++)
                for (int c = 0; c < src.Cols; c++)
                {
                    Vec3b color = src.Get<Vec3b>(r, c);
                     int binCenter = binSize / 2;
                    dst.Set<Vec3b>(r, c, new Vec3b(
                        // nearest color, note the size of the black and white
                        // bins are only half the size of the other bins.
                        // Note rounding colors to the nearest color palette
                        // can "sharply change" a gradient color in a region. 
                        (byte)(((color[0] + binCenter) / binSize) * binSize),
                        (byte)(((color[1] + binCenter) / binSize) * binSize),
                        (byte)(((color[2] + binCenter) / binSize) * binSize)
                        ));
                }
        }

        #endregion
    }

Brief C++ to C# Walkthrough

A thorough discussion of the commonly used C++ API calls can be found at opencvexamples.blogspot.com/p/learning-opencv-functions-step-by-step.html. The C# API matches the C++ API almost one-for-one. Also see the OpenCV home pages for a discussion of the methods and classes: docs.opencv.org/3.1.0/ (and docs.opencv.org/2.4/ for backward compatibility).
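
As a small illustration of that near one-to-one mapping (a sketch of my own, not taken from the demo project), the C++ calls cv::imread, cv::GaussianBlur, and cv::imshow carry over almost literally:

    using OpenCvSharp;

    class BlurMapping
    {
        static void Main()
        {
            // C++: cv::Mat src = cv::imread("lena.jpg", cv::IMREAD_GRAYSCALE);
            Mat src = Cv2.ImRead("../../Images/lena.jpg", ImreadModes.GrayScale);
            Mat dst = new Mat();

            // C++: cv::GaussianBlur(src, dst, cv::Size(3, 3), 1.5);
            Cv2.GaussianBlur(src, dst, new Size(3, 3), 1.5);

            // C++: cv::imshow("blurred", dst); cv::waitKey();
            Cv2.ImShow("blurred", dst);
            Cv2.WaitKey();
        }
    }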

The namespace listing for the C# API is at shimat.github.io/opencvsharp/ and at github.com/shimat/opencvsharp/wiki.

The purpose of this walkthrough is to mirror the C# API usage when converting from C++. Note that we need a reference to the CvTrackbar... because the C# CvTrackbar does not return the modified value through the input parameter list... whereas the C++ counterpart API does return a modified value through its input parameter list. The goal of the walkthrough is not to explain the C++ library calls... but to show the C# syntax needed for the translation.

    using System;
    using System.Collections.Generic;
    using System.Windows.Forms;
    using OpenCvSharp;
    /* 
        * Taken from the C++ example:
        * https://docs.opencv.ac.cn/master/d0/d2a/contours2_8cpp-example.html
        * C# APIs:
        * http://shimat.github.io/opencvsharp/html/1bb515b3-6278-49e4-9a33-1054bd279323.htm
        * Note too, IntelliSense will usually show the API's arguments.
    */
    class Program
    {
        const int w = 500;
        static CvTrackbar Track;
        static Point[][] contours; // the C++ vector of vectors appears as a jagged array in C#
        static HierarchyIndex[] hierarchy;

        static void on_trackbar(int pos, object usedata) // the C# API doc shows the C++ void* as an object
        {
            int _levels = Track.Pos - 3;
            Mat cnt_img = Mat.Zeros(w, w, MatType.CV_8UC3);
            Cv2.DrawContours( cnt_img, contours, _levels <= 0 ? 3 : -1, Scalar.White,
                2, LineTypes.AntiAlias, hierarchy, Math.Abs(_levels) );
            Cv2.ImShow("contours", cnt_img);
        }

        static void Main()
        {
            Mat img = Mat.Zeros(w, w, MatType.CV_8UC1);
       
            //Draw "6" faces
            for( int i = 0; i < 6; i++ )
            {
                int dx = (i % 2) * 250 - 30;
                int dy = (i/2)*150;
                Scalar white = Scalar.White;
                Scalar black = Scalar.Black;
            
                if( i == 0 )
                {
                    for( int j = 0; j <= 10; j++ ) 
                    {
                        double angle = (j + 5) * Math.PI/21;
                        Cv2.Line(img,
                            new Point(Math.Round(dx + 100 + j * 10 - 80 * Math.Cos(angle), 0),
                                Math.Round(dy + 100 - 90 * Math.Sin(angle), 0)),
                            new Point(Math.Round(dx + 100 + j * 10 - 30 * Math.Cos(angle), 0),
                                Math.Round(dy + 100 - 30 * Math.Sin(angle), 0)),
                                white, 1);
                    }
                }

                Cv2.Ellipse(img, new Point(dx + 150, dy + 100), new Size(100, 70), 0, 0, 360, white);
                Cv2.Ellipse(img, new Point(dx + 115, dy +  70), new Size( 30, 20), 0, 0, 360, black);
                Cv2.Ellipse(img, new Point(dx + 185, dy +  70), new Size( 30, 20), 0, 0, 360, black);
                Cv2.Ellipse(img, new Point(dx + 115, dy +  70), new Size( 15, 15), 0, 0, 360, white);
                Cv2.Ellipse(img, new Point(dx + 185, dy +  70), new Size( 15, 15), 0, 0, 360, white);
                Cv2.Ellipse(img, new Point(dx + 115, dy +  70), new Size(  5,  5), 0, 0, 360, black);
                Cv2.Ellipse(img, new Point(dx + 185, dy +  70), new Size(  5,  5), 0, 0, 360, black);
                Cv2.Ellipse(img, new Point(dx + 150, dy + 100), new Size( 10,  5), 0, 0, 360, black);
                Cv2.Ellipse(img, new Point(dx + 150, dy + 150), new Size( 40, 10), 0, 0, 360, black);
                Cv2.Ellipse(img, new Point(dx +  27, dy + 100), new Size( 20, 35), 0, 0, 360, white);
                Cv2.Ellipse(img, new Point(dx + 273, dy + 100), new Size( 20, 35), 0, 0, 360, white);
            }
        
            //show the faces
            Cv2.ImShow("image", img);
        
            //Extract the contours
            Point[][] contours0;
            Cv2.FindContours(img, out contours0, out hierarchy,
                RetrievalModes.Tree, ContourApproximationModes.ApproxSimple);
            contours = new Point[contours0.Length][];
        
            for( int k = 0; k < contours0.Length; k++ )
                // Here the C# equivalent of the C++ API is not a one-to-one match
                contours[k] = Cv2.ApproxPolyDP(contours0[k], 3, true);

            //show the contours and activate trackbar event control
            Cv2.NamedWindow("contours", WindowMode.AutoSize);
            Track = new CvTrackbar("levels+3", "contours", 4, 7, on_trackbar);
            on_trackbar(0, 0);
        
            Cv2.WaitKey(); 
        }
    }

Another interesting C++ to C# code conversion I noticed is the threshold demo at docs.opencv.org/2.4/doc/tutorials/imgproc/threshold/threshold.html. The Threshold_Demo callback event needs an alternative window definition: identifying the window by name results in either a new thread being started or a null pointer inside the C++ library.

    using OpenCvSharp;

    class Program
    {
        static void Main()
        {
            TestCode t = new TestCode();
            t.Function();
        }

        // I did not want to use a lot of static objects, so I put the code
        // in a user-defined class. The code is derived from:
        // https://docs.opencv.ac.cn/2.4/doc/tutorials/imgproc/threshold/threshold.html
        // This code shows how to program multiple CvTrackbar controls.
 
        class TestCode
        {
            int max_BINARY_value = 255;
            CvTrackbar CvTrackbarValue;
            CvTrackbar CvTrackbarType;
            Mat src_gray = new Mat();
            Mat dst = new Mat();
            Window MyWindow;

            public void Function()
            {
                int threshold_value = 0;
                int threshold_type = 3;
                int max_value = 255;
                int max_type = 4;

                string trackbar_type = "Type: \n 0: Binary \n 1: 
		Binary Inverted \n 2: Truncate \n 3: To Zero \n 4: To Zero Inverted";
                string trackbar_value = "Value";

                // Load an image
                Mat src = new Mat("../../Images/cartoon-train.png", ImreadModes.Color);

                /// Convert the image to Gray
                Cv2.CvtColor(src, src_gray, ColorConversionCodes.BGR2GRAY);

                // Create output window
                MyWindow = new Window("Track", WindowMode.AutoSize);

                // Whenever the user changes the value of any of the Trackbars,
                // the function Threshold_Demo is called. I needed a common global
                // window object for Threshold_Demo and Function to prevent an
                // internal null reference.
                CvTrackbarType = MyWindow.CreateTrackbar(trackbar_type,
                                threshold_type, max_type, Threshold_Demo);

                CvTrackbarValue = MyWindow.CreateTrackbar(trackbar_value,
                                threshold_value, max_value, Threshold_Demo);

                /// Call the function to initialize
                Threshold_Demo(0, 0);

                /// Wait until user finishes program
                while (true)
                {
                    int c;
                    c = Cv2.WaitKey(20);
                    if ((char)c == 27)
                    { break; }
                }
            }

            void Threshold_Demo(int pos, object userdata)
            {
                /* 0: Binary
                   1: Binary Inverted
                   2: Threshold Truncated
                   3: Threshold to Zero
                   4: Threshold to Zero Inverted
                 */

                Cv2.Threshold(src_gray, dst, CvTrackbarValue.Pos,
                    max_BINARY_value, (ThresholdTypes)CvTrackbarType.Pos);

                MyWindow.Image = dst;
            }
        }
    }

Points of Interest

I rewrote some of the more common C++ demo-code implementations to show how they work in OpenCvSharp. I found the Visual Studio Object Browser very helpful for viewing the OpenCvSharp3 API method parameters. I also recommend looking at the code samples at https://github.com/shimat/opencvsharp/releases; they illustrate additional API call snippets and implementations.

History

I added this article because I could not find any information about OpenCvSharp3 on this site.

In the first update: I added two additional code snippets for completeness.

In the second update: I fixed a problem with the Draw FillPoly code segment.

In the third update: I only updated the article content. The downloadable code snippet "nearest palette color averaging" works, but is slightly inferior to the code listed in the printout above. The downloaded file does not allow bright colors, so I added an offset value to the base colors. This brings the palette colors closer to a true average and allows pure white.
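
A quick worked example of that offset (a sketch only, reusing the arithmetic of the binImage routine listed above with its default binSize of 51):

    using System;

    class BinningCheck
    {
        static void Main()
        {
            // Same rounding as binImage above, with the default binSize of 51.
            int binSize = 51, binCenter = binSize / 2;                   // binCenter = 25

            int withoutOffset = (250 / binSize) * binSize;               // 204: pure white is unreachable
            int withOffset = ((250 + binCenter) / binSize) * binSize;    // 255: pure white is allowed

            Console.WriteLine("{0} vs {1}", withoutOffset, withOffset);
        }
    }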

Also, since a NuGet search for "opencvsharp" only shows version 2, I updated the text to reflect "opencvsharp3".

In the fourth update: I added two more involved examples to the code set.

In the fifth update: I added unsafe code to fix the float pointer conversion in the Machine Learning code segment.

In this update: I added a simple code walkthrough section.
