You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

508 lines
20 KiB

  1. //----------------------------------------------------------------------------
  2. // Copyright (C) 2004-2025 by EMGU Corporation. All rights reserved.
  3. //----------------------------------------------------------------------------
  4. using Emgu.CV;
  5. using Emgu.CV.CvEnum;
  6. using Emgu.CV.Dnn;
  7. using Emgu.CV.Models;
  8. using Emgu.CV.Platform.Maui.UI;
  9. namespace MauiDemoApp
  10. {
  11. public partial class MainPage : ContentPage
  12. {
  13. //int count = 0;
  14. public MainPage()
  15. {
  16. InitializeComponent();
  17. #if DEBUG
  18. CvInvoke.LogLevel = LogLevel.Verbose; //LogLevel.Debug;
  19. #endif
  20. String aboutIcon = null;
  21. ToolbarItem aboutItem = new ToolbarItem("About", aboutIcon,
  22. () =>
  23. {
  24. this.Navigation.PushAsync(new AboutPage());
  25. //page.DisplayAlert("Emgu CV Examples", "App version: ...", "Ok");
  26. }
  27. );
  28. this.ToolbarItems.Add(aboutItem);
  29. Button helloWorldButton = new Button();
  30. helloWorldButton.Text = "Hello world";
  31. Button planarSubdivisionButton = new Button();
  32. planarSubdivisionButton.Text = "Planar Subdivision";
  33. Button sceneTextDetectionButton = new Button();
  34. sceneTextDetectionButton.Text = "Scene Text detection (DNN Module)";
  35. Button featureDetectionButton = new Button();
  36. featureDetectionButton.Text = "Feature Matching";
  37. Button shapeDetectionButton = new Button();
  38. shapeDetectionButton.Text = "Shape Detection";
  39. Button maskRcnnButton = new Button();
  40. maskRcnnButton.Text = "Mask RCNN (DNN module)";
  41. Button yoloButton = new Button();
  42. yoloButton.Text = "Yolo (DNN module)";
  43. Button stopSignDetectionButton = new Button();
  44. stopSignDetectionButton.Text = "Stop Sign Detection (DNN module)";
  45. Button licensePlateRecognitionButton = new Button();
  46. licensePlateRecognitionButton.Text = "License Plate Recognition (DNN Module)";
  47. Button superresButton = new Button();
  48. superresButton.Text = "Super resolution (DNN Module)";
  49. List<View> buttonList = new List<View>()
  50. {
  51. helloWorldButton,
  52. planarSubdivisionButton,
  53. sceneTextDetectionButton,
  54. featureDetectionButton,
  55. shapeDetectionButton,
  56. maskRcnnButton,
  57. stopSignDetectionButton,
  58. yoloButton,
  59. licensePlateRecognitionButton,
  60. superresButton
  61. };
  62. var openCVConfigDict = CvInvoke.ConfigDict;
  63. bool haveViz = (openCVConfigDict["HAVE_OPENCV_VIZ"] != 0);
  64. bool haveDNN = (openCVConfigDict["HAVE_OPENCV_DNN"] != 0);
  65. bool haveFreetype = (openCVConfigDict["HAVE_OPENCV_FREETYPE"] != 0);
  66. bool haveFace = (openCVConfigDict["HAVE_OPENCV_FACE"] != 0);
  67. bool haveWechatQRCode = (openCVConfigDict["HAVE_OPENCV_WECHAT_QRCODE"] != 0);
  68. //bool haveBarcode = (openCVConfigDict["HAVE_OPENCV_BARCODE"] != 0);
  69. bool haveObjdetect = (openCVConfigDict["HAVE_OPENCV_OBJDETECT"] != 0);
  70. bool haveTesseract = (openCVConfigDict["HAVE_EMGUCV_TESSERACT"] != 0);
  71. bool haveFeatures2D = (openCVConfigDict["HAVE_OPENCV_FEATURES2D"] != 0);
  72. bool haveVideo = (openCVConfigDict["HAVE_OPENCV_VIDEO"] != 0);
  73. // bool haveOptFlow = (openCVConfigDict["HAVE_OPENCV_OPTFLOW"] != 0);
  74. bool hasInferenceEngine = false;
  75. if (haveDNN)
  76. {
  77. var dnnBackends = DnnInvoke.AvailableBackends;
  78. hasInferenceEngine = Array.Exists(dnnBackends, dnnBackend =>
  79. (dnnBackend.Backend == Emgu.CV.Dnn.Backend.InferenceEngine
  80. || dnnBackend.Backend == Emgu.CV.Dnn.Backend.InferenceEngineNgraph
  81. || dnnBackend.Backend == Emgu.CV.Dnn.Backend.InferenceEngineNnBuilder2019));
  82. #if DEBUG
  83. DnnInvoke.EnableModelDiagnostics(true);
  84. #endif
  85. }
  86. bool haveCamera = true;
  87. /*
  88. if (haveOptFlow && haveCamera)
  89. {
  90. #if !(__MACCATALYST__ || __ANDROID__ || __IOS__ || NETFX_CORE)
  91. Button motionDetectionButton = new Button();
  92. motionDetectionButton.Text = "Motion Detection";
  93. buttonList.Add(motionDetectionButton);
  94. motionDetectionButton.Clicked += (sender, args) =>
  95. {
  96. ProcessAndRenderPage motionDetectionPage = new ProcessAndRenderPage(
  97. new MotionDetectionModel(),
  98. "Open Camera",
  99. null,
  100. "This demo use MotionHistory for motion detection. The 3 images shown once it is up and running: 1. original image; 2. Foreground image; 3. Motion history");
  101. MainPage.Navigation.PushAsync(motionDetectionPage);
  102. };
  103. #endif
  104. }*/
  105. helloWorldButton.Clicked += (sender, args) =>
  106. {
  107. this.Navigation.PushAsync(new HelloWorldPage());
  108. };
  109. planarSubdivisionButton.Clicked += (sender, args) =>
  110. {
  111. this.Navigation.PushAsync(new PlanarSubdivisionPage());
  112. };
  113. shapeDetectionButton.Clicked += (sender, args) =>
  114. {
  115. ProcessAndRenderPage shapeDetectionPage = new ProcessAndRenderPage(
  116. new ShapeDetector(),
  117. "Shape detection",
  118. "pic3.png",
  119. "Shape detection");
  120. this.Navigation.PushAsync(shapeDetectionPage);
  121. };
  122. featureDetectionButton.Clicked += (sender, args) =>
  123. {
  124. this.Navigation.PushAsync(new FeatureMatchingPage());
  125. };
  126. //licensePlateRecognitionButton.Clicked += (sender, args) =>
  127. //{
  128. // ProcessAndRenderPage vehicleLicensePlateDetectorPage = new ProcessAndRenderPage(
  129. // new VehicleLicensePlateDetector(),
  130. // "Perform License Plate Recognition",
  131. // "cars_license_plate.png",
  132. // "This demo is based on the security barrier camera demo in the OpenVino model zoo. The models is trained with BIT-vehicle dataset. License plate is trained based on Chinese license plate that has white character on blue background. You will need to re-train your own model if you intend to use this in other countries.");
  133. // Picker p = vehicleLicensePlateDetectorPage.Picker;
  134. // p.IsVisible = true;
  135. // p.Title = "Preferred DNN backend & target";
  136. // foreach (String option in GetDnnBackends(DnnBackendType.InferenceEngineOnly))
  137. // {
  138. // p.Items.Add(option);
  139. // }
  140. // this.Navigation.PushAsync(vehicleLicensePlateDetectorPage);
  141. //};
  142. maskRcnnButton.Clicked += (sender, args) =>
  143. {
  144. ProcessAndRenderPage maskRcnnPage = new ProcessAndRenderPage(
  145. new MaskRcnn(),
  146. "Mask-rcnn Detection",
  147. "dog416.png",
  148. "");
  149. Picker p = maskRcnnPage.Picker;
  150. p.IsVisible = true;
  151. p.Title = "Preferred DNN backend & target";
  152. foreach (String option in GetDnnBackends())
  153. {
  154. p.Items.Add(option);
  155. }
  156. this.Navigation.PushAsync(maskRcnnPage);
  157. };
  158. sceneTextDetectionButton.Clicked += (sender, args) =>
  159. {
  160. ProcessAndRenderPage sceneTextDetectionPage = new ProcessAndRenderPage(
  161. new SceneTextDetector(),
  162. "Perform Scene Text Detection",
  163. "cars_license_plate.png",
  164. "This model is trained on MSRA-TD500, so it can detect both English and Chinese text instances.");
  165. this.Navigation.PushAsync(sceneTextDetectionPage);
  166. };
  167. stopSignDetectionButton.Clicked += (sender, args) =>
  168. {
  169. MaskRcnn model = new MaskRcnn();
  170. model.ObjectsOfInterest = new string[] { "stop sign" };
  171. ProcessAndRenderPage stopSignDetectionPage = new ProcessAndRenderPage(
  172. model,
  173. "Mask-rcnn Detection",
  174. "stop-sign.jpg",
  175. "Stop sign detection using Mask RCNN");
  176. Picker p = stopSignDetectionPage.Picker;
  177. p.IsVisible = true;
  178. p.Title = "Preferred DNN backend & target";
  179. foreach (String option in GetDnnBackends())
  180. {
  181. p.Items.Add(option);
  182. }
  183. this.Navigation.PushAsync(stopSignDetectionPage);
  184. };
  185. yoloButton.Clicked += (sender, args) =>
  186. {
  187. ProcessAndRenderPage yoloPage = new ProcessAndRenderPage(
  188. new Yolo(),
  189. "Yolo Detection",
  190. "dog416.png",
  191. "");
  192. Picker p = yoloPage.Picker;
  193. p.Title = "Yolo model version";
  194. p.IsVisible = true;
  195. p.Items.Add("YoloV10N");
  196. p.Items.Add("YoloV10S");
  197. p.Items.Add("YoloV10M");
  198. p.Items.Add("YoloV10B");
  199. p.Items.Add("YoloV10L");
  200. p.Items.Add("YoloV10X");
  201. p.Items.Add("YoloV4");
  202. p.Items.Add("YoloV4Tiny");
  203. p.Items.Add("YoloV3");
  204. p.Items.Add("YoloV3Spp");
  205. p.Items.Add("YoloV3Tiny");
  206. this.Navigation.PushAsync(yoloPage);
  207. };
  208. superresButton.Clicked += (sender, args) =>
  209. {
  210. ProcessAndRenderPage superresPage = new ProcessAndRenderPage(
  211. new Superres(),
  212. "Super resolution",
  213. "dog416.png",
  214. "");
  215. Picker p = superresPage.Picker;
  216. p.Title = "Super resolution version";
  217. p.IsVisible = true;
  218. p.Items.Add("EdsrX2");
  219. p.Items.Add("EdsrX3");
  220. p.Items.Add("EdsrX4");
  221. p.Items.Add("EspcnX2");
  222. p.Items.Add("EspcnX3");
  223. p.Items.Add("EspcnX4");
  224. p.Items.Add("FsrcnnX2");
  225. p.Items.Add("FsrcnnX3");
  226. p.Items.Add("FsrcnnX4");
  227. p.Items.Add("LapsrnX2");
  228. p.Items.Add("LapsrnX4");
  229. p.Items.Add("LapsrnX8");
  230. this.Navigation.PushAsync(superresPage);
  231. };
  232. maskRcnnButton.IsVisible = haveDNN;
  233. //faceLandmarkDetectionButton.IsVisible = haveDNN;
  234. stopSignDetectionButton.IsVisible = haveDNN;
  235. yoloButton.IsVisible = haveDNN;
  236. superresButton.IsVisible = haveDNN;
  237. sceneTextDetectionButton.IsVisible = haveDNN && haveFreetype;
  238. //licensePlateRecognitionButton.IsVisible = hasInferenceEngine;
  239. licensePlateRecognitionButton.IsVisible = false;
  240. featureDetectionButton.IsVisible = haveFeatures2D;
  241. if (haveTesseract)
  242. {
  243. Button ocrButton = new Button();
  244. ocrButton.Text = "Tesseract OCR";
  245. buttonList.Add(ocrButton);
  246. ocrButton.Clicked += (sender, args) =>
  247. {
  248. ProcessAndRenderPage ocrPage = new ProcessAndRenderPage(
  249. new TesseractModel(),
  250. "Perform Text Detection",
  251. "test_image.png",
  252. "");
  253. ocrPage.HasCameraOption = false;
  254. this.Navigation.PushAsync(ocrPage);
  255. };
  256. }
  257. if (haveVideo && haveCamera)
  258. {
  259. Button videoSurveillanceButton = new Button();
  260. videoSurveillanceButton.Text = "Video Surveillance";
  261. buttonList.Add(videoSurveillanceButton);
  262. videoSurveillanceButton.Clicked += (sender, args) =>
  263. {
  264. ProcessAndRenderPage videoPage = new ProcessAndRenderPage(
  265. new VideoSurveillanceModel(),
  266. "Open Camera",
  267. null,
  268. "");
  269. videoPage.HasCameraOption = true;
  270. this.Navigation.PushAsync(videoPage);
  271. };
  272. }
  273. if (haveObjdetect)
  274. {
  275. Button faceDetectionButton = new Button();
  276. faceDetectionButton.Text = "Face Detection (CascadeClassifier)";
  277. buttonList.Add(faceDetectionButton);
  278. faceDetectionButton.Clicked += (sender, args) =>
  279. {
  280. ProcessAndRenderPage faceAndEyeDetectorPage = new ProcessAndRenderPage(
  281. new CascadeFaceAndEyeDetector(),
  282. "Face and eye detection (Cascade classifier)",
  283. "lena.jpg",
  284. "Cascade classifier");
  285. this.Navigation.PushAsync(faceAndEyeDetectorPage);
  286. };
  287. if (haveDNN)
  288. {
  289. Button faceDetectionYNButton = new Button();
  290. faceDetectionYNButton.Text = "FaceDetection Yunet";
  291. buttonList.Add(faceDetectionYNButton);
  292. faceDetectionYNButton.Clicked += (sender, args) =>
  293. {
  294. ProcessAndRenderPage faceDetectionYNPage = new ProcessAndRenderPage(
  295. new FaceDetectorYNModel(),
  296. "Face detection Yunet",
  297. "lena.jpg",
  298. "Face detection (Yunet)");
  299. this.Navigation.PushAsync(faceDetectionYNPage);
  300. };
  301. }
  302. Button pedestrianDetectionButton = new Button();
  303. pedestrianDetectionButton.Text = "Pedestrian Detection";
  304. buttonList.Add(pedestrianDetectionButton);
  305. pedestrianDetectionButton.Clicked += (sender, args) =>
  306. {
  307. ProcessAndRenderPage pedestrianDetectorPage = new ProcessAndRenderPage(
  308. new PedestrianDetector(),
  309. "Pedestrian detection",
  310. "pedestrian.png",
  311. "HOG pedestrian detection");
  312. this.Navigation.PushAsync(pedestrianDetectorPage);
  313. };
  314. }
  315. if (haveFace && haveDNN)
  316. {
  317. Button faceLandmarkDetectionButton = new Button();
  318. faceLandmarkDetectionButton.Text = "Face Landmark Detection (DNN Module)";
  319. buttonList.Add(faceLandmarkDetectionButton);
  320. faceLandmarkDetectionButton.Clicked += (sender, args) =>
  321. {
  322. ProcessAndRenderPage faceLandmarkDetectionPage = new ProcessAndRenderPage(
  323. new FaceAndLandmarkDetector(),
  324. "Perform Face Landmark Detection",
  325. "lena.jpg",
  326. "");
  327. this.Navigation.PushAsync(faceLandmarkDetectionPage);
  328. };
  329. }
  330. if (haveWechatQRCode && haveObjdetect
  331. //TODO: WeChatQRCode detector doesn't work on iOS, probably a bug in iOS
  332. //Will need to figure out why.
  333. && (Microsoft.Maui.Devices.DeviceInfo.Platform != DevicePlatform.iOS)
  334. )
  335. {
  336. Button barcodeQrcodeDetectionButton = new Button();
  337. barcodeQrcodeDetectionButton.Text = "Barcode and QRCode Detection";
  338. buttonList.Add(barcodeQrcodeDetectionButton);
  339. barcodeQrcodeDetectionButton.Clicked += (sender, args) =>
  340. {
  341. BarcodeDetectorModel barcodeDetector = new BarcodeDetectorModel();
  342. WeChatQRCodeDetector qrcodeDetector = new WeChatQRCodeDetector();
  343. CombinedModel combinedModel = new CombinedModel(barcodeDetector, qrcodeDetector);
  344. ProcessAndRenderPage barcodeQrcodeDetectionPage = new ProcessAndRenderPage(
  345. combinedModel,
  346. "Perform Barcode and QRCode Detection",
  347. "qrcode_barcode.png",
  348. "");
  349. this.Navigation.PushAsync(barcodeQrcodeDetectionPage);
  350. };
  351. }
  352. if (haveViz)
  353. {
  354. Button viz3dButton = new Button();
  355. viz3dButton.Text = "Simple 3D reconstruction";
  356. buttonList.Add(viz3dButton);
  357. viz3dButton.Clicked += async (sender, args) =>
  358. {
  359. using (Mat left = new Mat())
  360. using (Stream streamL = await FileSystem.OpenAppPackageFileAsync("imL.png"))
  361. using (MemoryStream msL = new MemoryStream())
  362. using (Mat right = new Mat())
  363. using (Stream streamR = await FileSystem.OpenAppPackageFileAsync("imR.png"))
  364. using (MemoryStream msR = new MemoryStream())
  365. using (Mat points = new Mat())
  366. using (Mat colors = new Mat())
  367. {
  368. streamL.CopyTo(msL);
  369. CvInvoke.Imdecode(msL.ToArray(), ImreadModes.Color, left);
  370. streamR.CopyTo(msR);
  371. CvInvoke.Imdecode(msR.ToArray(), ImreadModes.Color, right);
  372. Simple3DReconstruct.GetPointAndColor(left, right, points, colors);
  373. Viz3d v = Simple3DReconstruct.GetViz3d(points, colors);
  374. v.Spin();
  375. }
  376. };
  377. }
  378. if (haveFreetype)
  379. {
  380. Button freetypeButton = new Button();
  381. freetypeButton.Text = "Free Type";
  382. buttonList.Add(freetypeButton);
  383. freetypeButton.Clicked += (sender, args) =>
  384. {
  385. this.Navigation.PushAsync(new FreetypePage());
  386. };
  387. }
  388. StackLayout buttonsLayout = new StackLayout
  389. {
  390. VerticalOptions = LayoutOptions.Start,
  391. };
  392. foreach (View b in buttonList)
  393. buttonsLayout.Children.Add(b);
  394. this.Content = new ScrollView()
  395. {
  396. Content = buttonsLayout,
  397. };
  398. }
  399. private enum DnnBackendType
  400. {
  401. Default,
  402. InferenceEngineOnly
  403. }
  404. private String[] GetDnnBackends(DnnBackendType backendType = DnnBackendType.Default)
  405. {
  406. var openCVConfigDict = CvInvoke.ConfigDict;
  407. bool haveDNN = (openCVConfigDict["HAVE_OPENCV_DNN"] != 0);
  408. if (haveDNN)
  409. {
  410. var dnnBackends = DnnInvoke.AvailableBackends;
  411. List<String> dnnBackendsText = new List<string>();
  412. foreach (var dnnBackend in dnnBackends)
  413. {
  414. if (backendType == DnnBackendType.InferenceEngineOnly &&
  415. !((dnnBackend.Backend == Emgu.CV.Dnn.Backend.InferenceEngine)
  416. || (dnnBackend.Backend == Emgu.CV.Dnn.Backend.InferenceEngineNgraph)
  417. || (dnnBackend.Backend == Emgu.CV.Dnn.Backend.InferenceEngineNnBuilder2019)))
  418. continue;
  419. dnnBackendsText.Add(String.Format("{0};{1}", dnnBackend.Backend, dnnBackend.Target));
  420. }
  421. return dnnBackendsText.ToArray();
  422. }
  423. else
  424. {
  425. return new string[0];
  426. }
  427. }
  428. }
  429. }