// ignore_for_file: constant_identifier_names

library cv;

import 'dart:ffi' as ffi;
import 'dart:typed_data';

import 'package:ffi/ffi.dart';
@@ -598,3 +601,213 @@ class QRCodeDetector extends CvStruct<cvg.QRCodeDetector> { |
598 | 601 | @override |
599 | 602 | List<int> get props => [ptr.address]; |
600 | 603 | } |
| 604 | + |
/// DNN-based face detector.
///
/// model download link: https://github.com/opencv/opencv_zoo/tree/master/models/face_detection_yunet
class FaceDetectorYN extends CvStruct<cvg.FaceDetectorYN> {
  FaceDetectorYN._(cvg.FaceDetectorYNPtr ptr, [bool attach = true]) : super.fromPointer(ptr) {
    if (attach) {
      finalizer.attach(this, ptr.cast(), detach: this);
    }
  }

  /// Creates a detector from model files on disk.
  ///
  /// [model] is the path to the model file; [config] is the path to the
  /// config file (may be empty, e.g. for ONNX models). [inputSize] is the
  /// expected size of input images. Detections scoring below
  /// [scoreThreshold] are discarded, boxes overlapping more than
  /// [nmsThreshold] are suppressed, and at most [topK] boxes are kept
  /// before NMS. [backendId] / [targetId] select the DNN backend/target.
  factory FaceDetectorYN.fromFile(
    String model,
    String config,
    Size inputSize, {
    double scoreThreshold = 0.9,
    double nmsThreshold = 0.3,
    int topK = 5000,
    int backendId = 0,
    int targetId = 0,
  }) {
    final p = calloc<cvg.FaceDetectorYN>();
    try {
      return using<FaceDetectorYN>((arena) {
        // Allocate the native strings from the arena so they are released
        // even when FaceDetectorYN_New fails; the previous manual
        // calloc.free calls were skipped on error, leaking both buffers.
        final cModel = model.toNativeUtf8(allocator: arena).cast<ffi.Char>();
        final cConfig = config.toNativeUtf8(allocator: arena).cast<ffi.Char>();
        cvRun(
          () => CFFI.FaceDetectorYN_New(
            cModel,
            cConfig,
            inputSize.toSize(arena).ref,
            scoreThreshold,
            nmsThreshold,
            topK,
            backendId,
            targetId,
            p,
          ),
        );
        return FaceDetectorYN._(p);
      });
    } catch (_) {
      // The output struct is only owned by a FaceDetectorYN on success;
      // free it here so a failed construction does not leak it.
      calloc.free(p);
      rethrow;
    }
  }

  /// Creates a detector from in-memory model buffers.
  ///
  /// [framework] names the originating framework (e.g. "onnx");
  /// [bufferModel] and [bufferConfig] hold the serialized model and config.
  /// Remaining parameters are as in [FaceDetectorYN.fromFile].
  factory FaceDetectorYN.fromBuffer(
    String framework,
    Uint8List bufferModel,
    Uint8List bufferConfig,
    Size inputSize, {
    double scoreThreshold = 0.9,
    double nmsThreshold = 0.3,
    int topK = 5000,
    int backendId = 0,
    int targetId = 0,
  }) {
    final p = calloc<cvg.FaceDetectorYN>();
    try {
      return using<FaceDetectorYN>((arena) {
        // Arena-allocated so the string is freed even if the native call
        // throws (previously leaked on the error path).
        final cFramework = framework.toNativeUtf8(allocator: arena).cast<ffi.Char>();
        // NOTE(review): VecUChar.fromList allocates native memory;
        // presumably it is reclaimed by its own finalizer — confirm,
        // otherwise these temporaries should be disposed after the call.
        cvRun(
          () => CFFI.FaceDetectorYN_NewFromBuffer(
            cFramework,
            VecUChar.fromList(bufferModel).ref,
            VecUChar.fromList(bufferConfig).ref,
            inputSize.toSize(arena).ref,
            scoreThreshold,
            nmsThreshold,
            topK,
            backendId,
            targetId,
            p,
          ),
        );
        return FaceDetectorYN._(p);
      });
    } catch (_) {
      // Free the unowned output struct on failure (see fromFile).
      calloc.free(p);
      rethrow;
    }
  }

  /// Returns the configured input size as a `(width, height)` record.
  (int, int) getInputSize() {
    final p = calloc<cvg.Size>();
    try {
      cvRun(() => CFFI.FaceDetectorYN_GetInputSize(ref, p));
      return (p.ref.width, p.ref.height);
    } finally {
      // Freed on both success and error paths (previously leaked on throw).
      calloc.free(p);
    }
  }

  /// Returns the current score threshold used to filter detections.
  double getScoreThreshold() {
    return using<double>((arena) {
      final p = arena<ffi.Float>();
      cvRun(() => CFFI.FaceDetectorYN_GetScoreThreshold(ref, p));
      return p.value;
    });
  }

  /// Returns the current non-maximum-suppression threshold.
  double getNmsThreshold() {
    return using<double>((arena) {
      final p = arena<ffi.Float>();
      cvRun(() => CFFI.FaceDetectorYN_GetNMSThreshold(ref, p));
      return p.value;
    });
  }

  /// Returns the maximum number of candidate boxes kept before NMS.
  int getTopK() {
    return using<int>((arena) {
      final p = arena<ffi.Int>();
      cvRun(() => CFFI.FaceDetectorYN_GetTopK(ref, p));
      return p.value;
    });
  }

  /// Detects faces in [img]; the returned [Mat] holds one detection per row.
  Mat detect(Mat img) {
    final p = calloc<cvg.Mat>();
    cvRun(() => CFFI.FaceDetectorYN_Detect(ref, img.ref, p));
    return Mat.fromPointer(p);
  }

  /// Sets the size of input images for subsequent [detect] calls.
  void setInputSize(Size inputSize) {
    using<void>((arena) {
      cvRun(() => CFFI.FaceDetectorYN_SetInputSize(ref, inputSize.toSize(arena).ref));
    });
  }

  /// Sets the minimum score a detection must reach to be reported.
  void setScoreThreshold(double scoreThreshold) {
    cvRun(() => CFFI.FaceDetectorYN_SetScoreThreshold(ref, scoreThreshold));
  }

  /// Sets the overlap threshold used by non-maximum suppression.
  void setNMSThreshold(double nmsThreshold) {
    cvRun(() => CFFI.FaceDetectorYN_SetNMSThreshold(ref, nmsThreshold));
  }

  /// Sets the maximum number of candidate boxes kept before NMS.
  void setTopK(int topK) {
    cvRun(() => CFFI.FaceDetectorYN_SetTopK(ref, topK));
  }

  @override
  cvg.FaceDetectorYN get ref => ptr.ref;

  static final finalizer = OcvFinalizer<cvg.FaceDetectorYNPtr>(CFFI.addresses.FaceDetectorYN_Close);

  /// Releases the native detector immediately instead of waiting for GC.
  void dispose() {
    finalizer.detach(this);
    CFFI.FaceDetectorYN_Close(ptr);
  }

  @override
  List<int> get props => [ptr.address];
}
| 750 | + |
/// DNN-based face recognizer.
///
/// model download link: https://github.com/opencv/opencv_zoo/tree/master/models/face_recognition_sface
class FaceRecognizerSF extends CvStruct<cvg.FaceRecognizerSF> {
  FaceRecognizerSF._(cvg.FaceRecognizerSFPtr ptr, [bool attach = true]) : super.fromPointer(ptr) {
    if (attach) {
      finalizer.attach(this, ptr.cast(), detach: this);
    }
  }

  /// Creates a recognizer from a model file.
  ///
  /// [model] is the path to the model file; [config] is the path to the
  /// config file (may be empty, e.g. for ONNX models). [backendId] and
  /// [targetId] select the DNN backend/target.
  factory FaceRecognizerSF.newRecognizer(
    String model,
    String config,
    int backendId,
    int targetId,
  ) {
    final p = calloc<cvg.FaceRecognizerSF>();
    try {
      return using<FaceRecognizerSF>((arena) {
        // Arena-allocated strings are released even when the native call
        // throws; the previous manual calloc.free calls were skipped on
        // error, leaking both buffers.
        final cModel = model.toNativeUtf8(allocator: arena).cast<ffi.Char>();
        final cConfig = config.toNativeUtf8(allocator: arena).cast<ffi.Char>();
        cvRun(() => CFFI.FaceRecognizerSF_New(cModel, cConfig, backendId, targetId, p));
        return FaceRecognizerSF._(p);
      });
    } catch (_) {
      // On failure no FaceRecognizerSF owns the struct; free it here.
      calloc.free(p);
      rethrow;
    }
  }

  /// Aligns and crops the face in [srcImg] described by [faceBox].
  Mat alignCrop(Mat srcImg, Mat faceBox) {
    final p = calloc<cvg.Mat>();
    cvRun(() => CFFI.FaceRecognizerSF_AlignCrop(ref, srcImg.ref, faceBox.ref, p));
    return Mat.fromPointer(p);
  }

  /// Extracts a feature vector from an aligned face image.
  Mat feature(Mat alignedImg) {
    final p = calloc<cvg.Mat>();
    cvRun(() => CFFI.FaceRecognizerSF_Feature(ref, alignedImg.ref, p));
    return Mat.fromPointer(p);
  }

  /// Returns the distance between two face features.
  ///
  /// [disType] is one of [DIS_TYPE_FR_COSINE] or [DIS_TYPE_FR_NORM_L2].
  double match(Mat faceFeature1, Mat faceFeature2, int disType) {
    return using<double>((arena) {
      final distance = arena<ffi.Double>();
      cvRun(() =>
          CFFI.FaceRecognizerSF_Match(ref, faceFeature1.ref, faceFeature2.ref, disType, distance));
      return distance.value;
    });
  }

  @override
  cvg.FaceRecognizerSF get ref => ptr.ref;

  static final finalizer =
      OcvFinalizer<cvg.FaceRecognizerSFPtr>(CFFI.addresses.FaceRecognizerSF_Close);

  /// Releases the native recognizer immediately instead of waiting for GC.
  void dispose() {
    finalizer.detach(this);
    CFFI.FaceRecognizerSF_Close(ptr);
  }

  @override
  List<int> get props => [ptr.address];

  /// Cosine distance flag for [match].
  ///
  /// Kept under its original misspelled name ("TYPR") for backward
  /// compatibility; prefer [DIS_TYPE_FR_COSINE].
  @Deprecated('Use DIS_TYPE_FR_COSINE instead')
  static const int DIS_TYPR_FR_COSINE = 0;

  /// Cosine distance flag for [match] (higher similarity => smaller angle).
  static const int DIS_TYPE_FR_COSINE = 0;

  /// L2-norm distance flag for [match].
  static const int DIS_TYPE_FR_NORM_L2 = 1;
}
0 commit comments