From 4f4ab111743129a6df6f5e5b3205bc782ad812a3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?David=20Pascual-Hern=C3=A1ndez?= Date: Thu, 21 Nov 2024 16:20:00 +0100 Subject: [PATCH] Revert "DetectionMetrics v2" --- .github/workflows/main.yml | 36 + .gitignore | 12 +- .travis.yml | 99 + Brewfile | 11 + CONTRIBUTING.md | 12 +- DetectionMetrics/CMakeLists.txt | 75 + DetectionMetrics/ClassMappingHierarchy.xml | 99 + .../DatasetEvaluationApp/CMakeLists.txt | 54 + .../SamplerGeneratorHandler/Converter.cpp | 91 + .../SamplerGeneratorHandler/Converter.h | 20 + .../SamplerGeneratorHandler/Deployer.cpp | 99 + .../SamplerGeneratorHandler/Deployer.h | 21 + .../SamplerGeneratorHandler/Detector.cpp | 79 + .../SamplerGeneratorHandler/Detector.h | 25 + .../SamplerGeneratorHandler/Evaluator.cpp | 61 + .../SamplerGeneratorHandler/Evaluator.h | 25 + .../SamplerGeneratorHandler/Label.cpp | 89 + .../SamplerGeneratorHandler/Label.h | 21 + .../SamplerGenerationHandler.cpp | 102 + .../SamplerGenerationHandler.h | 35 + .../SamplerGeneratorHandler/Viewer.cpp | 44 + .../SamplerGeneratorHandler/Viewer.h | 18 + .../DatasetEvaluationApp/gui/Appcfg.cpp | 21 + .../DatasetEvaluationApp/gui/Appcfg.hpp | 30 + .../gui/ListViewConfig.cpp | 197 ++ .../DatasetEvaluationApp/gui/ListViewConfig.h | 27 + .../DatasetEvaluationApp/gui/TabHandler.cpp | 30 + .../DatasetEvaluationApp/gui/TabHandler.h | 33 + .../DatasetEvaluationApp/gui/Utils.cpp | 92 + .../DatasetEvaluationApp/gui/Utils.h | 25 + .../DatasetEvaluationApp/gui/appconfig.cpp | 101 + .../DatasetEvaluationApp/gui/appconfig.h | 36 + .../DatasetEvaluationApp/gui/appconfig.ui | 237 ++ .../DatasetEvaluationApp/main.cpp | 68 + .../DatasetEvaluationApp/mainwindow.cpp | 517 ++++ .../DatasetEvaluationApp/mainwindow.h | 51 + .../DatasetEvaluationApp/mainwindow.ui | 1875 ++++++++++++ DetectionMetrics/Deps/glog/CMakeLists.txt | 54 + .../Deps/ice/CMake/FindZeroCIce.cmake | 69 + .../Deps/ice/CMake/FindZeroCIceBox.cmake | 25 + .../Deps/ice/CMake/FindZeroCIceCore.cmake | 
25 + .../ice/CMake/FindZeroCIceExecutables.cmake | 39 + .../Deps/ice/CMake/FindZeroCIceGrid.cmake | 25 + .../Deps/ice/CMake/FindZeroCIcePatch2.cmake | 25 + .../Deps/ice/CMake/FindZeroCIceSSL.cmake | 25 + .../Deps/ice/CMake/FindZeroCIceStorm.cmake | 25 + .../Deps/ice/CMake/FindZeroCIceUtil.cmake | 25 + .../Deps/ice/CMake/FindZeroCIceXML.cmake | 25 + .../Deps/ice/CMake/FindZeroIceCore.cmake | 25 + DetectionMetrics/Deps/ice/CMakeLists.txt | 45 + .../Deps/numpy/CMake/FindNumPy.cmake | 41 + DetectionMetrics/Deps/numpy/CMakeLists.txt | 12 + DetectionMetrics/Deps/opencv/CMakeLists.txt | 10 + DetectionMetrics/Deps/qt/CMakeLists.txt | 37 + DetectionMetrics/Deps/ros/CMakeLists.txt | 33 + DetectionMetrics/Deps/yaml-cpp/CMakeLists.txt | 9 + .../DetectionMetricsLib/CMakeLists.txt | 30 + .../DetectionMetricsLib/Common/CMakeLists.txt | 15 + .../DetectionMetricsLib/Common/EvalMatrix.cpp | 19 + .../DetectionMetricsLib/Common/EvalMatrix.h | 16 + .../DetectionMetricsLib/Common/Matrix.h | 9 + .../DetectionMetricsLib/Common/Sample.cpp | 478 +++ .../DetectionMetricsLib/Common/Sample.h | 88 + .../DatasetConverters/CMakeLists.txt | 48 + .../DatasetConverters/ClassType.cpp | 56 + .../DatasetConverters/ClassType.h | 25 + .../DatasetConverters/ClassTypeGeneric.cpp | 64 + .../DatasetConverters/ClassTypeGeneric.h | 21 + .../DatasetConverters/ClassTypeMapper.cpp | 88 + .../DatasetConverters/ClassTypeMapper.h | 22 + .../DatasetConverters/ClassTypeOwn.cpp | 103 + .../DatasetConverters/ClassTypeOwn.h | 19 + .../DatasetConverters/DatasetConverter.cpp | 5 + .../DatasetConverters/DatasetConverter.h | 14 + .../DatasetConverters/Tree.cpp | 120 + .../DatasetConverters/Tree.h | 29 + .../liveReaders/CameraReader.cpp | 57 + .../liveReaders/CameraReader.h | 34 + .../liveReaders/GenericLiveReader.cpp | 139 + .../liveReaders/GenericLiveReader.h | 46 + .../liveReaders/JderobotReader.cpp | 93 + .../liveReaders/JderobotReader.h | 31 + .../liveReaders/RecorderReader.cpp | 103 + .../liveReaders/RecorderReader.h | 
39 + .../liveReaders/VideoReader.cpp | 70 + .../liveReaders/VideoReader.h | 38 + .../readers/COCODatasetReader.cpp | 308 ++ .../readers/COCODatasetReader.h | 34 + .../readers/DatasetReader.cpp | 208 ++ .../DatasetConverters/readers/DatasetReader.h | 54 + .../readers/GenericDatasetReader.cpp | 172 ++ .../readers/GenericDatasetReader.h | 54 + .../readers/ImageNetDatasetReader.cpp | 145 + .../readers/ImageNetDatasetReader.h | 24 + .../readers/OpenImagesDatasetReader.cpp | 171 ++ .../readers/OpenImagesDatasetReader.h | 28 + .../readers/OwnDatasetReader.cpp | 45 + .../readers/OwnDatasetReader.h | 23 + .../readers/PascalVOCDatasetReader.cpp | 121 + .../readers/PascalVOCDatasetReader.h | 23 + .../readers/PrincetonDatasetReader.cpp | 75 + .../readers/PrincetonDatasetReader.h | 24 + .../readers/SamplesReader.cpp | 9 + .../DatasetConverters/readers/SamplesReader.h | 17 + .../readers/SpinelloDatasetReader.cpp | 113 + .../readers/SpinelloDatasetReader.h | 24 + .../readers/YoloDatasetReader.cpp | 61 + .../readers/YoloDatasetReader.h | 21 + .../writers/COCODatasetWriter.cpp | 272 ++ .../writers/COCODatasetWriter.h | 26 + .../writers/DatasetWriter.cpp | 24 + .../DatasetConverters/writers/DatasetWriter.h | 29 + .../writers/GenericDatasetWriter.cpp | 88 + .../writers/GenericDatasetWriter.h | 45 + .../writers/OpenImagesDatasetWriter.cpp | 110 + .../writers/OpenImagesDatasetWriter.h | 26 + .../writers/OwnDatasetWriter.cpp | 17 + .../writers/OwnDatasetWriter.h | 25 + .../writers/PascalVOCDatasetWriter.cpp | 171 ++ .../writers/PascalVOCDatasetWriter.h | 28 + .../writers/YoloDatasetWriter.cpp | 109 + .../writers/YoloDatasetWriter.h | 25 + .../Detectors/CMakeLists.txt | 21 + .../DetectionMetricsLib/Detectors/Detector.h | 17 + .../FrameworkEvaluator/CMakeLists.txt | 37 + .../FrameworkEvaluator/CaffeInferencer.cpp | 212 ++ .../FrameworkEvaluator/CaffeInferencer.h | 45 + .../FrameworkEvaluator/ClassStatistics.cpp | 117 + .../FrameworkEvaluator/ClassStatistics.h | 49 + 
.../FrameworkEvaluator/DarknetInferencer.cpp | 124 + .../FrameworkEvaluator/DarknetInferencer.h | 34 + .../DetectionsEvaluator.cpp | 409 +++ .../FrameworkEvaluator/DetectionsEvaluator.h | 76 + .../FrameworkInferencer.cpp | 51 + .../FrameworkEvaluator/FrameworkInferencer.h | 43 + .../FrameworkEvaluator/GenericInferencer.cpp | 123 + .../FrameworkEvaluator/GenericInferencer.h | 54 + .../FrameworkEvaluator/GlobalStats.cpp | 107 + .../FrameworkEvaluator/GlobalStats.h | 36 + .../FrameworkEvaluator/KerasInferencer.cpp | 224 ++ .../FrameworkEvaluator/KerasInferencer.h | 39 + .../FrameworkEvaluator/Labelling.cpp | 274 ++ .../FrameworkEvaluator/Labelling.h | 44 + .../MassBatchInferencer.cpp | 127 + .../FrameworkEvaluator/MassBatchInferencer.h | 25 + .../FrameworkEvaluator/MassInferencer.cpp | 230 ++ .../FrameworkEvaluator/MassInferencer.h | 43 + .../FrameworkEvaluator/PyTorchInferencer.cpp | 223 ++ .../FrameworkEvaluator/PyTorchInferencer.h | 42 + .../FrameworkEvaluator/StatsWriter.cpp | 71 + .../FrameworkEvaluator/StatsWriter.h | 21 + .../TensorFlowInferencer.cpp | 268 ++ .../FrameworkEvaluator/TensorFlowInferencer.h | 42 + .../FrameworkEvaluator/pythonWrap.h | 4 + .../GenerationUtils/BoundingRectGuiMover.cpp | 90 + .../GenerationUtils/BoundingRectGuiMover.h | 31 + .../GenerationUtils/BoundingValidator.cpp | 196 ++ .../GenerationUtils/BoundingValidator.h | 44 + .../GenerationUtils/CMakeLists.txt | 21 + .../DepthForegroundSegmentator.cpp | 214 ++ .../DepthForegroundSegmentator.h | 34 + .../GenerationUtils/DetectionsValidator.cpp | 192 ++ .../GenerationUtils/DetectionsValidator.h | 28 + .../Regions/CMakeLists.txt | 26 + .../Regions/ContourRegion.cpp | 14 + .../Regions/ContourRegion.h | 55 + .../Regions/ContourRegions.cpp | 145 + .../Regions/ContourRegions.h | 31 + .../Regions/RectRegion.cpp | 5 + .../DetectionMetricsLib/Regions/RectRegion.h | 50 + .../Regions/RectRegions.cpp | 173 ++ .../DetectionMetricsLib/Regions/RectRegions.h | 47 + .../DetectionMetricsLib/Regions/Region.h 
| 8 + .../DetectionMetricsLib/Regions/Regions.h | 26 + .../DetectionMetricsLib/Regions/RleRegion.cpp | 1 + .../DetectionMetricsLib/Regions/RleRegion.h | 50 + .../Regions/RleRegions.cpp | 100 + .../DetectionMetricsLib/Regions/RleRegions.h | 26 + .../DetectionMetricsLib/Regions/maskApi.cpp | 227 ++ .../DetectionMetricsLib/Regions/maskApi.h | 53 + .../DetectionMetricsLib/Utils/CMakeLists.txt | 53 + .../Utils/Configuration.cpp | 82 + .../DetectionMetricsLib/Utils/Configuration.h | 28 + .../DetectionMetricsLib/Utils/DepthUtils.cpp | 69 + .../DetectionMetricsLib/Utils/DepthUtils.h | 17 + .../DetectionMetricsLib/Utils/JsonHelper.h | 24 + .../DetectionMetricsLib/Utils/Key.cpp | 71 + .../DetectionMetricsLib/Utils/Key.h | 35 + .../Utils/Normalizations.cpp | 28 + .../Utils/Normalizations.h | 20 + .../DetectionMetricsLib/Utils/PathHelper.cpp | 23 + .../DetectionMetricsLib/Utils/PathHelper.h | 19 + .../DetectionMetricsLib/Utils/Playback.cpp | 121 + .../DetectionMetricsLib/Utils/Playback.hpp | 35 + .../Utils/SampleGenerationApp.cpp | 144 + .../Utils/SampleGenerationApp.h | 36 + .../DetectionMetricsLib/Utils/StatsUtils.cpp | 140 + .../DetectionMetricsLib/Utils/StatsUtils.h | 19 + .../DetectionMetricsLib/Utils/StringHandler.h | 35 + .../DetectionMetricsLib/Utils/addclass.cpp | 52 + .../DetectionMetricsLib/Utils/addclass.h | 35 + .../DetectionMetricsLib/Utils/addclass.ui | 146 + .../DetectionMetricsLib/Utils/pop_up.cpp | 47 + .../DetectionMetricsLib/Utils/pop_up.h | 36 + .../DetectionMetricsLib/Utils/pop_up.ui | 93 + .../DetectionMetricsLib/Utils/setclass.cpp | 43 + .../DetectionMetricsLib/Utils/setclass.h | 30 + .../DetectionMetricsLib/Utils/setclass.ui | 92 + .../python_modules/keras_detect.py | 61 + .../python_modules/keras_utils}/__init__.py | 0 .../keras_utils/bounding_box_utils.py | 356 +++ .../keras_utils/keras_layer_AnchorBoxes.py | 287 ++ .../keras_layer_DecodeDetections.py | 295 ++ .../keras_layer_L2Normalization.py | 72 + .../keras_utils/keras_ssd_loss.py | 222 ++ 
.../python_modules/pytorch_detect.py | 95 + .../python_modules/tensorflow_detect.py | 102 + .../DetectionMetricsROS/CMakeLists.txt | 111 + .../DetectionMetricsROS/DeployerNode.hpp | 40 + .../DetectionMetricsROS/msg/object.msg | 6 + .../DetectionMetricsROS/msg/objects.msg | 1 + .../DetectionMetricsROS/package.xml | 69 + .../DetectionMetricsROS/src/DeployerNode.cpp | 79 + .../DetectionMetricsROS/src/code.cpp | 8 + .../src/detection_node.cpp | 77 + .../src/image_converter.hpp | 49 + .../DetectionMetricsROS/src/test.cpp | 5 + DetectionMetrics/Dockerfile/Dockerfile | 48 + DetectionMetrics/Dockerfile/appConfig.yml | 11 + .../SampleGenerationApp/CMakeLists.txt | 28 + .../SampleGenerationApp/generator.cpp | 211 ++ .../Tools/AutoEvaluator/CMakeLists.txt | 38 + .../Tools/AutoEvaluator/autoEvaluator.cpp | 182 ++ DetectionMetrics/Tools/CMakeLists.txt | 6 + .../Tools/Converter/CMakeLists.txt | 26 + .../Tools/Converter/converter.cpp | 110 + .../Tools/Detector/CMakeLists.txt | 27 + DetectionMetrics/Tools/Detector/detector.cpp | 61 + .../Tools/Evaluator/CMakeLists.txt | 25 + .../Tools/Evaluator/evaluator.cpp | 75 + .../Tools/Splitter/CMakeLists.txt | 25 + DetectionMetrics/Tools/Splitter/splitter.cpp | 105 + DetectionMetrics/Tools/Viewer/CMakeLists.txt | 25 + DetectionMetrics/Tools/Viewer/viewer.cpp | 55 + DetectionMetrics/libs/CMakeLists.txt | 6 + DetectionMetrics/libs/comm/CMakeLists.txt | 95 + .../libs/comm/include/comm/cameraClient.hpp | 58 + .../libs/comm/include/comm/communicator.hpp | 52 + .../comm/include/comm/ice/cameraIceClient.hpp | 75 + .../include/comm/interfaces/cameraClient.hpp | 40 + .../comm/include/comm/ros/listenerCamera.hpp | 67 + .../comm/include/comm/ros/translators.hpp | 73 + .../libs/comm/include/comm/tools.hpp | 33 + DetectionMetrics/libs/comm/package.xml | 66 + .../libs/comm/src/cameraClient.cpp | 82 + .../libs/comm/src/communicator.cpp | 51 + .../libs/comm/src/ice/cameraIceClient.cpp | 218 ++ .../libs/comm/src/ros/listenerCamera.cpp | 81 + 
.../libs/comm/src/ros/translators.cpp | 65 + DetectionMetrics/libs/comm/src/tools.cpp | 33 + DetectionMetrics/libs/config/CMakeLists.txt | 30 + .../libs/config/include/config/config.h | 57 + .../libs/config/include/config/loader.hpp | 59 + .../libs/config/include/config/properties.hpp | 161 + .../libs/config/include/config/stdutils.hpp | 66 + DetectionMetrics/libs/config/src/loader.cpp | 79 + .../libs/config/src/properties.cpp | 230 ++ DetectionMetrics/libs/depthLib/CMakeLists.txt | 20 + .../libs/depthLib/DepthFilter.cpp | 343 +++ DetectionMetrics/libs/depthLib/DepthFilter.h | 63 + .../libs/depthLib/DepthSampler.cpp | 175 ++ DetectionMetrics/libs/depthLib/DepthSampler.h | 48 + .../libs/interfaces/CMakeLists.txt | 86 + .../libs/interfaces/slice/CMakeLists.txt | 4 + .../interfaces/slice/jderobot/CMakeLists.txt | 36 + .../libs/interfaces/slice/jderobot/camera.ice | 70 + .../libs/interfaces/slice/jderobot/common.ice | 11 + .../interfaces/slice/jderobot/containers.ice | 17 + .../interfaces/slice/jderobot/datetime.ice | 15 + .../interfaces/slice/jderobot/exceptions.ice | 36 + .../libs/interfaces/slice/jderobot/image.ice | 87 + DetectionMetrics/libs/types/CMakeLists.txt | 20 + .../libs/types/include/jderobottypes/image.h | 40 + .../libs/types/include/jderobottypes/rgbd.h | 38 + DetectionMetrics/libs/utils/CMakeLists.txt | 36 + DetectionMetrics/libs/utils/CameraUtils.cpp | 195 ++ DetectionMetrics/libs/utils/CameraUtils.h | 22 + .../libs/utils/colorspaces/colorspaces.h | 110 + .../libs/utils/colorspaces/colorspacesmm.h | 27 + .../libs/utils/colorspaces/imagecv.cpp | 486 +++ .../libs/utils/colorspaces/imagecv.h | 406 +++ .../libs/utils/colorspaces/rgb2hsv.c | 294 ++ .../libs/utils/colorspaces/rgb2yuv.c | 246 ++ .../libs/utils/colorspaces/uncopyable.h | 37 + DetectionMetrics/package.sh | 87 + DetectionMetrics/test/CMakeLists.txt | 1 + DetectionMetrics/test/GLOG/CMakeLists.txt | 7 + DetectionMetrics/test/GLOG/glogTest.cpp | 45 + Doxyfile | 2494 +++++++++++++++ README.md | 134 
+- datasetGenerator/generator.py | 44 + datasetGenerator/loadImage.py | 12 + datasetGenerator/requirements.txt | 1 + detectionmetrics/datasets/dataset.py | 109 - detectionmetrics/datasets/gaia.py | 30 - detectionmetrics/datasets/goose.py | 80 - detectionmetrics/datasets/rellis3d.py | 82 - detectionmetrics/models/__init__.py | 0 detectionmetrics/models/model.py | 68 - detectionmetrics/models/onnx.py | 12 - detectionmetrics/models/tensorflow.py | 221 -- detectionmetrics/models/torch.py | 206 -- detectionmetrics/utils/__init__.py | 0 detectionmetrics/utils/conversion.py | 82 - detectionmetrics/utils/io.py | 69 - detectionmetrics/utils/metrics.py | 79 - examples/gaia_example.py | 27 - examples/goose_example.py | 52 - examples/merge_datasets_example.py | 42 - examples/rellis3d_example.py | 54 - examples/tensorflow_example.py | 82 - examples/torch_example.py | 82 - py_docs/Makefile | 20 - .../detectionmetrics.datasets.doctree | Bin 42177 -> 0 bytes .../_build/doctrees/detectionmetrics.doctree | Bin 3740 -> 0 bytes .../doctrees/detectionmetrics.models.doctree | Bin 112271 -> 0 bytes .../doctrees/detectionmetrics.utils.doctree | Bin 74880 -> 0 bytes py_docs/_build/doctrees/environment.pickle | Bin 62091 -> 0 bytes py_docs/_build/doctrees/index.doctree | Bin 5914 -> 0 bytes py_docs/_build/doctrees/modules.doctree | Bin 2808 -> 0 bytes py_docs/_build/html/.buildinfo | 4 - py_docs/_build/html/.buildinfo.bak | 4 - .../detectionmetrics.datasets.rst.txt | 45 - .../_sources/detectionmetrics.models.rst.txt | 45 - .../html/_sources/detectionmetrics.rst.txt | 20 - .../_sources/detectionmetrics.utils.rst.txt | 37 - py_docs/_build/html/_sources/index.rst.txt | 32 - py_docs/_build/html/_sources/modules.rst.txt | 7 - .../_sphinx_javascript_frameworks_compat.js | 123 - py_docs/_build/html/_static/basic.css | 914 ------ .../_build/html/_static/css/badge_only.css | 1 - .../_static/css/fonts/Roboto-Slab-Bold.woff | Bin 87624 -> 0 bytes .../_static/css/fonts/Roboto-Slab-Bold.woff2 | Bin 
67312 -> 0 bytes .../css/fonts/Roboto-Slab-Regular.woff | Bin 86288 -> 0 bytes .../css/fonts/Roboto-Slab-Regular.woff2 | Bin 66444 -> 0 bytes .../_static/css/fonts/fontawesome-webfont.eot | Bin 165742 -> 0 bytes .../_static/css/fonts/fontawesome-webfont.svg | 2671 ----------------- .../_static/css/fonts/fontawesome-webfont.ttf | Bin 165548 -> 0 bytes .../css/fonts/fontawesome-webfont.woff | Bin 98024 -> 0 bytes .../css/fonts/fontawesome-webfont.woff2 | Bin 77160 -> 0 bytes .../_static/css/fonts/lato-bold-italic.woff | Bin 323344 -> 0 bytes .../_static/css/fonts/lato-bold-italic.woff2 | Bin 193308 -> 0 bytes .../html/_static/css/fonts/lato-bold.woff | Bin 309728 -> 0 bytes .../html/_static/css/fonts/lato-bold.woff2 | Bin 184912 -> 0 bytes .../_static/css/fonts/lato-normal-italic.woff | Bin 328412 -> 0 bytes .../css/fonts/lato-normal-italic.woff2 | Bin 195704 -> 0 bytes .../html/_static/css/fonts/lato-normal.woff | Bin 309192 -> 0 bytes .../html/_static/css/fonts/lato-normal.woff2 | Bin 182708 -> 0 bytes py_docs/_build/html/_static/css/theme.css | 4 - py_docs/_build/html/_static/doctools.js | 149 - .../html/_static/documentation_options.js | 13 - py_docs/_build/html/_static/file.png | Bin 286 -> 0 bytes .../html/_static/fonts/Lato/lato-bold.eot | Bin 256056 -> 0 bytes .../html/_static/fonts/Lato/lato-bold.ttf | Bin 600856 -> 0 bytes .../html/_static/fonts/Lato/lato-bold.woff | Bin 309728 -> 0 bytes .../html/_static/fonts/Lato/lato-bold.woff2 | Bin 184912 -> 0 bytes .../_static/fonts/Lato/lato-bolditalic.eot | Bin 266158 -> 0 bytes .../_static/fonts/Lato/lato-bolditalic.ttf | Bin 622572 -> 0 bytes .../_static/fonts/Lato/lato-bolditalic.woff | Bin 323344 -> 0 bytes .../_static/fonts/Lato/lato-bolditalic.woff2 | Bin 193308 -> 0 bytes .../html/_static/fonts/Lato/lato-italic.eot | Bin 268604 -> 0 bytes .../html/_static/fonts/Lato/lato-italic.ttf | Bin 639388 -> 0 bytes .../html/_static/fonts/Lato/lato-italic.woff | Bin 328412 -> 0 bytes 
.../html/_static/fonts/Lato/lato-italic.woff2 | Bin 195704 -> 0 bytes .../html/_static/fonts/Lato/lato-regular.eot | Bin 253461 -> 0 bytes .../html/_static/fonts/Lato/lato-regular.ttf | Bin 607720 -> 0 bytes .../html/_static/fonts/Lato/lato-regular.woff | Bin 309192 -> 0 bytes .../_static/fonts/Lato/lato-regular.woff2 | Bin 182708 -> 0 bytes .../fonts/RobotoSlab/roboto-slab-v7-bold.eot | Bin 79520 -> 0 bytes .../fonts/RobotoSlab/roboto-slab-v7-bold.ttf | Bin 170616 -> 0 bytes .../fonts/RobotoSlab/roboto-slab-v7-bold.woff | Bin 87624 -> 0 bytes .../RobotoSlab/roboto-slab-v7-bold.woff2 | Bin 67312 -> 0 bytes .../RobotoSlab/roboto-slab-v7-regular.eot | Bin 78331 -> 0 bytes .../RobotoSlab/roboto-slab-v7-regular.ttf | Bin 169064 -> 0 bytes .../RobotoSlab/roboto-slab-v7-regular.woff | Bin 86288 -> 0 bytes .../RobotoSlab/roboto-slab-v7-regular.woff2 | Bin 66444 -> 0 bytes py_docs/_build/html/_static/jquery.js | 2 - py_docs/_build/html/_static/js/badge_only.js | 1 - py_docs/_build/html/_static/js/theme.js | 1 - py_docs/_build/html/_static/js/versions.js | 228 -- py_docs/_build/html/_static/language_data.js | 192 -- py_docs/_build/html/_static/minus.png | Bin 90 -> 0 bytes py_docs/_build/html/_static/plus.png | Bin 90 -> 0 bytes py_docs/_build/html/_static/pygments.css | 75 - py_docs/_build/html/_static/searchtools.js | 632 ---- .../_build/html/_static/sphinx_highlight.js | 154 - .../html/detectionmetrics.datasets.html | 236 -- py_docs/_build/html/detectionmetrics.html | 234 -- .../_build/html/detectionmetrics.models.html | 416 --- .../_build/html/detectionmetrics.utils.html | 376 --- py_docs/_build/html/genindex.html | 443 --- py_docs/_build/html/index.html | 127 - py_docs/_build/html/modules.html | 149 - py_docs/_build/html/objects.inv | Bin 771 -> 0 bytes py_docs/_build/html/py-modindex.html | 190 -- py_docs/_build/html/search.html | 120 - py_docs/_build/html/searchindex.js | 1 - py_docs/conf.py | 29 - py_docs/detectionmetrics.datasets.rst | 45 - 
py_docs/detectionmetrics.models.rst | 45 - py_docs/detectionmetrics.rst | 20 - py_docs/detectionmetrics.utils.rst | 37 - py_docs/index.rst | 32 - py_docs/make.bat | 35 - py_docs/modules.rst | 7 - pyproject.toml | 31 - sampleGenerator/simpleSampleGenerator/main.py | 289 ++ .../sampleGenerator}/__init__.py | 0 .../sampleGenerator/sample.py | 128 + samples/myAutoEvaluatorConfig.yml | 37 + samples/myappConfig.yml | 11 + samples/names/class-descriptions-boxable.csv | 601 ++++ samples/names/coco.names | 90 + samples/names/spinello.names | 1 + samples/names/voc.names | 20 + samples/samplerConfig-princeton.txt | 32 + samples/samplerConfig-spinello.txt | 29 + samples/samplerConfig.txt | 28 + tests/__init__.py | 0 429 files changed, 28144 insertions(+), 9411 deletions(-) create mode 100644 .github/workflows/main.yml create mode 100644 .travis.yml create mode 100644 Brewfile create mode 100644 DetectionMetrics/CMakeLists.txt create mode 100644 DetectionMetrics/ClassMappingHierarchy.xml create mode 100644 DetectionMetrics/DatasetEvaluationApp/CMakeLists.txt create mode 100644 DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/Converter.cpp create mode 100644 DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/Converter.h create mode 100644 DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/Deployer.cpp create mode 100644 DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/Deployer.h create mode 100644 DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/Detector.cpp create mode 100644 DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/Detector.h create mode 100644 DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/Evaluator.cpp create mode 100644 DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/Evaluator.h create mode 100644 DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/Label.cpp create mode 100644 DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/Label.h create 
mode 100644 DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/SamplerGenerationHandler.cpp create mode 100644 DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/SamplerGenerationHandler.h create mode 100644 DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/Viewer.cpp create mode 100644 DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/Viewer.h create mode 100644 DetectionMetrics/DatasetEvaluationApp/gui/Appcfg.cpp create mode 100644 DetectionMetrics/DatasetEvaluationApp/gui/Appcfg.hpp create mode 100644 DetectionMetrics/DatasetEvaluationApp/gui/ListViewConfig.cpp create mode 100644 DetectionMetrics/DatasetEvaluationApp/gui/ListViewConfig.h create mode 100644 DetectionMetrics/DatasetEvaluationApp/gui/TabHandler.cpp create mode 100644 DetectionMetrics/DatasetEvaluationApp/gui/TabHandler.h create mode 100644 DetectionMetrics/DatasetEvaluationApp/gui/Utils.cpp create mode 100644 DetectionMetrics/DatasetEvaluationApp/gui/Utils.h create mode 100644 DetectionMetrics/DatasetEvaluationApp/gui/appconfig.cpp create mode 100644 DetectionMetrics/DatasetEvaluationApp/gui/appconfig.h create mode 100644 DetectionMetrics/DatasetEvaluationApp/gui/appconfig.ui create mode 100644 DetectionMetrics/DatasetEvaluationApp/main.cpp create mode 100644 DetectionMetrics/DatasetEvaluationApp/mainwindow.cpp create mode 100644 DetectionMetrics/DatasetEvaluationApp/mainwindow.h create mode 100644 DetectionMetrics/DatasetEvaluationApp/mainwindow.ui create mode 100644 DetectionMetrics/Deps/glog/CMakeLists.txt create mode 100644 DetectionMetrics/Deps/ice/CMake/FindZeroCIce.cmake create mode 100644 DetectionMetrics/Deps/ice/CMake/FindZeroCIceBox.cmake create mode 100644 DetectionMetrics/Deps/ice/CMake/FindZeroCIceCore.cmake create mode 100644 DetectionMetrics/Deps/ice/CMake/FindZeroCIceExecutables.cmake create mode 100644 DetectionMetrics/Deps/ice/CMake/FindZeroCIceGrid.cmake create mode 100644 DetectionMetrics/Deps/ice/CMake/FindZeroCIcePatch2.cmake 
create mode 100644 DetectionMetrics/Deps/ice/CMake/FindZeroCIceSSL.cmake create mode 100644 DetectionMetrics/Deps/ice/CMake/FindZeroCIceStorm.cmake create mode 100644 DetectionMetrics/Deps/ice/CMake/FindZeroCIceUtil.cmake create mode 100644 DetectionMetrics/Deps/ice/CMake/FindZeroCIceXML.cmake create mode 100644 DetectionMetrics/Deps/ice/CMake/FindZeroIceCore.cmake create mode 100644 DetectionMetrics/Deps/ice/CMakeLists.txt create mode 100644 DetectionMetrics/Deps/numpy/CMake/FindNumPy.cmake create mode 100644 DetectionMetrics/Deps/numpy/CMakeLists.txt create mode 100644 DetectionMetrics/Deps/opencv/CMakeLists.txt create mode 100644 DetectionMetrics/Deps/qt/CMakeLists.txt create mode 100644 DetectionMetrics/Deps/ros/CMakeLists.txt create mode 100644 DetectionMetrics/Deps/yaml-cpp/CMakeLists.txt create mode 100644 DetectionMetrics/DetectionMetricsLib/CMakeLists.txt create mode 100644 DetectionMetrics/DetectionMetricsLib/Common/CMakeLists.txt create mode 100644 DetectionMetrics/DetectionMetricsLib/Common/EvalMatrix.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/Common/EvalMatrix.h create mode 100644 DetectionMetrics/DetectionMetricsLib/Common/Matrix.h create mode 100644 DetectionMetrics/DetectionMetricsLib/Common/Sample.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/Common/Sample.h create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/CMakeLists.txt create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/ClassType.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/ClassType.h create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/ClassTypeGeneric.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/ClassTypeGeneric.h create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/ClassTypeMapper.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/ClassTypeMapper.h create mode 100644 
DetectionMetrics/DetectionMetricsLib/DatasetConverters/ClassTypeOwn.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/ClassTypeOwn.h create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/DatasetConverter.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/DatasetConverter.h create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/Tree.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/Tree.h create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/liveReaders/CameraReader.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/liveReaders/CameraReader.h create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/liveReaders/GenericLiveReader.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/liveReaders/GenericLiveReader.h create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/liveReaders/JderobotReader.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/liveReaders/JderobotReader.h create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/liveReaders/RecorderReader.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/liveReaders/RecorderReader.h create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/liveReaders/VideoReader.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/liveReaders/VideoReader.h create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/COCODatasetReader.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/COCODatasetReader.h create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/DatasetReader.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/DatasetReader.h create mode 100644 
DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/GenericDatasetReader.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/GenericDatasetReader.h create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/ImageNetDatasetReader.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/ImageNetDatasetReader.h create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/OpenImagesDatasetReader.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/OpenImagesDatasetReader.h create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/OwnDatasetReader.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/OwnDatasetReader.h create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/PascalVOCDatasetReader.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/PascalVOCDatasetReader.h create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/PrincetonDatasetReader.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/PrincetonDatasetReader.h create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/SamplesReader.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/SamplesReader.h create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/SpinelloDatasetReader.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/SpinelloDatasetReader.h create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/YoloDatasetReader.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/YoloDatasetReader.h create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/COCODatasetWriter.cpp create mode 
100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/COCODatasetWriter.h create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/DatasetWriter.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/DatasetWriter.h create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/GenericDatasetWriter.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/GenericDatasetWriter.h create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/OpenImagesDatasetWriter.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/OpenImagesDatasetWriter.h create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/OwnDatasetWriter.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/OwnDatasetWriter.h create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/PascalVOCDatasetWriter.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/PascalVOCDatasetWriter.h create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/YoloDatasetWriter.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/YoloDatasetWriter.h create mode 100644 DetectionMetrics/DetectionMetricsLib/Detectors/CMakeLists.txt create mode 100644 DetectionMetrics/DetectionMetricsLib/Detectors/Detector.h create mode 100644 DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/CMakeLists.txt create mode 100644 DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/CaffeInferencer.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/CaffeInferencer.h create mode 100644 DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/ClassStatistics.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/ClassStatistics.h create mode 100644 
DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/DarknetInferencer.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/DarknetInferencer.h create mode 100644 DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/DetectionsEvaluator.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/DetectionsEvaluator.h create mode 100644 DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/FrameworkInferencer.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/FrameworkInferencer.h create mode 100644 DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/GenericInferencer.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/GenericInferencer.h create mode 100644 DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/GlobalStats.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/GlobalStats.h create mode 100644 DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/KerasInferencer.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/KerasInferencer.h create mode 100644 DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/Labelling.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/Labelling.h create mode 100644 DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/MassBatchInferencer.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/MassBatchInferencer.h create mode 100644 DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/MassInferencer.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/MassInferencer.h create mode 100644 DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/PyTorchInferencer.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/PyTorchInferencer.h create mode 100644 DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/StatsWriter.cpp create mode 100644 
DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/StatsWriter.h create mode 100644 DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/TensorFlowInferencer.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/TensorFlowInferencer.h create mode 100644 DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/pythonWrap.h create mode 100644 DetectionMetrics/DetectionMetricsLib/GenerationUtils/BoundingRectGuiMover.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/GenerationUtils/BoundingRectGuiMover.h create mode 100644 DetectionMetrics/DetectionMetricsLib/GenerationUtils/BoundingValidator.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/GenerationUtils/BoundingValidator.h create mode 100644 DetectionMetrics/DetectionMetricsLib/GenerationUtils/CMakeLists.txt create mode 100644 DetectionMetrics/DetectionMetricsLib/GenerationUtils/DepthForegroundSegmentator.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/GenerationUtils/DepthForegroundSegmentator.h create mode 100644 DetectionMetrics/DetectionMetricsLib/GenerationUtils/DetectionsValidator.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/GenerationUtils/DetectionsValidator.h create mode 100644 DetectionMetrics/DetectionMetricsLib/Regions/CMakeLists.txt create mode 100644 DetectionMetrics/DetectionMetricsLib/Regions/ContourRegion.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/Regions/ContourRegion.h create mode 100644 DetectionMetrics/DetectionMetricsLib/Regions/ContourRegions.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/Regions/ContourRegions.h create mode 100644 DetectionMetrics/DetectionMetricsLib/Regions/RectRegion.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/Regions/RectRegion.h create mode 100644 DetectionMetrics/DetectionMetricsLib/Regions/RectRegions.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/Regions/RectRegions.h create mode 100644 
DetectionMetrics/DetectionMetricsLib/Regions/Region.h create mode 100644 DetectionMetrics/DetectionMetricsLib/Regions/Regions.h create mode 100644 DetectionMetrics/DetectionMetricsLib/Regions/RleRegion.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/Regions/RleRegion.h create mode 100644 DetectionMetrics/DetectionMetricsLib/Regions/RleRegions.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/Regions/RleRegions.h create mode 100644 DetectionMetrics/DetectionMetricsLib/Regions/maskApi.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/Regions/maskApi.h create mode 100644 DetectionMetrics/DetectionMetricsLib/Utils/CMakeLists.txt create mode 100644 DetectionMetrics/DetectionMetricsLib/Utils/Configuration.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/Utils/Configuration.h create mode 100644 DetectionMetrics/DetectionMetricsLib/Utils/DepthUtils.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/Utils/DepthUtils.h create mode 100644 DetectionMetrics/DetectionMetricsLib/Utils/JsonHelper.h create mode 100644 DetectionMetrics/DetectionMetricsLib/Utils/Key.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/Utils/Key.h create mode 100644 DetectionMetrics/DetectionMetricsLib/Utils/Normalizations.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/Utils/Normalizations.h create mode 100644 DetectionMetrics/DetectionMetricsLib/Utils/PathHelper.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/Utils/PathHelper.h create mode 100644 DetectionMetrics/DetectionMetricsLib/Utils/Playback.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/Utils/Playback.hpp create mode 100644 DetectionMetrics/DetectionMetricsLib/Utils/SampleGenerationApp.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/Utils/SampleGenerationApp.h create mode 100644 DetectionMetrics/DetectionMetricsLib/Utils/StatsUtils.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/Utils/StatsUtils.h create mode 100644 
DetectionMetrics/DetectionMetricsLib/Utils/StringHandler.h create mode 100644 DetectionMetrics/DetectionMetricsLib/Utils/addclass.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/Utils/addclass.h create mode 100644 DetectionMetrics/DetectionMetricsLib/Utils/addclass.ui create mode 100644 DetectionMetrics/DetectionMetricsLib/Utils/pop_up.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/Utils/pop_up.h create mode 100644 DetectionMetrics/DetectionMetricsLib/Utils/pop_up.ui create mode 100644 DetectionMetrics/DetectionMetricsLib/Utils/setclass.cpp create mode 100644 DetectionMetrics/DetectionMetricsLib/Utils/setclass.h create mode 100644 DetectionMetrics/DetectionMetricsLib/Utils/setclass.ui create mode 100644 DetectionMetrics/DetectionMetricsLib/python_modules/keras_detect.py rename {detectionmetrics => DetectionMetrics/DetectionMetricsLib/python_modules/keras_utils}/__init__.py (100%) create mode 100644 DetectionMetrics/DetectionMetricsLib/python_modules/keras_utils/bounding_box_utils.py create mode 100644 DetectionMetrics/DetectionMetricsLib/python_modules/keras_utils/keras_layer_AnchorBoxes.py create mode 100644 DetectionMetrics/DetectionMetricsLib/python_modules/keras_utils/keras_layer_DecodeDetections.py create mode 100644 DetectionMetrics/DetectionMetricsLib/python_modules/keras_utils/keras_layer_L2Normalization.py create mode 100644 DetectionMetrics/DetectionMetricsLib/python_modules/keras_utils/keras_ssd_loss.py create mode 100644 DetectionMetrics/DetectionMetricsLib/python_modules/pytorch_detect.py create mode 100644 DetectionMetrics/DetectionMetricsLib/python_modules/tensorflow_detect.py create mode 100644 DetectionMetrics/DetectionMetricsROS/CMakeLists.txt create mode 100644 DetectionMetrics/DetectionMetricsROS/include/DetectionMetricsROS/DeployerNode.hpp create mode 100644 DetectionMetrics/DetectionMetricsROS/msg/object.msg create mode 100644 DetectionMetrics/DetectionMetricsROS/msg/objects.msg create mode 100644 
DetectionMetrics/DetectionMetricsROS/package.xml create mode 100644 DetectionMetrics/DetectionMetricsROS/src/DeployerNode.cpp create mode 100644 DetectionMetrics/DetectionMetricsROS/src/code.cpp create mode 100644 DetectionMetrics/DetectionMetricsROS/src/detection_node.cpp create mode 100644 DetectionMetrics/DetectionMetricsROS/src/image_converter.hpp create mode 100644 DetectionMetrics/DetectionMetricsROS/src/test.cpp create mode 100644 DetectionMetrics/Dockerfile/Dockerfile create mode 100644 DetectionMetrics/Dockerfile/appConfig.yml create mode 100644 DetectionMetrics/SampleGenerationApp/CMakeLists.txt create mode 100644 DetectionMetrics/SampleGenerationApp/generator.cpp create mode 100644 DetectionMetrics/Tools/AutoEvaluator/CMakeLists.txt create mode 100644 DetectionMetrics/Tools/AutoEvaluator/autoEvaluator.cpp create mode 100644 DetectionMetrics/Tools/CMakeLists.txt create mode 100644 DetectionMetrics/Tools/Converter/CMakeLists.txt create mode 100644 DetectionMetrics/Tools/Converter/converter.cpp create mode 100644 DetectionMetrics/Tools/Detector/CMakeLists.txt create mode 100644 DetectionMetrics/Tools/Detector/detector.cpp create mode 100644 DetectionMetrics/Tools/Evaluator/CMakeLists.txt create mode 100644 DetectionMetrics/Tools/Evaluator/evaluator.cpp create mode 100644 DetectionMetrics/Tools/Splitter/CMakeLists.txt create mode 100644 DetectionMetrics/Tools/Splitter/splitter.cpp create mode 100644 DetectionMetrics/Tools/Viewer/CMakeLists.txt create mode 100644 DetectionMetrics/Tools/Viewer/viewer.cpp create mode 100644 DetectionMetrics/libs/CMakeLists.txt create mode 100644 DetectionMetrics/libs/comm/CMakeLists.txt create mode 100644 DetectionMetrics/libs/comm/include/comm/cameraClient.hpp create mode 100644 DetectionMetrics/libs/comm/include/comm/communicator.hpp create mode 100644 DetectionMetrics/libs/comm/include/comm/ice/cameraIceClient.hpp create mode 100644 DetectionMetrics/libs/comm/include/comm/interfaces/cameraClient.hpp create mode 100644 
DetectionMetrics/libs/comm/include/comm/ros/listenerCamera.hpp create mode 100644 DetectionMetrics/libs/comm/include/comm/ros/translators.hpp create mode 100644 DetectionMetrics/libs/comm/include/comm/tools.hpp create mode 100644 DetectionMetrics/libs/comm/package.xml create mode 100644 DetectionMetrics/libs/comm/src/cameraClient.cpp create mode 100644 DetectionMetrics/libs/comm/src/communicator.cpp create mode 100644 DetectionMetrics/libs/comm/src/ice/cameraIceClient.cpp create mode 100644 DetectionMetrics/libs/comm/src/ros/listenerCamera.cpp create mode 100644 DetectionMetrics/libs/comm/src/ros/translators.cpp create mode 100644 DetectionMetrics/libs/comm/src/tools.cpp create mode 100644 DetectionMetrics/libs/config/CMakeLists.txt create mode 100644 DetectionMetrics/libs/config/include/config/config.h create mode 100644 DetectionMetrics/libs/config/include/config/loader.hpp create mode 100644 DetectionMetrics/libs/config/include/config/properties.hpp create mode 100644 DetectionMetrics/libs/config/include/config/stdutils.hpp create mode 100644 DetectionMetrics/libs/config/src/loader.cpp create mode 100644 DetectionMetrics/libs/config/src/properties.cpp create mode 100644 DetectionMetrics/libs/depthLib/CMakeLists.txt create mode 100644 DetectionMetrics/libs/depthLib/DepthFilter.cpp create mode 100644 DetectionMetrics/libs/depthLib/DepthFilter.h create mode 100644 DetectionMetrics/libs/depthLib/DepthSampler.cpp create mode 100644 DetectionMetrics/libs/depthLib/DepthSampler.h create mode 100644 DetectionMetrics/libs/interfaces/CMakeLists.txt create mode 100644 DetectionMetrics/libs/interfaces/slice/CMakeLists.txt create mode 100644 DetectionMetrics/libs/interfaces/slice/jderobot/CMakeLists.txt create mode 100644 DetectionMetrics/libs/interfaces/slice/jderobot/camera.ice create mode 100644 DetectionMetrics/libs/interfaces/slice/jderobot/common.ice create mode 100644 DetectionMetrics/libs/interfaces/slice/jderobot/containers.ice create mode 100644 
DetectionMetrics/libs/interfaces/slice/jderobot/datetime.ice create mode 100644 DetectionMetrics/libs/interfaces/slice/jderobot/exceptions.ice create mode 100644 DetectionMetrics/libs/interfaces/slice/jderobot/image.ice create mode 100644 DetectionMetrics/libs/types/CMakeLists.txt create mode 100644 DetectionMetrics/libs/types/include/jderobottypes/image.h create mode 100644 DetectionMetrics/libs/types/include/jderobottypes/rgbd.h create mode 100644 DetectionMetrics/libs/utils/CMakeLists.txt create mode 100644 DetectionMetrics/libs/utils/CameraUtils.cpp create mode 100644 DetectionMetrics/libs/utils/CameraUtils.h create mode 100644 DetectionMetrics/libs/utils/colorspaces/colorspaces.h create mode 100644 DetectionMetrics/libs/utils/colorspaces/colorspacesmm.h create mode 100644 DetectionMetrics/libs/utils/colorspaces/imagecv.cpp create mode 100644 DetectionMetrics/libs/utils/colorspaces/imagecv.h create mode 100644 DetectionMetrics/libs/utils/colorspaces/rgb2hsv.c create mode 100644 DetectionMetrics/libs/utils/colorspaces/rgb2yuv.c create mode 100644 DetectionMetrics/libs/utils/colorspaces/uncopyable.h create mode 100644 DetectionMetrics/package.sh create mode 100644 DetectionMetrics/test/CMakeLists.txt create mode 100644 DetectionMetrics/test/GLOG/CMakeLists.txt create mode 100644 DetectionMetrics/test/GLOG/glogTest.cpp create mode 100644 Doxyfile create mode 100644 datasetGenerator/generator.py create mode 100644 datasetGenerator/loadImage.py create mode 100644 datasetGenerator/requirements.txt delete mode 100644 detectionmetrics/datasets/dataset.py delete mode 100644 detectionmetrics/datasets/gaia.py delete mode 100644 detectionmetrics/datasets/goose.py delete mode 100644 detectionmetrics/datasets/rellis3d.py delete mode 100644 detectionmetrics/models/__init__.py delete mode 100644 detectionmetrics/models/model.py delete mode 100644 detectionmetrics/models/onnx.py delete mode 100644 detectionmetrics/models/tensorflow.py delete mode 100644 
detectionmetrics/models/torch.py delete mode 100644 detectionmetrics/utils/__init__.py delete mode 100644 detectionmetrics/utils/conversion.py delete mode 100644 detectionmetrics/utils/io.py delete mode 100644 detectionmetrics/utils/metrics.py delete mode 100644 examples/gaia_example.py delete mode 100644 examples/goose_example.py delete mode 100644 examples/merge_datasets_example.py delete mode 100644 examples/rellis3d_example.py delete mode 100644 examples/tensorflow_example.py delete mode 100644 examples/torch_example.py delete mode 100644 py_docs/Makefile delete mode 100644 py_docs/_build/doctrees/detectionmetrics.datasets.doctree delete mode 100644 py_docs/_build/doctrees/detectionmetrics.doctree delete mode 100644 py_docs/_build/doctrees/detectionmetrics.models.doctree delete mode 100644 py_docs/_build/doctrees/detectionmetrics.utils.doctree delete mode 100644 py_docs/_build/doctrees/environment.pickle delete mode 100644 py_docs/_build/doctrees/index.doctree delete mode 100644 py_docs/_build/doctrees/modules.doctree delete mode 100644 py_docs/_build/html/.buildinfo delete mode 100644 py_docs/_build/html/.buildinfo.bak delete mode 100644 py_docs/_build/html/_sources/detectionmetrics.datasets.rst.txt delete mode 100644 py_docs/_build/html/_sources/detectionmetrics.models.rst.txt delete mode 100644 py_docs/_build/html/_sources/detectionmetrics.rst.txt delete mode 100644 py_docs/_build/html/_sources/detectionmetrics.utils.rst.txt delete mode 100644 py_docs/_build/html/_sources/index.rst.txt delete mode 100644 py_docs/_build/html/_sources/modules.rst.txt delete mode 100644 py_docs/_build/html/_static/_sphinx_javascript_frameworks_compat.js delete mode 100644 py_docs/_build/html/_static/basic.css delete mode 100644 py_docs/_build/html/_static/css/badge_only.css delete mode 100644 py_docs/_build/html/_static/css/fonts/Roboto-Slab-Bold.woff delete mode 100644 py_docs/_build/html/_static/css/fonts/Roboto-Slab-Bold.woff2 delete mode 100644 
py_docs/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff delete mode 100644 py_docs/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff2 delete mode 100644 py_docs/_build/html/_static/css/fonts/fontawesome-webfont.eot delete mode 100644 py_docs/_build/html/_static/css/fonts/fontawesome-webfont.svg delete mode 100644 py_docs/_build/html/_static/css/fonts/fontawesome-webfont.ttf delete mode 100644 py_docs/_build/html/_static/css/fonts/fontawesome-webfont.woff delete mode 100644 py_docs/_build/html/_static/css/fonts/fontawesome-webfont.woff2 delete mode 100644 py_docs/_build/html/_static/css/fonts/lato-bold-italic.woff delete mode 100644 py_docs/_build/html/_static/css/fonts/lato-bold-italic.woff2 delete mode 100644 py_docs/_build/html/_static/css/fonts/lato-bold.woff delete mode 100644 py_docs/_build/html/_static/css/fonts/lato-bold.woff2 delete mode 100644 py_docs/_build/html/_static/css/fonts/lato-normal-italic.woff delete mode 100644 py_docs/_build/html/_static/css/fonts/lato-normal-italic.woff2 delete mode 100644 py_docs/_build/html/_static/css/fonts/lato-normal.woff delete mode 100644 py_docs/_build/html/_static/css/fonts/lato-normal.woff2 delete mode 100644 py_docs/_build/html/_static/css/theme.css delete mode 100644 py_docs/_build/html/_static/doctools.js delete mode 100644 py_docs/_build/html/_static/documentation_options.js delete mode 100644 py_docs/_build/html/_static/file.png delete mode 100644 py_docs/_build/html/_static/fonts/Lato/lato-bold.eot delete mode 100644 py_docs/_build/html/_static/fonts/Lato/lato-bold.ttf delete mode 100644 py_docs/_build/html/_static/fonts/Lato/lato-bold.woff delete mode 100644 py_docs/_build/html/_static/fonts/Lato/lato-bold.woff2 delete mode 100644 py_docs/_build/html/_static/fonts/Lato/lato-bolditalic.eot delete mode 100644 py_docs/_build/html/_static/fonts/Lato/lato-bolditalic.ttf delete mode 100644 py_docs/_build/html/_static/fonts/Lato/lato-bolditalic.woff delete mode 100644 
py_docs/_build/html/_static/fonts/Lato/lato-bolditalic.woff2 delete mode 100644 py_docs/_build/html/_static/fonts/Lato/lato-italic.eot delete mode 100644 py_docs/_build/html/_static/fonts/Lato/lato-italic.ttf delete mode 100644 py_docs/_build/html/_static/fonts/Lato/lato-italic.woff delete mode 100644 py_docs/_build/html/_static/fonts/Lato/lato-italic.woff2 delete mode 100644 py_docs/_build/html/_static/fonts/Lato/lato-regular.eot delete mode 100644 py_docs/_build/html/_static/fonts/Lato/lato-regular.ttf delete mode 100644 py_docs/_build/html/_static/fonts/Lato/lato-regular.woff delete mode 100644 py_docs/_build/html/_static/fonts/Lato/lato-regular.woff2 delete mode 100644 py_docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.eot delete mode 100644 py_docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.ttf delete mode 100644 py_docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff delete mode 100644 py_docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff2 delete mode 100644 py_docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.eot delete mode 100644 py_docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.ttf delete mode 100644 py_docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff delete mode 100644 py_docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff2 delete mode 100644 py_docs/_build/html/_static/jquery.js delete mode 100644 py_docs/_build/html/_static/js/badge_only.js delete mode 100644 py_docs/_build/html/_static/js/theme.js delete mode 100644 py_docs/_build/html/_static/js/versions.js delete mode 100644 py_docs/_build/html/_static/language_data.js delete mode 100644 py_docs/_build/html/_static/minus.png delete mode 100644 py_docs/_build/html/_static/plus.png delete mode 100644 py_docs/_build/html/_static/pygments.css delete mode 100644 py_docs/_build/html/_static/searchtools.js delete mode 100644 py_docs/_build/html/_static/sphinx_highlight.js delete 
mode 100644 py_docs/_build/html/detectionmetrics.datasets.html delete mode 100644 py_docs/_build/html/detectionmetrics.html delete mode 100644 py_docs/_build/html/detectionmetrics.models.html delete mode 100644 py_docs/_build/html/detectionmetrics.utils.html delete mode 100644 py_docs/_build/html/genindex.html delete mode 100644 py_docs/_build/html/index.html delete mode 100644 py_docs/_build/html/modules.html delete mode 100644 py_docs/_build/html/objects.inv delete mode 100644 py_docs/_build/html/py-modindex.html delete mode 100644 py_docs/_build/html/search.html delete mode 100644 py_docs/_build/html/searchindex.js delete mode 100644 py_docs/conf.py delete mode 100644 py_docs/detectionmetrics.datasets.rst delete mode 100644 py_docs/detectionmetrics.models.rst delete mode 100644 py_docs/detectionmetrics.rst delete mode 100644 py_docs/detectionmetrics.utils.rst delete mode 100644 py_docs/index.rst delete mode 100644 py_docs/make.bat delete mode 100644 py_docs/modules.rst delete mode 100644 pyproject.toml create mode 100644 sampleGenerator/simpleSampleGenerator/main.py rename {detectionmetrics/datasets => sampleGenerator/simpleSampleGenerator/sampleGenerator}/__init__.py (100%) create mode 100644 sampleGenerator/simpleSampleGenerator/sampleGenerator/sample.py create mode 100755 samples/myAutoEvaluatorConfig.yml create mode 100755 samples/myappConfig.yml create mode 100644 samples/names/class-descriptions-boxable.csv create mode 100755 samples/names/coco.names create mode 100755 samples/names/spinello.names create mode 100755 samples/names/voc.names create mode 100644 samples/samplerConfig-princeton.txt create mode 100644 samples/samplerConfig-spinello.txt create mode 100644 samples/samplerConfig.txt delete mode 100644 tests/__init__.py diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml new file mode 100644 index 00000000..bfa46e76 --- /dev/null +++ b/.github/workflows/main.yml @@ -0,0 +1,36 @@ +# This is a basic workflow to help you get started 
with Actions + +name: Publish Docker image + +# Controls when the action will run. Triggers the workflow on push or pull request +# events but only for the noetic-devel branch +on: + push: + branches: [ master ] + pull_request: + branches: [ master ] + +# A workflow run is made up of one or more jobs that can run sequentially or in parallel +jobs: + push_to_registry: + name: Push Docker image to Docker Hub + runs-on: ubuntu-latest + steps: + - name: Check out the repo + uses: actions/checkout@v2 + - name: Set up QEMU + uses: docker/setup-qemu-action@v1 + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v1 + - name: Login to DockerHub + uses: docker/login-action@v1 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + - name: Push to Docker Hub + uses: docker/build-push-action@v2 + with: + push: true + tags: jderobot/detection-metrics:noetic + context: DetectionMetrics/Dockerfile/ + file: DetectionMetrics/Dockerfile/Dockerfile diff --git a/.gitignore b/.gitignore index 73d0fc3f..9ad8053f 100644 --- a/.gitignore +++ b/.gitignore @@ -1,10 +1,4 @@ -.vscode +.clang_complete -.mypy_cache -__pycache__ - -.venv -dist -poetry.lock - -local/* \ No newline at end of file +# Py byte files +*.py[cod] diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 00000000..2a23ee56 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,99 @@ +language: cpp + +os: + - linux + - osx + +sudo: required +dist: xenial + +compiler: + - gcc + - clang + +cache: + pip: true + directories: + - $HOME/opencv/ + - $HOME/darknet/ + +matrix: + include: + - env: TO_TEST=WITH_ROS_AND_ICE + +addons: + apt: + sources: + - sourceline: "deb http://packages.ros.org/ros/ubuntu $(lsb_release -sc) main" + key_url: "http://keyserver.ubuntu.com/pks/lookup?op=get&search=0x421C365BD9FF1F717815A3895523BAEEB01FA116" + - sourceline: "deb http://zeroc.com/download/Ice/3.6/ubuntu16.04 stable main" + key_url: 
"http://keyserver.ubuntu.com/pks/lookup?search=0x5E6DA83306132997&fingerprint=on&hash=on&op=get" + packages: + - rapidjson-dev + - libssl-dev + - libboost-dev + - libboost-filesystem-dev + - libboost-system-dev + - libboost-program-options-dev + - libgoogle-glog-dev + - libyaml-cpp-dev + - qt5-default + - libqt5svg5-dev + - libqt5opengl5-dev + - ros-kinetic-roscpp + - ros-kinetic-cv-bridge + - ros-kinetic-image-transport + - zeroc-ice-all-runtime + - zeroc-ice-all-dev + - doxygen + - doxygen-doc + - doxygen-latex + - doxygen-gui + - graphviz + + homebrew: + update: true + brewfile: true + +before_install: + - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then export PATH="/usr/local/opt/qt/bin:$PATH" ; fi + - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then wget https://www.python.org/ftp/python/2.7.15/python-2.7.15-macosx10.6.pkg ; fi + - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then sudo installer -pkg python-2.7.15-macosx10.6.pkg -target / ; fi + - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then export PATH="/Library/Frameworks/Python.framework/Versions/2.7/bin:${PATH}" ; fi + - if [[ ( "$TRAVIS_OS_NAME" == "linux" ) && ( "$TO_TEST" == "WITH_ROS_AND_ICE" ) ]]; then source /opt/ros/kinetic/setup.bash ; fi + - sudo pip install numpy + - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then bash -x install_opencv.sh ; fi + - ./install_darknet.sh + +before_script: + - cd DetectionMetrics + - mkdir build + - cd build + - cmake .. 
+ +script: make -j2 + +after_success: + - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then travis_wait bash ../package.sh ; fi # To run the script in the same shell so as to setup environment variables + - ls -lh out/* # Assuming you have some files in out/ that you would like to upload + - wget -c https://github.com/probonopd/uploadtool/raw/master/upload.sh + - travis_wait bash upload.sh out/* + - cd $TRAVIS_BUILD_DIR && rm -rf DetectionMetrics/build + - wget https://jderobot.org/resources/assets/jderobot.png + - doxygen + - echo "" > html/.nojekyll + +deploy: + provider: pages + skip-cleanup: true + local_dir: $TRAVIS_BUILD_DIR/html + github-token: $GITHUB_TOKEN # Set in the settings page of your repository, as a secure variable + on: + branch: master + condition: ( "$TO_TEST" == "WITH_ROS_AND_ICE" ) # Publish only in the 4th job + +branches: + except: + - # Do not build tags that we create when we upload to GitHub Releases + - /^(?i:continuous)$/ + - gh-pages diff --git a/Brewfile b/Brewfile new file mode 100644 index 00000000..6914f7d2 --- /dev/null +++ b/Brewfile @@ -0,0 +1,11 @@ +tap "homebrew/bundle" +brew "boost" +brew "boost-python" +brew "cmake" +brew "gcc" +brew "glog" +brew "libyaml" +brew "qt" +brew "rapidjson" +brew "opencv" +brew "yaml-cpp" diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e2c0b8ec..4c048180 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -2,7 +2,7 @@ Thanks for your interest on contributing! -This file contains a set of rules to contributing to the project and the +This file contains a set of rules to contributing to the project and the rest of the projects developed by JdeRobot. If you have any doubt about how to contribute contact one of the maintainers of the project. 
They will be pleased to tell you how you can contribute with your @@ -19,7 +19,7 @@ Please report any unacceptable behavior to any of [the maintainers](#i-have-a-qu ## Prerequisites before contributing -In order to contribute to JdeRobot projects, please read carefully the project README.md/webpage (if available) before +In order to contribute to JdeRobot projects, please read carefully the project README.md/webpage (if available) before starting contributing to understand the purpose of the project and where you can contribute. @@ -40,24 +40,22 @@ The two following points are different depending on the permissions you have to write a good description of the changes made and refer to the issue solved to make things easier to the maintainers. Include any additional resource that would be interesting (references, screenshots...). Link the PR with the issue * **Testing and merging pull requests** -Your pull request will be automatically tested by Travis CI. If any jobs have failed, you should fix them. +Your pull request will be automatically tested by Travis CI. If any jobs have failed, you should fix them. To rerun the automatic builds just push changes to your branch on GitHub. No need to close that pull request and open a new one! Once all the builders are "green", one of DetectionMetrics's developers will review your code. Reviewer could ask you to modify your pull request. Please provide timely response for reviewers (within weeks, not months), otherwise you submission could be postponed or even rejected. - + * **[If you have write permission] Don't accept your own pull requests:** Wait for a project maintainer to accept the changes you made. They will probably comment the pull request with some feedback and will consider if it can be merge to the master branch. Be proactive and kind! 
## I have a question If you have any question related to how to contribute to the project or anything related to the organization, -you can contact the main project maintainers sending them an email. Indicate the project you are talking about in the +you can contact the main project maintainers sending them an email. Indicate the project you are talking about in the subject of the email, please. Emails: - - diff --git a/DetectionMetrics/CMakeLists.txt b/DetectionMetrics/CMakeLists.txt new file mode 100644 index 00000000..1da7f017 --- /dev/null +++ b/DetectionMetrics/CMakeLists.txt @@ -0,0 +1,75 @@ + cmake_minimum_required(VERSION 2.8) + +project(samplerGenerator C CXX) + + +#check compiller + +include(CheckCXXCompilerFlag) +CHECK_CXX_COMPILER_FLAG("-std=c++11" COMPILER_SUPPORTS_CXX11) + +message ("-- version: ${CMAKE_CXX_COMPILER_VERSION}") +if(COMPILER_SUPPORTS_CXX11) + set(CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS} -std=c++11) + MESSAGE("-- C++11 support enabled") +else() + message(FATAL_ERROR "The compiler ${CMAKE_CXX_COMPILER} has no C++11 support.") +endif() + +IF (NOT CMAKE_BUILD_TYPE) + MESSAGE("-- SETTING BUILD TYPE TO RELEASE") + SET(CMAKE_BUILD_TYPE RELEASE) +ENDIF() + + +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-deprecated -fPIC") +set(CMAKE_MACOSX_RPATH 1) + +#automated opencv +include(FindPkgConfig) + +## FIND_PACKAGE(JdeRobot REQUIRED) + + include(Deps/glog/CMakeLists.txt) + include(Deps/ice/CMakeLists.txt) + include(Deps/numpy/CMakeLists.txt) + include(Deps/yaml-cpp/CMakeLists.txt) + include(Deps/ros/CMakeLists.txt) + include(Deps/opencv/CMakeLists.txt) + include(Deps/qt/CMakeLists.txt) + +FIND_PACKAGE(Boost REQUIRED program_options filesystem) + +set(DetectionMetrics_INCLUDE_DIR ${CMAKE_CURRENT_LIST_DIR}/DetectionMetricsLib) +SET( INTERFACES_CPP_DIR ${CMAKE_CURRENT_BINARY_DIR}/libs/interfaces/cpp/jderobot ${CMAKE_CURRENT_BINARY_DIR}/libs/interfaces/cpp) + +if(ZeroCIce_FOUND) + add_definitions(-DICE) +endif() +if(roscpp_FOUND) + add_definitions(-DJDERROS) 
+endif() +if(OpenCV_FOUND) + SET(ENABLE_DNN_CAFFE ON) + add_definitions(-DENABLE_DNN_CAFFE) +endif(OpenCV_FOUND) + + add_subdirectory(libs) + if (QT_FOUND) + add_subdirectory(DetectionMetricsLib) + add_subdirectory(DatasetEvaluationApp) + endif(QT_FOUND) + add_subdirectory(SampleGenerationApp) + add_subdirectory(Tools) + +if(roscpp_FOUND) + add_subdirectory(DetectionMetricsROS) +endif() + file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/ClassMappingHierarchy.xml + DESTINATION ${CMAKE_CURRENT_BINARY_DIR}) + file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/ClassMappingHierarchy.xml + DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/Tools) + + IF (BUILD_TEST) + add_subdirectory(test) +ENDIF() diff --git a/DetectionMetrics/ClassMappingHierarchy.xml b/DetectionMetrics/ClassMappingHierarchy.xml new file mode 100644 index 00000000..74627644 --- /dev/null +++ b/DetectionMetrics/ClassMappingHierarchy.xml @@ -0,0 +1,99 @@ + + + + flying vehicle + + + airplane + + + + plane + + + + aircraft + + + + aeroplane + + + + + + table + + + dining table + + + + diningtable + + + + + + + plant + + + pottedplant + + + potted plant + + + + + + appliance + + + tv + + + tvmonitor + + + tv monitor + + + television + + + + + vehicle + + + motorbike + + + motorcycle + + + bike + + + motor bike + + + motor cycle + + + + + furniture + + + sofa + + + couch + + + + + diff --git a/DetectionMetrics/DatasetEvaluationApp/CMakeLists.txt b/DetectionMetrics/DatasetEvaluationApp/CMakeLists.txt new file mode 100644 index 00000000..34ffcf64 --- /dev/null +++ b/DetectionMetrics/DatasetEvaluationApp/CMakeLists.txt @@ -0,0 +1,54 @@ +set(CMAKE_AUTOMOC ON) +set(CMAKE_AUTOUIC ON) +set(CMAKE_INCLUDE_CURRENT_DIR ON) + +set (SOURCES main.cpp + mainwindow + gui/Appcfg + gui/appconfig + gui/ListViewConfig + gui/TabHandler.cpp gui/TabHandler.h + SamplerGeneratorHandler/Converter + SamplerGeneratorHandler/SamplerGenerationHandler + gui/Utils + SamplerGeneratorHandler/Detector + SamplerGeneratorHandler/Evaluator + SamplerGeneratorHandler/Deployer + 
SamplerGeneratorHandler/Viewer + # gui/pop_up + ) + + +include_directories( + ${OpenCV_INCLUDE_DIRS} + ${config_INCLUDE_DIRS} + ${comm_INCLUDE_DIRS} + ${utils_INCLUDE_DIRS} + ${ros_INCLUDE_DIRS} + ${PYTHON_INCLUDE_DIRS} + ${INTERFACES_CPP_DIR} + ${jderobottypes_INCLUDE_DIRS} + ${GLOG_INCLUDE_DIRS} + ${Boost_INCLUDE_DIR} + ${QT_INCLUDE_DIRS} + ${CMAKE_CURRENT_LIST_DIR} + ${DetectionMetrics_INCLUDE_DIR} +) + + +add_executable(DatasetEvaluationApp ${SOURCES}) + + +TARGET_LINK_LIBRARIES(DatasetEvaluationApp + DetectionMetrics + ${OpenCV_LIBRARIES} + ${JderobotInterfaces_LIBRARIES} + ${EXTRA_LIBS} + ${Boost_LIBRARIES} + ${QT_LIBRARIES} + ${PYTHON_LIBRARIES} + ${GLOG_LIBRARIES} + ${PYTHON_LIBRARIES} + ssl + crypto + ) diff --git a/DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/Converter.cpp b/DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/Converter.cpp new file mode 100644 index 00000000..704fae91 --- /dev/null +++ b/DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/Converter.cpp @@ -0,0 +1,91 @@ +// +// Created by frivas on 19/02/17. +// + +#include +#include +#include "Converter.h" +#include "SamplerGenerationHandler.h" +#include +void SampleGeneratorHandler::Converter::process(QListView *datasetList, QListView *namesList, QListView *readerImpList, + QListView *filterClasses, QListView *writerImpList, QListView* writerNamesList, bool useWriterNames, + const std::string& datasetPath, const std::string& namesPath, const std::string &outputPath, + bool splitActive, double splitRatio,bool writeImages) { + + + GenericDatasetReaderPtr reader = SamplerGenerationHandler::createDatasetReaderPtr(datasetList, namesList, + readerImpList, filterClasses, + datasetPath, namesPath, writeImages); // Images Required for dataset if and + // only if write images is true + + + if (!reader) + return; + + + std::vector writerImp; + Utils::getListViewContent(writerImpList,writerImp,""); + std::vector writerNames; + + if (useWriterNames) { + if (! 
Utils::getListViewContent(writerNamesList,writerNames,namesPath+"/")){ + LOG(WARNING)<<"Select the dataset names related to the Output dataset, or unchechk mapping if you want a custom names file to be generated"; + return; + } +} + + + if (splitActive){ + DatasetReaderPtr readerTest(new DatasetReader(writeImages)); + DatasetReaderPtr readerTrain(new DatasetReader(writeImages)); + + std::string testPath = outputPath + "/test"; + std::string trainPath = outputPath + "/train"; + + int ratio=int(splitRatio*10); + + Sample sample; + auto readerPtr = reader->getReader(); + int counter=0; + while (readerPtr->getNextSample(sample)){ + if (counter addSample(sample); + } + else{ + readerTest->addSample(sample); + } + counter++; + counter= counter % 10; + } + + LOG(INFO) << "Train: " << std::endl; + readerTrain->printDatasetStats(); + LOG(INFO) << "Test: " << std::endl; + readerTest->printDatasetStats(); + + if (useWriterNames) { + GenericDatasetWriterPtr writerTest( new GenericDatasetWriter(testPath,readerTest,writerImp[0],writerNames[0])); + GenericDatasetWriterPtr writerTrain( new GenericDatasetWriter(trainPath,readerTrain,writerImp[0], writerNames[0])); + writerTest->getWriter()->process(writeImages); + writerTrain->getWriter()->process(writeImages); + + } else { + GenericDatasetWriterPtr writerTest( new GenericDatasetWriter(testPath,readerTest,writerImp[0])); + GenericDatasetWriterPtr writerTrain( new GenericDatasetWriter(trainPath,readerTrain,writerImp[0])); + writerTest->getWriter()->process(writeImages); + writerTrain->getWriter()->process(writeImages); + } + } + else{ + auto readerPtr = reader->getReader(); + if (useWriterNames) { + GenericDatasetWriterPtr writer( new GenericDatasetWriter(outputPath,readerPtr,writerImp[0], writerNames[0])); + + writer->getWriter()->process(writeImages); + } else { + GenericDatasetWriterPtr writer( new GenericDatasetWriter(outputPath,readerPtr,writerImp[0])); + + writer->getWriter()->process(writeImages); + } + } +} diff --git 
a/DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/Converter.h b/DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/Converter.h new file mode 100644 index 00000000..dde3d94e --- /dev/null +++ b/DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/Converter.h @@ -0,0 +1,20 @@ +// +// Created by frivas on 19/02/17. +// + +#ifndef SAMPLERGENERATOR_CONVERTER_H +#define SAMPLERGENERATOR_CONVERTER_H + +#include + +namespace SampleGeneratorHandler { + class Converter { + public: + static void process(QListView* datasetList,QListView* namesList,QListView* readerImpList, QListView* filterClasses, QListView* writerImpList, + QListView* writerNamesList, bool useWriterNames, const std::string& datasetPath, const std::string& namesPath, const std::string& outputPath, bool splitActive, double splitRatio, bool writeImages); + }; + +} + + +#endif //SAMPLERGENERATOR_CONVERTER_H diff --git a/DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/Deployer.cpp b/DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/Deployer.cpp new file mode 100644 index 00000000..1ecf1d66 --- /dev/null +++ b/DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/Deployer.cpp @@ -0,0 +1,99 @@ +// +// Created by frivas on 27/03/17. 
+// + +#include +#include +#include +#include +#include +#include "Deployer.h" +#include "SamplerGenerationHandler.h" +#include "gui/Appcfg.hpp" + +void +SampleGeneratorHandler::Deployer::process(QListView *deployImpList, QListView *weightsList, QListView *netConfigList, + QListView *inferencerImpList, QListView *inferencerNamesList, + bool* stopButton, double* confidence_threshold, QGroupBox* deployer_params, QGroupBox* camera_params, QGroupBox* inferencer_params, const std::string &weightsPath, const std::string &cfgPath, + const std::string &inferencerNamesPath, const std::string &inputInfo, const std::string &outputFolder,bool labelling) { + + GenericLiveReaderPtr reader; + + try { + + reader = SamplerGenerationHandler::createLiveReaderPtr( inferencerNamesList,deployImpList, deployer_params, + camera_params, inputInfo,inferencerNamesPath); + + } catch(const std::invalid_argument& ex) { + LOG(WARNING)<< "Error Creating Generic Live Reader\nError Message: " << ex.what(); + return; + + } + + std::vector weights; + if (! Utils::getListViewContent(weightsList,weights,weightsPath+ "/")){ + LOG(WARNING)<<"Select the weightsList"; + return; + } + std::vector netConfiguration; + if (! Utils::getListViewContent(netConfigList,netConfiguration,cfgPath+ "/")){ + LOG(WARNING)<<"Select the netConfiguration"; + return; + } + + std::vector inferencerImp; + if (! Utils::getListViewContent(inferencerImpList,inferencerImp,"")){ + LOG(WARNING)<<"Select the inferencer type"; + return; + } + + std::vector inferencerNames; + if (! Utils::getListViewContent(inferencerNamesList,inferencerNames,inferencerNamesPath + "/")){ + LOG(WARNING)<<"Select the class names"; + return; + } + + + std::map* inferencerParamsMap = new std::map(); + try { + if(! 
Utils::getInferencerParamsContent(inferencer_params, *inferencerParamsMap)) { + inferencerParamsMap = NULL; + } + + } + catch(std::exception& ex) { + LOG(WARNING)<< ex.what(); + return; + } + + if (!outputFolder.empty()) { + + auto boostPath= boost::filesystem::path(outputFolder); + if (boost::filesystem::exists(boostPath)){ + boost::filesystem::directory_iterator end_itr; + boost::filesystem::directory_iterator itr(boostPath); + for (; itr != end_itr; ++itr) + { + if (boost::filesystem::is_regular_file(itr->path()) && (itr->path().extension()==".png" || itr->path().extension()==".json") ) { + break; + } + } + if (itr != end_itr) + QMessageBox::warning(deployer_params, QObject::tr("Output Directory isn't Empty"), QObject::tr("Output Director contains png or json files which might be overwritten")); + } + + } + DatasetReaderPtr data_reader=reader->getReader(); + data_reader->SetClassNamesFile(&inferencerNames[0]); + LOG(INFO) << "netConfigList : " << netConfiguration[0] << " ; weights : " << weights[0] << " ; inferencerNames : " << inferencerNames[0] << " ; inferencerImp : " << inferencerImp[0] << std::endl; + GenericInferencerPtr inferencer(new GenericInferencer(netConfiguration[0],weights[0],inferencerNames[0],inferencerImp[0], inferencerParamsMap)); + if(labelling){ + Labelling massInferencer(data_reader,inferencer->getInferencer(),outputFolder, stopButton, confidence_threshold, true); + massInferencer.process(false); + } + else{ + MassInferencer massInferencer(data_reader,inferencer->getInferencer(),outputFolder, stopButton, confidence_threshold, true); + massInferencer.process(false); + } + +} diff --git a/DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/Deployer.h b/DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/Deployer.h new file mode 100644 index 00000000..1717a384 --- /dev/null +++ b/DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/Deployer.h @@ -0,0 +1,21 @@ +// +// Created by frivas on 27/03/17. 
+// + +#ifndef SAMPLERGENERATOR_DEPLOYER_H +#define SAMPLERGENERATOR_DEPLOYER_H + +#include +#include +#include + +namespace SampleGeneratorHandler { + class Deployer { + public: + static void process(QListView *deployImpList,QListView* weightsList, QListView* netConfigList, QListView* inferencerImpList, QListView* inferencerNamesList, + bool* stopButton, double* confidence_threshold, QGroupBox* deployer_params, QGroupBox* camera_params, QGroupBox* inferencer_params, const std::string& weightsPath, const std::string& cfgPath, + const std::string& inferencerNamesPath,const std::string& inputInfo,const std::string& outputFolder,bool labelling); + }; +} + +#endif //SAMPLERGENERATOR_DEPLOYER_H diff --git a/DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/Detector.cpp b/DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/Detector.cpp new file mode 100644 index 00000000..a815f20f --- /dev/null +++ b/DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/Detector.cpp @@ -0,0 +1,79 @@ +// +// Created by frivas on 20/02/17. +// + +#include +#include +#include +#include +#include +#include +#include "Detector.h" +#include "SamplerGenerationHandler.h" + +void SampleGeneratorHandler::Detector::process(QListView* datasetList,QListView* namesList,QListView* readerImpList, const std::string& datasetPath, + QListView* weightsList, QListView* netConfigList, QListView* inferencerImpList, QListView* inferencerNamesList, + QGroupBox* inferencer_params, const std::string& weightsPath, const std::string& cfgPath, const std::string& outputPath, + const std::string& namesPath, bool useDepth, bool singleEvaluation) { + + GenericDatasetReaderPtr reader = SamplerGenerationHandler::createDatasetReaderPtr(datasetList, namesList, + readerImpList, NULL, datasetPath, + namesPath, true); + + if (!reader) + return; + std::vector weights; + if (! 
Utils::getListViewContent(weightsList,weights,weightsPath+ "/")){ + LOG(WARNING)<<"Select the weightsList"; + return; + } + + std::vector netConfiguration; + if (! Utils::getListViewContent(netConfigList,netConfiguration,cfgPath+ "/")){ + LOG(WARNING)<<"Select the netConfiguration"; + return; + } + + std::vector inferencerImp; + if (! Utils::getListViewContent(inferencerImpList,inferencerImp,"")){ + LOG(WARNING)<<"Select the inferencer type"; + return; + } + + std::vector inferencerNames; + if (! Utils::getListViewContent(inferencerNamesList,inferencerNames,namesPath + "/")){ + LOG(WARNING)<<"Select the inferencer type"; + return; + } + + std::map* inferencerParamsMap = new std::map(); + try { + if(! Utils::getInferencerParamsContent(inferencer_params, *inferencerParamsMap)) { + inferencerParamsMap = NULL; + } + + } catch(std::exception& ex) { + LOG(WARNING)<< ex.what(); + return; + } + + std::vector writerImp; + Utils::getListViewContent(readerImpList,writerImp,""); + + + std::vector writerNames; + if (! 
Utils::getListViewContent(namesList,writerNames,namesPath+"/")){ + LOG(WARNING)<<"Select the dataset names related to the Output dataset, or unchechk mapping if you want a custom names file to be generated"; + return; + } + + DatasetReaderPtr readerDetection (new DatasetReader(true)); + + GenericInferencerPtr inferencer(new GenericInferencer(netConfiguration[0],weights[0],inferencerNames[0],inferencerImp[0], inferencerParamsMap)); + MassInferencer massInferencer(reader->getReader(),inferencer->getInferencer(),std::string(), true); + massInferencer.process(useDepth, readerDetection); + + GenericDatasetWriterPtr writer(new GenericDatasetWriter(outputPath,readerDetection,writerImp[0], writerNames[0])); + + writer->getWriter()->process(false); +} diff --git a/DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/Detector.h b/DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/Detector.h new file mode 100644 index 00000000..8891d33c --- /dev/null +++ b/DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/Detector.h @@ -0,0 +1,25 @@ +// +// Created by frivas on 20/02/17. 
+// + +#ifndef SAMPLERGENERATOR_DETECTOR_H +#define SAMPLERGENERATOR_DETECTOR_H + + +#include + + +namespace SampleGeneratorHandler { + + class Detector { + public: + static void process(QListView* datasetList,QListView* namesList,QListView* readerImpList, const std::string& datasetPath, + QListView* weightsList, QListView* netConfigList, QListView* inferencerImpList, QListView* inferencerNamesList, + QGroupBox* inferencer_params, const std::string& weightsPath, const std::string& cfgPath, const std::string& outputPath, + const std::string& namesPath, bool useDepth, bool singleEvaluation + ); + }; + +} + +#endif //SAMPLERGENERATOR_DETECTOR_H diff --git a/DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/Evaluator.cpp b/DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/Evaluator.cpp new file mode 100644 index 00000000..a209b852 --- /dev/null +++ b/DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/Evaluator.cpp @@ -0,0 +1,61 @@ +// +// Created by frivas on 20/02/17. 
+// + +#include +#include +#include "Evaluator.h" +#include "SamplerGenerationHandler.h" + +void +SampleGeneratorHandler::Evaluator::process(QListView *datasetListGT, QListView *namesListGT, QListView *readerImpListGT, + QListView *datasetListDetect, QListView *namesListDetect, + QListView *readerImpListDetect, QListView *filterClasses, + const std::string &datasetPath, const std::string &namesGTPath, + const std::string &inferencesPath, const std::string &namesPath, + bool overWriterPersonClasses,bool enableMixEvaluation, + bool isIouTypeBbox) { + + GenericDatasetReaderPtr readerGT = SamplerGenerationHandler::createDatasetReaderPtr(datasetListGT, namesListGT, + readerImpListGT, filterClasses, + datasetPath, + namesPath, false); + GenericDatasetReaderPtr readerDetection = SamplerGenerationHandler::createDatasetReaderPtr(datasetListDetect, + namesListDetect, + readerImpListDetect, + filterClasses, + inferencesPath, + namesPath, false); + + + if (!readerGT || !readerDetection) + return; + + + DetectionsEvaluatorPtr evaluator(new DetectionsEvaluator(readerGT->getReader(),readerDetection->getReader())); + + + if (overWriterPersonClasses){ + readerGT->getReader()->overWriteClasses("person-falling","person"); + readerGT->getReader()->overWriteClasses("person-fall","person"); + readerGT->getReader()->printDatasetStats(); + } + + if(enableMixEvaluation) { + evaluator->addValidMixClass("person", "person-falling"); + evaluator->addValidMixClass("person", "person-fall"); + } + evaluator->evaluate(isIouTypeBbox); + evaluator->accumulateResults(); + + + + std::string mywriterFile("evaluation_results.csv"); + + StatsWriter writer(readerGT->getReader(), mywriterFile); + + writer.writeInferencerResults("Detection Dataset", evaluator); + + writer.saveFile(); + +} diff --git a/DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/Evaluator.h b/DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/Evaluator.h new file mode 100644 index 00000000..ae2ccce7 --- /dev/null 
+++ b/DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/Evaluator.h @@ -0,0 +1,25 @@ +// +// Created by frivas on 20/02/17. +// + +#ifndef SAMPLERGENERATOR_EVALUATOR_H +#define SAMPLERGENERATOR_EVALUATOR_H + + +#include + + +namespace SampleGeneratorHandler { + class Evaluator { + public: + static void process(QListView* datasetListGT,QListView* namesListGT,QListView* readerImpListGT, + QListView* datasetListDetect,QListView* namesListDetect,QListView* readerImpListDetect, + QListView* filterClasses, const std::string& datasetPath, const std::string& namesGTPath, + const std::string& inferencesPath, const std::string& namesPath,bool overWriterPersonClasses, + bool enableMixEvaluation, bool isIouTypeBbox + ); + }; + +} + +#endif //SAMPLERGENERATOR_EVALUATOR_H diff --git a/DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/Label.cpp b/DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/Label.cpp new file mode 100644 index 00000000..65712bb6 --- /dev/null +++ b/DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/Label.cpp @@ -0,0 +1,89 @@ +// +// Created by frivas on 27/03/17. 
+// + +#include +#include +#include +#include +#include "Deployer.h" +#include "SamplerGenerationHandler.h" +#include "gui/Appcfg.hpp" + +void +SampleGeneratorHandler::Deployer::process(QListView *deployImpList, QListView *weightsList, QListView *netConfigList, + QListView *inferencerImpList, QListView *inferencerNamesList, + bool* stopButton, double* confidence_threshold, QGroupBox* deployer_params, QGroupBox* camera_params, QGroupBox* inferencer_params, const std::string &weightsPath, const std::string &cfgPath, + const std::string &inferencerNamesPath, const std::string &inputInfo, const std::string &outputFolder) { + + GenericLiveReaderPtr reader; + + try { + + reader = SamplerGenerationHandler::createLiveReaderPtr( inferencerNamesList, + deployImpList, deployer_params, camera_params, inputInfo,inferencerNamesPath); + + } catch(const std::invalid_argument& ex) { + LOG(WARNING)<< "Error Creating Generic Live Reader\nError Message: " << ex.what(); + return; + + } + + std::vector weights; + if (! Utils::getListViewContent(weightsList,weights,weightsPath+ "/")){ + LOG(WARNING)<<"Select the weightsList"; + return; + } + + std::vector netConfiguration; + if (! Utils::getListViewContent(netConfigList,netConfiguration,cfgPath+ "/")){ + LOG(WARNING)<<"Select the netConfiguration"; + return; + } + + std::vector inferencerImp; + if (! Utils::getListViewContent(inferencerImpList,inferencerImp,"")){ + LOG(WARNING)<<"Select the inferencer type"; + return; + } + + std::vector inferencerNames; + if (! Utils::getListViewContent(inferencerNamesList,inferencerNames,inferencerNamesPath + "/")){ + LOG(WARNING)<<"Select the inferencer type"; + return; + } + + std::map* inferencerParamsMap = new std::map(); + try { + if(! 
Utils::getInferencerParamsContent(inferencer_params, *inferencerParamsMap)) { + inferencerParamsMap = NULL; + } + + } catch(std::exception& ex) { + LOG(WARNING)<< ex.what(); + return; + } + + if (!outputFolder.empty()) { + + auto boostPath= boost::filesystem::path(outputFolder); + if (boost::filesystem::exists(boostPath)){ + boost::filesystem::directory_iterator end_itr; + boost::filesystem::directory_iterator itr(boostPath); + for (; itr != end_itr; ++itr) + { + if (boost::filesystem::is_regular_file(itr->path()) && (itr->path().extension()==".png" || itr->path().extension()==".json") ) { + break; + } + } + if (itr != end_itr) + QMessageBox::warning(deployer_params, QObject::tr("Output Directory isn't Empty"), QObject::tr("Output Director contains png or json files which might be overwritten")); + } + + } + DatasetReaderPtr data_reader=reader->getReader(); + data_reader->SetClassNamesFile(&inferencerNames[0]); + GenericInferencerPtr inferencer(new GenericInferencer(netConfiguration[0],weights[0],inferencerNames[0],inferencerImp[0], inferencerParamsMap)); + MassInferencer massInferencer(data_reader,inferencer->getInferencer(),outputFolder, stopButton, confidence_threshold, true); + massInferencer.process(false); +} diff --git a/DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/Label.h b/DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/Label.h new file mode 100644 index 00000000..7ec0bdbb --- /dev/null +++ b/DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/Label.h @@ -0,0 +1,21 @@ +// +// Created by frivas on 27/03/17. 
+// + +#ifndef SAMPLERGENERATOR_DEPLOYER_H +#define SAMPLERGENERATOR_DEPLOYER_H + +#include +#include +#include + +namespace SampleGeneratorHandler { + class Deployer { + public: + static void process(QListView *deployImpList,QListView* weightsList, QListView* netConfigList, QListView* inferencerImpList, QListView* inferencerNamesList, + bool* stopButton, double* confidence_threshold, QGroupBox* deployer_params, QGroupBox* camera_params, QGroupBox* inferencer_params, const std::string& weightsPath, const std::string& cfgPath, + const std::string& inferencerNamesPath,const std::string& inputInfo,const std::string& outputFolder); + }; +} + +#endif //SAMPLERGENERATOR_DEPLOYER_H diff --git a/DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/SamplerGenerationHandler.cpp b/DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/SamplerGenerationHandler.cpp new file mode 100644 index 00000000..8afa1a0a --- /dev/null +++ b/DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/SamplerGenerationHandler.cpp @@ -0,0 +1,102 @@ +// +// Created by frivas on 19/02/17. +// + +#include +#include +#include "SamplerGenerationHandler.h" + +GenericDatasetReaderPtr SampleGeneratorHandler::SamplerGenerationHandler::createDatasetReaderPtr( + const QListView *datasetList, + const QListView *namesList, + const QListView *readerImpList, + const QListView *filterClasses, + const std::string &datasetPath, const std::string &namesPath, const bool imagesRequired) { + std::vector datasetsToShow; + + if (! Utils::getListViewContent(datasetList,datasetsToShow,datasetPath + "/")){ + LOG(WARNING)<<"Select at least one dataset to read"; + return GenericDatasetReaderPtr(); + } + + std::vector names; + if (! Utils::getListViewContent(namesList,names,namesPath+"/")){ + LOG(WARNING)<<"Select the dataset names related to the input dataset"; + return GenericDatasetReaderPtr(); + } + + std::vector readerImplementation; + if (! 
Utils::getListViewContent(readerImpList,readerImplementation,"")){ + LOG(WARNING)<<"Select the reader implementation"; + return GenericDatasetReaderPtr(); + } + + std::vector classesToFilter; + if (filterClasses) + Utils::getListViewContent(filterClasses,classesToFilter,""); + + + GenericDatasetReaderPtr reader; + if (datasetsToShow.size()>1) { + reader = GenericDatasetReaderPtr( + new GenericDatasetReader(datasetsToShow,names[0], readerImplementation[0], imagesRequired)); + } + else { + reader = GenericDatasetReaderPtr( + new GenericDatasetReader(datasetsToShow[0],names[0], readerImplementation[0], imagesRequired)); + } + + + if (classesToFilter.size()){ + reader->getReader()->filterSamplesByID(classesToFilter); + } + + return reader; +} + +GenericLiveReaderPtr SampleGeneratorHandler::SamplerGenerationHandler::createLiveReaderPtr(const QListView *namesList, + const QListView *readerImpList, + const QGroupBox *deployer_params, + const QGroupBox *camera_params, + const std::string &infoPath, + const std::string &namesPath) { + + std::vector names; + if (! Utils::getListViewContent(namesList,names,namesPath+"/")){ + LOG(WARNING)<<"Select the dataset names related to the input dataset"; + return GenericLiveReaderPtr(); + } + + std::vector readerImplementation; + if (! Utils::getListViewContent(readerImpList,readerImplementation,"")){ + LOG(WARNING)<<"Select the reader implementation"; + return GenericLiveReaderPtr(); + } + + int cameraID; + if(! Utils::getCameraParamsContent(camera_params, cameraID)) { + LOG(WARNING)<<"Invalid Camera ID passed"; + return GenericLiveReaderPtr(); + } + + std::map* deployer_params_map = new std::map(); + + try { + + if(! 
Utils::getDeployerParamsContent(deployer_params, *deployer_params_map)) { + deployer_params_map = NULL; + } + + } catch(std::exception& ex) { + LOG(WARNING)<< ex.what(); + return GenericLiveReaderPtr(); + } + + GenericLiveReaderPtr reader; + + reader = GenericLiveReaderPtr( + new GenericLiveReader(infoPath, names[0], readerImplementation[0], deployer_params_map, cameraID)); + + + return reader; +} diff --git a/DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/SamplerGenerationHandler.h b/DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/SamplerGenerationHandler.h new file mode 100644 index 00000000..1d97761f --- /dev/null +++ b/DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/SamplerGenerationHandler.h @@ -0,0 +1,35 @@ +// +// Created by frivas on 19/02/17. +// + +#ifndef SAMPLERGENERATOR_SAMPLERGENERATIONHANDLER_H +#define SAMPLERGENERATOR_SAMPLERGENERATIONHANDLER_H + +#include +#include +#include +#include +#include + +namespace SampleGeneratorHandler { + + class SamplerGenerationHandler { + public: + static GenericDatasetReaderPtr createDatasetReaderPtr(const QListView *datasetList, const QListView *namesList, + const QListView *readerImpList, + const QListView *filterClasses, + const std::string &datasetPath, + const std::string &namesPath, + const bool imagesRequired); + + static GenericLiveReaderPtr createLiveReaderPtr(const QListView *namesList, + const QListView *readerImpList, + const QGroupBox *deployer_params, + const QGroupBox *camera_params, + const std::string &infoPath, + const std::string &namesPath); + }; + +} + +#endif //SAMPLERGENERATOR_SAMPLERGENERATIONHANDLER_H diff --git a/DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/Viewer.cpp b/DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/Viewer.cpp new file mode 100644 index 00000000..96dd608b --- /dev/null +++ b/DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/Viewer.cpp @@ -0,0 +1,44 @@ +// +// Created by frivas on 
18/02/17. +// + +#include +#include +#include +#include "Viewer.h" +#include "SamplerGenerationHandler.h" +#include +#include + +namespace SampleGeneratorHandler { + + void Viewer::process(QListView* datasetList,QListView* namesList,QListView* readerImpList,QListView* filterClasses, bool showDepth, const std::string& datasetPath, const std::string& namesPath) { + + try { + GenericDatasetReaderPtr reader = SamplerGenerationHandler::createDatasetReaderPtr(datasetList, namesList, + readerImpList, filterClasses, + datasetPath, namesPath, true); + if (!reader){ + return; + } + + std::string windowName="viewer"; + Sample sample; + + std::vector readerImplementation; + Utils::getListViewContent(readerImpList,readerImplementation,""); + + + while (reader->getReader()->getNextSample(sample)){ + LOG(INFO) << "number of elements: " << sample.getRectRegions()->getRegions().size() << std::endl; + + if (!sample.show(readerImplementation[0], windowName, 0, showDepth)) + break; + + } + } catch (std::invalid_argument e) { + LOG(INFO) << "Invalid argument!" << std::endl; + } + } + +} diff --git a/DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/Viewer.h b/DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/Viewer.h new file mode 100644 index 00000000..df8cb737 --- /dev/null +++ b/DetectionMetrics/DatasetEvaluationApp/SamplerGeneratorHandler/Viewer.h @@ -0,0 +1,18 @@ +// +// Created by frivas on 18/02/17. 
+// + +#ifndef SAMPLERGENERATOR_VIEWER_H +#define SAMPLERGENERATOR_VIEWER_H + +#include + +namespace SampleGeneratorHandler { + class Viewer { + public: + static void process(QListView* datasetList,QListView* namesList,QListView* readerImpList, QListView* filterClasses, bool showDepth, const std::string& datasetPath, const std::string& namesPath); + }; + +} + +#endif //SAMPLERGENERATOR_VIEWER_H diff --git a/DetectionMetrics/DatasetEvaluationApp/gui/Appcfg.cpp b/DetectionMetrics/DatasetEvaluationApp/gui/Appcfg.cpp new file mode 100644 index 00000000..3b6da5dc --- /dev/null +++ b/DetectionMetrics/DatasetEvaluationApp/gui/Appcfg.cpp @@ -0,0 +1,21 @@ +#include "Appcfg.hpp" +#include + +// Classic constructor +Appcfg::Appcfg(int argc, char **argv){ + this->a = new QApplication(argc,argv); + this->w = new appconfig(); + Appcfg::exec(); +} + +// Starts the GUI +void Appcfg::exec(){ + this->w->show(); + this->a->exec(); +} + +/* Returns the YAML node which has information regarding the required parameters + to start the suite */ +YAML::Node Appcfg::get_node(){ + return this->w->return_node(); +} diff --git a/DetectionMetrics/DatasetEvaluationApp/gui/Appcfg.hpp b/DetectionMetrics/DatasetEvaluationApp/gui/Appcfg.hpp new file mode 100644 index 00000000..66e0a9e2 --- /dev/null +++ b/DetectionMetrics/DatasetEvaluationApp/gui/Appcfg.hpp @@ -0,0 +1,30 @@ +#ifndef APP_CONFIG +#define APP_CONFIG + +// This is just for initialization of things required to start GUI +// For more refer to "appconfig" class +#include "appconfig.h" +#include + +class Appcfg { +public: + // constructor + Appcfg(int argc , char **argv); + // To start the GUI + void exec(); + // Yaml node that stores all the required parameters to start DetectionMetrics + YAML::Node get_node(); + +private: + QApplication *a; + appconfig *w; +}; + +#endif +// int main(int argc, char *argv[]) +// { +// QApplication a(argc, argv); +// MainWindow w; +// w.show(); +// return a.exec(); +// } diff --git 
a/DetectionMetrics/DatasetEvaluationApp/gui/ListViewConfig.cpp b/DetectionMetrics/DatasetEvaluationApp/gui/ListViewConfig.cpp new file mode 100644 index 00000000..9d313723 --- /dev/null +++ b/DetectionMetrics/DatasetEvaluationApp/gui/ListViewConfig.cpp @@ -0,0 +1,197 @@ +// +// Created by frivas on 18/02/17. +// + +#include +#include +#include +#include +#include +#include +#include "ListViewConfig.h" + +bool ListViewConfig::configureDatasetInput(QMainWindow* mainWindow, QListView *qlistView, const std::string &path,bool multipleSelection) { + + /* + Check if the paths provided in the config file exists ,else output the + path that does not exist. + */ + if (!boost::filesystem::exists(boost::filesystem::path(path))){ + LOG(WARNING)<< "path: " + path + " does not exist"; + return false; + } + + QStringListModel *model; + model = new QStringListModel(mainWindow); + QStringList List; + + std::vector filesID; + + getPathContentDatasetInput(path,filesID); + + std::vector filteredFilesID; + for (int i = 0; i < filesID.size(); i = i + 1) { + std::size_t found_json = filesID[i].find(".json"); + std::size_t found_txt = filesID[i].find(".txt"); + std::size_t found_xml = filesID[i].find(".xml"); + std::size_t found_csv = filesID[i].find(".csv"); + std::size_t found_file = filesID[i].find("."); + if (found_json != std::string::npos || found_txt != std::string::npos || found_xml != std::string::npos || found_csv != std::string::npos || found_file == std::string::npos) { + filteredFilesID.push_back(filesID[i]); + } + } + filesID = filteredFilesID; + + std::sort(filesID.begin(),filesID.end()); + + for (auto it = filesID.begin(), end = filesID.end(); it != end; ++it){ + std::string::size_type i = it->find(path); + + if (i != std::string::npos) + it->erase(i, path.length()); + + List << QString::fromStdString(*it); + } + + model->setStringList(List); + + qlistView->setModel(model); + if (multipleSelection) + qlistView->setSelectionMode(QAbstractItemView::ExtendedSelection); + + 
return true; +} + +void ListViewConfig::getPathContentDatasetInput(const std::string &path, std::vector& content) { + + boost::filesystem::directory_iterator end_itr; + boost::filesystem::path boostPath(path); + std::size_t path_last; + + for (boost::filesystem::directory_iterator itr(boostPath); itr!=end_itr; ++itr) { + std::vector possibleContent; + if (boost::filesystem::is_directory(*itr)){ + //check if yolo (should contain a *.txt + bool isOwnFormat=false; + bool takeParent=true; + boost::filesystem::path boostPath2(itr->path()); + for (boost::filesystem::directory_iterator itr2(boostPath2); itr2!=end_itr; ++itr2) { + if (itr2->path().string().find(".txt") != std::string::npos) { + possibleContent.push_back(itr2->path().string()); + } else if(itr2->path().string().find(".json") != std::string::npos) { + possibleContent.push_back(itr2->path().string()); + } else if(itr2->path().string().find(".csv") != std::string::npos) { + possibleContent.push_back(itr2->path().string()); + } else if(itr2->path().string().find(".xml") != std::string::npos) { + //Only Take Parent and break this will prevent displaying multiple xml files + break; + //possibleContent.push_back(itr2->path().string()); + } else if ((itr2->path().string().find("png") != std::string::npos) || (itr2->path().string().find("json") != std::string::npos)) { + isOwnFormat=true; + takeParent=false; + break; + } else { + takeParent=false; + } + } + if (takeParent) { + possibleContent.push_back(itr->path().string()); + } + + if (possibleContent.size() != 0) { + for (auto it = possibleContent.begin(), end = possibleContent.end(); it != end; ++it){ + content.push_back(*it); + } + } else if (isOwnFormat) { + content.push_back(itr->path().string()); + } else{ + getPathContentDatasetInput(itr->path().string(),content); + } + } + } +} + +bool ListViewConfig::configureInputByFile(QMainWindow *mainWindow, QListView *qlistView, const std::string &path, const std::string &pathIdentifier, bool multipleSelection) { + if 
(!boost::filesystem::exists(boost::filesystem::path(path))){ + LOG(WARNING) << "path: " + path + " does not exist"; + return false; + } + + QStringListModel *model; + model = new QStringListModel(mainWindow); + QStringList List; + std::vector filesID; + getPathContentOnlyFiles(path,filesID); + + // Filter weights files + if (pathIdentifier == "weightsPath") { + std::vector filteredFilesID; + for (int i = 0; i < filesID.size(); i = i + 1) { + std::size_t found_pb = filesID[i].find(".pb"); + std::size_t found_h5 = filesID[i].find(".h5"); + std::size_t found_pth = filesID[i].find(".pth"); + std::size_t found_weights = filesID[i].find(".weights"); + if (found_pb != std::string::npos || found_h5 != std::string::npos || found_pth != std::string::npos || found_weights != std::string::npos) { + filteredFilesID.push_back(filesID[i]); + } + } + filesID = filteredFilesID; + } + + std::sort(filesID.begin(),filesID.end()); + for (auto it = filesID.begin(), end = filesID.end(); it != end; ++it){ + std::string::size_type i = it->find(path); + + if (i != std::string::npos) + it->erase(i, path.length()); + + List << QString::fromStdString(*it); + } + + model->setStringList(List); + + qlistView->setModel(model); + if (multipleSelection) + qlistView->setSelectionMode(QAbstractItemView::ExtendedSelection); + return true; +} + + +/* + Get all the files(Only) present in a given PATH. +*/ +void ListViewConfig::getPathContentOnlyFiles(const std::string &path, std::vector &content) { + boost::filesystem::directory_iterator end_itr; // An iterator to iterate through directories. + boost::filesystem::path boostPath(path); + + for (boost::filesystem::directory_iterator itr(boostPath); itr!=end_itr; ++itr) + { + /* + Check if the current path is a directory, if yes then recursively call + this function until you reach a file. 
+ */ + if (boost::filesystem::is_directory(*itr)) { + getPathContentOnlyFiles(itr->path().string(),content); + } else { + // If not a directory then push the file(path) into "content". + content.push_back(itr->path().string()); + } + } +} + +bool ListViewConfig::configureInputByData(QMainWindow *mainWindow, QListView *qlistView, const std::vector& data,bool multipleSelection) { + QStringListModel *model; + model = new QStringListModel(mainWindow); + QStringList List; + + for (auto it = data.begin(), end = data.end(); it != end; ++it){ + List << QString::fromStdString(*it); + } + + model->setStringList(List); + + qlistView->setModel(model); + if (multipleSelection) + qlistView->setSelectionMode(QAbstractItemView::ExtendedSelection); + return true; +} diff --git a/DetectionMetrics/DatasetEvaluationApp/gui/ListViewConfig.h b/DetectionMetrics/DatasetEvaluationApp/gui/ListViewConfig.h new file mode 100644 index 00000000..17653b7e --- /dev/null +++ b/DetectionMetrics/DatasetEvaluationApp/gui/ListViewConfig.h @@ -0,0 +1,27 @@ +// +// Created by frivas on 18/02/17. 
+// + +#ifndef SAMPLERGENERATOR_LISTVIEWCONFIG_H +#define SAMPLERGENERATOR_LISTVIEWCONFIG_H + +#include +#include +#include + + +class ListViewConfig { +public: + static bool configureDatasetInput(QMainWindow* mainWindow, QListView* qlistView, const std::string& path, bool multipleSelection); + static bool configureInputByFile(QMainWindow* mainWindow, QListView* qlistView, const std::string& path, const std::string& pathIdentifier, bool multipleSelection); + static bool configureInputByData(QMainWindow* mainWindow, QListView* qlistView, const std::vector& data,bool multipleSelection); + + + +private: + static void getPathContentDatasetInput(const std::string& path, std::vector& content); + static void getPathContentOnlyFiles(const std::string& path, std::vector& content); + +}; + +#endif //SAMPLERGENERATOR_LISTVIEWCONFIG_H diff --git a/DetectionMetrics/DatasetEvaluationApp/gui/TabHandler.cpp b/DetectionMetrics/DatasetEvaluationApp/gui/TabHandler.cpp new file mode 100644 index 00000000..ba7888fa --- /dev/null +++ b/DetectionMetrics/DatasetEvaluationApp/gui/TabHandler.cpp @@ -0,0 +1,30 @@ +// +// Created by frivas on 19/02/17. +// + +#include +#include +#include "TabHandler.h" +#include "ListViewConfig.h" + +// Contructor function(will be called whenever this object is created). +TabHandler::TabHandler() { + fillContexts(); +} + + +// Add "viewer" && "converter" to "contexts". +void TabHandler::fillContexts() { + this->contexts.push_back("viewer"); + this->contexts.push_back("converter"); +} + +// Get the context of the handler provided index. 
+std::string TabHandler::getStringContext(int index) { + return this->contexts[index]; +} + +// To get all the elements present in the "contexts" +std::vector TabHandler::getAllContexts() { + return this->contexts; +} diff --git a/DetectionMetrics/DatasetEvaluationApp/gui/TabHandler.h b/DetectionMetrics/DatasetEvaluationApp/gui/TabHandler.h new file mode 100644 index 00000000..ae00f186 --- /dev/null +++ b/DetectionMetrics/DatasetEvaluationApp/gui/TabHandler.h @@ -0,0 +1,33 @@ +// +// Created by frivas on 19/02/17. +// + +#ifndef SAMPLERGENERATOR_TABHANDLER_H +#define SAMPLERGENERATOR_TABHANDLER_H + + +#include +#include +#include +#include + +class TabHandler { +public: + // Constructor function. + TabHandler(); + // Get the context provided index. + std::string getStringContext(int index); + // Get entire an entire vector of elements present in context. + std::vector getAllContexts(); + +private: + // A vector of strings to store different elements like "viewer","converter",etc. + std::vector contexts; + // Fill "contexts" with certain elements. + void fillContexts(); +}; + + +typedef boost::shared_ptr TabHandlerPtr; + +#endif //SAMPLERGENERATOR_TABHANDLER_H diff --git a/DetectionMetrics/DatasetEvaluationApp/gui/Utils.cpp b/DetectionMetrics/DatasetEvaluationApp/gui/Utils.cpp new file mode 100644 index 00000000..1fa117b9 --- /dev/null +++ b/DetectionMetrics/DatasetEvaluationApp/gui/Utils.cpp @@ -0,0 +1,92 @@ +// +// Created by frivas on 19/02/17. 
+// + +#include "Utils.h" +#include +bool Utils::getListViewContent(const QListView *list, std::vector &content, const std::string &prefix) { + content.clear(); + + if (list->model() == 0) { + return false; + } + + + QModelIndexList selectedList =list->selectionModel()->selectedIndexes(); + for (auto it = selectedList.begin(), end = selectedList.end(); it != end; ++it){ + content.push_back(prefix + it->data().toString().toStdString()); + } + + return content.size() != 0; +} + +bool Utils::getDeployerParamsContent(const QGroupBox* deployer_params, std::map& deployer_params_map) { + deployer_params_map.clear(); + + if (!deployer_params->isEnabled()) + return false; + + + QList allLineEdits = deployer_params->findChildren(); + + QList::iterator i; + for (i = allLineEdits.begin(); i != allLineEdits.end(); ++i) { + if ((*i)->text().toStdString().empty()) + throw std::invalid_argument("Please Enter All the Parameters"); + } + + deployer_params_map["Server"] = deployer_params->findChild("radioButton_deployer_ros")->isChecked() ? 
"ROS" : "Ice"; + deployer_params_map["Proxy"] = deployer_params->findChild("lineEdit_deployer_proxy")->text().toStdString(); + deployer_params_map["Format"] = deployer_params->findChild("lineEdit_deployer_format")->text().toStdString(); + deployer_params_map["Topic"] = deployer_params->findChild("lineEdit_deployer_topic")->text().toStdString(); + deployer_params_map["Name"] = deployer_params->findChild("lineEdit_deployer_name")->text().toStdString(); + + + return true; + +} + +bool Utils::getInferencerParamsContent(const QGroupBox* inferencer_params, std::map& inferencer_params_map) { + + inferencer_params_map.clear(); + + if (!inferencer_params->isEnabled()) + return false; + + std::string prefix = inferencer_params->objectName().toStdString(); + size_t pos = prefix.find_first_of("_"); + prefix = prefix.substr(0, pos); + + QList allLineEdits = inferencer_params->findChildren(); + + QList::iterator i; + for (i = allLineEdits.begin(); i != allLineEdits.end(); ++i) { + if ((*i)->text().toStdString().empty()) + throw std::invalid_argument("Please Enter All the Parameters"); + } + + + //inferencer_params_map["conf_thresh"] = inferencer_params->findChild((prefix + "_lineEdit_confidence_thresh").c_str())->text().toStdString(); + inferencer_params_map["scaling_factor"] = inferencer_params->findChild((prefix + "_lineEdit_inferencer_scaling_factor").c_str())->text().toStdString(); + inferencer_params_map["inpWidth"] = inferencer_params->findChild((prefix + "_lineEdit_inferencer_input_width").c_str())->text().toStdString(); + inferencer_params_map["inpHeight"] = inferencer_params->findChild((prefix + "_lineEdit_inferencer_input_height").c_str())->text().toStdString(); + inferencer_params_map["mean_sub_blue"] = inferencer_params->findChild((prefix + "_lineEdit_mean_sub_blue").c_str())->text().toStdString(); + inferencer_params_map["mean_sub_green"] = inferencer_params->findChild((prefix + "_lineEdit_mean_sub_green").c_str())->text().toStdString(); + 
inferencer_params_map["mean_sub_red"] = inferencer_params->findChild((prefix + "_lineEdit_mean_sub_red").c_str())->text().toStdString(); + inferencer_params_map["useRGB"] = inferencer_params->findChild((prefix + "_checkBox_use_rgb").c_str())->isChecked() ? "true" : "false"; + + return true; + +} + +bool Utils::getCameraParamsContent(const QGroupBox* camera_params, int& cameraID) { + + cameraID = camera_params->findChild("deployer_camera_spinBox")->value(); + LOG(INFO) << cameraID << '\n'; + if (cameraID < -1) { + return false; + } + + return true; + +} diff --git a/DetectionMetrics/DatasetEvaluationApp/gui/Utils.h b/DetectionMetrics/DatasetEvaluationApp/gui/Utils.h new file mode 100644 index 00000000..37b87942 --- /dev/null +++ b/DetectionMetrics/DatasetEvaluationApp/gui/Utils.h @@ -0,0 +1,25 @@ +// +// Created by frivas on 19/02/17. +// + +#ifndef SAMPLERGENERATOR_UTILS_H +#define SAMPLERGENERATOR_UTILS_H + + +#include +#include +#include +#include +#include +#include + +class Utils { +public: + static bool getListViewContent(const QListView* list,std::vector& content ,const std::string& prefix); + static bool getDeployerParamsContent(const QGroupBox* deployer_params, std::map& deployer_params_map); + static bool getInferencerParamsContent(const QGroupBox* inferencer_params, std::map& inferencer_params_map); + static bool getCameraParamsContent(const QGroupBox* camera_params, int& cameraID); +}; + + +#endif //SAMPLERGENERATOR_UTILS_H diff --git a/DetectionMetrics/DatasetEvaluationApp/gui/appconfig.cpp b/DetectionMetrics/DatasetEvaluationApp/gui/appconfig.cpp new file mode 100644 index 00000000..6dbe2831 --- /dev/null +++ b/DetectionMetrics/DatasetEvaluationApp/gui/appconfig.cpp @@ -0,0 +1,101 @@ +#include "appconfig.h" +#include "ui_appconfig.h" +#include +#include +#include + +// Constructor to initialize and configure all the buttons +appconfig::appconfig(QWidget *parent) : QMainWindow(parent), ui(new Ui::appconfig){ + ui->setupUi(this); + this->node; + // 
Connect all the buttons to listen to certain actions and + // trigger callback functions if action performed. + connect(ui->toolButton_weights, SIGNAL (clicked()),this, SLOT (handleToolbuttonWeights())); + connect(ui->toolButton_eval, SIGNAL (clicked()),this, SLOT (handleToolbuttonEval())); + connect(ui->toolButton_cfg, SIGNAL (clicked()),this, SLOT (handleToolbuttonCfg())); + connect(ui->toolButton_appconfig, SIGNAL (clicked()),this, SLOT (handleToolbuttonAppconfig())); + connect(ui->toolButton_names, SIGNAL (clicked()),this, SLOT (handleToolbuttonNames())); + connect(ui->pushButton_ok, SIGNAL (clicked()),this, SLOT (handlePushbuttonOK())); + connect(ui->checkBox, SIGNAL (clicked()),this, SLOT (handleCheckbox())); + this->node["datasetPath"]="~/"; +} + +// To select an config file +void appconfig::handleToolbuttonAppconfig(){ + QString dir_name = QFileDialog::getOpenFileName(this,"Select config file","~/"); + ui->lineEdit_appconfig->setText(dir_name); + this->node["appconfig"]=dir_name.toUtf8().constData(); +} + +// To select weightsPath +void appconfig::handleToolbuttonWeights(){ + QString dir_name = QFileDialog::getExistingDirectory(this,"Open a dir","~/"); + ui->lineEdit_weights->setText(dir_name); + this->node["weightsPath"]=dir_name.toUtf8().constData(); +} + +// To select config path +void appconfig::handleToolbuttonCfg(){ + QString dir_name = QFileDialog::getExistingDirectory(this,"Open a dir","~/"); + ui->lineEdit_cfg->setText(dir_name); + this->node["netCfgPath"]=dir_name.toUtf8().constData(); +} + +// To select NamesDir path +void appconfig::handleToolbuttonNames(){ + QString dir_name = QFileDialog::getExistingDirectory(this,"Open a dir","~/"); + ui->lineEdit_names->setText(dir_name); + this->node["namesPath"]=dir_name.toUtf8().constData(); +} + +// To select evalutaion path +void appconfig::handleToolbuttonEval(){ + QString dir_name = QFileDialog::getExistingDirectory(this,"Open a dir","~/"); + ui->lineEdit_eval->setText(dir_name); + 
this->node["evaluationsPath"]=dir_name.toUtf8().constData(); +} + +// Function to proceed forward if all the required parameters are passed +void appconfig::handlePushbuttonOK(){ + // Pop an error message if not all the parameters/or a config file is passed + if(!ui->checkBox->isChecked() && !ui->lineEdit_appconfig->text().size()){ + QMessageBox::warning(this,"AppConfig","Please select the AppConfig file or " + "provide the below required parameters individually "); + return; + } + // Similar check as above + if(ui->checkBox->isChecked()){ + if( !ui->lineEdit_cfg->text().size() || ! ui->lineEdit_names->text().size() || + !ui->lineEdit_eval->text().size() || ! ui->lineEdit_weights->text().size() ){ + QMessageBox::warning(this,"AppConfig","Please provide the required parameters to proceed"); + return; + } + } + + // If everything runs smoothly exit + QApplication::quit(); + QCoreApplication::quit(); + // return ; +} + +// Return a Yaml node +YAML::Node appconfig::return_node(){ + return this->node; +} + +// Graying out not required parameters +void appconfig::handleCheckbox(){ + ui->lineEdit_weights->setDisabled(!ui->checkBox->isChecked()); + ui->lineEdit_names->setDisabled(!ui->checkBox->isChecked()); + ui->lineEdit_eval->setDisabled(!ui->checkBox->isChecked()); + ui->lineEdit_cfg->setDisabled(!ui->checkBox->isChecked()); + ui->toolButton_weights->setDisabled(!ui->checkBox->isChecked()); + ui->toolButton_names->setDisabled(!ui->checkBox->isChecked()); + ui->toolButton_eval->setDisabled(!ui->checkBox->isChecked()); + ui->toolButton_cfg->setDisabled(!ui->checkBox->isChecked()); +} + +// Destructor +appconfig::~appconfig(){ + delete ui; +} diff --git a/DetectionMetrics/DatasetEvaluationApp/gui/appconfig.h b/DetectionMetrics/DatasetEvaluationApp/gui/appconfig.h new file mode 100644 index 00000000..4b55ace5 --- /dev/null +++ b/DetectionMetrics/DatasetEvaluationApp/gui/appconfig.h @@ -0,0 +1,36 @@ +#ifndef APPCONFIG_H +#define APPCONFIG_H + +// This is the backend to 
select the required parameters graphically + +#include +#include + +namespace Ui { +class appconfig; +} + +class appconfig : public QMainWindow +{ + Q_OBJECT + +public: + explicit appconfig(QWidget *parent = 0); // Constructor + ~appconfig(); // Destructor + YAML::Node return_node(); // Returns YAML node +private slots: + // Callback functions to handle different buttons + void handleToolbuttonWeights(); + void handleToolbuttonNames(); + void handleToolbuttonCfg(); + void handleToolbuttonAppconfig(); + void handleToolbuttonEval(); + void handleCheckbox(); + void handlePushbuttonOK(); + +private: + Ui::appconfig *ui; + YAML::Node node; +}; + +#endif // APPCONFIG_H diff --git a/DetectionMetrics/DatasetEvaluationApp/gui/appconfig.ui b/DetectionMetrics/DatasetEvaluationApp/gui/appconfig.ui new file mode 100644 index 00000000..fcd2785b --- /dev/null +++ b/DetectionMetrics/DatasetEvaluationApp/gui/appconfig.ui @@ -0,0 +1,237 @@ + + + appconfig + + + + 0 + 0 + 439 + 333 + + + + appconfig + + + + + + 230 + 230 + 178 + 29 + + + + + + + OK + + + + + + + + + 130 + 40 + 161 + 22 + + + + Select Indivudually + + + false + + + + + + 20 + 70 + 301 + 141 + + + + + + + + + weightsPath + + + + + + + evaluationsPath + + + + + + + namesPath + + + + + + + netCfgPath + + + + + + + + + + + + + false + + + + + + + false + + + ... + + + + + + + + + + + false + + + + + + + false + + + ... + + + + + + + + + + + false + + + + + + + false + + + ... + + + + + + + + + + + false + + + + + + + false + + + ... + + + + + + + + + + + + + 20 + 10 + 301 + 29 + + + + + + + Config file + + + + + + + + + + ... 
+ + + + + + + + + + 0 + 0 + 439 + 25 + + + + + + TopToolBarArea + + + false + + + + + + + + diff --git a/DetectionMetrics/DatasetEvaluationApp/main.cpp b/DetectionMetrics/DatasetEvaluationApp/main.cpp new file mode 100644 index 00000000..74392c0b --- /dev/null +++ b/DetectionMetrics/DatasetEvaluationApp/main.cpp @@ -0,0 +1,68 @@ +#include "mainwindow.h" +#include +#include +#include +#include "gui/Appcfg.hpp" + +class MyApp:public SampleGenerationApp{ +public: + // Constructor Functions were written this form to avoid segmentation fault + MyApp(int argc, char* argv[]):SampleGenerationApp(argc,argv){ + this->requiredArguments.push_back("datasetPath"); + this->requiredArguments.push_back("evaluationsPath"); + this->requiredArguments.push_back("weightsPath"); + this->requiredArguments.push_back("netCfgPath"); + this->requiredArguments.push_back("namesPath"); + }; + MyApp(YAML::Node node):SampleGenerationApp(node){ + this->requiredArguments.push_back("datasetPath"); + this->requiredArguments.push_back("evaluationsPath"); + this->requiredArguments.push_back("weightsPath"); + this->requiredArguments.push_back("netCfgPath"); + this->requiredArguments.push_back("namesPath"); + }; + MyApp(std::string filepath, bool isPath):SampleGenerationApp(filepath,isPath){ + this->requiredArguments.push_back("datasetPath"); + this->requiredArguments.push_back("evaluationsPath"); + this->requiredArguments.push_back("weightsPath"); + this->requiredArguments.push_back("netCfgPath"); + this->requiredArguments.push_back("namesPath"); + }; + void operator()(){ + QApplication a(argc, argv); + MainWindow w(this); + w.show(); + a.exec(); + + }; +}; + + + + +int main(int argc, char *argv[]){ + // Check how many arguments are passed + + if(argc<3){ + // If less than 3 , then pop up the gui. 
+ Appcfg app(argc,argv); + YAML::Node noder = app.get_node(); + // Check if appconfig is passed + if(noder["appconfig"]){ + // If yes, convert that to a string and run Detection Metrics + MyApp myApp(noder["appconfig"].as(),true); + myApp.process(); + } + else{ + // Else pass that YAML node directly which requires no further checks by + // SampleGenerationApp. + MyApp myApp(noder); + myApp.process(); + } + } + else{ + // If a config file is passed , rest is handled by SampleGenerationApp + MyApp myApp(argc,argv); + myApp.process(); + } +} diff --git a/DetectionMetrics/DatasetEvaluationApp/mainwindow.cpp b/DetectionMetrics/DatasetEvaluationApp/mainwindow.cpp new file mode 100644 index 00000000..b4ac3e05 --- /dev/null +++ b/DetectionMetrics/DatasetEvaluationApp/mainwindow.cpp @@ -0,0 +1,517 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "mainwindow.h" +#include "ui_mainwindow.h" +// #include "gui/Appcfg.hpp" + +MainWindow::MainWindow(SampleGenerationApp* app,QWidget *parent) : + app(app), + QMainWindow(parent), + ui(new Ui::MainWindow) +{ + ui->setupUi(this); + + setupTabsInformation(); + + + + + connect(ui->pushButton, SIGNAL (released()),this, SLOT (handleViewButton())); + connect(ui->tabWidget, SIGNAL(currentChanged(int)), this, SLOT(setupTabsInformation())); + connect(ui->pushButton_converter_output, SIGNAL (released()),this, SLOT (handleSelectOutputFolderButton())); + connect(ui->pushButton_detector_output, SIGNAL (released()),this, SLOT (handleSelectOutputFolderButtonDetector())); + connect(ui->pushButton_convert, SIGNAL (released()),this, SLOT (handleConvertButton())); + connect(ui->pushButton_evaluate, SIGNAL (released()),this, SLOT (handleEvaluateButton())); + connect(ui->pushButton_detect, SIGNAL (released()),this, SLOT (handleDetectButton())); + connect(ui->pushButton_deploy_input, SIGNAL (released()),this, SLOT 
(handleSelectDeployInputSource())); + connect(ui->pushButton_deploy_process, SIGNAL (released()),this, SLOT (handleProcessDeploy())); + connect(ui->checkBox_deployer_saveOutput, SIGNAL (released()), this, SLOT( handleDeployerSaveOutputCheckboxChange())); + // connect(ui->checkBox_deployer_saveOutput, SIGNAL (released()), this, SLOT( handleDeployerSaveOutputCheckboxChange())); + connect(ui->pushButton_stop_deployer_process, SIGNAL(released()), this, SLOT(handleDeployerStop())); + connect(ui->pushButton_deployer_output_folder, SIGNAL(released()), this, SLOT(handleSelectOutputFolderButtonDeployer())); + connect(ui->deployer_conf_horizontalSlider, SIGNAL(valueChanged(int)), this, SLOT(handleDeployerConfidenceSliderChange(int))); + connect(ui->deployer_confidence_lineEdit, SIGNAL(textEdited(QString)), this, SLOT(handleDeployerConfidenceLineEditChange(QString))); + + +} + +MainWindow::~MainWindow() +{ + delete ui; +} + +void MainWindow::handleViewButton() { + SampleGeneratorHandler::Viewer::process(ui->listView_viewer_dataset,ui->listView_viewer_names,ui->listView_viewer_reader_imp, + ui->listView_viewer_classFilter, ui->checkBox_evaluator_show_depth->isChecked(), app->getConfig().asString("datasetPath"), app->getConfig().asString("namesPath")); +} + +void MainWindow::handleSelectionNamesChanged() { + std::string classNameFilePath; + LOG(INFO) << ui->tabWidget->currentIndex() << std::endl; + switch(ui->tabWidget->currentIndex()) { + case 0: { + std::vector dataSelected; + Utils::getListViewContent(ui->listView_viewer_names,dataSelected,app->getConfig().asString("namesPath") + "/"); + ClassTypeGeneric typeConverter(dataSelected[0]); + ListViewConfig::configureInputByData(this, ui->listView_viewer_classFilter, + typeConverter.getAllAvailableClasses(), true); + } + break; + case 1: { + std::vector dataSelected; + Utils::getListViewContent(ui->listView_converter_names,dataSelected,app->getConfig().asString("namesPath") + "/"); + ClassTypeGeneric 
typeConverter(dataSelected[0]); + ListViewConfig::configureInputByData(this, ui->listView_converter_classFilter, + typeConverter.getAllAvailableClasses(), true); + } + break; + case 3: { + std::vector dataSelected; + Utils::getListViewContent(ui->listView_evaluator_detection_names, dataSelected, + app->getConfig().asString("namesPath") + "/"); + ClassTypeGeneric typeConverter(dataSelected[0]); + ListViewConfig::configureInputByData(this, ui->listView_evaluator_classFilter, + typeConverter.getAllAvailableClasses(), true); + break; + } + case 4:{ + + break; + } + default: + LOG(WARNING) << "Unkown tab index"; + } +} + +void MainWindow::handleMappingCheckBoxChange() { + if(ui->checkBox_use_writernames->isChecked()) { + ui->listView_converter_writer_names->setEnabled(true); + } else { + ui->listView_converter_writer_names->setEnabled(false); + } +} + +void MainWindow::setupTabsInformation() { + switch(ui->tabWidget->currentIndex()) { + case 0: + ListViewConfig::configureDatasetInput(this, ui->listView_viewer_dataset, + app->getConfig().asString("datasetPath"), true); + ListViewConfig::configureInputByFile(this, ui->listView_viewer_names, + app->getConfig().asString("namesPath"), "namesPath", false); + ListViewConfig::configureInputByData(this, ui->listView_viewer_reader_imp, + GenericDatasetReader::getAvailableImplementations(), false); + connect(ui->listView_viewer_names->selectionModel(), SIGNAL(selectionChanged(QItemSelection,QItemSelection)), this, SLOT(handleSelectionNamesChanged())); + + break; + case 1: + ListViewConfig::configureDatasetInput(this, ui->listView_converter_dataset, + app->getConfig().asString("datasetPath"), true); + ListViewConfig::configureInputByFile(this, ui->listView_converter_names, + app->getConfig().asString("namesPath"), "namesPath", false); + ListViewConfig::configureInputByFile(this, ui->listView_converter_writer_names, + app->getConfig().asString("namesPath"), "namesPath", false); + 
ui->listView_converter_writer_names->setEnabled(false); + ListViewConfig::configureInputByData(this, ui->listView_converter_reader_imp, + GenericDatasetReader::getAvailableImplementations(), false); + ListViewConfig::configureInputByData(this, ui->listView_converter_outImp, + GenericDatasetWriter::getAvailableImplementations(), false); + + connect(ui->listView_converter_names->selectionModel(), SIGNAL(selectionChanged(QItemSelection,QItemSelection)), this, SLOT(handleSelectionNamesChanged())); + connect(ui->checkBox_use_writernames, SIGNAL(clicked(bool)), this, SLOT(handleMappingCheckBoxChange())); + break; + case 2: + ListViewConfig::configureDatasetInput(this, ui->listView_detector_dataset, + app->getConfig().asString("datasetPath"), true); + ListViewConfig::configureInputByFile(this, ui->listView_detector_names, + app->getConfig().asString("namesPath"), "namesPath",false); + ListViewConfig::configureInputByData(this, ui->listView_detector_reader_imp, + GenericDatasetReader::getAvailableImplementations(), false); + ListViewConfig::configureInputByFile(this, ui->listView_detector_weights, + app->getConfig().asString("weightsPath"), "weightsPath", false); + ListViewConfig::configureInputByFile(this, ui->listView_detector_net_config, + app->getConfig().asString("netCfgPath"), "netCfgPath", false); + ListViewConfig::configureInputByData(this, ui->listView_detector_imp, + GenericInferencer::getAvailableImplementations(), false); + ListViewConfig::configureInputByFile(this, ui->listView_detector_names_inferencer, + app->getConfig().asString("namesPath"), "namesPath", false); + + + ui->detector_groupBox_inferencer_params->setEnabled(false); + + connect(ui->listView_detector_imp->selectionModel(),SIGNAL(currentRowChanged(QModelIndex,QModelIndex)), this,SLOT(handleDetectorInferencerImpListViewChange(QModelIndex, QModelIndex))); + + + + break; + case 3: + try { + ListViewConfig::configureDatasetInput(this, ui->listView_evaluator_gt_dataset, + 
app->getConfig().asString("datasetPath"), true); + ListViewConfig::configureInputByFile(this, ui->listView_evaluator_gt_names, + app->getConfig().asString("namesPath"), "namesPath", false); + ListViewConfig::configureInputByData(this, ui->listView_evaluator_gt_imp, + GenericDatasetReader::getAvailableImplementations(), false); + ListViewConfig::configureDatasetInput(this, ui->listView_evaluator_dectection_dataset, + app->getConfig().asString("inferencesPath"), true); + ListViewConfig::configureInputByFile(this, ui->listView_evaluator_detection_names, + app->getConfig().asString("namesPath"), "namesPath", false); + ListViewConfig::configureInputByData(this, ui->listView_evaluator_detection_imp, + GenericDatasetReader::getAvailableImplementations(), false); + connect(ui->listView_evaluator_detection_names->selectionModel(), SIGNAL(selectionChanged(QItemSelection,QItemSelection)), this, SLOT(handleSelectionNamesChanged())); + break; + } catch (const std::exception ex) { + LOG(WARNING)<< "Error starting Evaluator. Is your config file completed? 
" << ex.what(); + break; + } + + case 4: + ListViewConfig::configureInputByFile(this, ui->listView_deploy_weights, + app->getConfig().asString("weightsPath"), "weightsPath", false); + ListViewConfig::configureInputByFile(this, ui->listView_deploy_net_config, + app->getConfig().asString("netCfgPath"), "netCfgPath", false); + ListViewConfig::configureInputByData(this, ui->listView_deploy_impl, + GenericInferencer::getAvailableImplementations(), false); + ListViewConfig::configureInputByFile(this, ui->listView_deploy_names_inferencer, + app->getConfig().asString("namesPath"), "namesPath", false); + ListViewConfig::configureInputByData(this, ui->listView_deploy_input_imp, + GenericLiveReader::getAvailableImplementations(), false); + + + ui->deployer_param_groupBox->setEnabled(false); + ui->groupBox_config_option->setEnabled(false); + ui->deployer_radioButton_manual->setChecked(true); + + #ifdef ICE + ui->radioButton_deployer_ice->setChecked(true); + #else + ui->radioButton_deployer_ice->setEnabled(false); + #endif + #ifdef JDERROS + ui->radioButton_deployer_ros->setChecked(true); + #else + ui->radioButton_deployer_ros->setEnabled(false); + #endif + + ui->deployer_groupBox_inferencer_params->setEnabled(false); + ui->deployer_cameraID_groupBox->setEnabled(false); + + connect(ui->listView_deploy_input_imp->selectionModel(),SIGNAL(currentRowChanged(QModelIndex,QModelIndex)), this,SLOT(handleDeployerImpListViewChange(QModelIndex, QModelIndex))); + //connect(ui->groupBox_config_file, SIGNAL(toggled(bool)), this, SLOT(handleDeployerConfigFileOptionChange(bool))); + connect(ui->deployer_radioButton_manual, SIGNAL(toggled(bool)), this, SLOT(handleDeployerConfigFileOptionChange(bool))); + connect(ui->listView_deploy_impl->selectionModel(),SIGNAL(currentRowChanged(QModelIndex,QModelIndex)), this,SLOT(handleDeployerInferencerImpListViewChange(QModelIndex, QModelIndex))); + + + + break; + default: + LOG(WARNING) << "Unkown tab index"; + } +} + +void 
MainWindow::handleSelectOutputFolderButton() { + QFileDialog *fd = new QFileDialog; + QTreeView *tree = fd->findChild (); +#ifndef __APPLE__ + //tree->setRootIsDecorated(true); + //tree->setItemsExpandable(false); +#endif + fd->setFileMode(QFileDialog::Directory); + fd->setOption(QFileDialog::ShowDirsOnly); + fd->setViewMode(QFileDialog::Detail); + int result = fd->exec(); + QString directory; + if (result) + { + directory = fd->selectedFiles()[0]; + this->ui->textEdit_converterOutPath->setText(directory); + } +} + +void MainWindow::handleConvertButton() { + double ratio; + ratio = this->ui->textEdit_converter_trainRatio->toPlainText().toDouble(); + std::string outputPath = this->ui->textEdit_converterOutPath->toPlainText().toStdString(); + bool splitActive = this->ui->checkBox_splitActive->isChecked(); + bool writeImages = this->ui->checkBox_converter_write_images->isChecked(); + + try { + SampleGeneratorHandler::Converter::process(ui->listView_converter_dataset, ui->listView_converter_names, + ui->listView_converter_reader_imp, + ui->listView_converter_classFilter, + ui->listView_converter_outImp, + ui->listView_converter_writer_names, + ui->checkBox_use_writernames->isChecked(), + app->getConfig().asString("datasetPath"), + app->getConfig().asString("namesPath"), outputPath, + splitActive, ratio, writeImages); + } + catch (const std::string& msg){ + LOG(ERROR) << "Exception detected: " << msg << std::endl; + } + catch (const std::exception &exc) + { + LOG(ERROR) << "Exception Detected: " << exc.what(); + } + catch (...){ + LOG(ERROR) << "Uknown exception type" << std::endl; + } +} + +void MainWindow::handleEvaluateButton() { + try{ + SampleGeneratorHandler::Evaluator::process(ui->listView_evaluator_gt_dataset,ui->listView_evaluator_gt_names,ui->listView_evaluator_gt_imp, + ui->listView_evaluator_dectection_dataset,ui->listView_evaluator_detection_names, ui->listView_evaluator_detection_imp, + 
ui->listView_evaluator_classFilter,app->getConfig().asString("datasetPath"),app->getConfig().asString("namesPath"), + app->getConfig().asString("inferencesPath"),app->getConfig().asString("namesPath"),ui->checkBox_evaluator_merge->isChecked(), + ui->checkBox_evaluator_mix->isChecked(), ui->radioButton_evaluator_iou_bbox->isChecked()); + } + catch (const std::string& msg){ + LOG(ERROR) << "Exception detected: " << msg; + } + catch (const std::exception &exc) + { + LOG(ERROR) << "Exeption Detected: " << exc.what(); + } + catch (...){ + LOG(ERROR) << "Uknown Exception"; + } +} + +void MainWindow::handleDetectButton() { + std::string outputPath = this->ui->textEdit_detectorOutPath->toPlainText().toStdString(); + bool useDepth = this->ui->checkBox_detector_useDepth->isChecked(); + bool singleEvaluation = this->ui->checkBox_detector_single->isChecked(); + QGroupBox* inferencer_params = this->ui->detector_groupBox_inferencer_params; + + + try{ + SampleGeneratorHandler::Detector::process(ui->listView_detector_dataset, ui->listView_detector_names,ui->listView_detector_reader_imp,app->getConfig().asString("datasetPath"), + ui->listView_detector_weights,ui->listView_detector_net_config,ui->listView_detector_imp,ui->listView_detector_names_inferencer, + inferencer_params, app->getConfig().asString("weightsPath"),app->getConfig().asString("netCfgPath"),outputPath,app->getConfig().asString("namesPath"), + useDepth,singleEvaluation); + } + catch (const std::string& msg){ + LOG(ERROR) << "Exception Detected: " << msg; + } + catch (const std::exception &exc) + { + LOG(ERROR) << "Exeption Detected: " << exc.what(); + } + catch (...){ + LOG(ERROR) << "Uknown exectip Type"; + } +} + +void MainWindow::handleSelectOutputFolderButtonDetector() { + QFileDialog *fd = new QFileDialog; + QTreeView *tree = fd->findChild (); +#ifndef __APPLE__ + //tree->setRootIsDecorated(true); + //tree->setItemsExpandable(false); +#endif + fd->setFileMode(QFileDialog::Directory); + 
fd->setOption(QFileDialog::ShowDirsOnly); + fd->setViewMode(QFileDialog::Detail); + int result = fd->exec(); + QString directory; + if (result) + { + directory = fd->selectedFiles()[0]; + this->ui->textEdit_detectorOutPath->setText(directory); + } +} + +void MainWindow::handleSelectDeployInputSource() { + QFileDialog *fd = new QFileDialog; + QTreeView *tree = fd->findChild (); +#ifndef __APPLE__ + //tree->setRootIsDecorated(true); + //tree->setItemsExpandable(false); +#endif + fd->setFileMode(QFileDialog::AnyFile); +// fd->setOption(QFileDialog::Show); + fd->setViewMode(QFileDialog::Detail); + int result = fd->exec(); + QString directory; + if (result) + { + directory = fd->selectedFiles()[0]; + this->ui->textEdit_deployInputPath->setText(directory); + } +} + +void MainWindow::handleSelectOutputFolderButtonDeployer() { + QFileDialog *fd = new QFileDialog; + QTreeView *tree = fd->findChild (); +#ifndef __APPLE__ + //tree->setRootIsDecorated(true); + //tree->setItemsExpandable(false); +#endif + fd->setFileMode(QFileDialog::Directory); + //fd->setOption(QFileDialog::ShowDirsOnly); + fd->setViewMode(QFileDialog::Detail); + int result = fd->exec(); + QString directory; + if (result) + { + directory = fd->selectedFiles()[0]; + this->ui->textEdit_deployerOutputPath->setText(directory); + } +} + +void MainWindow::handleDeployerImpListViewChange(const QModelIndex& selected, const QModelIndex& deselected) { + if (selected.data().toString() == "stream") { + ui->deployer_param_groupBox->setEnabled(true); + ui->groupBox_config_option->setEnabled(true); + ui->deployer_cameraID_groupBox->setEnabled(false); + handleDeployerConfigFileOptionChange(ui->deployer_radioButton_manual->isChecked()); + } else if (selected.data().toString() == "camera") { + ui->textEdit_deployInputPath->setEnabled(false); + ui->pushButton_deploy_input->setEnabled(false); + ui->deployer_param_groupBox->setEnabled(false); + ui->groupBox_config_option->setEnabled(false); + 
ui->deployer_cameraID_groupBox->setEnabled(true); + } + else { + ui->textEdit_deployInputPath->setEnabled(true); + ui->pushButton_deploy_input->setEnabled(true); + ui->deployer_param_groupBox->setEnabled(false); + ui->groupBox_config_option->setEnabled(false); + ui->deployer_cameraID_groupBox->setEnabled(false); + } +} + +void MainWindow::handleDeployerConfigFileOptionChange(bool checked) { + if(checked){ + ui->textEdit_deployInputPath->setEnabled(false); + ui->pushButton_deploy_input->setEnabled(false); + ui->deployer_param_groupBox->setEnabled(true); + } else { + ui->textEdit_deployInputPath->setEnabled(true); + ui->pushButton_deploy_input->setEnabled(true); + ui->deployer_param_groupBox->setEnabled(false); + } +} + +void MainWindow::handleDeployerInferencerImpListViewChange(const QModelIndex& selected, const QModelIndex& deselected) { + if (selected.data().toString() == "caffe") { + ui->deployer_groupBox_inferencer_params->setEnabled(true); + } else { + ui->deployer_groupBox_inferencer_params->setEnabled(false); + } +} + +void MainWindow::handleDetectorInferencerImpListViewChange(const QModelIndex& selected, const QModelIndex& deselected) { + if (selected.data().toString() == "caffe") { + ui->detector_groupBox_inferencer_params->setEnabled(true); + } else { + ui->detector_groupBox_inferencer_params->setEnabled(false); + } +} + +void MainWindow::handleDeployerConfidenceLineEditChange(const QString& confidence) { + + + std::string conf_val = confidence.toStdString(); + + //std::cout << conf_val << '\n'; + double val; + + try { + + val = std::stod(confidence.toStdString()); + } catch (...) 
{ + + bool oldState = this->ui->deployer_conf_horizontalSlider->blockSignals(true); + this->ui->deployer_conf_horizontalSlider->setValue(0); + this->ui->deployer_conf_horizontalSlider->blockSignals(oldState); + return; + } + //std::cout << val << '\n'; + if (val > 1.0) { + QMessageBox::warning(this, QObject::tr("Confidence Threshold out of Bounds"), QObject::tr("Confidence Threshold can't be greater than 1.0, setting Threshold to 0.2")); + val = 1.0; + this->ui->deployer_confidence_lineEdit->setText(QString("0.2")); + } + if ( val < 0.0) { + QMessageBox::warning(this, QObject::tr("Confidence Threshold out of Bounds"), QObject::tr("Confidence Threshold can't be smaller than 0, setting Threshold to 0.2")); + val = 0; + this->ui->deployer_confidence_lineEdit->setText(QString("0.2")); + } + bool oldState = this->ui->deployer_conf_horizontalSlider->blockSignals(true); + this->ui->deployer_conf_horizontalSlider->setValue((int)(val*100)); + this->ui->deployer_conf_horizontalSlider->blockSignals(oldState); + this->confidence_threshold = val; +} + +void MainWindow::handleDeployerConfidenceSliderChange(const int& confidence) { + + std::stringstream str; + double val = confidence/100.0; + str << std::fixed << std::setprecision( 2 ) << val; + QString qstr = QString::fromStdString(str.str()); + + //std::cout << qstr.toStdString() << '\n'; + + this->ui->deployer_confidence_lineEdit->setText(qstr); + this->confidence_threshold = val; +} + + +void MainWindow::handleDeployerSaveOutputCheckboxChange() { + if(ui->checkBox_deployer_saveOutput->isChecked()) { + ui->groupbox_deployer_saveOutput->setEnabled(true); + } else { + ui->groupbox_deployer_saveOutput->setEnabled(false); + } +} + +void MainWindow::handleDeployerStop() { + this->stopDeployer = true; + LOG(WARNING) << "Stopping Deployer Process" << "\n"; +} + +void MainWindow::handleProcessDeploy() { + this->stopDeployer = false; + std::string inputInfo = this->ui->textEdit_deployInputPath->toPlainText().toStdString(); + + 
QGroupBox* deployer_params = this->ui->deployer_param_groupBox; + QGroupBox* camera_params = this->ui->deployer_cameraID_groupBox; + QGroupBox* inferencer_params = this->ui->deployer_groupBox_inferencer_params; + std::string outputFolder = this->ui->textEdit_deployerOutputPath->toPlainText().toStdString(); + if (!ui->checkBox_deployer_saveOutput->isChecked()) { + outputFolder.clear(); + } + + try{ + SampleGeneratorHandler::Deployer::process(ui->listView_deploy_input_imp,ui->listView_deploy_weights, + ui->listView_deploy_net_config,ui->listView_deploy_impl,ui->listView_deploy_names_inferencer, &this->stopDeployer, + &confidence_threshold, deployer_params, camera_params, inferencer_params, app->getConfig().asString("weightsPath"), + app->getConfig().asString("netCfgPath"),app->getConfig().asString("namesPath"),inputInfo, outputFolder,ui->Labelling->isChecked()); + } + catch (const std::string& msg){ + LOG(ERROR) << "Exception detected: " << msg; + } + catch (const std::exception &exc) + { + LOG(ERROR) << "Exception Detected: " << exc.what(); + } + catch (...){ + LOG(ERROR) << "Uknown Exception Type"; + } +} diff --git a/DetectionMetrics/DatasetEvaluationApp/mainwindow.h b/DetectionMetrics/DatasetEvaluationApp/mainwindow.h new file mode 100644 index 00000000..03434dd4 --- /dev/null +++ b/DetectionMetrics/DatasetEvaluationApp/mainwindow.h @@ -0,0 +1,51 @@ +#ifndef MAINWINDOW_H +#define MAINWINDOW_H + +#include +#include +#include +#include + +namespace Ui { +class MainWindow; +} + +class MainWindow : public QMainWindow +{ + Q_OBJECT + +public: + explicit MainWindow(SampleGenerationApp* app,QWidget *parent = 0); + ~MainWindow(); + Ui::MainWindow *ui; + + +private: + SampleGenerationApp* app; + bool stopDeployer = false; + double confidence_threshold = 0.2; + +private slots: + void handleViewButton(); + void handleSelectOutputFolderButton(); + void handleSelectionNamesChanged(); + void setupTabsInformation(); + void handleConvertButton(); + void handleEvaluateButton(); + 
void handleDetectButton(); + void handleSelectOutputFolderButtonDetector(); + void handleSelectDeployInputSource(); + void handleSelectOutputFolderButtonDeployer(); + void handleProcessDeploy(); + void handleMappingCheckBoxChange(); + void handleDeployerImpListViewChange(const QModelIndex& selected, const QModelIndex& deselected); + void handleDeployerConfigFileOptionChange(bool checked); + void handleDeployerInferencerImpListViewChange(const QModelIndex& selected, const QModelIndex& deselected); + void handleDetectorInferencerImpListViewChange(const QModelIndex& selected, const QModelIndex& deselected); + void handleDeployerSaveOutputCheckboxChange(); + void handleDeployerStop(); + void handleDeployerConfidenceLineEditChange(const QString& confidence); + void handleDeployerConfidenceSliderChange(const int& confidence); +}; + +#endif // MAINWINDOW_H diff --git a/DetectionMetrics/DatasetEvaluationApp/mainwindow.ui b/DetectionMetrics/DatasetEvaluationApp/mainwindow.ui new file mode 100644 index 00000000..f0273f08 --- /dev/null +++ b/DetectionMetrics/DatasetEvaluationApp/mainwindow.ui @@ -0,0 +1,1875 @@ + + + MainWindow + + + + 0 + 0 + 1225 + 764 + + + + DetectionMetrics + + + + + + 0 + 0 + 1221 + 701 + + + + 4 + + + + Viewer + + + + + 1040 + 20 + 85 + 28 + + + + View + + + + + + 10 + 10 + 411 + 291 + + + + + + + Input Dataset + + + + + + + + + + + + 440 + 10 + 191 + 131 + + + + + + + Dataset Names + + + + + + + + + + + + 650 + 10 + 168 + 131 + + + + + + + Dataset implementation + + + + + + + + + + + + 10 + 320 + 411 + 321 + + + + + + + Filter by class + + + + + + + + + + + + 770 + 150 + 191 + 26 + + + + Show Depth Images + + + false + + + + + + Converter + + + + + 650 + 10 + 220 + 131 + + + + + + + Reader Dataset implementation + + + + + + + + + + + + 440 + 10 + 191 + 131 + + + + + + + Dataset Names + + + + + + + + + + + + 10 + 10 + 411 + 291 + + + + + + + Input Dataset + + + + + + + + + + + + 10 + 320 + 411 + 321 + + + + + + + Filter by class + + + + + + + + + + + + 
510 + 220 + 216 + 111 + + + + + + + Writer Dataset Implementation + + + + + + + + + + + + 750 + 210 + 191 + 131 + + + + + + + Writer Dataset Names + + + + + + + + + + + + 870 + 350 + 341 + 26 + + + + Map To Writer Names, May Lead to data loss + + + false + + + + + + 510 + 500 + 401 + 21 + + + + + + + 510 + 440 + 204 + 20 + + + + Output Path + + + + + + 510 + 460 + 141 + 28 + + + + Select Folder + + + + + + 1040 + 50 + 85 + 28 + + + + Convert + + + + + + 750 + 400 + 181 + 26 + + + + Split into test and train + + + + + + 750 + 430 + 81 + 20 + + + + Train Ratio + + + + + + 750 + 450 + 171 + 21 + + + + + + + 510 + 540 + 178 + 95 + + + + + + + Writer Configuration + + + + + + + Qt::Horizontal + + + + + + + Write Images + + + + + + + + + Detector + + + + + 650 + 10 + 168 + 131 + + + + + + + Dataset implementation + + + + + + + + + + + + 440 + 10 + 191 + 131 + + + + + + + Dataset Names + + + + + + + + + + + + 10 + 10 + 411 + 291 + + + + + + + Input Dataset + + + + + + + + + + + + 670 + 530 + 401 + 21 + + + + + + + 670 + 490 + 161 + 28 + + + + Select Output Folder + + + + + + 10 + 330 + 411 + 291 + + + + + + + Net weights + + + + + + + + + + + + 460 + 330 + 281 + 131 + + + + + + + Net Configuration + + + + + + + + + + + + 470 + 490 + 186 + 131 + + + + + + + Inferencer Implementation + + + + + + + + + + + + 1040 + 20 + 85 + 28 + + + + Detect + + + + + + 460 + 150 + 161 + 26 + + + + Use depth images + + + + + + 670 + 570 + 161 + 26 + + + + Single Evaluation + + + + + + 760 + 330 + 191 + 131 + + + + + + + Inferencer names + + + + + + + + + + + + 810 + 150 + 321 + 191 + + + + Inferencer Parameters: + + + + + 160 + 160 + 97 + 22 + + + + Use RGB + + + + + + 160 + 26 + 61 + 27 + + + + + + + 0 + 30 + 161 + 20 + + + + Confidence Threshold: + + + + + + 0 + 60 + 141 + 17 + + + + Scaling Factor: + + + + + + 160 + 55 + 61 + 27 + + + + + + + 160 + 120 + 51 + 27 + + + + B + + + + + + 210 + 120 + 51 + 27 + + + + G + + + + + + 260 + 120 + 51 + 27 + + + + R + + + + + + 0 + 130 + 131 + 17 + 
+ + + Mean Subtraction: + + + + + + 0 + 90 + 161 + 17 + + + + Inferencer Input Size: + + + + + + 160 + 88 + 61 + 27 + + + + + + + WIdth + + + + + + 220 + 88 + 61 + 27 + + + + Height + + + + + + + Evaluator + + + + + 440 + 10 + 191 + 131 + + + + + + + Dataset Names + + + + + + + + + + + + 10 + 10 + 411 + 291 + + + + + + + Input Grond Thruth Dataset + + + + + + + + + + + + 440 + 150 + 220 + 131 + + + + + + + Reader Dataset implementation + + + + + + + + + + + + 450 + 330 + 191 + 131 + + + + + + + Dataset Names + + + + + + + + + + + + 20 + 330 + 411 + 291 + + + + + + + Input Detection Dataset + + + + + + + + + + + + 440 + 490 + 220 + 131 + + + + + + + Reader Dataset implementation + + + + + + + + + + + + 690 + 300 + 411 + 321 + + + + + + + Filter by class + + + + + + + + + + + + 1040 + 20 + 85 + 28 + + + + Evaluate + + + + + + 710 + 30 + 191 + 26 + + + + Merge all person clases + + + + + + 710 + 70 + 191 + 26 + + + + Add mix evaluation + + + true + + + + + true + + + + 710 + 130 + 201 + 91 + + + + + Ubuntu + 11 + 50 + false + false + false + false + + + + + + + IOU Type + + + + + 10 + 30 + 171 + 22 + + + + Use Bounding Boxes + + + true + + + + + + 10 + 60 + 101 + 22 + + + + Use Masks + + + + + + + Deploy + + + + + 630 + 520 + 186 + 131 + + + + + + + Inferencer Implementation + + + + + + + + + + + + 420 + 520 + 191 + 131 + + + + + + + Inferencer names + + + + + + + + + + + + 20 + 350 + 381 + 301 + + + + + + + Net weights + + + + + + + + + + + + 20 + 4 + 409 + 20 + + + + + + + Deployer Input Type + + + + + + 20 + 30 + 409 + 91 + + + + + + + + + true + + + + 20 + 120 + 361 + 181 + + + + + Ubuntu + 11 + 50 + false + false + false + false + + + + + + + Camera Stream Parameters: + + + + + 20 + 60 + 61 + 31 + + + + + Saab + 10 + false + false + true + + + + Proxy: + + + + + + 90 + 60 + 211 + 23 + + + + + 11 + 50 + false + false + + + + cam1:tcp -h localhost -p 9999 + + + + + + 90 + 90 + 211 + 23 + + + + RGB8 + + + + + + 90 + 120 + 211 + 23 + + + + DetectionMetrics/Deployer + 
+ + + + + 90 + 150 + 211 + 23 + + + + cam1 + + + + + true + + + + 20 + 90 + 71 + 31 + + + + + 10 + + + + Format: + + + + + + 20 + 120 + 61 + 31 + + + + + Saab + 10 + false + false + true + + + + Topic: + + + + + + 20 + 150 + 61 + 21 + + + + + Ubuntu + 10 + false + false + true + + + + Name: + + + + + + 100 + 30 + 61 + 22 + + + + ROS + + + + + + 170 + 30 + 117 + 22 + + + + ICE + + + + + + 20 + 30 + 68 + 17 + + + + Server: + + + + + + + 390 + 160 + 271 + 61 + + + + + + + + + + + + 0 + 10 + 271 + 22 + + + + Enter Config Parameters Manually + + + + + + 0 + 40 + 251 + 22 + + + + Select Config File + + + + + + + 730 + 60 + 401 + 21 + + + + + + + + + + 730 + 20 + 161 + 28 + + + + + + + Select Input + + + + + + 1050 + 100 + 85 + 31 + + + + PointingHandCursor + + + + + + Process + + + false + + + false + + + false + + + + + + 860 + 460 + 321 + 191 + + + + Inferencer Parameters + + + + + 160 + 145 + 97 + 22 + + + + Use RGB + + + + + + 0 + 30 + 141 + 17 + + + + Scaling Factor: + + + + + + 160 + 27 + 61 + 27 + + + + + + + 160 + 108 + 51 + 27 + + + + B + + + + + + 210 + 108 + 51 + 27 + + + + G + + + + + + 260 + 108 + 51 + 27 + + + + R + + + + + + 0 + 110 + 131 + 17 + + + + Mean Subtraction: + + + + + + 0 + 70 + 161 + 17 + + + + Inferencer Input Size: + + + + + + 160 + 67 + 61 + 27 + + + + + + + Width + + + + + + 220 + 67 + 61 + 27 + + + + Height + + + + + + false + + + + 750 + 270 + 451 + 101 + + + + + + + + + 20 + 20 + 161 + 28 + + + + + + + Select Output Folder + + + + + + 20 + 60 + 401 + 21 + + + + + + + + 770 + 250 + 191 + 22 + + + + + + + Save Output Inferences + + + + + + 420 + 350 + 281 + 151 + + + + + + + Net Configuration + + + + + + + + + + + + 1050 + 140 + 85 + 31 + + + + Stop + + + + + + 450 + 300 + 160 + 16 + + + + <html><head/><body><p>Confidence Threshold for inferencer, + typically 0.2</p></body></html> + + + + <html><head/><body><p>Confidence Threshold for inferencer, + typically 0.2</p></body></html> + + + + + + + 100 + + + 20 + + + true + + + Qt::Horizontal + 
+ + QSlider::TicksBothSides + + + + + + 447 + 270 + 171 + 20 + + + + Confidence Threshold: + + + + + + 608 + 267 + 41 + 27 + + + + + + + 0.2 + + + + + + + + true + + + + 460 + 30 + 101 + 71 + + + + Camera ID starting form 0, -1 for any camera vaiable + + + Camera ID starting form 0, -1 for any camera vaiable + + + Camera ID: + + + + true + + + + 0 + 30 + 57 + 27 + + + + Camera ID starting form 0, -1 for any camera vaiable + + + Camera ID starting form 0, -1 for any camera vaiable + + + + + + -1 + + + + + + + 770 + 200 + 141 + 22 + + + + Use for labelling + + + + + + + + + 0 + 0 + 1225 + 25 + + + + + + TopToolBarArea + + + false + + + + + + + + diff --git a/DetectionMetrics/Deps/glog/CMakeLists.txt b/DetectionMetrics/Deps/glog/CMakeLists.txt new file mode 100644 index 00000000..7df230d7 --- /dev/null +++ b/DetectionMetrics/Deps/glog/CMakeLists.txt @@ -0,0 +1,54 @@ +# - Try to find Glog +# +# The following variables are optionally searched for defaults +# GLOG_ROOT_DIR: Base directory where all GLOG components are found +# +# The following are set after configuration is done: +# GLOG_FOUND +# GLOG_INCLUDE_DIRS +# GLOG_LIBRARIES +# GLOG_LIBRARYRARY_DIRS + +include(FindPackageHandleStandardArgs) + +set(GLOG_ROOT_DIR "" CACHE PATH "Folder contains Google glog") + +if(WIN32) + find_path(GLOG_INCLUDE_DIR glog/logging.h + PATHS ${GLOG_ROOT_DIR}/src/windows) +else() + find_path(GLOG_INCLUDE_DIR glog/logging.h + PATHS ${GLOG_ROOT_DIR}) +endif() + +if(MSVC) + find_library(GLOG_LIBRARY_RELEASE libglog_static + PATHS ${GLOG_ROOT_DIR} + PATH_SUFFIXES Release) + + find_library(GLOG_LIBRARY_DEBUG libglog_static + PATHS ${GLOG_ROOT_DIR} + PATH_SUFFIXES Debug) + + set(GLOG_LIBRARY optimized ${GLOG_LIBRARY_RELEASE} debug ${GLOG_LIBRARY_DEBUG}) +else() + find_library(GLOG_LIBRARY glog + PATHS ${GLOG_ROOT_DIR} + PATH_SUFFIXES lib lib64) +endif() + +find_package_handle_standard_args(Glog DEFAULT_MSG GLOG_INCLUDE_DIR GLOG_LIBRARY) + +if(GLOG_FOUND) + set(GLOG_INCLUDE_DIRS 
${GLOG_INCLUDE_DIR}) + set(GLOG_LIBRARIES ${GLOG_LIBRARY}) + message(STATUS "Found glog (include: ${GLOG_INCLUDE_DIR}, library: ${GLOG_LIBRARY})") + mark_as_advanced(GLOG_ROOT_DIR GLOG_LIBRARY_RELEASE GLOG_LIBRARY_DEBUG + GLOG_LIBRARY GLOG_INCLUDE_DIR) +ELSE() + message(FATAL_ERROR "Google Logs is required (libgoogle-glog-dev)") +endif() + + + +list(APPEND DEPS libgoogle-glog-dev) diff --git a/DetectionMetrics/Deps/ice/CMake/FindZeroCIce.cmake b/DetectionMetrics/Deps/ice/CMake/FindZeroCIce.cmake new file mode 100644 index 00000000..ad45a471 --- /dev/null +++ b/DetectionMetrics/Deps/ice/CMake/FindZeroCIce.cmake @@ -0,0 +1,69 @@ +# Find the ZeroC ICE includes and libraries for every module (Ice, IceStorm, IceUtil, etc) + +# +# ZeroCIce_INCLUDE_DIR - Where the includes are. If everything is all right, ZeroCIceXXXX_INCLUDE_DIR is always the same. You usually will use this. +# ZeroCIce_LIBRARIES - List of *all* the libraries. You usually will not use this but only ZeroCIceUtil_LIBRARY or alike +# ZerocCIce_FOUND - True if the core Ice was found +# ZeroCIceCore_FOUND +# ZeroCIceCore_INCLUDE_DIR +# ZeroCIceCore_LIBRARY +# ZeroCIceBox_FOUND +# ZeroCIceBox_INCLUDE_DIR +# ZeroCIceBox_LIBRARY +# ZeroCIceGrid_FOUND +# ZeroCIceGrid_INCLUDE_DIR +# ZeroCIceGrid_LIBRARY +# ZeroCIcePatch2_FOUND +# ZeroCIcePatch2_INCLUDE_DIR +# ZeroCIcePatch2_LIBRARY +# ZeroCIceSSL_FOUND +# ZeroCIceSSL_INCLUDE_DIR +# ZeroCIceSSL_LIBRARY +# ZeroCIceStorm_FOUND +# ZeroCIceStorm_INCLUDE_DIR +# ZeroCIceStorm_LIBRARY +# ZeroCIceUtil_FOUND +# ZeroCIceUtil_INCLUDE_DIR +# ZeroCIceUtil_LIBRARY +# ZeroCIceXML_FOUND +# ZeroCIceXML_INCLUDE_DIR +# ZeroCIceXML_LIBRARY +# ZeroCIceExecutables_FOUND + +# +# Copyright (c) 2007, Pau Garcia i Quiles, +# +# Redistribution and use is allowed according to the terms of the BSD license. +# For details see the accompanying COPYING-CMAKE-SCRIPTS file. 
+ +SET( ZeroCIceCore_FIND_QUIETLY TRUE ) +SET( ZeroCIceBox_FIND_QUIETLY TRUE ) +SET( ZeroCIceGrid_FIND_QUIETLY TRUE ) +SET( ZeroCIcePatch2_FIND_QUIETLY TRUE ) +SET( ZeroCIceSSL_FIND_QUIETLY TRUE ) +SET( ZeroCIceStorm_FIND_QUIETLY TRUE ) +SET( ZeroCIceUtil_FIND_QUIETLY TRUE ) +SET( ZeroCIceXML_FIND_QUIETLY TRUE ) +SET( ZeroCIceExecutables_FIND_QUIETLY TRUE ) + +FIND_PACKAGE( ZeroCIceCore ) +FIND_PACKAGE( ZeroCIceBox ) +FIND_PACKAGE( ZeroCIceGrid ) +FIND_PACKAGE( ZeroCIcePatch2 ) +FIND_PACKAGE( ZeroCIceSSL ) +FIND_PACKAGE( ZeroCIceStorm ) +FIND_PACKAGE( ZeroCIceUtil ) +FIND_PACKAGE( ZeroCIceXML ) +FIND_PACKAGE( ZeroCIceExecutables ) + +SET( ZeroCIce_INCLUDE_DIR ${ZeroCIceCore_INCLUDE_DIR} ) +SET( ZeroCIce_LIBRARIES ${ZeroCIceCore_LIBRARY} ${ZeroCIceBox_LIBRARY} ${ZeroCIceGrid_LIBRARY} ${ZeroCIcePatch2_LIBRARY} ${ZeroCIceSSL_LIBRARY} ${ZeroCIceStorm_LIBRARY} ${ZeroCIceUtil_LIBRARY} ${ZeroCIceXML_LIBRARY} ) + +FOREACH( exec ${ICE_EXECUTABLES} ) + IF(ZeroCIce_${exec}_FOUND) + LIST(APPEND ZeroCIce_EXECUTABLES ${ZeroCIce_${exec}_BIN} ) + ENDIF(ZeroCIce_${exec}_FOUND) +ENDFOREACH( exec ${ICE_EXECUTABLES} ) + +SET( ZeroCIce_FOUND ${ZeroCIceCore_FOUND} ) + diff --git a/DetectionMetrics/Deps/ice/CMake/FindZeroCIceBox.cmake b/DetectionMetrics/Deps/ice/CMake/FindZeroCIceBox.cmake new file mode 100644 index 00000000..065eee4d --- /dev/null +++ b/DetectionMetrics/Deps/ice/CMake/FindZeroCIceBox.cmake @@ -0,0 +1,25 @@ +# Find the ZeroC ICEBox includes and libraries + +# +# ZeroCIceBox_INCLUDE_DIR +# ZeroCIceBox_LIBRARIES +# ZeroCIceBox_FOUND + + +# +# Copyright (c) 2007, Pau Garcia i Quiles, +# +# Redistribution and use is allowed according to the terms of the BSD license. +# For details see the accompanying COPYING-CMAKE-SCRIPTS file. 
+ +FIND_PATH( ZeroCIceBox_INCLUDE_DIR NAMES IceBox/IceBox.h PATHS ENV C++LIB ENV PATH PATH_SUFFIXES include Ice Ice/include ) + +IF( ZeroCIceBox_INCLUDE_DIR ) + FIND_LIBRARY( ZeroCIceBox_LIBRARY NAMES IceBox PATHS ENV C++LIB ENV PATH PATH_SUFFIXES Ice lib-release lib_release ) + + IF( ZeroCIceBox_LIBRARY ) + SET( ZeroCIceBox_FOUND TRUE ) + ENDIF( ZeroCIceBox_LIBRARY ) + + +ENDIF( ZeroCIceBox_INCLUDE_DIR ) diff --git a/DetectionMetrics/Deps/ice/CMake/FindZeroCIceCore.cmake b/DetectionMetrics/Deps/ice/CMake/FindZeroCIceCore.cmake new file mode 100644 index 00000000..6393febf --- /dev/null +++ b/DetectionMetrics/Deps/ice/CMake/FindZeroCIceCore.cmake @@ -0,0 +1,25 @@ +# Find the ZeroC ICE essential includes and libraries + +# +# ZeroCIceCore_INCLUDE_DIR +# ZeroCIceCore_LIBRARIES +# ZeroCIceCore_FOUND + + +# +# Copyright (c) 2007, Pau Garcia i Quiles, +# +# Redistribution and use is allowed according to the terms of the BSD license. +# For details see the accompanying COPYING-CMAKE-SCRIPTS file. 
+ +FIND_PATH( ZeroCIceCore_INCLUDE_DIR NAMES Ice/Ice.h PATHS ENV C++LIB ENV PATH PATH_SUFFIXES include Ice Ice/include ) + +IF( ZeroCIceCore_INCLUDE_DIR ) + FIND_LIBRARY( ZeroCIceCore_LIBRARY NAMES Ice PATHS ENV C++LIB ENV PATH PATH_SUFFIXES Ice lib-release lib_release ) + + IF( ZeroCIceCore_LIBRARY ) + SET( ZeroCIceCore_FOUND TRUE ) + ENDIF( ZeroCIceCore_LIBRARY ) + + +ENDIF( ZeroCIceCore_INCLUDE_DIR ) diff --git a/DetectionMetrics/Deps/ice/CMake/FindZeroCIceExecutables.cmake b/DetectionMetrics/Deps/ice/CMake/FindZeroCIceExecutables.cmake new file mode 100644 index 00000000..e3a4fbd9 --- /dev/null +++ b/DetectionMetrics/Deps/ice/CMake/FindZeroCIceExecutables.cmake @@ -0,0 +1,39 @@ +# Find the ZeroC ICE executables: +# +# dumpdb, glacier2router, icebox, iceboxadmin, icecpp, icegridadmin, +# icegridnode, icegridregistry, icepatch2calc, icepatch2client, +# icepatch2server, icestormadmin, slice2cpp, slice2cs, slice2docbook, +# slice2freeze, slice2freezej, slice2html, slice2java, slice2py, +# slice2rb, slice2vb, transformdb +# +# Sets ZeroCIceExecutables_FOUND to TRUE only if *any* of the executables +# in the ICE_EXECUTABLES were found, therefore you must also check +# if ZeroCIce_XXXXX_FOUND is true for the executable you want +# +# Defines a ZeroCIce_XXXXX_FOUND and ZeroCIce_XXXXX_BIN variable for each +# executable in the ICE_EXECUTABLES list (the _BIN is the location of the +# executable) +# +# Defines a ZeroCIce_slice_ICES variable with the location of Plugin.ice, +# Logger.ice, etc +# +# Copyright (c) 2007, Pau Garcia i Quiles, +# +# Redistribution and use is allowed according to the terms of the BSD license. +# For details see the accompanying COPYING-CMAKE-SCRIPTS file. 
+ +SET( ICE_EXECUTABLES dumpdb glacier2router icebox iceboxadmin icecpp icegridadmin icegridnode icegridregistry icepatch2calc icepatch2client icepatch2server icestormadmin slice2cpp slice2cs slice2docbook slice2freeze slice2freezej slice2html slice2java slice2py slice2rb slice2vb transformdb ) + +FOREACH( exec ${ICE_EXECUTABLES} ) + + FIND_PROGRAM( ZeroCIce_${exec}_BIN NAMES ${exec} PATHS ENV C++LIB ENV PATH PATH_SUFFIXES bin bin_release bin-release ) + FIND_PATH( ZeroCIce_slice_ICES NAMES Ice/Plugin.ice PATHS /usr/share ENV C++LIB ENV PATH PATH_SUFFIXES slice Ice/slice ) +# MESSAGE( "ZeroCIce_slice_ICES = ${ZeroCIce_slice_ICES}" ) + + IF( ZeroCIce_${exec}_BIN ) + + SET( ZeroCIce_${exec}_FOUND TRUE) + SET( ZeroCIceExecutables_FOUND TRUE ) + ENDIF() + +ENDFOREACH( exec ) diff --git a/DetectionMetrics/Deps/ice/CMake/FindZeroCIceGrid.cmake b/DetectionMetrics/Deps/ice/CMake/FindZeroCIceGrid.cmake new file mode 100644 index 00000000..e853a9c7 --- /dev/null +++ b/DetectionMetrics/Deps/ice/CMake/FindZeroCIceGrid.cmake @@ -0,0 +1,25 @@ +# Find the ZeroC ICEGrid includes and libraries + +# +# ZeroCIceGrid_INCLUDE_DIR +# ZeroCIceGrid_LIBRARIES +# ZerocCIceCore_FOUND + + +# +# Copyright (c) 2007, Pau Garcia i Quiles, +# +# Redistribution and use is allowed according to the terms of the BSD license. +# For details see the accompanying COPYING-CMAKE-SCRIPTS file. 
+ +FIND_PATH( ZeroCIceGrid_INCLUDE_DIR NAMES IceGrid/UserAccountMapper.h PATHS ENV C++LIB ENV PATH PATH_SUFFIXES include Ice Ice/include ) + +IF( ZeroCIceGrid_INCLUDE_DIR ) + FIND_LIBRARY( ZeroCIceGrid_LIBRARY NAMES IceGrid PATHS ENV C++LIB ENV PATH PATH_SUFFIXES Ice lib-release lib_release ) + + IF( ZeroCIceGrid_LIBRARY ) + SET( ZeroCIceGrid_FOUND TRUE ) + ENDIF( ZeroCIceGrid_LIBRARY ) + + +ENDIF( ZeroCIceGrid_INCLUDE_DIR ) diff --git a/DetectionMetrics/Deps/ice/CMake/FindZeroCIcePatch2.cmake b/DetectionMetrics/Deps/ice/CMake/FindZeroCIcePatch2.cmake new file mode 100644 index 00000000..1b45a2d4 --- /dev/null +++ b/DetectionMetrics/Deps/ice/CMake/FindZeroCIcePatch2.cmake @@ -0,0 +1,25 @@ +# Find the ZeroC ICEPatch2 includes and libraries + +# +# ZeroCIcePatch2_INCLUDE_DIR +# ZeroCIcePatch2_LIBRARIES +# ZerocCIceCore_FOUND + + +# +# Copyright (c) 2007, Pau Garcia i Quiles, +# +# Redistribution and use is allowed according to the terms of the BSD license. +# For details see the accompanying COPYING-CMAKE-SCRIPTS file. + +FIND_PATH( ZeroCIcePatch2_INCLUDE_DIR NAMES IcePatch2/ClientUtil.h PATHS ENV C++LIB ENV PATH PATH_SUFFIXES include Ice Ice/include ) + +IF( ZeroCIcePatch2_INCLUDE_DIR ) + FIND_LIBRARY( ZeroCIcePatch2_LIBRARY NAMES IcePatch2 PATHS ENV C++LIB ENV PATH PATH_SUFFIXES Ice lib-release lib_release ) + + IF( ZeroCIcePatch2_LIBRARY ) + SET( ZeroCIcePatch2_FOUND TRUE ) + ENDIF( ZeroCIcePatch2_LIBRARY ) + + +ENDIF( ZeroCIcePatch2_INCLUDE_DIR ) diff --git a/DetectionMetrics/Deps/ice/CMake/FindZeroCIceSSL.cmake b/DetectionMetrics/Deps/ice/CMake/FindZeroCIceSSL.cmake new file mode 100644 index 00000000..d34afe5b --- /dev/null +++ b/DetectionMetrics/Deps/ice/CMake/FindZeroCIceSSL.cmake @@ -0,0 +1,25 @@ +# Find the ZeroC ICESSL includes and libraries + +# +# ZeroCIceSSL_INCLUDE_DIR +# ZeroCIceSSL_LIBRARIES +# ZerocCIceCore_FOUND + + +# +# Copyright (c) 2007, Pau Garcia i Quiles, +# +# Redistribution and use is allowed according to the terms of the BSD license. 
+# For details see the accompanying COPYING-CMAKE-SCRIPTS file. + +FIND_PATH( ZeroCIceSSL_INCLUDE_DIR NAMES IceSSL/Plugin.h PATHS ENV C++LIB ENV PATH PATH_SUFFIXES include Ice Ice/include ) + +IF( ZeroCIceSSL_INCLUDE_DIR ) + FIND_LIBRARY( ZeroCIceSSL_LIBRARY NAMES IceSSL PATHS ENV C++LIB ENV PATH PATH_SUFFIXES Ice lib-release lib_release ) + + IF( ZeroCIceSSL_LIBRARY ) + SET( ZeroCIceSSL_FOUND TRUE ) + ENDIF( ZeroCIceSSL_LIBRARY ) + + +ENDIF( ZeroCIceSSL_INCLUDE_DIR ) diff --git a/DetectionMetrics/Deps/ice/CMake/FindZeroCIceStorm.cmake b/DetectionMetrics/Deps/ice/CMake/FindZeroCIceStorm.cmake new file mode 100644 index 00000000..fee39805 --- /dev/null +++ b/DetectionMetrics/Deps/ice/CMake/FindZeroCIceStorm.cmake @@ -0,0 +1,25 @@ +# Find the ZeroC ICEStorm includes and libraries + +# +# ZeroCIceStorm_INCLUDE_DIR +# ZeroCIceStorm_LIBRARIES +# ZerocCIceStorm_FOUND + + +# +# Copyright (c) 2007, Pau Garcia i Quiles, +# +# Redistribution and use is allowed according to the terms of the BSD license. +# For details see the accompanying COPYING-CMAKE-SCRIPTS file. 
+ +FIND_PATH( ZeroCIceStorm_INCLUDE_DIR NAMES IceStorm/IceStorm.h PATHS ENV C++LIB ENV PATH PATH_SUFFIXES include Ice Ice/include ) + +IF( ZeroCIceStorm_INCLUDE_DIR ) + FIND_LIBRARY( ZeroCIceStorm_LIBRARY NAMES IceStorm PATHS ENV C++LIB ENV PATH PATH_SUFFIXES Ice lib-release lib_release ) + + IF( ZeroCIceStorm_LIBRARY ) + SET( ZeroCIceStorm_FOUND TRUE ) + ENDIF( ZeroCIceStorm_LIBRARY ) + + +ENDIF( ZeroCIceStorm_INCLUDE_DIR ) diff --git a/DetectionMetrics/Deps/ice/CMake/FindZeroCIceUtil.cmake b/DetectionMetrics/Deps/ice/CMake/FindZeroCIceUtil.cmake new file mode 100644 index 00000000..bdd58f88 --- /dev/null +++ b/DetectionMetrics/Deps/ice/CMake/FindZeroCIceUtil.cmake @@ -0,0 +1,25 @@ +# Find the ZeroC ICEUtil includes and libraries + +# +# ZeroCIceUtil_INCLUDE_DIR +# ZeroCIceUtil_LIBRARIES +# ZerocCIceCore_FOUND + + +# +# Copyright (c) 2007, Pau Garcia i Quiles, +# +# Redistribution and use is allowed according to the terms of the BSD license. +# For details see the accompanying COPYING-CMAKE-SCRIPTS file. + +FIND_PATH( ZeroCIceUtil_INCLUDE_DIR NAMES IceUtil/IceUtil.h PATHS ENV C++LIB ENV PATH PATH_SUFFIXES include Ice Ice/include ) + +IF( ZeroCIceUtil_INCLUDE_DIR ) + FIND_LIBRARY( ZeroCIceUtil_LIBRARY NAMES IceUtil PATHS ENV C++LIB ENV PATH PATH_SUFFIXES Ice lib-release lib_release ) + + IF( ZeroCIceUtil_LIBRARY ) + SET( ZeroCIceUtil_FOUND TRUE ) + ENDIF( ZeroCIceUtil_LIBRARY ) + + +ENDIF( ZeroCIceUtil_INCLUDE_DIR ) diff --git a/DetectionMetrics/Deps/ice/CMake/FindZeroCIceXML.cmake b/DetectionMetrics/Deps/ice/CMake/FindZeroCIceXML.cmake new file mode 100644 index 00000000..d1dc7ca9 --- /dev/null +++ b/DetectionMetrics/Deps/ice/CMake/FindZeroCIceXML.cmake @@ -0,0 +1,25 @@ +# Find the ZeroC ICEXML includes and libraries + +# +# ZeroCIceXML_INCLUDE_DIR +# ZeroCIceXML_LIBRARIES +# ZerocCIceCore_FOUND + + +# +# Copyright (c) 2007, Pau Garcia i Quiles, +# +# Redistribution and use is allowed according to the terms of the BSD license. 
+# For details see the accompanying COPYING-CMAKE-SCRIPTS file. + +FIND_PATH( ZeroCIceXML_INCLUDE_DIR NAMES IceXML/Parser.h PATHS ENV C++LIB ENV PATH PATH_SUFFIXES include Ice Ice/include ) + +IF( ZeroCIceXML_INCLUDE_DIR ) + FIND_LIBRARY( ZeroCIceXML_LIBRARY NAMES IceXML PATHS ENV C++LIB ENV PATH PATH_SUFFIXES Ice lib-release lib_release ) + + IF( ZeroCIceXML_LIBRARY ) + SET( ZeroCIceXML_FOUND TRUE ) + ENDIF( ZeroCIceXML_LIBRARY ) + + +ENDIF( ZeroCIceXML_INCLUDE_DIR ) diff --git a/DetectionMetrics/Deps/ice/CMake/FindZeroIceCore.cmake b/DetectionMetrics/Deps/ice/CMake/FindZeroIceCore.cmake new file mode 100644 index 00000000..6393febf --- /dev/null +++ b/DetectionMetrics/Deps/ice/CMake/FindZeroIceCore.cmake @@ -0,0 +1,25 @@ +# Find the ZeroC ICE essential includes and libraries + +# +# ZeroCIceCore_INCLUDE_DIR +# ZeroCIceCore_LIBRARIES +# ZeroCIceCore_FOUND + + +# +# Copyright (c) 2007, Pau Garcia i Quiles, +# +# Redistribution and use is allowed according to the terms of the BSD license. +# For details see the accompanying COPYING-CMAKE-SCRIPTS file. 
+ +FIND_PATH( ZeroCIceCore_INCLUDE_DIR NAMES Ice/Ice.h PATHS ENV C++LIB ENV PATH PATH_SUFFIXES include Ice Ice/include ) + +IF( ZeroCIceCore_INCLUDE_DIR ) + FIND_LIBRARY( ZeroCIceCore_LIBRARY NAMES Ice PATHS ENV C++LIB ENV PATH PATH_SUFFIXES Ice lib-release lib_release ) + + IF( ZeroCIceCore_LIBRARY ) + SET( ZeroCIceCore_FOUND TRUE ) + ENDIF( ZeroCIceCore_LIBRARY ) + + +ENDIF( ZeroCIceCore_INCLUDE_DIR ) diff --git a/DetectionMetrics/Deps/ice/CMakeLists.txt b/DetectionMetrics/Deps/ice/CMakeLists.txt new file mode 100644 index 00000000..820522f0 --- /dev/null +++ b/DetectionMetrics/Deps/ice/CMakeLists.txt @@ -0,0 +1,45 @@ +OPTION(ENABLE_ICE "Enable ICE compatibility for streaming" ON) + +IF (ENABLE_ICE) + + SET(CMAKE_MODULE_PATH + ${CMAKE_MODULE_PATH} + "${CMAKE_CURRENT_LIST_DIR}/CMake" + ) + + SET(slice_path /usr/share/slice) + + #ICE installs c++11 libraries in this directory and we don't indicate this, cmake uses c++98 files + SET(CMAKE_PREFIX_PATH /usr/lib/x86_64-linux-gnu/c++11) + FIND_PACKAGE(ZeroCIceUtil) + FIND_PACKAGE(ZeroCIceStorm) + FIND_PACKAGE(ZeroCIce) + IF( ZeroCIceStorm_LIBRARY ) + IF( ZeroCIceUtil_LIBRARY ) + MESSAGE("-- Found Ice") + SET(ZeroCIce_FOUND TRUE) + include_directories(${ZeroCIceUtil_INCLUDE_DIR}) + link_directories(${ZeroCIceUtil_LIBRARY}) + include_directories(${ZeroCIceCore_INCLUDE_DIR}) + link_directories(${ZeroCIceCore_LIBRARY}) + include_directories(${ZeroCIceStorm_INCLUDE_DIR}) + link_directories(${ZeroCIceStorm_LIBRARY}) + include_directories(${ZeroCIce_INCLUDE_DIR}) + link_directories(${ZeroCIce_LIBRARIES}) + + list(APPEND DEPS libzeroc-ice3.6 zeroc-ice-utils libzeroc-icestorm3.6) + list(APPEND DEPS_DEV zeroc-ice-slice libzeroc-ice-dev) + set(Ice_LIBRARIES ${ZeroCIce_LIBRARIES}) + + ENDIF( ZeroCIceUtil_LIBRARY ) + ENDIF( ZeroCIceStorm_LIBRARY ) + + IF((NOT ZeroCIceStorm_LIBRARY ) OR (NOT ZeroCIceUtil_LIBRARY)) + SET(ZeroCIce_FOUND FALSE) + MESSAGE(WARNING "*** ICE LIBRARIES NOT FOUND. 
ICE SUPPORT WILL BE DISABLED") + ENDIF() + +ELSE() + SET(ZeroCIce_FOUND FALSE) + MESSAGE("Ice is Disabled by default, Set ENABLE_Ice=ON to Enable") +ENDIF() diff --git a/DetectionMetrics/Deps/numpy/CMake/FindNumPy.cmake b/DetectionMetrics/Deps/numpy/CMake/FindNumPy.cmake new file mode 100644 index 00000000..a95d6ac5 --- /dev/null +++ b/DetectionMetrics/Deps/numpy/CMake/FindNumPy.cmake @@ -0,0 +1,41 @@ +# Find the Python NumPy package +# PYTHON_NUMPY_INCLUDE_DIR +# PYTHON_NUMPY_FOUND +# will be set by this script + +cmake_minimum_required(VERSION 2.6) + +if(NOT PYTHON_EXECUTABLE) + if(NumPy_FIND_QUIETLY) + find_package(PythonInterp QUIET) + else() + find_package(PythonInterp) + set(__numpy_out 1) + endif() +endif() + +if (PYTHON_EXECUTABLE) + # Find out the include path + execute_process( + COMMAND "${PYTHON_EXECUTABLE}" -c + "from __future__ import print_function\ntry: import numpy; print(numpy.get_include(), end='')\nexcept:pass\n" + OUTPUT_VARIABLE __numpy_path) + # And the version + execute_process( + COMMAND "${PYTHON_EXECUTABLE}" -c + "from __future__ import print_function\ntry: import numpy; print(numpy.__version__, end='')\nexcept:pass\n" + OUTPUT_VARIABLE __numpy_version) +elseif(__numpy_out) + message(STATUS "Python executable not found.") +endif(PYTHON_EXECUTABLE) + +find_path(PYTHON_NUMPY_INCLUDE_DIR numpy/arrayobject.h + HINTS "${__numpy_path}" "${PYTHON_INCLUDE_PATH}" NO_DEFAULT_PATH) + +if(PYTHON_NUMPY_INCLUDE_DIR) + set(PYTHON_NUMPY_FOUND 1 CACHE INTERNAL "Python numpy found") +endif(PYTHON_NUMPY_INCLUDE_DIR) + +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(NumPy REQUIRED_VARS PYTHON_NUMPY_INCLUDE_DIR +VERSION_VAR __numpy_version) diff --git a/DetectionMetrics/Deps/numpy/CMakeLists.txt b/DetectionMetrics/Deps/numpy/CMakeLists.txt new file mode 100644 index 00000000..1665a546 --- /dev/null +++ b/DetectionMetrics/Deps/numpy/CMakeLists.txt @@ -0,0 +1,12 @@ +SET(CMAKE_MODULE_PATH + ${CMAKE_MODULE_PATH} + 
"${CMAKE_CURRENT_LIST_DIR}/CMake" +) + + +find_package( PythonInterp 2.7.12 REQUIRED ) +find_package( PythonLibs 2.7.12 REQUIRED ) +find_package( NumPy REQUIRED ) + +SET(PYTHON_INCLUDE_DIRS ${PYTHON_INCLUDE_DIRS} ${PYTHON_NUMPY_INCLUDE_DIR}) +# LIBRARIESD AVAILABLE IN PYTHON_LIBRARIES diff --git a/DetectionMetrics/Deps/opencv/CMakeLists.txt b/DetectionMetrics/Deps/opencv/CMakeLists.txt new file mode 100644 index 00000000..52b70f35 --- /dev/null +++ b/DetectionMetrics/Deps/opencv/CMakeLists.txt @@ -0,0 +1,10 @@ +FIND_PACKAGE(OpenCV NO_DEFAULT_PATH PATHS "/usr/local" "/usr" ) + +if(OpenCV_FOUND) + + if(OpenCV_VERSION VERSION_LESS "4.2") + message(FATAL_ERROR "Minimum OpenCV Version Required is 4.2") + endif(OpenCV_VERSION VERSION_LESS "4.2") +else() + message(FATAL_ERROR "Error: Can't Find OpenCV") +endif(OpenCV_FOUND) diff --git a/DetectionMetrics/Deps/qt/CMakeLists.txt b/DetectionMetrics/Deps/qt/CMakeLists.txt new file mode 100644 index 00000000..a61e4a6d --- /dev/null +++ b/DetectionMetrics/Deps/qt/CMakeLists.txt @@ -0,0 +1,37 @@ +OPTION(ENABLE_QT "Enable Qt support for GUI" ON) + +IF(ENABLE_QT) + + FIND_PACKAGE(Qt5Core) # Just to print error if Qt isn't found + FIND_PACKAGE(Qt5Widgets QUIET) + FIND_PACKAGE(Qt5Gui QUIET) + FIND_PACKAGE(Qt5Svg QUIET) + FIND_PACKAGE(Qt5OpenGL QUIET) + + IF (Qt5Widgets_FOUND AND Qt5Core_FOUND AND Qt5Gui_FOUND AND Qt5Svg_FOUND AND Qt5OpenGL_FOUND) + + SET(QT_INCLUDE_DIRS + ${Qt5Widgets_INCLUDE_DIRS} + ${Qt5Core_INCLUDE_DIRS} + ${Qt5Gui_INCLUDE_DIRS} + ${Qt5Svg_INCLUDE_DIRS} + ${Qt5OpenGL_INCLUDE_DIRS} + ) + + SET(QT_LIBRARIES + ${Qt5Widgets_LIBRARIES} + ${Qt5Core_LIBRARIES} + ${Qt5Gui_LIBRARIES} + ${Qt5Svg_LIBRARIES} + ${Qt5OpenGL_LIBRARIES} + ) + + SET(QT_FOUND TRUE) + ELSE() + + MESSAGE("QT Libraries can't be found, Disabling GUI support. 
DatasetEvaluationApp will not be build.") + SET(QT_FOUND FALSE) + + ENDIF() + +ENDIF(ENABLE_QT) diff --git a/DetectionMetrics/Deps/ros/CMakeLists.txt b/DetectionMetrics/Deps/ros/CMakeLists.txt new file mode 100644 index 00000000..812469f4 --- /dev/null +++ b/DetectionMetrics/Deps/ros/CMakeLists.txt @@ -0,0 +1,33 @@ +OPTION(ENABLE_ROS "Enable ROS compatibility modules" ON) + +SET (CMAKE_PREFIX_PATH "/opt/ros/kinetic;/opt/ros/lunar;/opt/ros/jade;/opt/ros/melodic;") + +if (ENABLE_ROS) + find_package(roscpp QUIET) + + if(roscpp_FOUND) + + MESSAGE("*** ROS LIBRARIES FOUND : ${roscpp_INCLUDE_DIRS}") + + set (CATKIN_BUILD_BINARY_PACKAGE 1) #doesn't add ROS environment files to package + find_package(catkin REQUIRED COMPONENTS + roscpp + std_msgs + cv_bridge + image_transport + ) + + SET(ros_INCLUDE_DIRS ${roscpp_INCLUDE_DIRS} ${std_msgs_INCLUDE_DIRS} ${cv_bridge_INCLUDE_DIRS} ${image_transport_INCLUDE_DIRS}) + SET(ros_LIBRARIES ${roscpp_LIBRARIES} ${std_msgs_LIBRARIES} ${cv_bridge_LIBRARIES} ${image_transport_LIBRARIES}) + + list(APPEND DEPS ros-kinetic-roscpp ros-kinetic-std-msgs ros-kinetic-cv-bridge ros-kinetic-image-transport ros-kinetic-roscpp-core ) + + + # list(APPEND DEPS_DEV ) + else() + MESSAGE(WARNING "*** ROS LIBRARIES NOT FOUND. 
ROS SUPPORT WILL BE DISABLED") + endif() +else() + SET(roscpp_FOUND FALSE) + MESSAGE("ROS is Disabled by default, Set ENABLE_ROS=ON to enable") +endif() diff --git a/DetectionMetrics/Deps/yaml-cpp/CMakeLists.txt b/DetectionMetrics/Deps/yaml-cpp/CMakeLists.txt new file mode 100644 index 00000000..c34ec208 --- /dev/null +++ b/DetectionMetrics/Deps/yaml-cpp/CMakeLists.txt @@ -0,0 +1,9 @@ +find_package(yaml-cpp REQUIRED) + +if (YAML_CPP_INCLUDE_DIR) + message("***YAML-CPP FOUND: ${YAML_CPP_INCLUDE_DIR}") + list(APPEND DEPS libyaml-cpp0.5v5) + list(APPEND DEPS_DEV libyaml-cpp-dev) +else() + message ("*** YAML-CPP NOT FOUND") +endIF() diff --git a/DetectionMetrics/DetectionMetricsLib/CMakeLists.txt b/DetectionMetrics/DetectionMetricsLib/CMakeLists.txt new file mode 100644 index 00000000..50cd0570 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/CMakeLists.txt @@ -0,0 +1,30 @@ + +add_subdirectory(Common) +add_subdirectory(DatasetConverters) +add_subdirectory(Detectors) +add_subdirectory(FrameworkEvaluator) +add_subdirectory(GenerationUtils) +add_subdirectory(Regions) +add_subdirectory(Utils) + +add_library(DetectionMetrics SHARED + $ + $ + $ + $ + $ + $ + $ + ) + + +TARGET_LINK_LIBRARIES(DetectionMetrics + ${OpenCV_LIBRARIES} + ${EXTRA_LIBS} + ${depthLib_LIBRARIES} + ${JderobotInterfaces_LIBRARIES} + ${comm_LIBRARIES} + ${config_LIBRARIES} + ${PYTHON_LIBRARIES} + ${GLOG_LIBRARIES} + ) diff --git a/DetectionMetrics/DetectionMetricsLib/Common/CMakeLists.txt b/DetectionMetrics/DetectionMetricsLib/Common/CMakeLists.txt new file mode 100644 index 00000000..78d5794f --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Common/CMakeLists.txt @@ -0,0 +1,15 @@ +SET ( Common_SOURCE_FILES + Matrix.h + EvalMatrix + Sample +) + +ADD_LIBRARY(DetectionMetrics_Common OBJECT ${Common_SOURCE_FILES}) + +TARGET_INCLUDE_DIRECTORIES( DetectionMetrics_Common PUBLIC + ${OpenCV_INCLUDE_DIRS} + ${GLOG_INCLUDE_DIRS} + ${CMAKE_CURRENT_LIST_DIR} + ${DetectionMetrics_INCLUDE_DIR} + 
${QT_INCLUDE_DIRS} +) diff --git a/DetectionMetrics/DetectionMetricsLib/Common/EvalMatrix.cpp b/DetectionMetrics/DetectionMetricsLib/Common/EvalMatrix.cpp new file mode 100644 index 00000000..422cd232 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Common/EvalMatrix.cpp @@ -0,0 +1,19 @@ +#include "EvalMatrix.h" +#include +#include +void Eval::printMatrix(Eval::EvalMatrix matrix) { + + LOG(INFO) << "Printing Matrix" << '\n'; + + for (auto itr = matrix.begin(); itr != matrix.end(); itr++) { + LOG(INFO) << "ClassID: " << itr->first <<'\n'; + for (auto iter = itr->second.begin(); iter != itr->second.end(); iter++ ) { + for (auto iterate = iter->begin(); iterate != iter->end(); iterate++) { + LOG(INFO) << *iterate << " "; + } + LOG(INFO) << '\n'; + } + } + + +} diff --git a/DetectionMetrics/DetectionMetricsLib/Common/EvalMatrix.h b/DetectionMetrics/DetectionMetricsLib/Common/EvalMatrix.h new file mode 100644 index 00000000..8295157f --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Common/EvalMatrix.h @@ -0,0 +1,16 @@ +#ifndef SAMPLERGENERATOR_EVALMATRIX_H +#define SAMPLERGENERATOR_EVALMATRIX_H + +#include "Matrix.h" +#include + +namespace Eval { + +using EvalMatrix = std::map>; + +using DetectionsMatcher = std::map>>; + +void printMatrix(EvalMatrix matrix); + +} +#endif //SAMPLERGENERATOR_MATRIX_H diff --git a/DetectionMetrics/DetectionMetricsLib/Common/Matrix.h b/DetectionMetrics/DetectionMetricsLib/Common/Matrix.h new file mode 100644 index 00000000..1ab082ac --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Common/Matrix.h @@ -0,0 +1,9 @@ +#ifndef SAMPLERGENERATOR_MATRIX_H +#define SAMPLERGENERATOR_MATRIX_H + +#include + +template +using Matrix = std::vector>; + +#endif //SAMPLERGENERATOR_MATRIX_H diff --git a/DetectionMetrics/DetectionMetricsLib/Common/Sample.cpp b/DetectionMetrics/DetectionMetricsLib/Common/Sample.cpp new file mode 100644 index 00000000..bcd438b2 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Common/Sample.cpp @@ 
-0,0 +1,478 @@ +// +// Created by frivas on 22/01/17. +// + +#include "Sample.h" +#include +#include +#include + + +// Constructor which creates many variables +Sample::Sample() { + this->colorImagePath=""; + this->depthImagePath=""; + this->rectRegions=RectRegionsPtr(new RectRegions()); + this->contourRegions=ContourRegionsPtr(new ContourRegions()); + this->rleRegions=RleRegionsPtr(new RleRegions()); +} + +//Constructor +Sample::Sample(const cv::Mat &colorImage) { + colorImage.copyTo(this->colorImage); +} + +//Constructor +Sample::Sample(const cv::Mat &colorImage, const RectRegionsPtr &rectRegions) { + this->setColorImage(colorImage); + this->setRectRegions(rectRegions); +} + +//Constructor +Sample::Sample(const cv::Mat &colorImage, const ContourRegionsPtr &contourRegions) { + this->setColorImage(colorImage); + this->setContourRegions(contourRegions); +} + +//Constructor +Sample::Sample(const cv::Mat &colorImage, const RectRegionsPtr &rectRegions, const ContourRegionsPtr &contourRegions) { + this->setColorImage(colorImage); + this->setRectRegions(rectRegions); + this->setContourRegions(contourRegions); +} + +//Constructor +Sample::Sample(const cv::Mat &colorImage, const cv::Mat &depthImage, const RectRegionsPtr &rectRegions) { + this->setColorImage(colorImage); + this->setDepthImage(depthImage); + this->setRectRegions(rectRegions); +} + +//Constructor +Sample::Sample(const cv::Mat &colorImage, const cv::Mat &depthImage, const ContourRegionsPtr &contourRegions) { + this->setColorImage(colorImage); + this->setDepthImage(depthImage); + this->setContourRegions(contourRegions); +} + +//Constructor +Sample::Sample(const cv::Mat &colorImage, const cv::Mat &depthImage, const RectRegionsPtr &rectRegions, + const ContourRegionsPtr &contourRegions) { + this->setColorImage(colorImage); + this->setDepthImage(depthImage); + this->setRectRegions(rectRegions); + this->setContourRegions(contourRegions); + +} + +// Set the dimensions of the sample +void Sample::setSampleDims(const 
int width, const int height) { + this->width = width; + this->height = height; +} + +// Set the image colorImage member to the passed image +void Sample::setColorImage(const cv::Mat &image) { + image.copyTo(this->colorImage); +} + +void Sample::clearColorImage() { // For better memory management + if (!this->colorImage.empty()) + this->colorImage.release(); +} + +void Sample::clearDepthImage() { // For better memory management + if (!this->depthImage.empty()) + this->depthImage.release(); +} + +// Set the depthImage +void Sample::setDepthImage(const cv::Mat &image) { + image.copyTo(this->depthImage); +} + +// Set the RectRegions member to the new regions +void Sample::setRectRegions(const RectRegionsPtr ®ions) { + this->rectRegions=regions; +} + +void Sample::setContourRegions(const ContourRegionsPtr ®ions) { + this->contourRegions=regions; +} + +void Sample::setRleRegions(const RleRegionsPtr& regions) { + this->rleRegions=regions; +} + + +int Sample::getSampleWidth() const { + if (this->width != -1) + return this->width; + + if (!this->getColorImage().empty()) + return this->getColorImage().cols; + + if (!this->getDepthImage().empty()) + return this->getDepthImage().cols; + + return -1; +} + +int Sample::getSampleHeight() const { + if (this->height != -1) + return this->height; + + if (!this->getColorImage().empty()) + return this->getColorImage().rows; + + if (!this->getDepthImage().empty()) + return this->getDepthImage().rows; + + return -1; +} + +RectRegionsPtr Sample::getRectRegions() const{ + return this->rectRegions; +} + +ContourRegionsPtr Sample::getContourRegions() const{ + return this->contourRegions; +} + +RleRegionsPtr Sample::getRleRegions() const{ + return this->rleRegions; +} + +std::string Sample::getColorImagePath() const{ + if (this->colorImagePath.empty()) + throw std::invalid_argument("Color Image Path not set in this Sample"); + + return this->colorImagePath; +} + +std::string Sample::getDepthImagePath() const{ + if 
(this->depthImagePath.empty()) + throw std::invalid_argument("Depth Image Path not set in this Sample"); + + return this->depthImagePath; +} + +cv::Mat Sample::getColorImage() const{ + if (this->colorImage.empty()) { + cv::Mat image = cv::imread(this->colorImagePath); + return image; + } + else + return this->colorImage.clone(); +} + +cv::Mat Sample::getDepthImage() const{ + if (this->depthImage.empty()) { + cv::Mat image = cv::imread(this->depthImagePath); + return image; + } + else + return this->depthImage.clone(); +} + +// Constructor +Sample::Sample(const std::string &path, const std::string &id,bool loadDepth) { + this->colorImagePath=path + "/" + id + ".png"; + + if (boost::filesystem::exists(boost::filesystem::path(path + "/" + id + ".json"))) + this->rectRegions=RectRegionsPtr(new RectRegions(path + "/" + id + ".json")); + else{ + LOG(ERROR) << "Error " + id + " sample has not associated detection"; + } + + if (boost::filesystem::exists(boost::filesystem::path(path + "/" + id + "-region.json"))) + this->contourRegions=ContourRegionsPtr(new ContourRegions(path + "/" + id + "-region.json")); + + if (loadDepth) { + this->depthImagePath=path + "/" + id + "-depth.png"; + } +} + +cv::Mat Sample::getSampledColorImage() const{ + cv::Mat image = this->getColorImage(); + if (this->rectRegions) + this->rectRegions->drawRegions(image); + if (this->contourRegions) + this->contourRegions->drawRegions(image); + if (this->rleRegions) + this->rleRegions->drawRegions(image); + return image; +} + +cv::Mat Sample::getSampledDepthImage() const{ + cv::Mat image =this->getDepthImage(); + if (this->rectRegions) + this->rectRegions->drawRegions(image); + if (this->contourRegions) + this->contourRegions->drawRegions(image); + return image; +} + +void Sample::save(const std::string &outPath, int id) { + std::stringstream ss ; + ss << std::setfill('0') << std::setw(5) << id; + this->save(outPath,ss.str()); +} + +void Sample::save(const std::string &outPath, const std::string 
&filename) { + + if (this->colorImage.empty()){ + if (!this->colorImagePath.empty()) + if (boost::filesystem::exists(boost::filesystem::path(this->colorImagePath))) { + cv::Mat image = cv::imread(this->colorImagePath); + cv::imwrite(outPath + "/" + filename + ".png", image); + } + } + else + cv::imwrite(outPath + "/" + filename + ".png",this->colorImage); + + if (this->depthImage.empty()){ + if (boost::filesystem::exists(boost::filesystem::path(this->depthImagePath))) { + cv::Mat image = cv::imread(this->depthImagePath); + cv::imwrite(outPath + "/" + filename + "-depth.png", image); + } + } + else + cv::imwrite(outPath + "/" + filename + "-depth.png", depthImage); + + bool ifRegions = false; + if(!rectRegions->getRegions().empty()) { + rectRegions->saveJson(outPath + "/" + filename + ".json"); + ifRegions = true; + } + if (!contourRegions->getRegions().empty()) { + contourRegions->saveJson(outPath + "/" + filename + "-region.json"); + ifRegions = true; + } + + if (!ifRegions) + LOG(WARNING) << "Both ContourRegions and Rect Regions are not present, hence not saving any regions for Sample: " << this->sampleID; +} + +void Sample::save(const std::string &outPath) { + if (this->sampleID.size() != 0 ){ + this->save(outPath,this->sampleID); + } + else{ + LOG(ERROR) << "No sample id is defined, this sample will not be saved"; + } + +} + +// Print the detections +void Sample::print() { + LOG(INFO) << "Printing Regions with Classes" << '\n'; + std::vector regionsToPrint = this->rectRegions->getRegions(); + for (auto it = regionsToPrint.begin(); it != regionsToPrint.end(); it++) { + LOG(INFO) << "Class: " << it->classID << '\n'; + LOG(INFO) << "Confidence: " << it->confidence_score << '\n'; + LOG(INFO) << "uniqObjectID" << it->uniqObjectID <<'\n'; + LOG(INFO) << "BBOX" << it->region.x << it->region.y << it->region.width << it->region.height << '\n'; + } +} + + +// To get a positive number +int mod(int test){ + if(test<0) + return -test; + return test; +} + +// Adds detections 
to the frame +void Sample::AddDetection(cv::Rect &detection,std::vector *classNames){ + RectRegion temp; + temp.region.x = detection.x; + temp.region.y = detection.y; + temp.region.width = detection.width; + temp.region.height = detection.height; + // To get the class names and probability from the user. + AddClass *a = new AddClass(); + a->SetInit(classNames,&temp.classID,&temp.confidence_score); + a->show(); + a->wait(); + if(temp.classID.length()) + this->rectRegions->regions.push_back(temp); +} + +// Adjust the bounding boxes , and if successfully changed any boundary return true +// else false. +bool Sample::AdjustBox(int x, int y){ + // x and y are current mouse pointer positions + // Find the corner which is nearer to the mouse pointer + for (auto it = this->rectRegions->regions.begin(); it != this->rectRegions->regions.end(); it++) { + if(mod(it->region.x-x)<20 && mod(it->region.y-y)<20){ + it->region.width -= (x-it->region.x); + it->region.height -= (y-it->region.y); + it->region.x=x; + it->region.y=y; + return true; + } + else if(mod(it->region.x+it->region.width-x)<20 && mod(it->region.y-y)<20){ + it->region.width += (x-(it->region.x+it->region.width)); + it->region.height -= (y-it->region.y); + it->region.y=y; + return true; + } + else if(mod(it->region.x-x)<20 && mod(it->region.y+it->region.height-y)<20){ + it->region.width -= (x-it->region.x); + it->region.height += (y-(it->region.y+it->region.height)); + it->region.x=x; + return true; + } + else if(mod(it->region.x+it->region.width-x)<20 && mod(it->region.y+it->region.height-y)<20){ + it->region.width += (x-it->region.x-it->region.width); + it->region.height += (y-it->region.y-it->region.height); + return true; + } + } + return false; +} + +bool Sample::show(const std::string readerImplementation, const std::string windowName, const int waitKey, const bool showDepth) { + cv::Mat image = this->getSampledColorImage(); + cv::imshow(windowName, image); + + if (showDepth) { + + if 
(!(this->isDepthImageValid())) { + LOG(WARNING)<< "Depth Images not available! Please verify your dataset or uncheck 'Show Depth Images'"; + return false; + } + + cv::Mat depth_color; + + if (readerImplementation == "spinello") + depth_color = this->getSampledDepthColorMapImage(-0.9345, 1013.17); + else + depth_color = this->getSampledDepthColorMapImage(); + cv::imshow("Depth Color Map", depth_color); + } + + int key = cv::waitKey(waitKey); + if (char(key) == 'q' || key == 27) { + cv::destroyWindow(windowName); + return false; + } + + return true; + +} + +bool Sample::isDepthImageValid() { + return !this->depthImage.empty(); +} + +bool Sample::isValid() { + return !this->colorImage.empty(); +} + +void Sample::filterSamplesByID(std::vector filteredIDS) { + if (this->rectRegions) + this->rectRegions->filterSamplesByID(filteredIDS); + if (contourRegions) + this->contourRegions->filterSamplesByID(filteredIDS); +} + +void Sample::setColorImagePath(const std::string &imagePath) { + this->colorImagePath=imagePath; +} + +void Sample::setDepthImage(const std::string &imagePath) { + this->depthImagePath=imagePath; +} + +void Sample::setSampleID(const std::string &sampleID) { + this->sampleID=sampleID; +} + +std::string Sample::getSampleID() { + return this->sampleID; +} + +Sample::~Sample() { + if (!this->colorImage.empty()){ + this->colorImage.release(); + } + if (this->depthImage.empty()){ + this->depthImage.release(); + } + +} + +cv::Mat Sample::getDeptImageGrayRGB() const { + cv::Mat image = this->getDepthImage(); + std::vector imageVector; + cv::split(image,imageVector); + + std::vector grayRGB_vector; + grayRGB_vector.push_back(imageVector[0]); + grayRGB_vector.push_back(imageVector[0]); + grayRGB_vector.push_back(imageVector[0]); + + cv::Mat grayRGB; + cv::merge(grayRGB_vector,grayRGB); + return grayRGB; + +} + +cv::Mat Sample::getDepthColorMapImage(double alpha, double beta) const { + cv::Mat image = getDepthImage(); + double minVal, maxVal; + + minMaxLoc( image, 
&minVal, &maxVal ); + + cv::Mat mask; + cv::threshold(image, mask, maxVal - 1, 255, cv::THRESH_BINARY_INV); + mask.convertTo(mask, CV_8UC1); + + image.convertTo(image, CV_8UC1, alpha, beta); + + cv::Mat colorMappedDepth; + cv::applyColorMap(image, image, cv::COLORMAP_RAINBOW); + image.copyTo(colorMappedDepth, mask); + + return colorMappedDepth;; +} + +cv::Mat Sample::getSampledDepthColorMapImage(double alpha, double beta) const { + cv::Mat image = getDepthColorMapImage(alpha, beta); + if (this->rectRegions) + this->rectRegions->drawRegions(image); + if (this->contourRegions) + this->contourRegions->drawRegions(image); + return image; +} + + +// Set if mouse is clicked +void Sample::SetMousy(bool mousy){ + this->mousy = mousy; +} + +// Set the current state of mouse +bool Sample::GetMousy(){ + return this->mousy; +} + +// This function is used to change the classes of wrongly classified detections +void Sample::SetClassy(int x , int y, std::vector *classNames){ + // Check if the user clicked inside certain boundaries + for (auto it = this->rectRegions->regions.begin(); it != this->rectRegions->regions.end(); it++) + if(it->nameRect.xnameRect.x+it->nameRect.width>x) + if(it->nameRect.ynameRect.y+it->nameRect.height>y){ + LOG(INFO) << "I'm inside rectName" << std::endl; + LOG(INFO) << "ClassId : " << it->classID <SetInit(&it->classID,classNames,&it->classID); + w->show(); + w->wait(); + break; + } +} diff --git a/DetectionMetrics/DetectionMetricsLib/Common/Sample.h b/DetectionMetrics/DetectionMetricsLib/Common/Sample.h new file mode 100644 index 00000000..ecd0743e --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Common/Sample.h @@ -0,0 +1,88 @@ +// +// Created by frivas on 22/01/17. +// + +// This is the main variable which is shared across different components while +// running DetectionMetrics. 
+ +#ifndef SAMPLERGENERATOR_SAMPLE_H +#define SAMPLERGENERATOR_SAMPLE_H + +#include +#include +#include +#include +#include +#include + + + +struct Sample { + Sample(const std::string& path, const std::string& id, bool loadDepth=true); + Sample(); + Sample(const cv::Mat& colorImage); + Sample(const cv::Mat& colorImage, const RectRegionsPtr& rectRegions); + Sample(const cv::Mat& colorImage, const ContourRegionsPtr& contourRegions); + Sample(const cv::Mat& colorImage, const RectRegionsPtr& rectRegions, const ContourRegionsPtr& contourRegions); + Sample(const cv::Mat& colorImage, const cv::Mat& depthImage, const RectRegionsPtr& rectRegions); + Sample(const cv::Mat& colorImage, const cv::Mat& depthImage, const ContourRegionsPtr& contourRegions); + Sample(const cv::Mat& colorImage, const cv::Mat& depthImage, const RectRegionsPtr& rectRegions, const ContourRegionsPtr& contourRegions); + ~Sample(); + void setSampleDims(const int width, const int height); + void setColorImagePath(const std::string& imagePath); + void setColorImage(const cv::Mat& image); + void setDepthImage(const cv::Mat& image); + void setDepthImage(const std::string& imagePath); + void setRectRegions(const RectRegionsPtr& regions); + void setContourRegions(const ContourRegionsPtr& regions); + void setRleRegions(const RleRegionsPtr& regions); + void setSampleID(const std::string& sampleID); + void clearColorImage(); // For better memory management + void clearDepthImage(); // For better memeory management + + int getSampleWidth()const; + int getSampleHeight()const; + RectRegionsPtr getRectRegions()const; + ContourRegionsPtr getContourRegions()const; + RleRegionsPtr getRleRegions()const; + std::string getColorImagePath() const; + std::string getDepthImagePath() const; + cv::Mat getColorImage() const; + cv::Mat getDepthImage() const; + cv::Mat getDeptImageGrayRGB() const; + cv::Mat getSampledColorImage() const; + cv::Mat getSampledDepthImage() const; + cv::Mat getSampledDepthColorMapImage(double alpha = 1 , 
double beta = 0) const; + cv::Mat getDepthColorMapImage(double alpha = 1 , double beta = 0) const; + void save(const std::string& outPath, int id); + void save(const std::string& outPath, const std::string& filename); + void save(const std::string& outPath); + void print(); + bool show(const std::string readerImplementation, const std::string windowName, const int waitKey, const bool showDepth); + static void CallBackFunc(int event, int x, int y, int flags, void* userdat); + + bool isDepthImageValid(); + bool isValid(); + void filterSamplesByID(std::vector filteredIDS); + std::string getSampleID(); + void SetMousy(bool mousy); + bool GetMousy(); + bool AdjustBox(int x,int y); + void SetClassy(int x, int y,std::vector *classNames); + void AddDetection(cv::Rect &detection,std::vector *classNames); +private: + RectRegionsPtr rectRegions; + ContourRegionsPtr contourRegions; + RleRegionsPtr rleRegions; + cv::Mat colorImage; + std::string colorImagePath; + cv::Mat depthImage; + std::string depthImagePath; + std::string sampleID; + int width = -1; + int height = -1; + bool mousy; +}; + + +#endif //SAMPLERGENERATOR_SAMPLE_H diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/CMakeLists.txt b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/CMakeLists.txt new file mode 100644 index 00000000..44d4e293 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/CMakeLists.txt @@ -0,0 +1,48 @@ +SET (DatasetConverters_SOURCE_FILES + DatasetConverter + readers/DatasetReader + readers/OwnDatasetReader + writers/YoloDatasetWriter + readers/YoloDatasetReader + writers/COCODatasetWriter + readers/COCODatasetReader + readers/PascalVOCDatasetReader + readers/PrincetonDatasetReader + writers/PascalVOCDatasetWriter + readers/ImageNetDatasetReader + readers/SpinelloDatasetReader + readers/OpenImagesDatasetReader + writers/OpenImagesDatasetWriter + ClassTypeGeneric + ClassTypeOwn + ClassTypeMapper + ClassType + Tree + readers/GenericDatasetReader + 
writers/DatasetWriter + writers/OwnDatasetWriter + writers/GenericDatasetWriter + liveReaders/GenericLiveReader + liveReaders/JderobotReader + liveReaders/RecorderReader + liveReaders/CameraReader + liveReaders/VideoReader + liveReaders/VideoReader +) + +ADD_LIBRARY(DetectionMetrics_DatasetConverters OBJECT ${DatasetConverters_SOURCE_FILES}) + +TARGET_INCLUDE_DIRECTORIES( DetectionMetrics_DatasetConverters PUBLIC + ${OpenCV_INCLUDE_DIRS} + ${Boost_INCLUDE_DIR} + ${config_INCLUDE_DIRS} + ${comm_INCLUDE_DIRS} + ${ros_INCLUDE_DIRS} + ${GLOG_INCLUDE_DIRS} + ${utils_INCLUDE_DIRS} + ${INTERFACES_CPP_DIR} + ${jderobottypes_INCLUDE_DIRS} + ${CMAKE_CURRENT_LIST_DIR} + ${DetectionMetrics_INCLUDE_DIR} + ${QT_INCLUDE_DIRS} +) diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/ClassType.cpp b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/ClassType.cpp new file mode 100644 index 00000000..2410ddc4 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/ClassType.cpp @@ -0,0 +1,56 @@ +// +// Created by frivas on 2/02/17. 
+// + +#include "ClassType.h" + +float colorsClass[6][3] = { {1,0,1}, {0,0,1},{0,1,1},{0,1,0},{1,1,0},{1,0,0} }; + + +cv::Scalar ClassType::getColor() { + + auto itFind = std::find(this->classes.begin(), this->classes.end(),this->classID); + int id = std::distance(this->classes.begin(),itFind); + int nClasses= this->classes.size(); + + int offset = id*123457 % nClasses; + float red = _get_color(2,offset,nClasses); + float green = _get_color(1,offset,nClasses); + float blue = _get_color(0,offset,nClasses); + return cv::Scalar(red*255, green*255,blue*255); +} + +float ClassType::_get_color(int c, int x, int max) { + + float ratio = ((float)x/max)*5; + int i = floor(ratio); + int j = ceil(ratio); + ratio -= i; + float r = (1-ratio) * colorsClass[i][c] + ratio*colorsClass[j][c]; + //printf("%f\n", r); + return r; +} + +//ClassType::ClassType(int id) { +// fillStringClassesVector(); +// this->classID=this->classes[id]; +//} +// +//ClassType::ClassType(const std::string &classID) { +// fillStringClassesVector(); +// this->classID=classID; +//} + +std::string ClassType::getClassString() { + return this->classID; +} + +int ClassType::getClassID() { + auto itFind = std::find(this->classes.begin(), this->classes.end(),this->classID); + int id = std::distance(this->classes.begin(),itFind); + return id; +} + +std::vector ClassType::getAllAvailableClasses() { + return this->classes; +} diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/ClassType.h b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/ClassType.h new file mode 100644 index 00000000..1a6891ad --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/ClassType.h @@ -0,0 +1,25 @@ +// +// Created by frivas on 2/02/17. 
+// + +#ifndef SAMPLERGENERATOR_CLASSTYPE_H +#define SAMPLERGENERATOR_CLASSTYPE_H + +#include + +struct ClassType { + + cv::Scalar getColor(); + std::string getClassString(); + int getClassID(); + std::vector getAllAvailableClasses(); +protected: + float _get_color(int c, int x, int max); + std::vector classes; + std::string classID; + + +}; + + +#endif //SAMPLERGENERATOR_CLASSTYPE_H diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/ClassTypeGeneric.cpp b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/ClassTypeGeneric.cpp new file mode 100644 index 00000000..5fff1170 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/ClassTypeGeneric.cpp @@ -0,0 +1,64 @@ +// +// Created by frivas on 9/02/17. +// + +#include +#include "ClassTypeGeneric.h" + +/* + Constructor function when id is not given. + Only a class vector containing the classes + in the file is created. +*/ +ClassTypeGeneric::ClassTypeGeneric(const std::string &classesFile) { + // Create a vector of strings with classes in it. + fillStringClassesVector(classesFile); +} + +/* + Constructor function when id is given. + Not only a class vector is created but + also the classID is initialized. +*/ +ClassTypeGeneric::ClassTypeGeneric(const std::string &classesFile, int id) { + // Create a vector of strings with classes in it. + fillStringClassesVector(classesFile); + this->classID=this->classes[id]; +} + + +/* + Loop over all the classes available in the classesFile and + store them in vector of strings, named "classes". 
+*/ +void ClassTypeGeneric::fillStringClassesVector(const std::string &classesFile) { + std::ifstream labelFile(classesFile); + std::string data; + while(getline(labelFile,data)) { + this->classes.push_back(data); + } +} + +// Update/Set the classID +void ClassTypeGeneric::setId(int id) { + this->classID=this->classes[id]; +} + +void ClassTypeGeneric::setStringId(std::string id) { + bool found = false; + int i = 0; + while (i < this->classes.size() && !found) { + std::string className = this->classes[i].substr(0, this->classes[i].find(",")); + if (className == id) { + found = true; + this->classID = this->classes[i].substr(this->classes[i].find(",") + 1, this->classes[i].size()-1); + } + i++; + } +} + +// Print all the classes +void ClassTypeGeneric::Print(){ + for(auto itr=this->classes.begin();itr!=this->classes.end();itr++) + std::cout << *itr << std::endl; +} diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/ClassTypeGeneric.h b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/ClassTypeGeneric.h new file mode 100644 index 00000000..e9dc4712 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/ClassTypeGeneric.h @@ -0,0 +1,21 @@ +// +// Created by frivas on 9/02/17. 
+// + +#ifndef SAMPLERGENERATOR_CLASSTYPEGENERIC_H +#define SAMPLERGENERATOR_CLASSTYPEGENERIC_H + + +#include "ClassType.h" + +struct ClassTypeGeneric: public ClassType{ + ClassTypeGeneric(const std::string& classesFile); + ClassTypeGeneric(const std::string& classesFile, int id); + void setId(int id); + void setStringId(std::string id); + void fillStringClassesVector(const std::string& classesFile); + void Print(); +}; + + +#endif //SAMPLERGENERATOR_CLASSTYPEGENERIC_H diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/ClassTypeMapper.cpp b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/ClassTypeMapper.cpp new file mode 100644 index 00000000..668169e0 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/ClassTypeMapper.cpp @@ -0,0 +1,88 @@ +#include "ClassTypeMapper.h" + +ClassTypeMapper::ClassTypeMapper(const std::string& classNamesFile) { + + this->root = Tree("../ClassMappingHierarchy.xml"); // Initializing tree for mapping synonmys + fillStringClassesVector(classNamesFile); + +} + +ClassTypeMapper::ClassTypeMapper() { + +} + +void ClassTypeMapper::fillStringClassesVector(const std::string &classesFile) { + std::ifstream labelFile(classesFile); + std::string data; + while(getline(labelFile,data)) { + this->classes.push_back(data); + } +} + +bool ClassTypeMapper::mapString(std::string &className) { + std::vector::iterator it; + + it = find (this->classes.begin(), this->classes.end(), className); + + // For Open Images Dataset + int i = 0; + bool found = false; + while (i < this->classes.size() && !found) { + std::string splittedClass = this->classes[i].substr(this->classes[i].find(",") + 1, this->classes[i].size()-2); + std::transform(splittedClass.begin(), splittedClass.end(), splittedClass.begin(), [](unsigned char c){ return std::tolower(c); }); + splittedClass = splittedClass.substr(0, splittedClass.size()-1); + + if (splittedClass == className) { + found = true; + this->classID = this->classes[i]; + return true; 
+ } + i++; + } + + if (it != this->classes.end()) { + this->classID = className; + return true; //Class Name already present in dataset file + } + + std::vector syns = this->root.getImmediateSynonmys(className); + std::vector::iterator itr; + + if (!syns.empty()) { + for (itr = syns.begin(); itr != syns.end(); itr++) { + it = find (this->classes.begin(), this->classes.end(), *itr); + if (it != this->classes.end()) { + this->classID = *itr; + return true; + } + } + } + + return false; +} + +std::unordered_map ClassTypeMapper::mapFile(std::string classNamesFile) { + + std::unordered_map classMap; + + std::ifstream myReadFile; + myReadFile.open(classNamesFile); + std::string output; + + if (myReadFile.is_open()) { + while (getline(myReadFile, output)) { + if (mapString(output)) + classMap.insert(std::pair(output, this->classID)); + else + classMap.insert(std::pair(output, "")); + //std::cout << output << '\n'; // Prints our STRING. + + } + + } + + myReadFile.close(); + + return classMap; + +} diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/ClassTypeMapper.h b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/ClassTypeMapper.h new file mode 100644 index 00000000..7725742d --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/ClassTypeMapper.h @@ -0,0 +1,22 @@ +#ifndef SAMPLERGENERATOR_CLASSTYPEMAPPER_H +#define SAMPLERGENERATOR_CLASSTYPEMAPPER_H + + +#include "ClassType.h" +#include "Tree.h" +#include +#include + +struct ClassTypeMapper: public ClassType{ + ClassTypeMapper(const std::string& classNamesFile); + ClassTypeMapper(); + void fillStringClassesVector(const std::string &classesFile); + bool mapString(std::string &className); + std::unordered_map mapFile(std::string classNamesFile); + +private: + Tree root; +}; + + +#endif //SAMPLERGENERATOR_CLASSTYPEMAPPER_H diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/ClassTypeOwn.cpp b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/ClassTypeOwn.cpp 
new file mode 100644 index 00000000..0d731a2f --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/ClassTypeOwn.cpp @@ -0,0 +1,103 @@ +// +// Created by frivas on 2/02/17. +// + +#include "ClassTypeOwn.h" + + + +ClassTypeOwn::ClassTypeOwn(const std::string& classID) { + fillStringClassesVector(); + this->classID = std::string(classID); +} + +ClassTypeOwn::ClassTypeOwn(int id){ + fillStringClassesVector(); + this->classID=this->classes[id]; +} + +void ClassTypeOwn::fillStringClassesVector() { + classes.push_back("aeroplane"); + classes.push_back("apple"); + classes.push_back("backpack"); + classes.push_back("banana"); + classes.push_back("baseball bat"); + classes.push_back("baseball glove"); + classes.push_back("bear"); + classes.push_back("bed"); + classes.push_back("bench"); + classes.push_back("bicycle"); + classes.push_back("bird"); + classes.push_back("boat"); + classes.push_back("book"); + classes.push_back("bottle"); + classes.push_back("bowl"); + classes.push_back("broccoli"); + classes.push_back("bus"); + classes.push_back("cake"); + classes.push_back("car"); + classes.push_back("carrot"); + classes.push_back("cat"); + classes.push_back("cell phone"); + classes.push_back("chair"); + classes.push_back("clock"); + classes.push_back("cow"); + classes.push_back("cup"); + classes.push_back("diningtable"); + classes.push_back("dog"); + classes.push_back("donut"); + classes.push_back("elephant"); + classes.push_back("fire hydrant"); + classes.push_back("fork"); + classes.push_back("frisbee"); + classes.push_back("giraffe"); + classes.push_back("hair drier"); + classes.push_back("handbag"); + classes.push_back("horse"); + classes.push_back("hot dog"); + classes.push_back("keyboard"); + classes.push_back("kite"); + classes.push_back("knife"); + classes.push_back("laptop"); + classes.push_back("microwave"); + classes.push_back("motorbike"); + classes.push_back("mouse"); + classes.push_back("orange"); + classes.push_back("oven"); + 
classes.push_back("parking meter"); + classes.push_back("person"); + classes.push_back("pizza"); + classes.push_back("pottedplant"); + classes.push_back("refrigerator"); + classes.push_back("remote"); + classes.push_back("sandwich"); + classes.push_back("scissors"); + classes.push_back("sheep"); + classes.push_back("sink"); + classes.push_back("skateboard"); + classes.push_back("skis"); + classes.push_back("snowboard"); + classes.push_back("sofa"); + classes.push_back("spoon"); + classes.push_back("sports ball"); + classes.push_back("stop sign"); + classes.push_back("suitcase"); + classes.push_back("surfboard"); + classes.push_back("teddy bear"); + classes.push_back("tennis racket"); + classes.push_back("tie"); + classes.push_back("toaster"); + classes.push_back("toilet"); + classes.push_back("toothbrush"); + classes.push_back("traffic light"); + classes.push_back("train"); + classes.push_back("truck"); + classes.push_back("tvmonitor"); + classes.push_back("umbrella"); + classes.push_back("vase"); + classes.push_back("wine glass"); + classes.push_back("zebra"); + classes.push_back("person-falling"); + classes.push_back("person-fall"); + +} diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/ClassTypeOwn.h b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/ClassTypeOwn.h new file mode 100644 index 00000000..5ec25a17 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/ClassTypeOwn.h @@ -0,0 +1,19 @@ +// +// Created by frivas on 2/02/17. 
+// + +#ifndef SAMPLERGENERATOR_CLASSTYPEOWN_H +#define SAMPLERGENERATOR_CLASSTYPEOWN_H + + +#include "ClassType.h" + +struct ClassTypeOwn: public ClassType{ + ClassTypeOwn(int id); + ClassTypeOwn(const std::string& classID); + void fillStringClassesVector(); + +}; + + +#endif //SAMPLERGENERATOR_CLASSTYPEOWN_H diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/DatasetConverter.cpp b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/DatasetConverter.cpp new file mode 100644 index 00000000..0f9afaf8 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/DatasetConverter.cpp @@ -0,0 +1,5 @@ +// +// Created by frivas on 22/01/17. +// + +#include "DatasetConverter.h" diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/DatasetConverter.h b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/DatasetConverter.h new file mode 100644 index 00000000..8c927ba6 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/DatasetConverter.h @@ -0,0 +1,14 @@ +// +// Created by frivas on 22/01/17. +// + +#ifndef SAMPLERGENERATOR_DATASETCONVERTER_H +#define SAMPLERGENERATOR_DATASETCONVERTER_H + + +class DatasetConverter { + +}; + + +#endif //SAMPLERGENERATOR_DATASETCONVERTER_H diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/Tree.cpp b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/Tree.cpp new file mode 100644 index 00000000..f4b9512e --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/Tree.cpp @@ -0,0 +1,120 @@ +#include "Tree.h" +#include + +Tree::Tree(std::string filename) { + boost::property_tree::ptree tree; + + // Parse the XML into the property tree. 
+ boost::property_tree::read_xml(filename, tree); + this->setClassName("root"); + BOOST_FOREACH(boost::property_tree::ptree::value_type &v, tree.get_child("mappings")) { + boost::property_tree::ptree subtree = v.second; + Tree* rootOfSubTree = new Tree(); + fillSubTree(subtree, rootOfSubTree); + rootOfSubTree->parent = this; + this->children.push_back(rootOfSubTree); + } +} + +Tree::Tree() { +} + +void Tree::fillSubTree(boost::property_tree::ptree tree, Tree* root) { + std::string name = tree.get("name"); + root->setClassName(name); + if( tree.count("children") != 0 ) { + BOOST_FOREACH(boost::property_tree::ptree::value_type &v, tree.get_child("children")) { + // The data function is used to access the data stored in a node. + boost::property_tree::ptree subtree = v.second; + Tree* rootOfSubTree = new Tree(); + fillSubTree(subtree, rootOfSubTree); + rootOfSubTree->parent = root; + root->children.push_back(rootOfSubTree); + } + } +} + +void Tree::insertChild(Tree* tree) { + if (tree == NULL) { + throw std::invalid_argument("Children Subtree Passed is NULL"); + } + tree->setParent(this); + this->children.push_back(tree); +} + +void Tree::setParent(Tree* tree) { + if (tree == NULL) { + throw std::invalid_argument("Parent Tree Passed is NULL"); + } + this->parent = tree; +} + +void Tree::printChildren() { + if (this->children.empty()) { + LOG(INFO) << "This is a leaf node and has no children" << '\n'; + return; + } + + std::vector::iterator it; + + for (it = this->children.begin(); it != this->children.end(); it++) { + (*it)->printClassName(); + } +} + +void Tree::printChildrenRecursive() { + if (this->children.empty()) { + //std::cout << "This is a leaf node and has no children" << '\n'; + return; + } + + std::vector::iterator it; + + for (it = this->children.begin(); it != this->children.end(); it++) { + (*it)->printClassName(); + (*it)->printChildrenRecursive(); + } +} + +Tree* Tree::searchTree(std::string className) { + Tree* result = NULL; + if (this->className == 
className) { + result = this; + return result; + } + std::vector::iterator it; + for (it = this->children.begin(); it != this->children.end(); it++ ) { + result = (*it)->searchTree(className); + if(result) { + return result; + } + } + return result; +} + +std::vector Tree::getImmediateSynonmys(std::string passedClassName) { + std::vector results; + Tree* classSubTree = searchTree(passedClassName); + if (classSubTree != NULL) { + //classSubTree->printClassName(); + Tree* parent = classSubTree->parent; + std::vector::iterator it; + if (parent->className != "root") { + for (it = parent->children.begin(); it != parent->children.end(); it++) { + if ((*it) != classSubTree) { // passed tree + results.push_back((*it)->className); + } + } + } + } + + return results; +} + +void Tree::printClassName() { + LOG(INFO) << "Class Name is: " << this->className << '\n'; +} + +void Tree::setClassName(std::string passedClassName) { + this->className = passedClassName; +} diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/Tree.h b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/Tree.h new file mode 100644 index 00000000..6147c2e9 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/Tree.h @@ -0,0 +1,29 @@ +#include +#include +#include +#include +#include +#include + +class Tree { + +public: + // Member Functions() + Tree(std::string filename); + Tree(); + void fillSubTree(boost::property_tree::ptree tree, Tree* root); + void printClassName(); + void printChildren(); + void printChildrenRecursive(); + void setClassName(std::string passedClassName); + void insertChild(Tree* child); + void setParent(Tree* parent); + Tree* searchTree(std::string className); + std::vector getImmediateSynonmys(std::string passedClassName); + +private: + Tree* parent; + std::string className; + std::vector children; + +}; diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/liveReaders/CameraReader.cpp 
b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/liveReaders/CameraReader.cpp new file mode 100644 index 00000000..5efa3189 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/liveReaders/CameraReader.cpp @@ -0,0 +1,57 @@ +#include "CameraReader.h" + +/* Constructor function which starts taking input from the specified webcam(deviceId) */ +CameraReader::CameraReader(const int deviceId):DatasetReader(true) { + LOG(INFO) << "Starting Capture from device with DeviceId: " << deviceId; + + // Start capturing (this is the standard way using OpenCV) + this->cap = new cv::VideoCapture(deviceId); + + // check if we succeeded + if(!cap->isOpened()) + throw std::invalid_argument( "Couldn't open Video file!" ); + + // Don't know why is this set to false ! Need help !! + init=false; +} + +// Destructor -> Stop capturing if the program ends/ or is stopped by the user +CameraReader::~CameraReader() { + LOG(INFO) << "Releasing Camera"; + this->cap->release(); +} + +// Store the information in "sample" which will be later processed. +bool CameraReader::getNextSample(Sample &sample) { + + cv::Mat image; + int count = 0; + try { + // Try capturing the video frame + while (!cap->read(image)) { + LOG(ERROR) << "Frame not valid " << std::endl; + // If we get an invalid frame for more than 5 times continuously, we + // assume the video has ended. + if (count >= 5) { + LOG(INFO) << "Video Ended" << '\n'; + return false; + } // Video Ended + count++; + } + + // If we succeeded in capturing the image, set the sampleID to the sample count + // which was started from the moment we initialized video capturing. + sample.setSampleID(std::to_string(++this->sample_count)); + + //And the image to the captured frame. + sample.setColorImage(image); + return true; + } + + // If something strange happens, log the exception detected. 
+ catch (const std::exception &exc){ + LOG(ERROR) << "Exception Detected: " << exc.what(); + return false; + } + +} diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/liveReaders/CameraReader.h b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/liveReaders/CameraReader.h new file mode 100644 index 00000000..6270153b --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/liveReaders/CameraReader.h @@ -0,0 +1,34 @@ +#ifndef SAMPLERGENERATOR_CAMERAREADER_H +#define SAMPLERGENERATOR_CAMERAREADER_H + + +#include +#include + +class CameraReader: public DatasetReader { +public: + // Constructor + CameraReader(const int deviceId = -1); + + // Destructor + ~CameraReader(); + + //This sample address will be passed by some process like evaluator,detector,etc. + // Later on this "sample" will be processed. + bool getNextSample(Sample &sample); +private: + // Pointer which stores the address of the video being captured. + cv::VideoCapture* cap; + bool init; + + // Counter which will be initialized from the moment we start capturing video. + long long int sample_count = 0; + +}; + +typedef boost::shared_ptr CameraReaderPtr; + + + + +#endif //SAMPLERGENERATOR_CAMERAREADER_H diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/liveReaders/GenericLiveReader.cpp b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/liveReaders/GenericLiveReader.cpp new file mode 100644 index 00000000..a896ebc9 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/liveReaders/GenericLiveReader.cpp @@ -0,0 +1,139 @@ +// +// Created by frivas on 4/02/17. 
+// + +#include +#include "GenericLiveReader.h" + + +GenericLiveReader::GenericLiveReader(const std::string &path, const std::string& classNamesFile, const std::string &readerImplementation, std::map* deployer_params_map, int cameraID) { + configureAvailablesImplementations(this->availableImplementations); + if (std::find(this->availableImplementations.begin(), this->availableImplementations.end(), readerImplementation) != this->availableImplementations.end()){ + imp = getImplementation(readerImplementation); + switch (imp) { + case VIDEO: + this->videoReaderPtr = VideoReaderPtr( new VideoReader(path)); + break; + case CAMERA: + this->cameraReaderPtr = CameraReaderPtr( new CameraReader(cameraID)); + break; + case STREAM: + this->jderobotReaderPtr = JderobotReaderPtr( new JderobotReader(deployer_params_map, path)); + break; +// case SPINELLO: +// this->spinelloDatasetReaderPtr = SpinelloDatasetReaderPtr( new SpinelloDatasetReader(path,classNamesFile)); +// break; +// case OWN: +// this->ownDatasetReaderPtr = OwnDatasetReaderPtr( new OwnDatasetReader(path,classNamesFile)); +// break; + default: + LOG(WARNING)< &paths, const std::string& classNamesFile, + const std::string &readerImplementation) { + configureAvailablesImplementations(this->availableImplementations); + if (std::find(this->availableImplementations.begin(), this->availableImplementations.end(), readerImplementation) != this->availableImplementations.end()){ + imp = getImplementation(readerImplementation); + switch (imp) { +// case YOLO: +// this->yoloDatasetReaderPtr = YoloDatasetReaderPtr( new YoloDatasetReader()); +// for (auto it =paths.begin(), end= paths.end(); it != end; ++it){ +// int idx = std::distance(paths.begin(),it); +// std::stringstream ss; +// ss << idx << "_"; +// this->yoloDatasetReaderPtr->appendDataset(*it,ss.str()); +// } +// break; +// case SPINELLO: +// this->spinelloDatasetReaderPtr = SpinelloDatasetReaderPtr( new SpinelloDatasetReader()); +// for (auto it =paths.begin(), end= 
paths.end(); it != end; ++it){ +// int idx = std::distance(paths.begin(),it); +// std::stringstream ss; +// ss << idx << "_"; +// this->spinelloDatasetReaderPtr->appendDataset(*it,ss.str()); +// } +// break; +// case OWN: +// this->ownDatasetReaderPtr = OwnDatasetReaderPtr( new OwnDatasetReader()); +// for (auto it =paths.begin(), end= paths.end(); it != end; ++it){ +// int idx = std::distance(paths.begin(),it); +// std::stringstream ss; +// ss << idx << "_"; +// this->ownDatasetReaderPtr->appendDataset(*it,ss.str()); +// } +// break; + default: + LOG(WARNING)<< readerImplementation + " is not a valid reader implementation"; + break; + } + } + else{ + LOG(WARNING) << readerImplementation + " is not a valid reader implementation"; + } + + +} + + +void GenericLiveReader::configureAvailablesImplementations(std::vector& data) { + data.push_back("recorder"); +#if defined(JDERROS) || defined(ICE) + data.push_back("stream"); +#endif + data.push_back("video"); + data.push_back("camera"); +} + +LIVEREADER_IMPLEMENTATIONS GenericLiveReader::getImplementation(const std::string& readerImplementation) { +// if (readerImplementation.compare("yolo")==0){ +// return YOLO; +// } +// if (readerImplementation.compare("spinello")==0){ +// return SPINELLO; +// } +// if (readerImplementation.compare("own")==0){ +// return OWN; +// } + if (readerImplementation.compare("video")==0){ + return VIDEO; + } + if (readerImplementation.compare("camera")==0){ + return CAMERA; + } + if (readerImplementation.compare("stream")==0){ + return STREAM; + } +} + +DatasetReaderPtr GenericLiveReader::getReader() { + switch (imp) { +// case YOLO: +// return this->yoloDatasetReaderPtr; +// case SPINELLO: +// return this->spinelloDatasetReaderPtr; + case VIDEO: + return this->videoReaderPtr; + case CAMERA: + return this->cameraReaderPtr; + case STREAM: + return this->jderobotReaderPtr; + default: + LOG(WARNING)< GenericLiveReader::getAvailableImplementations() { + std::vector data; + + 
configureAvailablesImplementations(data); + return data; +} diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/liveReaders/GenericLiveReader.h b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/liveReaders/GenericLiveReader.h new file mode 100644 index 00000000..9f2dacf8 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/liveReaders/GenericLiveReader.h @@ -0,0 +1,46 @@ +// +// Created by frivas on 4/02/17. +// + +#ifndef SAMPLERGENERATOR_LIVEREADER_H +#define SAMPLERGENERATOR_LIVEREADER_H + +#include +#include +#include +#include +#include + +enum LIVEREADER_IMPLEMENTATIONS{RECORDER, STREAM, CAMERA, VIDEO}; + +/* + A generic reader(one for all kind of) which has all kinds of reader + datatypes and implementations in it. +*/ +class GenericLiveReader { +public: + GenericLiveReader(const std::string& path, const std::string& classNamesFile, const std::string& readerImplementation, std::map* deployer_params_map = NULL, int cameraID = -1); + GenericLiveReader(const std::vector& paths,const std::string& classNamesFile, const std::string& readerImplementation); + + DatasetReaderPtr getReader(); + + static std::vector getAvailableImplementations(); + +private: + + // One datatype each, for different kinds of readers. 
+ LIVEREADER_IMPLEMENTATIONS imp; + VideoReaderPtr videoReaderPtr; + CameraReaderPtr cameraReaderPtr; + JderobotReaderPtr jderobotReaderPtr; + + std::vector availableImplementations; + + static void configureAvailablesImplementations(std::vector& data); + LIVEREADER_IMPLEMENTATIONS getImplementation(const std::string& readerImplementation); +}; + + +typedef boost::shared_ptr GenericLiveReaderPtr; + +#endif //SAMPLERGENERATOR_GENERICDATASETREADER_H diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/liveReaders/JderobotReader.cpp b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/liveReaders/JderobotReader.cpp new file mode 100644 index 00000000..213aaaa3 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/liveReaders/JderobotReader.cpp @@ -0,0 +1,93 @@ +// +// Created by frivas on 24/02/17. +// + + +#include "JderobotReader.h" + +JderobotReader::JderobotReader(std::map* deployer_params_map, const std::string& path):DatasetReader(true) { + + Config::Properties cfg; + + if (deployer_params_map == NULL) { + LOG(WARNING) << "null" << '\n'; + int argc=2; + char* argv[2]; + argv[0] = (char*)std::string("myFakeApp").c_str(); + argv[1] = (char*)path.c_str(); + cfg = Config::load(argc, argv); + } else { + LOG(INFO) << "not null" << '\n'; + std::map::iterator iter; + YAML::Node rootNode; // starts out as null + YAML::Node nodeConfig; + + for (iter = deployer_params_map->begin(); iter != deployer_params_map->end(); iter++) { + LOG(INFO) << iter->first << " " << iter->second << '\n'; + nodeConfig[iter->first.c_str()] = iter->second.c_str(); + LOG(INFO) << "here" << '\n'; + } + + rootNode["Camera"] = nodeConfig; + + cfg = Config::Properties(rootNode); + + LOG(INFO) << "done" << '\n'; + + } + + + try{ + + this->jdrc = new Comm::Communicator(cfg); + + this->camera = Comm::getCameraClient(jdrc, "Camera"); + + + } catch (const std::exception& ex) { + LOG(ERROR) << ex.what(); + } + + + /* Ice::CommunicatorPtr ic; + + //todo hack + int 
argc=2; + char* argv[2]; + argv[0] = (char*)std::string("myFakeApp").c_str(); + argv[1] = (char*)IceConfigFile.c_str(); + + + ic = EasyIce::initialize(argc,argv); + Ice::ObjectPrx base = ic->propertyToProxy("Cameraview.Camera.Proxy"); + Ice::PropertiesPtr prop = ic->getProperties(); + + if (0==base) + throw "Could not create proxy"; + + + this->camera = jderobot::CameraClientPtr (new jderobot::cameraClient(ic,"Cameraview.Camera.")); + + if (! this->camera){ + throw "Invalid proxy"; + } + this->camera->start(); +*/ +} + +bool JderobotReader::getNextSample(Sample &sample) { + + + JdeRobotTypes::Image myImage = this->camera->getImage(); + cv::Mat image = myImage.data; + + + if (!image.empty()){ + sample.setSampleID(std::to_string(++this->sample_count)); + cv::cvtColor(image, image, cv::COLOR_RGB2BGR); + sample.setColorImage(image); + //sample.setDepthImage(image); + } + //LOG(INFO) << "Fetching" << '\n'; + +} diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/liveReaders/JderobotReader.h b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/liveReaders/JderobotReader.h new file mode 100644 index 00000000..c56b8484 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/liveReaders/JderobotReader.h @@ -0,0 +1,31 @@ +// +// Created by frivas on 24/02/17. 
+// + +#ifndef SAMPLERGENERATOR_JDEROBOTREADER_H +#define SAMPLERGENERATOR_JDEROBOTREADER_H + + +#include +#include +#include +#include +#include +#include + +class JderobotReader: public DatasetReader { +public: + JderobotReader(std::map* deployer_params_map, const std::string& path); + + bool getNextSample(Sample &sample); +private: + Comm::Communicator* jdrc; + Comm::CameraClient* camera; + long long int sample_count = 0; + +}; + +typedef boost::shared_ptr JderobotReaderPtr; + + +#endif //SAMPLERGENERATOR_JDEROBOTREADER_H diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/liveReaders/RecorderReader.cpp b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/liveReaders/RecorderReader.cpp new file mode 100644 index 00000000..426705d9 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/liveReaders/RecorderReader.cpp @@ -0,0 +1,103 @@ +// +// Created by frivas on 16/11/16. +// + +#include "RecorderReader.h" +#include +#include +#include +#include +#include +#include + + +RecorderReader::RecorderReader(const std::string &colorImagesPath, const std::string &depthImagesPath):DatasetReader(true), colorPath(colorImagesPath), depthPath(depthImagesPath) { + currentIndex=0; + syncedData=false; + getImagesByIndexes(depthPath,depthIndexes); + getImagesByIndexes(colorPath,colorIndexes); +} + + +RecorderReader::RecorderReader(const std::string &dataPath):DatasetReader(true), colorPath(dataPath), depthPath(dataPath) { + currentIndex=0; + syncedData=true; + getImagesByIndexes(dataPath,depthIndexes,"-depth"); + getImagesByIndexes(dataPath,colorIndexes,"-rgb"); +} + + +void RecorderReader::getImagesByIndexes(const std::string& path, std::vector& indexes,std::string sufix){ + indexes.clear(); + if(boost::filesystem::is_directory(path)) { + + + boost::filesystem::directory_iterator end_iter; + + for (boost::filesystem::directory_iterator dir_itr(path); + dir_itr != end_iter; dir_itr++) { + + if (boost::filesystem::is_regular_file(*dir_itr) && 
dir_itr->path().extension() == ".png") { + std::string onlyIndexFilename; + if (not sufix.empty()) { + std::string filename=dir_itr->path().stem().string(); + if ( ! boost::algorithm::ends_with(filename, sufix)){ + continue; + } + onlyIndexFilename=dir_itr->path().filename().stem().string(); + boost::erase_all(onlyIndexFilename,sufix); + } + else{ + onlyIndexFilename=dir_itr->path().filename().stem().string(); + } + LOG(INFO) << dir_itr->path().string() << std::endl; + LOG(INFO) << onlyIndexFilename << std::endl; + + indexes.push_back(std::stoi(onlyIndexFilename)); + } + } + } + if (indexes.empty()){ + DLOG(WARNING) << "No images found in input sample path"; + } + std::sort(indexes.begin(), indexes.end()); +} + + +std::string RecorderReader::getPathByIndex(const std::string& path, int id,std::string sufix){ + std::stringstream ss; + ss << id << sufix << ".png"; + std::string pathCompleted = PathHelper::concatPaths(path, ss.str()); + return pathCompleted; +} + + + +int RecorderReader::closest(std::vector const& vec, int value) { + auto const it = std::lower_bound(vec.begin(), vec.end(), value); + if (it == vec.end()) { return -1; } + + return *it; +} + +bool RecorderReader::getNextSample(Sample &sample) { + if (this->currentIndex < this->depthIndexes.size()){ + int indexValue = this->depthIndexes[currentIndex]; + LOG(INFO)<<"Time stamp: " + std::to_string(indexValue); + + + cv::Mat colorImage= cv::imread(getPathByIndex(this->colorPath,closest(colorIndexes,indexValue),this->syncedData?"-rgb":"")); +// if (!this->syncedData) + cv::cvtColor(colorImage,colorImage,cv::COLOR_RGB2BGR); + + sample.setColorImage(colorImage); + sample.setDepthImage(getPathByIndex(this->depthPath,indexValue,this->syncedData?"-depth":"")); + this->currentIndex++; + return true; + } + return false; +} + +int RecorderReader::getNumSamples() { + return (int)this->depthIndexes.size(); +} diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/liveReaders/RecorderReader.h 
b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/liveReaders/RecorderReader.h new file mode 100644 index 00000000..7ee4b0f6 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/liveReaders/RecorderReader.h @@ -0,0 +1,39 @@ +// Created by frivas on 16/11/16. +// + +#ifndef SAMPLERGENERATOR_RECORDERCONVERTER_H +#define SAMPLERGENERATOR_RECORDERCONVERTER_H + +#include +#include +#include +#include "DatasetConverters/readers/DatasetReader.h" + + +class RecorderReader: public DatasetReader { +public: + RecorderReader(const std::string& colorImagesPath, const std::string& depthImagesPath); + explicit RecorderReader(const std::string& dataPath); + bool getNextSample(Sample &sample) override; + int getNumSamples(); +// virtual bool getNextSample(Sample &sample); + + +private: + const std::string depthPath; + const std::string colorPath; + bool syncedData; + int currentIndex; + std::vector depthIndexes; + std::vector colorIndexes; + + void getImagesByIndexes(const std::string& path, std::vector& indexes, std::string sufix=""); + std::string getPathByIndex(const std::string& path,int id, std::string sufix=""); + int closest(std::vector const& vec, int value); + + + }; + + typedef boost::shared_ptr RecorderReaderPtr; + +#endif //SAMPLERGENERATOR_RECORDERCONVERTER_H diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/liveReaders/VideoReader.cpp b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/liveReaders/VideoReader.cpp new file mode 100644 index 00000000..339d34f8 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/liveReaders/VideoReader.cpp @@ -0,0 +1,70 @@ +// +// Created by frivas on 26/03/17. +// + +#include "VideoReader.h" + +/* + Constructor which takes the video path as input and starts + reading the video file if it can. +*/ +VideoReader::VideoReader(const std::string &videoPath):DatasetReader(true) { + // Start reading the video. 
+ this->cap = new cv::VideoCapture(videoPath); + this->framesCount = this->cap->get(cv::CAP_PROP_FRAME_COUNT); + this->isVideo = true; + + // check if we succeeded + if(!cap->isOpened()) // check if we succeeded + throw std::invalid_argument( "Couldn't open Video file!" ); + + init=false; +} + +// Destructor -> Stop reading once the program ends/ or is stopped by the user +VideoReader::~VideoReader() { + LOG(INFO) << "Releasing Video File"; + this->cap->release(); + +} + +// Store the information in "sample" which will be later processed. +bool VideoReader::getNextSample(Sample &sample) { + + cv::Mat image; + int count = 0; + + // Try reading the frame from a video. + try { + while (!cap->read(image)) { + this->validFrame = false; + LOG(ERROR) << "Frame not valid " << std::endl; + // If we get an invalid frame for more than 5 times continuously, we + // assume the video has ended. + if (count >= 5) { + LOG(INFO) << "Video Ended" << '\n'; + return false; + } // Video Ended + count++; + } + + // init=true; + this->validFrame = true; + + // If we succeeded in capturing the image, set the sampleID to the sample count + // which was started from the moment we initialized video capturing. + sample.setSampleID(std::to_string(++this->sample_count)); + //And the image to the captured frame. + sample.setColorImage(image); + return true; + } + + // If something strange happens, log the exception detected. + catch (const std::exception &exc) + { + LOG(ERROR) << "Exception Detected: " << exc.what(); + return false; + } + + +} diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/liveReaders/VideoReader.h b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/liveReaders/VideoReader.h new file mode 100644 index 00000000..e8d1859f --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/liveReaders/VideoReader.h @@ -0,0 +1,38 @@ +// +// Created by frivas on 26/03/17. 
+// + +#ifndef SAMPLERGENERATOR_VIDEOREADER_H +#define SAMPLERGENERATOR_VIDEOREADER_H + + + +#include +#include + +class VideoReader: public DatasetReader { +public: + // Constructor which takes the videoPath as input. + VideoReader(const std::string& videoPath); + + // Destructor + ~VideoReader(); + + //This sample address will be passed by some process like evaluator,detector,etc. + // Later on this "sample" will be processed. + bool getNextSample(Sample &sample); +private: + // Pointer which stores the address of the video being read. + cv::VideoCapture* cap; + bool init; + + // Counter which will be initialized from the moment we start capturing video. + long long int sample_count = 0; +}; + +typedef boost::shared_ptr VideoReaderPtr; + + + + +#endif //SAMPLERGENERATOR_VIDEOREADER_H diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/COCODatasetReader.cpp b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/COCODatasetReader.cpp new file mode 100644 index 00000000..d3bb4a4b --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/COCODatasetReader.cpp @@ -0,0 +1,308 @@ +#include +#include +#include +#include "COCODatasetReader.h" + +using namespace boost::filesystem; + +bool replaceme(std::string& str, const std::string& from, const std::string& to) { + size_t start_pos = str.find(from); + if(start_pos == std::string::npos) + return false; + str.replace(start_pos, from.length(), to); + return true; +} + +COCODatasetReader::COCODatasetReader(const std::string &path,const std::string& classNamesFile, bool imagesRequired):DatasetReader(imagesRequired) { + this->classNamesFile=classNamesFile; + appendDataset(path); +} + + +bool COCODatasetReader::find_img_directory(const path & dir_path, path & path_found, std::string& img_dirname) { + LOG(INFO) << dir_path.string() << '\n'; + directory_iterator end_itr; + int count = 0; + for ( directory_iterator itr( dir_path ); itr != end_itr; ++itr ) { + if 
(is_directory(itr->path())) { + if (itr->path().filename().string() == img_dirname) { + path_found = itr->path(); + return true; + } else { + if (find_img_directory( itr->path(), path_found , img_dirname) ) + return true; + } + } + } + return false; + +} + +bool COCODatasetReader::appendDataset(const std::string &datasetPath, const std::string &datasetPrefix) { + LOG(INFO) << "Dataset Path: " << datasetPath << '\n'; //path to json Annotations file + std::ifstream inFile(datasetPath); + path boostDatasetPath(datasetPath); + ClassTypeGeneric typeConverter(this->classNamesFile); + std::stringstream ss; + + if (inFile) { + ss << inFile.rdbuf(); + inFile.close(); + } + else { + throw std::runtime_error("!! Unable to open json file"); + } + + rapidjson::Document doc; + if (doc.Parse<0>(ss.str().c_str()).HasParseError()) + throw std::invalid_argument(std::string("JSON Parse Error: ") + rapidjson::GetParseError_En(doc.GetParseError())); + + if( !doc.HasMember("annotations") ) + throw std::invalid_argument("Invalid Annotations file Passed"); + + const rapidjson::Value& a = doc["annotations"]; + + if(!a.IsArray()) + throw std::invalid_argument("Invalid Annotations file Passed, Images member isn't an array"); + + + std::string img_filename, img_dirname; + std::size_t filename_id_start, filename_ext; + + if (this->imagesRequired || doc.HasMember("images")) { + path img_dir; + std::string filename = boostDatasetPath.filename().string(); + size_t first = filename.find_last_of('_'); + size_t last = filename.find_last_of('.'); + img_dirname = filename.substr(first + 1, last - first - 1); + + if (find_img_directory(boostDatasetPath.parent_path().parent_path(), img_dir, img_dirname)) { + LOG(INFO) << "Image Directory Found: " << img_dir.string() << '\n'; + } else { + throw std::invalid_argument("Corresponding Image Directory can't be located, please place it in the same Directory as annotations if you wish to continue without reading images"); + + } + + if(!doc.HasMember("images")) 
+ throw std::invalid_argument("Images Member not available, invalid annotations file passed"); + + + const rapidjson::Value& imgs = doc["images"]; + + if(!imgs.IsArray()) + throw std::invalid_argument("Invalid Annotations file Passed, Images member isn't an array"); + + + for (rapidjson::Value::ConstValueIterator itr = imgs.Begin(); itr != imgs.End(); ++itr) { + + unsigned long int id = (*itr)["id"].GetUint64(); + std::string filename = (*itr)["file_name"].GetString(); + + int category = (*itr)["category_id"].GetUint(); + + Sample imsample; + imsample.setSampleID(std::to_string(id)); + imsample.setColorImagePath(img_dir.string() + "/" + filename); + if ( itr->HasMember("width") && itr->HasMember("height") ) { + imsample.setSampleDims((*itr)["width"].GetInt(), (*itr)["height"].GetInt()); + } + + this->map_image_id[id] = imsample; + } + } + + int counter = 0; + bool hasBbox = true; + for (rapidjson::Value::ConstValueIterator itr = a.Begin(); itr != a.End(); ++itr) { + unsigned long int image_id = (*itr)["image_id"].GetUint64(); + int category = (*itr)["category_id"].GetUint(); + hasBbox = (*itr).HasMember("bbox"); + double x, y, w, h; + if (hasBbox) { + x = (*itr)["bbox"][0].GetDouble(); + y = (*itr)["bbox"][1].GetDouble(); + w = (*itr)["bbox"][2].GetDouble(); + h = (*itr)["bbox"][3].GetDouble(); + + } + bool isCrowd = (*itr).HasMember("iscrowd") ? ( (*itr)["iscrowd"].GetInt() > 0 ? 
true : false) : false; + + if ( this->map_image_id.find(image_id) == this->map_image_id.end() ) { + std::string num_string = std::to_string(image_id); + std::string full_image_path; + Sample sample; + sample.setSampleID(num_string); + LOG(INFO) << "Loading Instance for Sample: " + num_string; + typeConverter.setId(category - 1); //since index starts from 0 and categories from 1 + + if (hasBbox) { + RectRegionsPtr rectRegions(new RectRegions()); + cv::Rect_ bounding = cv::Rect_(x , y , w , h); + + if ((*itr).HasMember("score")) { + //Adding Score + rectRegions->add(bounding,typeConverter.getClassString(),(*itr)["score"].GetDouble(), isCrowd); + } else { + rectRegions->add(bounding,typeConverter.getClassString(), isCrowd); + } + sample.setRectRegions(rectRegions); + } + + + if ((*itr).HasMember("segmentation")) + appendSegmentationRegion(*itr, sample, typeConverter, isCrowd); + this->map_image_id[image_id] = sample; + } else { + typeConverter.setId(category - 1); //since index starts from 0 and categories from 1 + Sample& sample = this->map_image_id[image_id]; + + if (hasBbox) { + cv::Rect_ bounding(x , y , w , h); + RectRegionsPtr rectRegions_old = sample.getRectRegions(); + + if ((*itr).HasMember("score")) { + //Adding Score + rectRegions_old->add(bounding,typeConverter.getClassString(),(*itr)["score"].GetDouble(), isCrowd); + } else { + rectRegions_old->add(bounding,typeConverter.getClassString(), isCrowd); + } + + sample.setRectRegions(rectRegions_old); + } + + if ((*itr).HasMember("segmentation")) + appendSegmentationRegion(*itr, sample, typeConverter, isCrowd); + + LOG(INFO) << "Loading Instance for Sample: " + sample.getSampleID(); + } + } + + this->samples.reserve(this->samples.size() + this->map_image_id.size()); + std::transform (this->map_image_id.begin(), this->map_image_id.end(),back_inserter(this->samples), [] (std::pair const & pair) { + return pair.second; + }); + + //printDatasetStats(); +} + + +void COCODatasetReader::appendSegmentationRegion(const 
rapidjson::Value& node, Sample& sample, ClassTypeGeneric typeConverter, const bool isCrowd) { + RLE region = getSegmentationRegion(node["segmentation"], sample.getSampleWidth(), sample.getSampleHeight()); + //std::cout << "RLE String: " << rleToString( ®ion ) << '\n'; + RleRegionsPtr rleRegions = sample.getRleRegions(); + std::string className = typeConverter.getClassString(); + if (node.HasMember("score")) { + rleRegions->add(region, className, node["score"].GetDouble(), isCrowd); + } else { + rleRegions->add(region, className, isCrowd); + } + sample.setRleRegions(rleRegions); +} + + +RLE COCODatasetReader::getSegmentationRegion(const rapidjson::Value& seg, int im_width, int im_height) { + if (seg.IsArray()) { + if (!seg.Empty()) { + if (seg[0].IsArray()) { // Multiple Arrays + return fromSegmentationList(seg, im_width, im_height, (int)seg.Size()); + } else if (seg[0].IsObject()) { // list of objects, size is available no need to store + return fromSegmentationObject(seg, seg.Size()); + } else if (seg[0].IsDouble() || seg[0].IsInt()) { + return fromSegmentationList(seg, im_width, im_height, 0); + } + } + } else if (seg.IsObject()) { + return fromSegmentationObject(seg, 0); + } else { + LOG(WARNING) << "Invalid segmentation Annotations, skipping"; + } +} + +RLE COCODatasetReader::fromSegmentationObject(const rapidjson::Value& seg, int size) { + if (size == 0) { // single object + if (seg.HasMember("counts")) { + const rapidjson::Value& counts = seg["counts"]; + if (counts.IsArray()) { + return fromUncompressedRle(seg); + } else if (counts.IsString()) { + return fromRle(seg); + } else { + throw std::invalid_argument("Invalid Annotations File Passed\n Segmentation Member has an invalid counts member"); + } + } + } + + RLE* multipleRles; + rlesInit(&multipleRles, size); + for ( int i = 0; i < size; i++) { + if (seg[i].HasMember("counts")) { + const rapidjson::Value& counts = seg[i]["counts"]; + if (counts.IsArray()) { + multipleRles[i] = fromUncompressedRle(seg[i]); + 
} else if (counts.IsString()) { + multipleRles[i] = fromRle(seg[i]); + } else { + throw std::invalid_argument("Invalid Annotations File Passed\n Segmentation Member has an invalid counts member"); + } + } + + } + RLE* resultingRle; + rlesInit(&resultingRle, 1); + rleMerge(multipleRles, resultingRle, size, 0); + + rlesFree(&multipleRles, size); + return *resultingRle; +} + +RLE COCODatasetReader::fromUncompressedRle(const rapidjson::Value& seg) { + RLE result; + const rapidjson::Value& arr = seg["counts"]; + uint* data = (uint*) malloc((int)(arr.Size()* sizeof(uint))); + unsigned long i; + for (i = 0; i < arr.Size(); i++) { + data[i] = (uint) arr[i].GetUint(); + } + + rleInit(&result, seg["size"][0].GetInt64(), seg["size"][1].GetInt64(), i, data ); + return result; +} + +RLE COCODatasetReader::fromSegmentationList(const rapidjson::Value& seg, int im_width, int im_height, int size) { + if (size == 0) { + RLE result; + double* arr = new double[seg.Size()]; + int i; + for (i = 0; i < seg.Size(); i++) { + arr[i] = seg[i].GetDouble(); + } + rleFrPoly( &result, arr, i/2 , im_height, im_width); + return result; + } else { + RLE* multipleRles; + rlesInit(&multipleRles, size); + for (int i = 0; i < size; i++) { + if (seg[i].IsArray()) { + double* arr = new double[(int)(seg[i].Size())]; + int j; + for (j = 0; j < (int)seg[i].Size(); j++) { + arr[j] = seg[i][j].GetDouble(); + } + rleFrPoly( multipleRles + i, arr, j/2 , im_height, im_width); + } else { + throw std::invalid_argument("Invalid Annotations File Passed\n Error Detected in segmentation Member, 2D array consists of a Scalar"); + } + } + + RLE* resultingRle; + rlesInit(&resultingRle, 1); + rleMerge(multipleRles, resultingRle, size, 0); + return *resultingRle; + } +} + +RLE COCODatasetReader::fromRle(const rapidjson::Value& seg) { + RLE result; + rleFrString( &result, (char*) seg["counts"].GetString(), seg["size"][0].GetUint() , seg["size"][1].GetUint() ); + return result; +} diff --git 
a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/COCODatasetReader.h b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/COCODatasetReader.h new file mode 100644 index 00000000..e29c85e1 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/COCODatasetReader.h @@ -0,0 +1,34 @@ +#ifndef SAMPLERGENERATOR_COCODATASETREADER_H +#define SAMPLERGENERATOR_COCODATASETREADER_H + + +#include +#include +#include +#include +#include "rapidjson/error/en.h" +#include +#include +#include "DatasetConverters/ClassTypeGeneric.h" + +class COCODatasetReader: public DatasetReader { +public: + COCODatasetReader(const std::string& path,const std::string& classNamesFile, bool imagesRequired); + bool appendDataset(const std::string& datasetPath, const std::string& datasetPrefix=""); + bool find_img_directory(const boost::filesystem::path & dir_path, boost::filesystem::path & path_found, std::string& img_filename); + + void appendSegmentationRegion(const rapidjson::Value& node, Sample& sample, ClassTypeGeneric typeConverter, const bool isCrowd); + + RLE fromSegmentationList(const rapidjson::Value& seg, int im_width, int im_height, int size = 1); + RLE getSegmentationRegion(const rapidjson::Value& seg, int im_width, int im_height); + RLE fromSegmentationObject(const rapidjson::Value& seg, int size = 1); + RLE fromUncompressedRle(const rapidjson::Value& seg); + RLE fromRle(const rapidjson::Value& seg); +private: + std::map < unsigned long int, Sample > map_image_id; // map image id to sample, helps storage in a sorted way + +}; + +typedef boost::shared_ptr COCODatasetReaderPtr; + +#endif //SAMPLERGENERATOR_COCODATASETREADER_H diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/DatasetReader.cpp b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/DatasetReader.cpp new file mode 100644 index 00000000..9975d510 --- /dev/null +++ 
b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/DatasetReader.cpp @@ -0,0 +1,208 @@ +// +// Created by frivas on 22/01/17. +// + +#include +#include "DatasetReader.h" +#include "DatasetConverters/ClassTypeOwn.h" +#include +#include + +DatasetReader::DatasetReader(const bool imagesRequired):readerCounter(0),imagesRequired(imagesRequired),isVideo(false),validFrame(true){ +} + +DatasetReader::~DatasetReader() { +} + +void DatasetReader::filterSamplesByID(std::vector filteredIDS) { + std::vector old_samples(this->samples); + this->samples.clear(); + + for (auto it=old_samples.begin(), end=old_samples.end(); it != end; ++it){ + Sample& sample =*it; + sample.filterSamplesByID(filteredIDS); + this->samples.push_back(sample); +// if (sample.getContourRegions() && sample.getContourRegions()->empty() && sample.getRectRegions()->empty()){ +// +// } +// else{ +// } + } +} + +int DatasetReader::getNumberOfElements() { + return this->samples.size(); +} + +void DatasetReader::resetReaderCounter() { + this->readerCounter=0; + +} + +void DatasetReader::decrementReaderCounter(const int decrement_by) { + this->readerCounter -= decrement_by; +} + +void DatasetReader::incrementReaderCounter(const int increment_by) { + this->readerCounter += increment_by; +} + +bool DatasetReader::getNextSample(Sample &sample) { + LOG(INFO) << "readCounter: " << this->readerCounter << ", size: " << this->samples.size(); + if (this->readerCounter < this->samples.size()){ + sample=this->samples[this->readerCounter]; + this->readerCounter++; + return true; + } + else{ + return false; + } + +} + +bool DatasetReader::getNextSamples(std::vector &batchOfSamples, int size ) { + LOG(INFO) << "readCounter: " << this->readerCounter << ", size: " << this->samples.size(); + + int imagesLeft = this->samples.size() - this->readerCounter; + + if ( size > imagesLeft ) + size = imagesLeft; + + if (batchOfSamples.capacity() != size) + batchOfSamples.resize(size); + + + for (int i = 0; i < size; i++) { + 
batchOfSamples[i] = this->samples[this->readerCounter]; + this->readerCounter++; + } + +} + +bool DatasetReader::getSampleBySampleID(Sample** sample, const std::string& sampleID){ + for (auto it=this->samples.begin(), end= this->samples.end(); it != end; ++it){ + if (it->getSampleID().compare(sampleID)==0){ + *sample=&(*it); + return true; + } + } + return false; +} + +bool DatasetReader::getSampleBySampleID(Sample** sample, const long long int sampleID) { + for (auto it=this->samples.begin(), end= this->samples.end(); it != end; ++it){ + if ((long long int)std::stoi(it->getSampleID())==sampleID){ + *sample=&(*it); + return true; + } + } + return false; +} + +void DatasetReader::printDatasetStats() { + std::unordered_map classStats; + std::unordered_map::iterator map_it; + + for (auto it=samples.begin(), end=samples.end(); it != end; ++it){ + RectRegionsPtr regions = it->getRectRegions(); + if (regions) { + std::vector regionsVector = regions->getRegions(); + for (std::vector::iterator itRegion = regionsVector.begin(), endRegion = regionsVector.end(); + itRegion != endRegion; ++itRegion) { + std::string test = itRegion->classID; + + //ClassTypeOwn typeconv(test); + map_it = classStats.find(test); + if (map_it != classStats.end()) { + map_it->second++; + } else { + classStats.insert(std::make_pair(test, 1)); + } + } + } + } + + LOG(INFO) << "------------------------------------------" << std::endl; + LOG(INFO) << "------------------------------------------" << std::endl; + int totalSamples=0; + for (auto it = classStats.begin(), end = classStats.end(); it != end; ++it){ + LOG(INFO) << "["<< it->first << "]: " << it->second << std::endl; + totalSamples+=it->second; + } + LOG(INFO) << "------------------------------------------" << std::endl; + LOG(INFO) << "-- Total samples: " << totalSamples << std::endl; + LOG(INFO) << "-- Total images: " << this->getNumberOfElements() << std::endl; + LOG(INFO) << "------------------------------------------" << std::endl; + + +} + 
+std::string DatasetReader::getClassNamesFile() { + return this->classNamesFile; +} + +void DatasetReader::addSample(Sample sample) { + if (imagesRequired && (!sample.getColorImage().empty() && !sample.getDepthImage().empty())) { + throw std::invalid_argument("Dataset reader requires I¡images, and sample doesn't contain any!!\n" + "The class which has instantiated dataset reader requires it to contain images but the sample doesn't contain any!"); + } + + this->samples.push_back(sample); +} + +bool DatasetReader::appendDataset(const std::string &datasetPath, const std::string &datasetPrefix) { + return false; +} + +void DatasetReader::overWriteClasses(const std::string &from, const std::string &to) { + for (auto it = samples.begin(), end= samples.end(); it != end; ++it){ + Sample& s= *it; + + if (s.getContourRegions()) { + for (auto it2 = s.getContourRegions()->regions.begin(), end2 = s.getContourRegions()->regions.end(); + it2 != end2; ++it2) { + ContourRegion &cr = *it2; + if (cr.classID.compare(from) == 0) { + cr.classID = to; + } + } + } + for (auto it2 = s.getRectRegions()->regions.begin(), end2 = s.getRectRegions()->regions.end(); it2 != end2; ++it2){ + RectRegion& r = *it2; + if (r.classID.compare(from)==0){ + r.classID=to; + } + } + + } +} + +bool DatasetReader::IsValidFrame(){ + return this->validFrame; +} + +bool DatasetReader::IsVideo(){ + return this->isVideo; +} + +long long int DatasetReader::TotalFrames(){ + return this->framesCount; +} + +void DatasetReader::SetClassNamesFile(std::string *names){ + this->classNamesFile = *names; + DatasetReader::SetClasses(this->classNamesFile); +} + +void DatasetReader::SetClasses(const std::string& classesFile){ + std::ifstream labelFile(classesFile); + std::string data; + this->classNames.clear(); + while(getline(labelFile,data)) { + this->classNames.push_back(data); + } +} + +std::vector* DatasetReader::getClassNames(){ + return &this->classNames; +} diff --git 
a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/DatasetReader.h b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/DatasetReader.h new file mode 100644 index 00000000..1cb551f5 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/DatasetReader.h @@ -0,0 +1,54 @@ +// +// Created by frivas on 22/01/17. +// + +#ifndef SAMPLERGENERATOR_DATASETREADER_H +#define SAMPLERGENERATOR_DATASETREADER_H + +#include +#include +#include +#include +#include + +class DatasetReader { +public: + DatasetReader(const bool imagesRequired); + virtual bool getNextSample(Sample &sample); + virtual bool getNextSamples(std::vector &samples, int size ); + void filterSamplesByID(std::vector filteredIDS); + void overWriteClasses(const std::string& from, const std::string& to); + int getNumberOfElements(); + void resetReaderCounter(); + void decrementReaderCounter(const int decrement_by = 1); + void incrementReaderCounter(const int increment_by = 1); + bool getSampleBySampleID(Sample** sample, const std::string& sampleID); + bool getSampleBySampleID(Sample** sample, const long long int sampleID); + void printDatasetStats(); + virtual bool appendDataset(const std::string& datasetPath, const std::string& datasetPrefix=""); + void addSample(Sample sample); + std::string getClassNamesFile(); + void SetClassNamesFile(std::string *names); + void SetClasses(const std::string& classesFile); + std::vector* getClassNames(); + virtual ~DatasetReader(); + bool IsVideo(); + bool IsValidFrame(); + long long int TotalFrames(); +protected: + std::vector samples; + //std::string datasetPath; + int readerCounter; + std::string classNamesFile; + std::vector classNames; + bool imagesRequired; + bool isVideo; + bool validFrame; + long long int framesCount; + unsigned int skip_count = 10; //max Number of annotations that can be skipped if Corresponding images weren't found +}; + + +typedef boost::shared_ptr DatasetReaderPtr; + +#endif 
//SAMPLERGENERATOR_DATASETREADER_H diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/GenericDatasetReader.cpp b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/GenericDatasetReader.cpp new file mode 100644 index 00000000..792e3a82 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/GenericDatasetReader.cpp @@ -0,0 +1,172 @@ +// +// Created by frivas on 4/02/17. +// + +#include +#include "GenericDatasetReader.h" + + +GenericDatasetReader::GenericDatasetReader(const std::string &path, const std::string& classNamesFile, const std::string &readerImplementation, bool imagesRequired) { + configureAvailablesImplementations(this->availableImplementations); + if (std::find(this->availableImplementations.begin(), this->availableImplementations.end(), readerImplementation) != this->availableImplementations.end()){ + imp = getImplementation(readerImplementation); + switch (imp) { + case OPENIMAGES: + this->openimagesDatasetReaderPtr = OpenImagesDatasetReaderPtr( new OpenImagesDatasetReader(path,classNamesFile,imagesRequired)); + break; + case IMAGENET: + this->imagenetDatasetReaderPtr = ImageNetDatasetReaderPtr( new ImageNetDatasetReader(path,classNamesFile,imagesRequired)); + break; + case COCO: + this->cocoDatasetReaderPtr = COCODatasetReaderPtr( new COCODatasetReader(path,classNamesFile,imagesRequired)); + break; + case PASCALVOC: + this->pascalvocDatasetReaderPtr = PascalVOCDatasetReaderPtr( new PascalVOCDatasetReader(path,classNamesFile,imagesRequired)); + break; + case YOLO_1: + this->yoloDatasetReaderPtr = YoloDatasetReaderPtr( new YoloDatasetReader(path,classNamesFile,imagesRequired)); + break; + case SPINELLO: + this->spinelloDatasetReaderPtr = SpinelloDatasetReaderPtr( new SpinelloDatasetReader(path,classNamesFile,imagesRequired)); + break; + case OWN: + this->ownDatasetReaderPtr = OwnDatasetReaderPtr( new OwnDatasetReader(path,classNamesFile,imagesRequired)); + break; + case PRINCETON: + 
this->princetonDatasetReaderPtr = PrincetonDatasetReaderPtr( new PrincetonDatasetReader(path,classNamesFile,imagesRequired)); + break; + default: + LOG(WARNING)< &paths, const std::string& classNamesFile, + const std::string &readerImplementation, bool imagesRequired) { + configureAvailablesImplementations(this->availableImplementations); + if (std::find(this->availableImplementations.begin(), this->availableImplementations.end(), readerImplementation) != this->availableImplementations.end()){ + imp = getImplementation(readerImplementation); + switch (imp) { + case IMAGENET: + this->imagenetDatasetReaderPtr = ImageNetDatasetReaderPtr( new ImageNetDatasetReader(classNamesFile, imagesRequired)); + for (auto it =paths.begin(), end= paths.end(); it != end; ++it){ + int idx = std::distance(paths.begin(),it); + std::stringstream ss; + ss << idx << "_"; + this->imagenetDatasetReaderPtr->appendDataset(*it,ss.str()); + } + break; + case YOLO_1: + this->yoloDatasetReaderPtr = YoloDatasetReaderPtr( new YoloDatasetReader(classNamesFile, imagesRequired)); + for (auto it =paths.begin(), end= paths.end(); it != end; ++it){ + int idx = std::distance(paths.begin(),it); + std::stringstream ss; + ss << idx << "_"; + this->yoloDatasetReaderPtr->appendDataset(*it,ss.str()); + } + break; + case SPINELLO: + this->spinelloDatasetReaderPtr = SpinelloDatasetReaderPtr( new SpinelloDatasetReader(imagesRequired)); + for (auto it =paths.begin(), end= paths.end(); it != end; ++it){ + int idx = std::distance(paths.begin(),it); + std::stringstream ss; + ss << idx << "_"; + this->spinelloDatasetReaderPtr->appendDataset(*it,ss.str()); + } + break; + case OWN: + this->ownDatasetReaderPtr = OwnDatasetReaderPtr( new OwnDatasetReader(imagesRequired)); + for (auto it =paths.begin(), end= paths.end(); it != end; ++it){ + int idx = std::distance(paths.begin(),it); + std::stringstream ss; + ss << idx << "_"; + this->ownDatasetReaderPtr->appendDataset(*it,ss.str()); + } + break; + default: + LOG(WARNING)<< 
readerImplementation + " is not a valid reader implementation"; + break; + } + } + else{ + LOG(WARNING) << readerImplementation + " is not a valid reader implementation"; + } + + +} + + +void GenericDatasetReader::configureAvailablesImplementations(std::vector& data) { + data.push_back("Open Images"); + data.push_back("ImageNet"); + data.push_back("COCO"); + data.push_back("Pascal VOC"); + data.push_back("YOLO"); + data.push_back("Spinello"); + data.push_back("Own"); + data.push_back("Princeton"); + +} + +READER_IMPLEMENTATIONS GenericDatasetReader::getImplementation(const std::string& readerImplementation) { + if (readerImplementation == "Open Images") { + return OPENIMAGES; + } + if (readerImplementation == "ImageNet"){ + return IMAGENET; + } + if (readerImplementation == "COCO"){ + return COCO; + } + if (readerImplementation == "Pascal VOC"){ + return PASCALVOC; + } + if (readerImplementation == "YOLO"){ + return YOLO_1; + } + if (readerImplementation == "Spinello"){ + return SPINELLO; + } + if (readerImplementation == "Own"){ + return OWN; + } + if (readerImplementation == "Princeton"){ + return PRINCETON; + } +} + +DatasetReaderPtr GenericDatasetReader::getReader() { + switch (imp) { + case OPENIMAGES: + return this->openimagesDatasetReaderPtr; + case IMAGENET: + return this->imagenetDatasetReaderPtr; + case COCO: + return this->cocoDatasetReaderPtr; + case PASCALVOC: + return this->pascalvocDatasetReaderPtr; + case YOLO_1: + return this->yoloDatasetReaderPtr; + case SPINELLO: + return this->spinelloDatasetReaderPtr; + case OWN: + return this->ownDatasetReaderPtr; + case PRINCETON: + return this->princetonDatasetReaderPtr; + default: + LOG(WARNING)< GenericDatasetReader::getAvailableImplementations() { + std::vector data; + + configureAvailablesImplementations(data); + return data; +} diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/GenericDatasetReader.h 
b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/GenericDatasetReader.h new file mode 100644 index 00000000..9f9ee6de --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/GenericDatasetReader.h @@ -0,0 +1,54 @@ +// +// Created by frivas on 4/02/17. +// + +#ifndef SAMPLERGENERATOR_GENERICDATASETREADER_H +#define SAMPLERGENERATOR_GENERICDATASETREADER_H + +#include +#include "SpinelloDatasetReader.h" +#include "YoloDatasetReader.h" +#include "COCODatasetReader.h" +#include "PascalVOCDatasetReader.h" +#include "ImageNetDatasetReader.h" +#include "OpenImagesDatasetReader.h" +#include +#include "OwnDatasetReader.h" +#include "PrincetonDatasetReader.h" +#include "SamplesReader.h" + + +enum READER_IMPLEMENTATIONS{OWN, SPINELLO, PASCALVOC, COCO, IMAGENET, YOLO_1, PRINCETON, OPENIMAGES}; + + +class GenericDatasetReader { +public: + GenericDatasetReader(const std::string& path, const std::string& classNamesFile, const std::string& readerImplementation, bool imagesRequired); + GenericDatasetReader(const std::vector& paths,const std::string& classNamesFile, const std::string& readerImplementation, bool imagesRegquired); + + DatasetReaderPtr getReader(); + + static std::vector getAvailableImplementations(); + +private: + READER_IMPLEMENTATIONS imp; + OwnDatasetReaderPtr ownDatasetReaderPtr; + YoloDatasetReaderPtr yoloDatasetReaderPtr; + SpinelloDatasetReaderPtr spinelloDatasetReaderPtr; + PrincetonDatasetReaderPtr princetonDatasetReaderPtr; + PascalVOCDatasetReaderPtr pascalvocDatasetReaderPtr; + COCODatasetReaderPtr cocoDatasetReaderPtr; + ImageNetDatasetReaderPtr imagenetDatasetReaderPtr; + OpenImagesDatasetReaderPtr openimagesDatasetReaderPtr; + SamplesReaderPtr samplesReaderPtr; + + std::vector availableImplementations; + + static void configureAvailablesImplementations(std::vector& data); + READER_IMPLEMENTATIONS getImplementation(const std::string& readerImplementation); +}; + + +typedef boost::shared_ptr 
GenericDatasetReaderPtr; + +#endif //SAMPLERGENERATOR_GENERICDATASETREADER_H diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/ImageNetDatasetReader.cpp b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/ImageNetDatasetReader.cpp new file mode 100644 index 00000000..aa69488f --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/ImageNetDatasetReader.cpp @@ -0,0 +1,145 @@ +#include +#include +#include "ImageNetDatasetReader.h" +#include "DatasetConverters/ClassTypeGeneric.h" + +using namespace boost::filesystem; + + +bool ImageNetDatasetReader::find_img_directory( const path & ann_dir_path, path & path_found ) { + if ( !exists( ann_dir_path ) ) { + return false; + } + directory_iterator end_itr; + + + path parent_folder1 = ann_dir_path.parent_path(); + path parent_folder2 = parent_folder1.parent_path(); + + for ( directory_iterator itr( parent_folder2 ); itr != end_itr; ++itr ) { + if ( is_directory(itr->status()) ) { + + LOG(INFO) << itr->path().string() << '\n'; + if (itr->path().string() == parent_folder1.string()) { + LOG(WARNING) << "skipping" << itr->path().string() << '\n'; + continue; + } else if (itr->path().filename() == ann_dir_path.filename() ) { + if ( find_directory(itr->path(), ann_dir_path.filename().string(), path_found ) ) // find the deepest nested directory + return true; + + path_found = itr->path(); + return true; + } else { + if ( find_directory( itr->path(), ann_dir_path.filename().string(), path_found ) ) + return true; + } + } + } + return false; +} + +bool ImageNetDatasetReader::find_directory(const path & dir_path, const std::string & dir_name, path & path_found) { + + directory_iterator end_itr; + + + for ( directory_iterator itr( dir_path ); itr != end_itr; ++itr ) { + if ( is_directory(itr->status()) ) { + + if (itr->path().filename() == dir_name ) { + if ( find_directory(itr->path(), dir_name, path_found ) ) // find the deepest nested directory + return true; + + 
path_found = itr->path(); + return true; + } else { + if ( find_directory( itr->path(), dir_name, path_found ) ) + return true; + } + } + } + return false; +} + +ImageNetDatasetReader::ImageNetDatasetReader(const std::string &path,const std::string& classNamesFile, bool imagesRequired):DatasetReader(imagesRequired) { + this->classNamesFile=classNamesFile; + appendDataset(path); +} + +ImageNetDatasetReader::ImageNetDatasetReader(const std::string& classNamesFile, const bool imagesRequired):DatasetReader(imagesRequired) { + this->classNamesFile=classNamesFile; +} + +bool ImageNetDatasetReader::appendDataset(const std::string &datasetPath, const std::string &datasetPrefix) { + boost::filesystem::path boostDatasetPath(datasetPath); + + if (!boost::filesystem::is_directory(boostDatasetPath)) { + throw std::invalid_argument("Invalid File received for Imagenet Parser"); + } + + + path img_dir; + + if (imagesRequired) { + if (find_img_directory(boostDatasetPath, img_dir)) { + LOG(INFO) << img_dir.string() << '\n'; + LOG(INFO) << "Image Directory Found" << '\n'; + } else { + LOG(WARNING) << "Corresponding Image Directory, can't be located, Skipping" << '\n'; + } + + } + + + boost::filesystem::directory_iterator end_itr; + for (boost::filesystem::directory_iterator itr(boostDatasetPath); itr!=end_itr; ++itr) + { + if (!boost::filesystem::is_directory(*itr)){ + LOG(INFO) << itr->path().string() << '\n'; + boost::property_tree::ptree tree; + + boost::property_tree::read_xml(itr->path().string(), tree); + + std::string m_folder = tree.get("annotation.folder"); + std::string m_filename = tree.get("annotation.filename"); + std::string m_width = tree.get("annotation.size.width"); + std::string m_height = tree.get("annotation.size.height"); + + + + + Sample sample; + sample.setSampleID(m_filename); + + if (imagesRequired) { + std::string imgPath = img_dir.string() + "/" + m_filename + ".JPEG"; + sample.setColorImagePath(imgPath); + + } + + + RectRegionsPtr rectRegions(new 
RectRegions()); + + BOOST_FOREACH(boost::property_tree::ptree::value_type &v, tree.get_child("annotation")) { + // The data function is used to access the data stored in a node. + if (v.first == "object") { + std::string object_name = v.second.get("name"); + int xmin = v.second.get("bndbox.xmin"); + int xmax = v.second.get("bndbox.xmax"); + int ymin = v.second.get("bndbox.ymin"); + int ymax = v.second.get("bndbox.ymax"); + + cv::Rect bounding(xmin, ymin, xmax - xmin, ymax - ymin); + rectRegions->add(bounding,object_name); + + } + } + + sample.setRectRegions(rectRegions); + this->samples.push_back(sample); + + } + } + + printDatasetStats(); +} diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/ImageNetDatasetReader.h b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/ImageNetDatasetReader.h new file mode 100644 index 00000000..0c02122e --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/ImageNetDatasetReader.h @@ -0,0 +1,24 @@ +#ifndef SAMPLERGENERATOR_IMAGENETDATASETREADER_H +#define SAMPLERGENERATOR_IMAGENETDATASETREADER_H + + +#include +#include +#include +#include +#include +#include + +class ImageNetDatasetReader: public DatasetReader { +public: + ImageNetDatasetReader(const std::string& path,const std::string& classNamesFile, bool imagesRequired); + ImageNetDatasetReader(const std::string& classNamesFile, bool imagesRequired); + bool appendDataset(const std::string& datasetPath, const std::string& datasetPrefix=""); + bool find_img_directory( const boost::filesystem::path & ann_dir_path, boost::filesystem::path & path_found ); + bool find_directory(const boost::filesystem::path & dir_path, const std::string & dir_name, boost::filesystem::path & path_found); + +}; + +typedef boost::shared_ptr ImageNetDatasetReaderPtr; + +#endif //SAMPLERGENERATOR_IMAGENETDATASETREADER_H diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/OpenImagesDatasetReader.cpp 
b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/OpenImagesDatasetReader.cpp new file mode 100644 index 00000000..e5e27dce --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/OpenImagesDatasetReader.cpp @@ -0,0 +1,171 @@ +#include +#include +#include +#include "OpenImagesDatasetReader.h" +#include "DatasetConverters/ClassTypeGeneric.h" + +#include +#include +#include + + +using namespace boost::filesystem; + + + +OpenImagesDatasetReader::OpenImagesDatasetReader(const std::string &path,const std::string& classNamesFile, bool imagesRequired):DatasetReader(imagesRequired) { + this->classNamesFile=classNamesFile; + appendDataset(path); +} + + +bool OpenImagesDatasetReader::find_img_directory(const path & dir_path, path & path_found, std::string& img_dirname) { + directory_iterator end_itr; + int count = 0; + for (directory_iterator itr( dir_path ); itr != end_itr; ++itr) { + if (is_directory(itr->path())) { + if (itr->path().filename().string() == img_dirname) { + path_found = itr->path(); + return true; + } else { + if (find_img_directory(itr->path(), path_found , img_dirname)) + return true; + } + } + } + return false; +} + + +enum class CSVState { + UnquotedField, + QuotedField, + QuotedQuote +}; + +std::vector readCSVRow(const std::string &row) { + CSVState state = CSVState::UnquotedField; + std::vector fields {""}; + size_t i = 0; // index of the current field + for (char c : row) { + switch (state) { + case CSVState::UnquotedField: + switch (c) { + case ',': // end of field + fields.push_back(""); i++; + break; + case '"': state = CSVState::QuotedField; + break; + default: fields[i].push_back(c); + break; } + break; + case CSVState::QuotedField: + switch (c) { + case '"': state = CSVState::QuotedQuote; + break; + default: fields[i].push_back(c); + break; } + break; + case CSVState::QuotedQuote: + switch (c) { + case ',': // , after closing quote + fields.push_back(""); i++; + state = CSVState::UnquotedField; + break; 
+ case '"': // "" -> " + fields[i].push_back('"'); + state = CSVState::QuotedField; + break; + default: // end of quote + state = CSVState::UnquotedField; + break; } + break; + } + } + return fields; +} + + +std::vector> readCSV(std::istream &in) { + std::vector> table; + std::string row; + // Uncomment to debug with less samples + //int max_counter = 50; + //int counter = 0; + while (!in.eof() /*and counter < max_counter*/) { + std::getline(in, row); + if (in.bad() || in.fail()) { + break; + } + auto fields = readCSVRow(row); + table.push_back(fields); + //counter++; + } + return table; +} + + +bool OpenImagesDatasetReader::appendDataset(const std::string &datasetPath, const std::string &datasetPrefix) { + std::string img_filename, img_dirname; + path img_dir; + path boostDatasetPath(datasetPath); + std::string filename = boostDatasetPath.filename().string(); + size_t first = filename.find_first_of('-'); + img_dirname = filename.substr(0, first); + + ClassTypeGeneric typeConverter(this->classNamesFile); + if (find_img_directory(boostDatasetPath.parent_path().parent_path(), img_dir, img_dirname)) { + LOG(INFO) << "Image Directory Found: " << img_dir.string() << '\n'; + } else { + throw std::invalid_argument("Corresponding Image Directory can't be located, please place it in the same Directory as annotations if you wish to continue without reading images"); + } + + std::ifstream file(datasetPath); + std::vector> table = readCSV(file); + std::string previousImageID = table[1][0]; + Sample imsample; + RectRegionsPtr rectRegions(new RectRegions()); + imsample.setSampleID(previousImageID); + imsample.setColorImagePath(img_dir.string() + "/" + previousImageID + ".jpg"); + + for (int i = 1; i < table.size(); i++) { + LOG(INFO) << "Loading Instance for Sample: " + previousImageID; + if (previousImageID != table[i][0]) { + // Create the sample with all the stored bounding boxes and start the list again + + // Create complete new Sample + 
imsample.setRectRegions(rectRegions); + this->map_image_id[previousImageID] = imsample; + + // Restart variables + rectRegions.reset(new RectRegions()); + imsample.setSampleID(table[i][0]); + imsample.setColorImagePath(img_dir.string() + "/" + table[i][0] + ".jpg"); + previousImageID = table[i][0]; + } else { + // Save the bounding box in a list to then create the Sample + + cv::Mat src = cv::imread(imsample.getColorImagePath()); + int imgWidth = src.size().width; + int imgHeight = src.size().height; + + double x, y, w, h; + + x = atof(table[i][4].c_str()) * imgWidth; + y = atof(table[i][6].c_str()) * imgHeight; + w = (atof(table[i][5].c_str()) - atof(table[i][4].c_str())) * imgWidth; + h = (atof(table[i][7].c_str()) - atof(table[i][6].c_str())) * imgHeight; + cv::Rect_ bounding = cv::Rect_(x , y , w , h); + + typeConverter.setStringId(table[i][2]); + rectRegions->add(bounding, typeConverter.getClassString(), atof(table[i][3].c_str())); + } + } + + this->samples.reserve(this->samples.size() + this->map_image_id.size()); + std::transform (this->map_image_id.begin(), this->map_image_id.end(),back_inserter(this->samples), [] (std::pair const & pair) + { + return pair.second; + }); +} + diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/OpenImagesDatasetReader.h b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/OpenImagesDatasetReader.h new file mode 100644 index 00000000..03dca805 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/OpenImagesDatasetReader.h @@ -0,0 +1,28 @@ +#ifndef SAMPLERGENERATOR_OPENIMAGESDATASETREADER_H +#define SAMPLERGENERATOR_OPENIMAGESDATASETREADER_H + + +#include +#include +#include +#include +#include "rapidjson/error/en.h" +#include +#include +#include "DatasetConverters/ClassTypeGeneric.h" + +class OpenImagesDatasetReader: public DatasetReader { + public: + OpenImagesDatasetReader(const std::string &path,const std::string& classNamesFile, bool imagesRequired); + bool 
appendDataset(const std::string& datasetPath, const std::string& datasetPrefix=""); + bool find_img_directory(const boost::filesystem::path & dir_path, boost::filesystem::path & path_found, std::string& img_filename); + private: + std::map map_image_id; // map image id to sample, helps storage in a sorted way +}; + + +typedef boost::shared_ptr OpenImagesDatasetReaderPtr; + + + +#endif //SAMPLERGENERATOR_OPENIMAGESDATASETREADER_H diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/OwnDatasetReader.cpp b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/OwnDatasetReader.cpp new file mode 100644 index 00000000..b26cfa4e --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/OwnDatasetReader.cpp @@ -0,0 +1,45 @@ +// +// Created by frivas on 22/01/17. +// + +#include +#include +#include +#include "DatasetConverters/readers/OwnDatasetReader.h" + +OwnDatasetReader::OwnDatasetReader(const std::string &path,const std::string& classNamesFile,const bool imagesRequired):DatasetReader(imagesRequired){ + this->classNamesFile=classNamesFile; + appendDataset(path); +} + +OwnDatasetReader::OwnDatasetReader(const bool imagesRequired):DatasetReader(imagesRequired) { + +} + +bool OwnDatasetReader::appendDataset(const std::string &datasetPath, const std::string &datasetPrefix) { + boost::filesystem::directory_iterator end_itr; + boost::filesystem::path boostPath(datasetPath); + + + std::vector filesID; + + for (boost::filesystem::directory_iterator itr(boostPath); itr!=end_itr; ++itr) + { + if ((is_regular_file(itr->status()) && itr->path().extension()==".json") && (itr->path().string().find("-region") == std::string::npos)) { + filesID.push_back(itr->path().filename().stem().string()); + } + + } + + std::sort(filesID.begin(),filesID.end()); + + for (auto it = filesID.begin(), end=filesID.end(); it != end; ++it){ + Sample sample(datasetPath,*it); + sample.setSampleID(datasetPrefix + 
boost::filesystem::path(*it).filename().stem().string()); + this->samples.push_back(sample); + } + + LOG(INFO) << "Loaded: " + boost::lexical_cast(this->samples.size()) + " samples"; + printDatasetStats(); + return true; +} diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/OwnDatasetReader.h b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/OwnDatasetReader.h new file mode 100644 index 00000000..8f11ab7d --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/OwnDatasetReader.h @@ -0,0 +1,23 @@ +// +// Created by frivas on 22/01/17. +// + +#ifndef SAMPLERGENERATOR_OWNDATASETREADER_H +#define SAMPLERGENERATOR_OWNDATASETREADER_H + +#include + +class OwnDatasetReader:public DatasetReader { +public: + OwnDatasetReader(const std::string& path,const std::string& classNamesFile, const bool imagesRequired); + OwnDatasetReader(const bool imagesRequired); + bool appendDataset(const std::string& datasetPath, const std::string& datasetPrefix=""); +private: + +}; + + +typedef boost::shared_ptr OwnDatasetReaderPtr; + + +#endif //SAMPLERGENERATOR_OWNDATASETREADER_H diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/PascalVOCDatasetReader.cpp b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/PascalVOCDatasetReader.cpp new file mode 100644 index 00000000..a57a1443 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/PascalVOCDatasetReader.cpp @@ -0,0 +1,121 @@ +#include +#include +#include +#include "PascalVOCDatasetReader.h" +#include "DatasetConverters/ClassTypeGeneric.h" + +using namespace boost::filesystem; + +PascalVOCDatasetReader::PascalVOCDatasetReader(const std::string &path,const std::string& classNamesFile, const bool imagesRequired):DatasetReader(imagesRequired) { + this->classNamesFile=classNamesFile; + appendDataset(path); +} + + +bool PascalVOCDatasetReader::find_directory(const path & dir_path, const std::string & dir_name, path & 
path_found) { + + directory_iterator end_itr; + + + for ( directory_iterator itr( dir_path ); itr != end_itr; ++itr ) { + if ( is_directory(itr->status()) ) { + + if (itr->path().filename() == dir_name ) { + if ( find_directory(itr->path(), dir_name, path_found ) ) // find the deepest nested directory + return true; + + path_found = itr->path(); + return true; + } else { + if ( find_directory( itr->path(), dir_name, path_found ) ) + return true; + } + } + } + return false; +} + + +bool PascalVOCDatasetReader::appendDataset(const std::string &datasetPath, const std::string &datasetPrefix) { + + boost::filesystem::path boostDatasetPath(datasetPath); + + if (boost::filesystem::exists(boostDatasetPath)) { + if (!boost::filesystem::is_directory(boostDatasetPath)) { + throw std::invalid_argument("Please Provide a folder containing all the annotation files, not just a single file"); + } + } else { + throw std::invalid_argument("Provided Directory Path doesn't exist"); + } + + path img_dir; + + + if (imagesRequired) { + if (find_directory(boostDatasetPath.parent_path(), "JPEGImages", img_dir)) { + LOG(INFO) << img_dir.string() << '\n'; + } else { + throw std::runtime_error("Images Directory can't be located, place it in the folder containing annotations, and name it JPEGIamges"); + } + } + + + int count = 0; + + boost::filesystem::directory_iterator end_itr; + for (boost::filesystem::directory_iterator itr(boostDatasetPath); itr!=end_itr; ++itr) + { + if (!boost::filesystem::is_directory(*itr)){ + count++; + + + LOG(INFO) << itr->path().string() << '\n'; + boost::property_tree::ptree tree; + + boost::property_tree::read_xml(itr->path().string(), tree); + + + std::string m_id = itr->path().stem().string(); // filename without extension + std::string m_imgfile = tree.get("annotation.filename"); + std::string m_width = tree.get("annotation.size.width"); + std::string m_height = tree.get("annotation.size.height"); + + + + Sample sample; + sample.setSampleID(m_id); + + if 
(imagesRequired) { + std::string imgPath = img_dir.string() + "/" + m_imgfile; + sample.setColorImagePath(imgPath); + + } + + + RectRegionsPtr rectRegions(new RectRegions()); + + BOOST_FOREACH(boost::property_tree::ptree::value_type &v, tree.get_child("annotation")) { + // The data function is used to access the data stored in a node. + if (v.first == "object") { + std::string object_name = v.second.get("name"); + int xmin = int(v.second.get("bndbox.xmin")); + int xmax = int(v.second.get("bndbox.xmax")); + int ymin = int(v.second.get("bndbox.ymin")); + int ymax = int(v.second.get("bndbox.ymax")); + + cv::Rect bounding(xmin, ymin, xmax - xmin, ymax - ymin); + rectRegions->add(bounding,object_name); + + } + } + + sample.setRectRegions(rectRegions); + this->samples.push_back(sample); + + } + } + + + + printDatasetStats(); +} diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/PascalVOCDatasetReader.h b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/PascalVOCDatasetReader.h new file mode 100644 index 00000000..0b156e7c --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/PascalVOCDatasetReader.h @@ -0,0 +1,23 @@ +#ifndef SAMPLERGENERATOR_PASCALVOCDATASETREADER_H +#define SAMPLERGENERATOR_PASCALVOCDATASETREADER_H + + +#include +#include +#include +#include +#include +#include + + +class PascalVOCDatasetReader: public DatasetReader { +public: + PascalVOCDatasetReader(const std::string& path,const std::string& classNamesFile, const bool imagesRequired); + bool appendDataset(const std::string& datasetPath, const std::string& datasetPrefix=""); + bool find_directory(const boost::filesystem::path & dir_path, const std::string & dir_name, boost::filesystem::path & path_found); + +}; + +typedef boost::shared_ptr PascalVOCDatasetReaderPtr; + +#endif //SAMPLERGENERATOR_PascalVOCDATASETREADER_H diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/PrincetonDatasetReader.cpp 
b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/PrincetonDatasetReader.cpp new file mode 100644 index 00000000..358d4f4f --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/PrincetonDatasetReader.cpp @@ -0,0 +1,75 @@ +// +// Created by frivas on 29/07/17. +// + +#include +#include "PrincetonDatasetReader.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +PrincetonDatasetReader::PrincetonDatasetReader(const std::string &path, const std::string &classNamesFile,const bool imagesRequired):DatasetReader(imagesRequired) { + this->classNamesFile=classNamesFile; + appendDataset(path); +} + +bool PrincetonDatasetReader::appendDataset(const std::string &datasetPath, const std::string &datasetPrefix) { + std::string framesData=PathHelper::concatPaths(datasetPath,"frames.json"); + auto boostPath= boost::filesystem::path(framesData); + + if (!boost::filesystem::exists(framesData.c_str())){ + LOG(ERROR) << "Dataset path: " + datasetPath + " does not contain any frames.json file"; + } + + + + boost::property_tree::ptree pt; + boost::property_tree::read_json(framesData, pt); + std::string foo = pt.get ("format"); + + auto depthTimestamp=JsonHelper::as_vector(pt, "depthTimestamp"); + auto depthFrameID=JsonHelper::as_vector(pt, "depthFrameID"); + auto imageTimestamp=JsonHelper::as_vector(pt, "imageTimestamp"); + auto imageFrameID=JsonHelper::as_vector(pt, "imageFrameID"); + + for (size_t i = 0; i < depthTimestamp.size(); i++) { + LOG(INFO) << "Loading: " << i << " of " << depthTimestamp.size(); + + //depth Image + std::stringstream ssDepth; + ssDepth << "d-" << depthTimestamp[i] << "-" << depthFrameID[i] << ".png"; + std::string depthImagePath = PathHelper::concatPaths(datasetPath, "depth"); + depthImagePath = PathHelper::concatPaths(depthImagePath, ssDepth.str()); + cv::Mat depthImage = cv::imread(depthImagePath, cv::IMREAD_ANYDEPTH); + cv::Mat ownDepthImage; + 
DepthUtils::mat16_to_ownFormat(depthImage,ownDepthImage); + + + + //colorImage + std::stringstream ssColor; + ssColor << "r-" << imageTimestamp[i] << "-" << imageFrameID[i] << ".png"; + std::string colorImagePath = PathHelper::concatPaths(datasetPath, "rgb"); + colorImagePath = PathHelper::concatPaths(colorImagePath, ssColor.str()); + cv::Mat colorImage = cv::imread(colorImagePath); + + + Sample sample; + sample.setDepthImage(ownDepthImage); + sample.setColorImage(colorImage); + samples.push_back(sample); + } + + printDatasetStats(); + + return true; +} diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/PrincetonDatasetReader.h b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/PrincetonDatasetReader.h new file mode 100644 index 00000000..fa9ad788 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/PrincetonDatasetReader.h @@ -0,0 +1,24 @@ +// +// Created by frivas on 29/07/17. +// + +#ifndef SAMPLERGENERATOR_PRINCETONDATASETREADER_H +#define SAMPLERGENERATOR_PRINCETONDATASETREADER_H +#include + + +class PrincetonDatasetReader: public DatasetReader { +public: + PrincetonDatasetReader(const std::string& path,const std::string& classNamesFile, const bool imagesRequired); + PrincetonDatasetReader()= default; + virtual bool appendDataset(const std::string& datasetPath, const std::string& datasetPrefix=""); + +private: +}; + + +typedef boost::shared_ptr PrincetonDatasetReaderPtr; + + + +#endif //SAMPLERGENERATOR_PRINCETONDATASETREADER_H diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/SamplesReader.cpp b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/SamplesReader.cpp new file mode 100644 index 00000000..96237072 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/SamplesReader.cpp @@ -0,0 +1,9 @@ +#include +#include "SamplesReader.h" + + + +SamplesReader::SamplesReader(std::vector & samples, std::string &classNamesFile) { + 
this->samples = samples; + this->classNamesFile = classNamesFile; +} diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/SamplesReader.h b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/SamplesReader.h new file mode 100644 index 00000000..667ea1ad --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/SamplesReader.h @@ -0,0 +1,17 @@ +#ifndef SAMPLERGENERATOR_SAMPLESREADER_H +#define SAMPLERGENERATOR_SAMPLESREADER_H + + +#include + +class SamplesReader: public DatasetReader { +public: + SamplesReader(std::vector & samples, std::string &classNamesFile); + + //bool appendDataset(const std::string& datasetPath, const std::string& datasetPrefix=""); + +}; + +typedef boost::shared_ptr SamplesReaderPtr; + +#endif //SAMPLERGENERATOR_SAMPLESREADER_H diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/SpinelloDatasetReader.cpp b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/SpinelloDatasetReader.cpp new file mode 100644 index 00000000..ea1f0ed3 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/SpinelloDatasetReader.cpp @@ -0,0 +1,113 @@ +// +// Created by frivas on 29/01/17. 
+// + +#include +#include +#include "SpinelloDatasetReader.h" +#include +#include +#include +#include +#include + + +SpinelloDatasetReader::SpinelloDatasetReader(const std::string &path,const std::string& classNamesFile,const bool imagesRequired):DatasetReader(imagesRequired) { + this->classNamesFile=classNamesFile; + appendDataset(path); +} + +SpinelloDatasetReader::SpinelloDatasetReader(const bool imagesRequired):DatasetReader(imagesRequired) { + +} + +bool SpinelloDatasetReader::appendDataset(const std::string &datasetPath, const std::string &datasetPrefix) { + auto boostPath= boost::filesystem::path(datasetPath + "/track_annotations/"); + + + + boost::filesystem::directory_iterator end_itr; + + LOG(INFO) << "Path: " << boostPath.string() << std::endl; + + + std::vector labelFileNames; + + for (boost::filesystem::directory_iterator itr(boostPath); itr!=end_itr; ++itr) + { + if (is_regular_file(itr->status()) && itr->path().extension()==".txt") { + labelFileNames.push_back(itr->path().string()); + } + } + + std::sort(labelFileNames.begin(), labelFileNames.end()); + + for (auto it = labelFileNames.begin(), end=labelFileNames.end(); it != end; ++it){ + LOG(INFO) << "Loading: " << std::distance(labelFileNames.begin(),it) << " of " << labelFileNames.size(); + std::ifstream labelFile(*it); + std::string data; + while(getline(labelFile,data)) { + Sample sample; + if (data[0] == '#') + continue; + std::vector tokens = StringHandler::split(data, ' '); + + + std::string imageID=tokens[0]; + +// for (auto it=tokens.begin(), end=tokens.end(); it != end; ++it){ +// } + + std::string colorImagePath=datasetPath + "/" + "rgb" + "/" + imageID + ".ppm"; + std::string depthImagePath=datasetPath + "/" + "depth" + "/" + imageID + ".pgm"; + cv::Mat colorImage= cv::imread(colorImagePath); + cv::Mat depthImage= cv::imread(depthImagePath, cv::IMREAD_ANYDEPTH); + + cv::Mat ownDepthImage; + //DepthUtils::mat16_to_ownFormat(depthImage,ownDepthImage); + 
//cv::cvtColor(ownDepthImage,ownDepthImage,CV_RGB2BGR); + + DepthUtils::spinello_mat16_to_viewable(depthImage, ownDepthImage); + + cv::Rect colorRect; + cv::Rect depthRect; + + std::istringstream iss(tokens[2]); + iss >> colorRect.x; + iss=std::istringstream(tokens[3]); + iss >> colorRect.y; + iss=std::istringstream(tokens[4]); + iss >> colorRect.width; + iss=std::istringstream(tokens[5]); + iss >> colorRect.height; + iss=std::istringstream(tokens[6]); + iss >> depthRect.x; + iss=std::istringstream(tokens[7]); + iss >> depthRect.y; + iss=std::istringstream(tokens[8]); + iss >> depthRect.width; + iss=std::istringstream(tokens[9]); + iss >> depthRect.height; + + Normalizations::normalizeRect(colorRect,colorImage.size()); + Normalizations::normalizeRect(depthRect,depthImage.size()); + + Sample* samplePointer; + if (this->getSampleBySampleID(&samplePointer,imageID)){ + RectRegionsPtr regions=samplePointer->getRectRegions(); + regions->add(colorRect,"person"); + samplePointer->setRectRegions(regions); + } + else{ + sample.setSampleID(datasetPrefix + imageID); + sample.setColorImagePath(colorImagePath); + sample.setDepthImage(ownDepthImage); + RectRegionsPtr colorRegions(new RectRegions()); + colorRegions->add(colorRect,"person"); + sample.setRectRegions(colorRegions); + samples.push_back(sample); + } + } + } + printDatasetStats(); +} diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/SpinelloDatasetReader.h b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/SpinelloDatasetReader.h new file mode 100644 index 00000000..d6098847 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/SpinelloDatasetReader.h @@ -0,0 +1,24 @@ +// +// Created by frivas on 29/01/17. 
+// + +#ifndef SAMPLERGENERATOR_SPINELLODATASETREADER_H +#define SAMPLERGENERATOR_SPINELLODATASETREADER_H + + +#include + +class SpinelloDatasetReader: public DatasetReader { +public: + SpinelloDatasetReader(const std::string& path,const std::string& classNamesFile,const bool imagesRequired); + SpinelloDatasetReader(const bool imagesRequired); + bool appendDataset(const std::string& datasetPath, const std::string& datasetPrefix=""); + +private: +}; + + +typedef boost::shared_ptr SpinelloDatasetReaderPtr; + + +#endif //SAMPLERGENERATOR_SPINELLODATASETREADER_H diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/YoloDatasetReader.cpp b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/YoloDatasetReader.cpp new file mode 100644 index 00000000..2bcffc4f --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/YoloDatasetReader.cpp @@ -0,0 +1,61 @@ +// +// Created by frivas on 22/01/17. +// + +#include +#include +#include +#include "YoloDatasetReader.h" +#include "DatasetConverters/ClassTypeGeneric.h" + + +bool replace(std::string& str, const std::string& from, const std::string& to) { + size_t start_pos = str.find(from); + if(start_pos == std::string::npos) + return false; + str.replace(start_pos, from.length(), to); + return true; +} + +YoloDatasetReader::YoloDatasetReader(const std::string &path,const std::string& classNamesFile, bool imagesRequired):DatasetReader(imagesRequired) { + this->classNamesFile=classNamesFile; + appendDataset(path); +} + +YoloDatasetReader::YoloDatasetReader(const std::string& classNamesFile, bool imagesRequired):DatasetReader(imagesRequired) { + this->classNamesFile=classNamesFile; +} + +bool YoloDatasetReader::appendDataset(const std::string &datasetPath, const std::string &datasetPrefix) { + std::ifstream inFile(datasetPath); + ClassTypeGeneric typeConverter(this->classNamesFile); + + std::string line; + while (getline(inFile,line)){ + Sample sample; + 
sample.setSampleID(datasetPrefix + boost::filesystem::path(line).filename().stem().string()); + sample.setColorImagePath(line); + LOG(INFO) << "Loading sample: " + line; + cv::Mat image = cv::imread(line); + replace(line,"JPEGImages", "labels"); + replace(line,".jpg", ".txt"); + std::ifstream labelFile(line); + std::string data; + RectRegionsPtr rectRegions(new RectRegions()); + + + while(getline(labelFile,data)) { + std::istringstream iss(data); + int class_id; + double x, y, w,h; + iss >> class_id >> x >> y >> w >> h; + cv::Rect bounding(x * image.size().width - (w * image.size().width)/2, y * image.size().height - (h * image.size().height)/2, w * image.size().width, h * image.size().height); + typeConverter.setId(class_id); + rectRegions->add(bounding,typeConverter.getClassString()); + } + labelFile.close(); + sample.setRectRegions(rectRegions); + this->samples.push_back(sample); + } + printDatasetStats(); +} diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/YoloDatasetReader.h b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/YoloDatasetReader.h new file mode 100644 index 00000000..3f69eb54 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/readers/YoloDatasetReader.h @@ -0,0 +1,21 @@ +// +// Created by frivas on 22/01/17. 
+// + +#ifndef SAMPLERGENERATOR_YOLODATASETREADER_H +#define SAMPLERGENERATOR_YOLODATASETREADER_H + + +#include + +class YoloDatasetReader: public DatasetReader { +public: + YoloDatasetReader(const std::string& path,const std::string& classNamesFile, bool imagesRequired); + YoloDatasetReader(const std::string& classNamesFile,bool imagesRequired); + bool appendDataset(const std::string& datasetPath, const std::string& datasetPrefix=""); + +}; + +typedef boost::shared_ptr YoloDatasetReaderPtr; + +#endif //SAMPLERGENERATOR_YOLODATASETREADER_H diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/COCODatasetWriter.cpp b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/COCODatasetWriter.cpp new file mode 100644 index 00000000..1034e6c2 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/COCODatasetWriter.cpp @@ -0,0 +1,272 @@ +#include "COCODatasetWriter.h" +#include "DatasetConverters/ClassTypeMapper.h" +#include +#include +#include +#include +#include +#include +#include + +using namespace rapidjson; + +COCODatasetWriter::COCODatasetWriter(const std::string &outPath, DatasetReaderPtr &reader, const std::string& writerNamesFile, bool overWriteclassWithZero):DatasetWriter(outPath,reader),overWriteclassWithZero(overWriteclassWithZero), writerNamesFile(writerNamesFile){ + + this->fullImagesPath=boost::filesystem::absolute(boost::filesystem::path(outPath + "/train")).string(); + this->fullLabelsPath=boost::filesystem::absolute(boost::filesystem::path(outPath + "/annotations")).string(); + this->fullNamesPath=boost::filesystem::absolute(boost::filesystem::path(outPath + "/coco.names")).string(); + + auto boostImages= boost::filesystem::path(fullImagesPath); + if (!boost::filesystem::exists(boostImages)){ + boost::filesystem::create_directories(boostImages); + } + auto boostLabels= boost::filesystem::path(fullLabelsPath); + if (!boost::filesystem::exists(boostLabels)){ + 
boost::filesystem::create_directories(boostLabels); + } + + + LOG(INFO) << "FullImagesPath: " << this->fullImagesPath << std::endl; + LOG(INFO) << "FullLabelsPath: " << this->fullLabelsPath << std::endl; + +} + +void COCODatasetWriter::process(bool writeImages, bool useDepth) { + Sample sample; + //std::ofstream sampleFile(this->outPath + "/sample.txt"); + + StringBuffer s_anns; + StringBuffer s_imgs; + + std::string labelFilePath= this->fullLabelsPath + "/" + "instances_train.json"; + std::ofstream out(labelFilePath); + + Writer writer_anns(s_anns); + Writer writer_imgs(s_imgs); + + writer_anns.StartObject(); + writer_anns.Key("annotations"); + writer_anns.StartArray(); + + if (writeImages) { + writer_imgs.StartObject(); + writer_imgs.Key("images"); + writer_imgs.StartArray(); + + } + + ClassTypeMapper typeMapper; + + if (!writerNamesFile.empty()) + typeMapper = ClassTypeMapper(writerNamesFile); + + int id = 0; + + while (reader->getNextSample(sample)){ + + auto boundingBoxes = sample.getRectRegions()->getRegions(); + auto segmentationRegions = sample.getRleRegions()->getRegions(); + int width = sample.getSampleWidth(); + int height = sample.getSampleHeight(); + + std::string id_string = sample.getSampleID(); + id++; + id_string.erase(std::remove_if(id_string.begin(), id_string.end(), isspace), id_string.end()); + + + std::string::size_type sz; // alias of size_t + + int num_id = std::stoi (id_string, &sz); + + std::string imageFileName; + if (id_string.length() == sz) { + int i = ceil(log10(num_id)); + + std::string ssID (12-i+1,'0') ; + + imageFileName = "COCO_" + ssID + std::to_string(num_id) + ".jpg"; + } else { + imageFileName = "COCO_" + std::to_string(id) + ".jpg"; + num_id = id; + } + + + cv::Mat image; + if (writeImages) { + if (useDepth) { + image= sample.getDepthImage(); + } else { + image= sample.getColorImage(); + } + + if (image.empty()) { + skip_count++; + if (skip_count > this->skip_count) { + throw std::runtime_error("Maximum limit for skipping 
exceeded, either turn off writing images or fix issues in dataset"); + } + LOG(WARNING) << "Image empty, skipping writing image. Skipped " + std::to_string(skip_count) + " of " + std::to_string(this->skip_count); + + } else { + cv::imwrite(this->fullImagesPath + "/" + imageFileName,image); + + } + + } + + if (writeImages) { + writer_imgs.StartObject(); + writer_imgs.Key("file_name"); + writer_imgs.String(imageFileName.c_str()); + writer_imgs.Key("id"); + writer_imgs.Int(num_id); + writer_imgs.Key("height"); + writer_imgs.Int(height); + writer_imgs.Key("width"); + writer_imgs.Int(width); + writer_imgs.EndObject(); + + } + + + int i = 0; + for (auto it = boundingBoxes.begin(), end=boundingBoxes.end(); it != end; ++it){ + + double x = it->region.x; + double y = it->region.y; + double w = it->region.width; + double h = it->region.height; + double confidence_score = it->confidence_score; + + int classId; + if (overWriteclassWithZero) + classId=0; + else { + if (writerNamesFile.empty()) { + std::vector::iterator itr; + itr = find(this->outputClasses.begin(), this->outputClasses.end(), it->classID); + if (itr == this->outputClasses.end()) { + this->outputClasses.push_back(it->classID); + classId = this->outputClasses.size() - 1; + } else { + classId = std::distance(this->outputClasses.begin(), itr); + } + + } else { + if(typeMapper.mapString(it->classID)) { // Mapping Successfull + classId = typeMapper.getClassID(); + if (it->classID != typeMapper.getClassString()) + this->mapped_classes[it->classID] = typeMapper.getClassString(); + + } else { // No Mapping Found Discarding Class + std::unordered_map::iterator itr = this->discarded_classes.find(it->classID); + if (itr != this->discarded_classes.end()) { + itr->second++; + } else { + this->discarded_classes.insert(std::make_pair(it->classID, 1)); + } + continue; + } + + } + //ClassTypeMapper typeMapper(it->classID); + //classId = typeConverter.getClassID(); + } + + writer_anns.StartObject(); + writer_anns.Key("bbox"); + 
writer_anns.StartArray(); + writer_anns.Double(x); + writer_anns.Double(y); + writer_anns.Double(w); + writer_anns.Double(h); + writer_anns.EndArray(); + writer_anns.Key("category_id"); + writer_anns.Int(classId + 1); // Classes in DetectionMetrics start + // 0 wherease it starts from 1 in + // COCO dataset, that's why + + writer_anns.Key("score"); + writer_anns.Double(confidence_score); + writer_anns.Key("image_id"); + writer_anns.Int(num_id); + + if (!segmentationRegions.empty()) { + writer_anns.Key("segmentation"); + writer_anns.StartObject(); + writer_anns.Key("size"); + writer_anns.StartArray(); + writer_anns.Int(height); + writer_anns.Int(width); + writer_anns.EndArray(); + writer_anns.Key("counts"); + writer_anns.String(rleToString(&(segmentationRegions[i].region))); + writer_anns.EndObject(); + + } + + writer_anns.EndObject(); + i++; + + } + + + } + + writer_anns.EndArray(); + writer_anns.EndObject(); + + std::string json_anns (s_anns.GetString(), s_anns.GetSize()); + + if (writeImages) { + + writer_imgs.EndArray(); + writer_imgs.EndObject(); + + std::string json_imgs (s_imgs.GetString(), s_imgs.GetSize()); + + + json_imgs.pop_back(); + + out << json_imgs; + json_anns.erase(0, 1); + out << ","; + } + + out << json_anns; + + + if (!out.good()) throw std::runtime_error ("Can't write the JSON string to the file!"); + + + if (writerNamesFile.empty()) { + + std::ofstream writerClassfile; + writerClassfile.open (this->fullNamesPath); + + std::vector::iterator it; + for (it = this->outputClasses.begin(); it != this->outputClasses.end(); it++) { + writerClassfile << *it << "\n"; + } + writerClassfile.close(); + } + + LOG(INFO) << "Successfully Written to COCO dataset\n"; + + if (!writerNamesFile.empty()) { + + LOG(INFO) << "\nPrinting Mapping Info\n"; + LOG(INFO) << "**********************\n"; + + for (std::unordered_map::iterator it=this->mapped_classes.begin(); it!=this->mapped_classes.end(); ++it) + LOG(INFO) << it->first << " => " << it->second << '\n'; + + 
LOG(INFO) << "**********************\n"; + + LOG(WARNING) << "\nPrinting Discarded Classes from Original Dataset\n"; + LOG(INFO) << "**********************\n"; + + for (std::unordered_map::iterator it=this->discarded_classes.begin(); it!=this->discarded_classes.end(); ++it) + LOG(WARNING) << it->first << " : " << it->second << '\n'; + LOG(INFO) << "**********************\n"; + + } +} diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/COCODatasetWriter.h b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/COCODatasetWriter.h new file mode 100644 index 00000000..d2868ec1 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/COCODatasetWriter.h @@ -0,0 +1,26 @@ +#ifndef SAMPLERGENERATOR_COCODATASETCONVERTER_H +#define SAMPLERGENERATOR_COCODATASETCONVERTER_H + +#include +#include "DatasetWriter.h" +#include +#include +#include + +class COCODatasetWriter: public DatasetWriter { +public: + COCODatasetWriter(const std::string& outPath, DatasetReaderPtr& reader, const std::string& writerNamesFile, bool overWriteclassWithZero=false); + void process(bool writeImages = false, bool useDepth = false); + +private: + std::string fullImagesPath; + std::string fullLabelsPath; + std::string fullNamesPath; + bool overWriteclassWithZero; + std::string writerNamesFile; +}; + +typedef boost::shared_ptr COCODatasetWriterPtr; + + +#endif diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/DatasetWriter.cpp b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/DatasetWriter.cpp new file mode 100644 index 00000000..16647e28 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/DatasetWriter.cpp @@ -0,0 +1,24 @@ +// +// Created by frivas on 5/02/17. 
+// + +#include +#include +#include +#include "DatasetWriter.h" + +DatasetWriter::DatasetWriter(const std::string &outPath, DatasetReaderPtr &reader):outPath(outPath), reader(reader) { + auto boostPath= boost::filesystem::path(outPath); + if (!boost::filesystem::exists(boostPath)){ + boost::filesystem::create_directories(boostPath); + } + else{ + boost::filesystem::directory_iterator end_it; + boost::filesystem::directory_iterator it(boostPath); + if(it != end_it) { + const std::string msg("Output directory already exists and is not empty"); + LOG(WARNING)<< msg; + throw(msg); + } + } +} diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/DatasetWriter.h b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/DatasetWriter.h new file mode 100644 index 00000000..935c2458 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/DatasetWriter.h @@ -0,0 +1,29 @@ +// +// Created by frivas on 5/02/17. +// + +#ifndef SAMPLERGENERATOR_DATASETWRITTER_H +#define SAMPLERGENERATOR_DATASETWRITTER_H + + +#include +#include + +class DatasetWriter { +public: + DatasetWriter(const std::string& outPath, DatasetReaderPtr& reader); + virtual void process(bool writeImages = false, bool useDepth = false)=0; + +protected: + std::string outPath; + DatasetReaderPtr& reader; + std::vector outputClasses; + std::unordered_map mapped_classes; + std::unordered_map discarded_classes; + unsigned int skip_count = 10; //max Number of annotations that can be skipped if Corresponding images weren't found +}; + + +typedef boost::shared_ptr DatasetWriterPtr; + +#endif //SAMPLERGENERATOR_DATASETWRITTER_H diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/GenericDatasetWriter.cpp b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/GenericDatasetWriter.cpp new file mode 100644 index 00000000..6608459a --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/GenericDatasetWriter.cpp @@ 
-0,0 +1,88 @@ +// +// Created by frivas on 5/02/17. +// + +#include +#include "GenericDatasetWriter.h" + + +GenericDatasetWriter::GenericDatasetWriter(const std::string &path,DatasetReaderPtr &reader, const std::string &writerImplementation, const std::string& writerNamesFile) { + configureAvailableImplementations(this->availableImplementations); + if (std::find(this->availableImplementations.begin(), this->availableImplementations.end(), writerImplementation) != this->availableImplementations.end()){ + imp = getImplementation(writerImplementation); + switch (imp) { + case WR_COCO: + this->cocoDatasetWriterPtr = COCODatasetWriterPtr(new COCODatasetWriter(path,reader, writerNamesFile)); + break; + case WR_PASCALVOC: + this->pascalvocDatasetWriterPtr = PascalVOCDatasetWriterPtr(new PascalVOCDatasetWriter(path, reader, writerNamesFile)); + break; + case WR_YOLO: + this->yoloDatasetWriterPtr = YoloDatasetWriterPtr(new YoloDatasetWriter(path,reader)); + break; + case WR_OPENIMAGES: + this->openImagesDatasetWriterPtr = OpenImagesDatasetWriterPtr(new OpenImagesDatasetWriter(path, reader, writerNamesFile)); + break; + case WR_OWN: + this->ownDatasetWriterPtr = OwnDatasetWriterPtr( new OwnDatasetWriter(path,reader)); + break; + default: + LOG(WARNING)<< writerImplementation + " is not a valid writer implementation"; + break; + } + } + else{ + LOG(WARNING)< &data) { + data.push_back("own"); + data.push_back("yolo"); + data.push_back("Pascal VOC"); + data.push_back("COCO"); + data.push_back("Open Images"); +} + +WRITER_IMPLEMENTATIONS GenericDatasetWriter::getImplementation(const std::string &writerImplementation) { + if (writerImplementation.compare("Pascal VOC") == 0) { + return WR_PASCALVOC; + } + if (writerImplementation.compare("COCO") == 0) { + return WR_COCO; + } + if (writerImplementation.compare("yolo") == 0) { + return WR_YOLO; + } + if (writerImplementation.compare("Open Images") == 0) { + return WR_OPENIMAGES; + } + if (writerImplementation.compare("own") == 0) { 
+ return WR_OWN; + } +} + +DatasetWriterPtr GenericDatasetWriter::getWriter() { + switch (imp) { + case WR_PASCALVOC: + return this->pascalvocDatasetWriterPtr; + case WR_COCO: + return this->cocoDatasetWriterPtr; + case WR_YOLO: + return this->yoloDatasetWriterPtr; + case WR_OPENIMAGES: + return this->openImagesDatasetWriterPtr; + case WR_OWN: + return this->ownDatasetWriterPtr; + default: + break; + } +} + +std::vector GenericDatasetWriter::getAvailableImplementations() { + std::vector data; + configureAvailableImplementations(data); + return data; +} diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/GenericDatasetWriter.h b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/GenericDatasetWriter.h new file mode 100644 index 00000000..7d5d3f0c --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/GenericDatasetWriter.h @@ -0,0 +1,45 @@ +// +// Created by frivas on 5/02/17. +// + +#ifndef SAMPLERGENERATOR_GENERICDATASETWRITTER_H +#define SAMPLERGENERATOR_GENERICDATASETWRITTER_H + +enum WRITER_IMPLEMENTATIONS{WR_OWN, WR_YOLO, WR_PASCALVOC, WR_COCO, WR_OPENIMAGES}; + +#include +#include "DatasetWriter.h" +#include +#include "YoloDatasetWriter.h" +#include "COCODatasetWriter.h" +#include "PascalVOCDatasetWriter.h" +#include "OpenImagesDatasetWriter.h" + +class GenericDatasetWriter { +public: + GenericDatasetWriter(const std::string& path,DatasetReaderPtr &reader, const std::string &writerImplementation,const std::string& writerNamesFile = std::string()); + DatasetWriterPtr getWriter(); + static std::vector getAvailableImplementations(); + +private: + WRITER_IMPLEMENTATIONS imp; + + + YoloDatasetWriterPtr yoloDatasetWriterPtr; + OwnDatasetWriterPtr ownDatasetWriterPtr; + COCODatasetWriterPtr cocoDatasetWriterPtr; + PascalVOCDatasetWriterPtr pascalvocDatasetWriterPtr; + OpenImagesDatasetWriterPtr openImagesDatasetWriterPtr; + + std::vector availableImplementations; + + static void 
configureAvailableImplementations(std::vector& data); + WRITER_IMPLEMENTATIONS getImplementation(const std::string& writerImplementation); + +}; + + +typedef boost::shared_ptr GenericDatasetWriterPtr; + + +#endif //SAMPLERGENERATOR_GENERICDATASETWRITTER_H diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/OpenImagesDatasetWriter.cpp b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/OpenImagesDatasetWriter.cpp new file mode 100644 index 00000000..145f6564 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/OpenImagesDatasetWriter.cpp @@ -0,0 +1,110 @@ +#include "OpenImagesDatasetWriter.h" +#include "DatasetConverters/ClassTypeMapper.h" +#include +#include +#include +#include +#include +#include +#include + +using namespace rapidjson; + +OpenImagesDatasetWriter::OpenImagesDatasetWriter(const std::string &outPath, DatasetReaderPtr &reader, const std::string& writerNamesFile, bool overWriteclassWithZero):DatasetWriter(outPath,reader),overWriteclassWithZero(overWriteclassWithZero), writerNamesFile(writerNamesFile){ + + this->fullImagesPath=boost::filesystem::absolute(boost::filesystem::path(outPath + "/images")).string(); + this->fullLabelsPath=boost::filesystem::absolute(boost::filesystem::path(outPath + "/labels")).string(); + + + auto boostImages= boost::filesystem::path(fullImagesPath); + if (!boost::filesystem::exists(boostImages)){ + boost::filesystem::create_directories(boostImages); + } + auto boostLabels= boost::filesystem::path(fullLabelsPath); + if (!boost::filesystem::exists(boostLabels)){ + boost::filesystem::create_directories(boostLabels); + } + + LOG(INFO) << "Full images path: " << this->fullImagesPath << std::endl; + LOG(INFO) << "Full labels path: " << this->fullLabelsPath << std::endl; +} + + + +void OpenImagesDatasetWriter::process(bool writeImages, bool useDepth) { + Sample sample; + ClassTypeMapper typeMapper; + + if (!writerNamesFile.empty()) + typeMapper = 
ClassTypeMapper(writerNamesFile); + + + std::string labelFilePath= this->fullLabelsPath + "/" + "instances_labels.csv"; + std::ofstream out(labelFilePath); + out << "ImageID,Source,LabelName,Confidence,XMin,XMax,YMin,YMax,IsOccluded,IsTruncated,IsGroupOf,IsDepiction,IsInside" << std::endl; + + while (reader->getNextSample(sample)){ + auto boundingBoxes = sample.getRectRegions()->getRegions(); + std::string sampleId = sample.getSampleID(); + std::string imageFilePath= this->fullImagesPath + "/" + sampleId + ".jpg"; + + // Write images in case of converting dataset + cv::Mat image; + if (writeImages) { + if (useDepth) { + image = sample.getDepthImage(); + } else { + image = sample.getColorImage(); + } + if (image.empty()) { + skip_count++; + if (skip_count > this->skip_count) { + throw std::runtime_error("Maximum limit for skipping exceeded, either turn off writing images or fix issues in dataset"); + } + LOG(WARNING) << "Image empty, skipping writing image. Skipped " + std::to_string(skip_count) + " of " + std::to_string(this->skip_count); + } else { + cv::imwrite(imageFilePath,image); + } + } + + for (auto it = boundingBoxes.begin(), end=boundingBoxes.end(); it != end; ++it){ + std::string classId = it->classID; + if (writerNamesFile.empty()) { + std::vector::iterator itr; + itr = find(this->outputClasses.begin(), this->outputClasses.end(), it->classID); + if (itr == this->outputClasses.end()) { + this->outputClasses.push_back(it->classID); + classId = this->outputClasses.size() - 1; + } else { + classId = std::distance(this->outputClasses.begin(), itr); + } + } else { + // Try mapping class name if the network classes are different from input dataset + if(typeMapper.mapString(it->classID)) { // Mapping Successfull + classId = typeMapper.getClassString().substr(0, typeMapper.getClassString().find(",")); + } else { // No Mapping Found Discarding Class + LOG(INFO) << "no MAPPING" << "\n"; + std::unordered_map::iterator itr = this->discarded_classes.find(it->classID); 
+ if (itr != this->discarded_classes.end()) { + itr->second++; + } else { + this->discarded_classes.insert(std::make_pair(it->classID, 1)); + } + continue; + } + } + cv::Mat src = cv::imread(sample.getColorImagePath()); + int imgWidth = src.size().width; + int imgHeight = src.size().height; + + double xMin = it->region.x / imgWidth; + double yMin = it->region.y / imgHeight; + double xMax = (it->region.x + it->region.width) / imgWidth; + double yMax = (it->region.y + it->region.height) / imgHeight; + double confidence_score = it->confidence_score; + out << sampleId << "," << "xclick" << "," << classId << "," << confidence_score << "," << xMin << "," << xMax << "," << yMin << "," << yMax << "," << 0 << "," << 0 << "," << 0 << "," << 0 << "," << 0 << std::endl; + } + + } + if (!out.good()) throw std::runtime_error ("Can't write to the file!"); +} diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/OpenImagesDatasetWriter.h b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/OpenImagesDatasetWriter.h new file mode 100644 index 00000000..3a30fffe --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/OpenImagesDatasetWriter.h @@ -0,0 +1,26 @@ +#ifndef SAMPLERGENERATOR_OPENIMAGESDATASETCONVERTER_H +#define SAMPLERGENERATOR_OPENIMAGESDATASETCONVERTER_H + +#include +#include "DatasetWriter.h" +#include +#include +#include + +class OpenImagesDatasetWriter: public DatasetWriter { +public: + OpenImagesDatasetWriter(const std::string& outPath, DatasetReaderPtr& reader, const std::string& writerNamesFile, bool overWriteclassWithZero=false); + void process(bool writeImages = false, bool useDepth = false); + +private: + std::string fullImagesPath; + std::string fullLabelsPath; + std::string fullNamesPath; + bool overWriteclassWithZero; + std::string writerNamesFile; +}; + +typedef boost::shared_ptr OpenImagesDatasetWriterPtr; + + +#endif diff --git 
a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/OwnDatasetWriter.cpp b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/OwnDatasetWriter.cpp new file mode 100644 index 00000000..8a30609c --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/OwnDatasetWriter.cpp @@ -0,0 +1,17 @@ +// +// Created by frivas on 5/02/17. +// + +#include "OwnDatasetWriter.h" + +OwnDatasetWriter::OwnDatasetWriter(const std::string &outPath, DatasetReaderPtr &reader) : DatasetWriter(outPath, + reader) { + +} + +void OwnDatasetWriter::process(bool writeImages, bool useDepth) { + Sample sample; + while (reader->getNextSample(sample)){ + sample.save(outPath); + } +} diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/OwnDatasetWriter.h b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/OwnDatasetWriter.h new file mode 100644 index 00000000..f3314870 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/OwnDatasetWriter.h @@ -0,0 +1,25 @@ +// +// Created by frivas on 5/02/17. 
+// + +#ifndef SAMPLERGENERATOR_OWNDATASETWRITER_H +#define SAMPLERGENERATOR_OWNDATASETWRITER_H + +#include +#include +#include "DatasetWriter.h" + +class OwnDatasetWriter: public DatasetWriter { +public: + OwnDatasetWriter(const std::string &outPath, DatasetReaderPtr &reader); + void process(bool writeImages = false, bool useDepth = false); + +private: + +}; + +typedef boost::shared_ptr OwnDatasetWriterPtr; + + + +#endif //SAMPLERGENERATOR_OWNDATASETWRITER_H diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/PascalVOCDatasetWriter.cpp b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/PascalVOCDatasetWriter.cpp new file mode 100644 index 00000000..631b8147 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/PascalVOCDatasetWriter.cpp @@ -0,0 +1,171 @@ +#include "PascalVOCDatasetWriter.h" +#include "DatasetConverters/ClassTypeMapper.h" +#include +#include +#include + + +PascalVOCDatasetWriter::PascalVOCDatasetWriter(const std::string &outPath, DatasetReaderPtr &reader,const std::string& writerNamesFile, bool overWriteclassWithZero):DatasetWriter(outPath,reader),writerNamesFile(writerNamesFile),overWriteclassWithZero(overWriteclassWithZero){ + + this->fullImagesPath=boost::filesystem::absolute(boost::filesystem::path(outPath + "/VOCDevKit/VOC20xx/JPEGImages")).string(); + this->fullLabelsPath=boost::filesystem::absolute(boost::filesystem::path(outPath + "/VOCDevKit/VOC20xx/Annotations")).string(); + this->fullNamesPath=boost::filesystem::absolute(boost::filesystem::path(outPath + "/voc.names")).string(); + + auto boostImages= boost::filesystem::path(fullImagesPath); + if (!boost::filesystem::exists(boostImages)){ + boost::filesystem::create_directories(boostImages); + } + auto boostLabels= boost::filesystem::path(fullLabelsPath); + if (!boost::filesystem::exists(boostLabels)){ + boost::filesystem::create_directories(boostLabels); + } + + + LOG(INFO) << "FullImagesPath: " << this->fullImagesPath << 
std::endl; + LOG(INFO) << "FullLabelsPath: " << this->fullImagesPath << std::endl; + +} + +void PascalVOCDatasetWriter::process(bool writeImages, bool useDepth) { + + Sample sample; + + ClassTypeMapper typeMapper; + + if (!writerNamesFile.empty()) + typeMapper = ClassTypeMapper(writerNamesFile); + + int count = 0; + int skip_count = 0; + + while (reader->getNextSample(sample)){ + count++; + if (count == 5000) + break; + + auto boundingBoxes = sample.getRectRegions()->getRegions(); + std::string id = sample.getSampleID(); + + std::string imageFilePath= this->fullImagesPath + "/" + id + ".jpg"; + std::string labelFilePath= this->fullLabelsPath + "/" + id + ".xml"; + + cv::Mat image; + if (writeImages) { + if (useDepth) { + image= sample.getDepthImage(); + } else { + image= sample.getColorImage(); + } + + if (image.empty()) { + skip_count++; + if (skip_count > this->skip_count) { + throw std::runtime_error("Maximum limit for skipping exceeded, either turn off writing images or fix issues in dataset"); + } + LOG(WARNING) << "Image empty, skipping writing image. 
Skipped " + std::to_string(skip_count) + " of " + std::to_string(this->skip_count); + + } else { + cv::imwrite(imageFilePath,image); + + } + + } + + boost::property_tree::ptree tree; + + tree.put("annotation.filename", id + ".jpg"); + tree.put("annotation.folder", "VOC20xx"); + + + for (auto it = boundingBoxes.begin(), end=boundingBoxes.end(); it != end; ++it){ + double x = it->region.x; + double y = it->region.y; + double w = it->region.width; + double h = it->region.height; + + double confidence_score = it->confidence_score; + + std::string className; + + if (overWriteclassWithZero) + className = "all"; + else { + if (writerNamesFile.empty()) { + if (find(this->outputClasses.begin(), this->outputClasses.end(), it->classID) == this->outputClasses.end()) + this->outputClasses.push_back(it->classID); + } else { + if(typeMapper.mapString(it->classID)) { // Mapping Successfull + className = typeMapper.getClassString(); + + if (it->classID != className) + this->mapped_classes[it->classID] = className; + + } else { // No Mapping Found Discarding Class + std::unordered_map::iterator itr = this->discarded_classes.find(it->classID); + if (itr != this->discarded_classes.end()) { + itr->second++; + } else { + this->discarded_classes.insert(std::make_pair(it->classID, 1)); + } + continue; + } + + } + + } + + boost::property_tree::ptree & node = tree.add("annotation.object", ""); + + node.put("name", className); + node.put("bndbox.xmin", x); + node.put("bndbox.xmax", x + w); + node.put("bndbox.ymin", y); + node.put("bndbox.ymax", y + h); + node.put("score", confidence_score); + + } + + tree.add("annotation.size.depth", 3); + tree.add("annotation.size.height", image.size().height); + tree.add("annotation.size.width", image.size().width); + + boost::property_tree::write_xml(labelFilePath, tree); + + + } + + if (!writerNamesFile.empty()) { + + std::ofstream writerClassfile; + writerClassfile.open (this->fullNamesPath); + + std::vector::iterator it; + for (it = 
this->outputClasses.begin(); it != this->outputClasses.end(); it++) { + writerClassfile << *it << "\n"; + } + writerClassfile.close(); + } + + + LOG(INFO) << "Successfully Converted given Dataset to Pascal VOC dataset\n"; + + if (!writerNamesFile.empty()) { + + LOG(INFO) << "\nPrinting Mapping Info\n"; + LOG(INFO) << "**********************\n"; + + for (std::unordered_map::iterator it=this->mapped_classes.begin(); it!=this->mapped_classes.end(); ++it) + LOG(INFO) << it->first << " => " << it->second << '\n'; + + LOG(INFO) << "**********************\n"; + + LOG(INFO) << "\nPrinting Discarded Classes from Original Dataset\n"; + LOG(INFO) << "**********************\n"; + + for (std::unordered_map::iterator it=this->discarded_classes.begin(); it!=this->discarded_classes.end(); ++it) + LOG(INFO) << it->first << " : " << it->second << '\n'; + LOG(INFO) << "**********************\n"; + + } + +} diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/PascalVOCDatasetWriter.h b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/PascalVOCDatasetWriter.h new file mode 100644 index 00000000..e1d47d7c --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/PascalVOCDatasetWriter.h @@ -0,0 +1,28 @@ +#ifndef SAMPLERGENERATOR_PASCALVOCDATASETCONVERTER_H +#define SAMPLERGENERATOR_PASCALVOCDATASETCONVERTER_H + +#include +#include "DatasetWriter.h" +#include +#include +#include +#include +#include + +class PascalVOCDatasetWriter: public DatasetWriter { +public: + PascalVOCDatasetWriter(const std::string& outPath, DatasetReaderPtr& reader, const std::string& writerNamesFile, bool overWriteclassWithZero=false); + void process(bool writeImages = false, bool useDepth = false); + +private: + std::string fullImagesPath; + std::string fullLabelsPath; + std::string fullNamesPath; + bool overWriteclassWithZero; + std::string writerNamesFile; +}; + +typedef boost::shared_ptr PascalVOCDatasetWriterPtr; + + +#endif 
//SAMPLERGENERATOR_PASCALVOCDATASETCONVERTER_H diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/YoloDatasetWriter.cpp b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/YoloDatasetWriter.cpp new file mode 100644 index 00000000..7aa628a2 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/YoloDatasetWriter.cpp @@ -0,0 +1,109 @@ +// +// Created by frivas on 22/01/17. +// + +#include "YoloDatasetWriter.h" +#include "DatasetConverters/ClassTypeOwn.h" +#include +#include +#include +#include +#include + + +YoloDatasetWriter::YoloDatasetWriter(const std::string &outPath, DatasetReaderPtr &reader, bool overWriteclassWithZero):DatasetWriter(outPath,reader),overWriteclassWithZero(overWriteclassWithZero){ + + this->fullImagesPath=boost::filesystem::absolute(boost::filesystem::path(outPath + "/JPEGImages")).string(); + this->fullLabelsPath=boost::filesystem::absolute(boost::filesystem::path(outPath + "/labels")).string(); + + + auto boostImages= boost::filesystem::path(fullImagesPath); + if (!boost::filesystem::exists(boostImages)){ + boost::filesystem::create_directories(boostImages); + } + auto boostLabels= boost::filesystem::path(fullLabelsPath); + if (!boost::filesystem::exists(boostLabels)){ + boost::filesystem::create_directories(boostLabels); + } + + + LOG(INFO) << "FullImagesPath: " << this->fullImagesPath << std::endl; + LOG(INFO) << "FullLabelsPath: " << this->fullImagesPath << std::endl; + +} + +void YoloDatasetWriter::process(bool writeImages, bool useDepth) { + Sample sample; + int id=0; + unsigned int skip_count = 0; + + + std::ofstream sampleFile(this->outPath + "/sample.txt"); + + while (reader->getNextSample(sample)){ + auto boundingBoxes = sample.getRectRegions()->getRegions(); + std::stringstream ssID ; + ssID << std::setfill('0') << std::setw(5) << id; + std::string imageFilePath= this->fullImagesPath + "/" + ssID.str() + ".jpg"; + sampleFile << imageFilePath << std::endl; + + 
std::string labelFilePath= this->fullLabelsPath + "/" + ssID.str() + ".txt"; + std::ofstream out(labelFilePath); + + cv::Mat image; + if (writeImages) { + if (useDepth) { + image= sample.getDepthImage(); + } else { + image= sample.getColorImage(); + } + + if (image.empty()) { + skip_count++; + if (skip_count > this->skip_count) { + throw std::runtime_error("Maximum limit for skipping exceeded, either turn off writing images or fix issues in dataset"); + } + LOG(WARNING) << "Image empty, skipping writing image. Skipped " + std::to_string(skip_count) + " of " + std::to_string(this->skip_count); + + } else { + cv::imwrite(imageFilePath,image); + + } + + } + + + for (auto it = boundingBoxes.begin(), end=boundingBoxes.end(); it != end; ++it){ + double x = it->region.x; + double y = it->region.y; + double w = it->region.width; + double h = it->region.height; + + double confidence_score = it->confidence_score; + + if ((w + x) > image.size().width){ + w = image.size().width - 1 - x; + } + if ((h + y) > image.size().height){ + h = image.size().height - 1 - y; + } + + int classId; + if (overWriteclassWithZero) + classId=0; + else { + ClassTypeOwn typeConverter(it->classID); + classId = typeConverter.getClassID(); + } + std::stringstream boundSrt; + boundSrt << classId <<" " << (it->region.x + w/2.0) / (double)image.size().width << " " << (it->region.y + h/2.0) / (double)image.size().height << " " << w / image.size().width << " " << h / image.size().height; +// std::cout << boundSrt.str() << std::endl; + out << boundSrt.str() << std::endl; + } + out.close(); + id++; + + } + sampleFile.close(); + +} diff --git a/DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/YoloDatasetWriter.h b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/YoloDatasetWriter.h new file mode 100644 index 00000000..8fb048cb --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/DatasetConverters/writers/YoloDatasetWriter.h @@ -0,0 +1,25 @@ +// +// Created by frivas on 
22/01/17. +// + +#ifndef SAMPLERGENERATOR_YOLODATASETCONVERTER_H +#define SAMPLERGENERATOR_YOLODATASETCONVERTER_H + +#include +#include "DatasetWriter.h" + +class YoloDatasetWriter: public DatasetWriter { +public: + YoloDatasetWriter(const std::string& outPath, DatasetReaderPtr& reader, bool overWriteclassWithZero=true); + void process(bool writeImages = false, bool useDepth = false); + +private: + std::string fullImagesPath; + std::string fullLabelsPath; + bool overWriteclassWithZero; +}; + +typedef boost::shared_ptr YoloDatasetWriterPtr; + + +#endif //SAMPLERGENERATOR_YOLODATASETCONVERTER_H diff --git a/DetectionMetrics/DetectionMetricsLib/Detectors/CMakeLists.txt b/DetectionMetrics/DetectionMetricsLib/Detectors/CMakeLists.txt new file mode 100644 index 00000000..0d64916d --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Detectors/CMakeLists.txt @@ -0,0 +1,21 @@ +SET(Detectors_SOURCE_FILES + Detector.h +) + +ADD_LIBRARY(DetectionMetrics_Detectors OBJECT ${Detectors_SOURCE_FILES}) + +TARGET_INCLUDE_DIRECTORIES ( DetectionMetrics_Detectors PUBLIC + ${OpenCV_INCLUDE_DIRS} + ${Boost_INCLUDE_DIR} + ${depthLib_INCLUDE_DIRS} + ${comm_INCLUDE_DIRS} + ${ros_INCLUDE_DIRS} + ${GLOG_INCLUDE_DIRS} + ${utils_INCLUDE_DIRS} + ${INTERFACES_CPP_DIR} + ${jderobottypes_INCLUDE_DIRS} + ${CMAKE_CURRENT_LIST_DIR} + ${QT_INCLUDE_DIRS} + ${DetectionMetrics_INCLUDE_DIR} + +) diff --git a/DetectionMetrics/DetectionMetricsLib/Detectors/Detector.h b/DetectionMetrics/DetectionMetricsLib/Detectors/Detector.h new file mode 100644 index 00000000..825401e1 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Detectors/Detector.h @@ -0,0 +1,17 @@ +// +// Created by frivas on 28/01/17. 
+// + +#ifndef SAMPLERGENERATOR_DETECTOR_H +#define SAMPLERGENERATOR_DETECTOR_H + +#include + +class Detector{ +public: + Detector(){}; + + virtual Sample inferImage(const cv::Mat& image)=0; +}; + +#endif //SAMPLERGENERATOR_DETECTOR_H diff --git a/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/CMakeLists.txt b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/CMakeLists.txt new file mode 100644 index 00000000..05a90349 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/CMakeLists.txt @@ -0,0 +1,37 @@ +SET(FrameworkEvaluator_SOURCE_FILES + ClassStatistics + FrameworkInferencer + GlobalStats + DetectionsEvaluator + MassInferencer + StatsWriter + GenericInferencer + TensorFlowInferencer + KerasInferencer + Labelling + DarknetInferencer + PyTorchInferencer +) + +IF(ENABLE_DNN_CAFFE) + SET( FrameworkEvaluator_SOURCE_FILES ${FrameworkEvaluator_SOURCE_FILES} CaffeInferencer ) +ENDIF() + +ADD_LIBRARY(DetectionMetrics_FrameworkEvaluator OBJECT ${FrameworkEvaluator_SOURCE_FILES}) + + +message("OPENCV DIRS:") +message(${OpenCV_INCLUDE_DIRS}) +set(OpenCV_INCLUDE_DIRS /usr/local/include/opencv4) +message("OPENCV DIRS:") +message(${OpenCV_INCLUDE_DIRS}) + +TARGET_INCLUDE_DIRECTORIES ( DetectionMetrics_FrameworkEvaluator PUBLIC + ${OpenCV_INCLUDE_DIRS} + ${PYTHON_INCLUDE_DIRS} + ${Boost_INCLUDE_DIR} + ${GLOG_INCLUDE_DIRS} + ${CMAKE_CURRENT_LIST_DIR} + ${DetectionMetrics_INCLUDE_DIR} + ${QT_INCLUDE_DIRS} +) diff --git a/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/CaffeInferencer.cpp b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/CaffeInferencer.cpp new file mode 100644 index 00000000..d3835887 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/CaffeInferencer.cpp @@ -0,0 +1,212 @@ +#include +#include +#include "CaffeInferencer.h" +#include + +CaffeInferencer::CaffeInferencer(const std::string &netConfig, const std::string &netWeights,const std::string& classNamesFile, std::map* 
inferencerParamsMap): netConfig(netConfig),netWeights(netWeights) { + this->classNamesFile=classNamesFile; + this->netConfig=netConfig; + this->netWeights=netWeights; + + //this->confThreshold = std::stof(inferencerParamsMap->at("conf_thresh")); + this->scaling_factor = std::stof(inferencerParamsMap->at("scaling_factor")); + this->mean_sub = cv::Scalar(std::stof(inferencerParamsMap->at("mean_sub_blue")), std::stof(inferencerParamsMap->at("mean_sub_green")), std::stof(inferencerParamsMap->at("mean_sub_red"))); + this->swapRB = inferencerParamsMap->at("useRGB") == "true"; + this->inpWidth = std::stof(inferencerParamsMap->at("inpWidth")); + this->inpHeight = std::stof(inferencerParamsMap->at("inpHeight")); + + // Load a model. + //CV_Assert(parser.has("model")); + this->net = cv::dnn::readNetFromCaffe(this->netConfig, this->netWeights); + //net.setPreferableBackend(parser.get("backend")); + //net.setPreferableTarget(parser.get("target")); + net.setPreferableBackend(0); + net.setPreferableTarget(0); + + + +} + +Sample CaffeInferencer::detectImp(const cv::Mat &image, double confidence_threshold) { + + cv::Mat blob; + + cv::Mat rgbImage = image; + this->detections.clear(); + + + cv::Size inpSize(this->inpWidth, this->inpHeight); + blob = cv::dnn::blobFromImage(rgbImage, this->scaling_factor, inpSize, this->mean_sub, false, false); + //blobFromImage(frame, blob, scale, inpSize, mean, swapRB, false); + // Run a model. 
+ + + this->net.setInput(blob); + + + if (this->net.getLayer(0)->outputNameToIndex("im_info") != -1) // Faster-RCNN or R-FCN + { + // For faster RCNN, same size images are being passed + //cv::resize(rgbImage, rgbImage, inpSize); + cv::Mat imInfo = (cv::Mat_(1, 3) << inpSize.height, inpSize.width, 1.6f); + this->net.setInput(imInfo, "im_info"); + } + std::vector outs; + + + this->net.forward(outs, getOutputsNames()); + + + postprocess(outs, rgbImage, confidence_threshold); + + + Sample sample; + RectRegionsPtr regions(new RectRegions()); + ClassTypeGeneric typeConverter(classNamesFile); + + for (auto it = detections.begin(), end=detections.end(); it !=end; ++it){ + + typeConverter.setId(it->classId); + regions->add(it->boundingBox,typeConverter.getClassString(), it->probability); + LOG(INFO)<< it->boundingBox.x << " " << it->boundingBox.y << " " << it->boundingBox.height << " " << it->boundingBox.width << std::endl; + LOG(INFO)<< typeConverter.getClassString() << ": " << it->probability << std::endl; + } + sample.setColorImage(image); + sample.setRectRegions(regions); + return sample; +} + +void CaffeInferencer::postprocess(const std::vector& outs, cv::Mat & image, double confidence_threshold) +{ + static std::vector outLayers = this->net.getUnconnectedOutLayers(); + static std::string outLayerType = this->net.getLayer(outLayers[0])->type; + + if (this->net.getLayer(0)->outputNameToIndex("im_info") != -1) // Faster-RCNN or R-FCN + { + // Network produces output blob with a shape 1x1xNx7 where N is a number of + // detections and an every detection is a vector of values + // [batchId, classId, confidence, left, top, right, bottom] + //cv::CV_Assert(outs.size() == 1); + assert(outs.size() == 1); + float* data = (float*)outs[0].data; + int count = 0; + for (size_t i = 0; i < outs[0].total(); i += 7) + { + float confidence = data[i + 2]; + if (confidence >= confidence_threshold) + { + detections.push_back(detection()); + detections[count].classId = (int)(data[i + 1]) - 
1; + detections[count].probability = confidence; + detections[count].boundingBox.x = (((int)data[i + 3] * image.cols) / inpWidth); + detections[count].boundingBox.y = (((int)data[i + 4] * image.rows) / inpHeight); + + detections[count].boundingBox.width = (((int)data[i + 5] * image.cols) / inpWidth) - detections[count].boundingBox.x; + + detections[count].boundingBox.height = (((int)data[i + 6] * image.rows) / inpHeight) - detections[count].boundingBox.y; + + count++; + } + + } + + } + else if (outLayerType == "DetectionOutput") + { + // Network produces output blob with a shape 1x1xNx7 where N is a number of + // detections and an every detection is a vector of values + // [batchId, classId, confidence, left, top, right, bottom] + assert(outs.size() == 1); + float* data = (float*)outs[0].data; + int count = 0; + for (size_t i = 0; i < outs[0].total(); i += 7) + { + float confidence = data[i + 2]; + if (confidence >= confidence_threshold) + { + detections.push_back(detection()); + detections[count].classId = (int)(data[i + 1]) - 1; + detections[count].probability = confidence; + LOG(INFO) << data[i + 3] << '\n'; + detections[count].boundingBox.x = (int)(data[i + 3] * image.cols); + detections[count].boundingBox.y = (int)(data[i + 4] * image.rows); + + detections[count].boundingBox.width = (int)(data[i + 5] * image.cols )- detections[count].boundingBox.x; + + detections[count].boundingBox.height = (int)(data[i + 6] * image.rows ) - detections[count].boundingBox.y; + + count++; + + } + } + } + else if (outLayerType == "Region") + { + std::vector classIds; + std::vector confidences; + std::vector boxes; + for (size_t i = 0; i < outs.size(); ++i) + { + // Network produces output blob with a shape NxC where N is a number of + // detected objects and C is a number of classes + 4 where the first 4 + // numbers are [center_x, center_y, width, height] + float* data = (float*)outs[i].data; + for (int j = 0; j < outs[i].rows; ++j, data += outs[i].cols) + { + cv::Mat scores = 
outs[i].row(j).colRange(5, outs[i].cols); + cv::Point classIdPoint; + double confidence; + cv::minMaxLoc(scores, 0, &confidence, 0, &classIdPoint); + if (confidence >= confidence_threshold) + { + int centerX = (int)(data[0] * image.cols); + int centerY = (int)(data[1] * image.rows); + int width = (int)(data[2] * image.cols); + int height = (int)(data[3] * image.rows); + int left = centerX - width / 2; + int top = centerY - height / 2; + + classIds.push_back(classIdPoint.x); + confidences.push_back((float)confidence); + boxes.push_back(cv::Rect(left, top, width, height)); + } + } + } + std::vector indices; + cv::dnn::NMSBoxes(boxes, confidences, confidence_threshold, 0.4f, indices); + for (size_t i = 0; i < indices.size(); ++i) + { + int idx = indices[i]; + cv::Rect box = boxes[idx]; + + detections.push_back(detection()); + detections[i].classId = classIds[idx]; + detections[i].probability = confidences[idx]; + detections[i].boundingBox.x = box.x; + detections[i].boundingBox.y = box.y; + + detections[i].boundingBox.width = box.width; + + detections[i].boundingBox.height = box.height; + + //drawPred(classIds[idx], confidences[idx], box.x, box.y, + // box.x + box.width, box.y + box.height, frame); + } + } + else + throw std::invalid_argument("Unknown output layer type: " + outLayerType); +} + +std::vector CaffeInferencer::getOutputsNames() +{ + + if (this->names.empty()) + { + std::vector outLayers = this->net.getUnconnectedOutLayers(); + std::vector layersNames = this->net.getLayerNames(); + this->names.resize(outLayers.size()); + for (size_t i = 0; i < outLayers.size(); ++i) + this->names[i] = layersNames[outLayers[i] - 1]; + } + return this->names; +} diff --git a/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/CaffeInferencer.h b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/CaffeInferencer.h new file mode 100644 index 00000000..f2533a89 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/CaffeInferencer.h @@ -0,0 +1,45 @@ 
+#ifndef SAMPLERGENERATOR_CAFFEINFERENCER_H +#define SAMPLERGENERATOR_CAFFEINFERENCER_H + + +#include "FrameworkInferencer.h" +#include +#include +#include + +#include +#include +#include + +class CaffeInferencer: public FrameworkInferencer { +public: + CaffeInferencer(const std::string& netConfig, const std::string& netWeights, const std::string& classNamesFile, std::map* inferencerParamsMap); + Sample detectImp(const cv::Mat& image, double confidence_threshold); + std::vector getOutputsNames(); + void postprocess(const std::vector& outs, cv::Mat& image, double confidence_threshold); + + +private: + std::string netConfig; + std::string netWeights; + struct detection { + cv::Rect boundingBox; + float probability; + int classId; + }; + + std::vector detections; + std::vector names; + double scaling_factor; + cv::Scalar mean_sub; + cv::dnn::Net net; + int inpWidth; + int inpHeight; + bool swapRB; + +}; + + +typedef boost::shared_ptr CaffeInferencerPtr; + +#endif //SAMPLERGENERATOR_CAFFEINFERENCER_H diff --git a/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/ClassStatistics.cpp b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/ClassStatistics.cpp new file mode 100644 index 00000000..64d72ee2 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/ClassStatistics.cpp @@ -0,0 +1,117 @@ +// +// Created by frivas on 1/02/17. 
+// + +#include +#include "ClassStatistics.h" +#include +#include + +ClassStatistics::ClassStatistics(const std::string& classID):classID(classID),nSamples(0),truePositives(0),falsePositives(0), falseNegatives(0), trueNegatives(0){ + +} +ClassStatistics::ClassStatistics():classID(""),nSamples(0),truePositives(0),falsePositives(0), falseNegatives(0), trueNegatives(0){ + +} + +double ClassStatistics::divide(double x, double y) { + if (y == 0) + y = std::numeric_limits::min(); + return x/y; +} + +double ClassStatistics::getMeanIOU() const{ + return std::accumulate( this->iou.begin(), this->iou.end(), 0.0)/this->iou.size(); +} + +double ClassStatistics::getAveragePrecision(std::vector recallThrs) const { + + std::vector pt_rc = getPrecisionForDiffRecallThrs(recallThrs); + double precsion = 0; + int precsionCount = 0; + for (auto it = pt_rc.begin(); it != pt_rc.end(); it++) { + precsion += *it; + precsionCount++; + + } + + return precsion/precsionCount; + +} + + +std::vector ClassStatistics::getPrecisionForDiffRecallThrs(std::vector recallThrs) const{ + std::vector precisionForDiffRecallThrs(recallThrs.size()); + + std::vector precisionArrayOp = getPrecisionArrayOp(); + std::vector recallArray = getRecallArray(); + + bool isEmpty = precisionArrayOp.size() == 0; + + std::vector::iterator it; + for (int i = 0; i < recallThrs.size(); i++) { + + if (!isEmpty) { + it = std::lower_bound(recallArray.begin(), recallArray.end(), recallThrs[i]); + int index; + if (it != recallArray.end()) { + index = std::distance(recallArray.begin(), it); + precisionForDiffRecallThrs[i] = precisionArrayOp[index]; + } else { + precisionForDiffRecallThrs[i] = 0; + } + } else { + precisionForDiffRecallThrs.push_back(0); + } + } + + return precisionForDiffRecallThrs; +} + +std::vector ClassStatistics::getPrecisionArrayOp() const{ + std::vector precision_array = getPrecisionArray(); + if (precision_array.size() == 0 || precision_array.size() == 1) { + return precision_array; + } + + for (auto it = 
++(precision_array.rbegin()); it != precision_array.rend(); it++) { + if (*it < *std::prev(it)) { + *it = *std::prev(it); + } + } + + return precision_array; +} + +std::vector ClassStatistics::getPrecisionArray() const{ + std::vector cumulative_truePositives(this->truePositives.size()); + std::partial_sum(this->truePositives.begin(), this->truePositives.end(), cumulative_truePositives.begin()); + std::vector cumulative_falsePositives(this->falsePositives.size()); + std::partial_sum(this->falsePositives.begin(), this->falsePositives.end(), cumulative_falsePositives.begin()); + std::vector cum_sum(this->truePositives.size()); + std::transform (cumulative_truePositives.begin(), cumulative_truePositives.end(), cumulative_falsePositives.begin(), cum_sum.begin(), std::plus()); + std::vector result(this->truePositives.size()); + std::transform (cumulative_truePositives.begin(), cumulative_truePositives.end(), cum_sum.begin(), result.begin(), divide); + return result; +} + +double ClassStatistics::getRecall() const{ + std::vector recall = getRecallArray(); + + return recall.empty() ? 0 : recall[recall.size() - 1]; +} + +std::vector ClassStatistics::getRecallArray() const{ + std::vector cumulative_truePositives(this->truePositives.size()); + std::partial_sum(this->truePositives.begin(), this->truePositives.end(), cumulative_truePositives.begin()); + + std::vector result(this->truePositives.size()); + int i =0; + for (auto it = cumulative_truePositives.begin(); it != cumulative_truePositives.end(); it++) { + + result[i] = *it / (double)this->numGroundTruthsReg; + i++; + } + + return result; +} diff --git a/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/ClassStatistics.h b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/ClassStatistics.h new file mode 100644 index 00000000..9c03bfe9 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/ClassStatistics.h @@ -0,0 +1,49 @@ +// +// Created by frivas on 1/02/17. 
+// + +#ifndef SAMPLERGENERATOR_STATISTICS_H +#define SAMPLERGENERATOR_STATISTICS_H + + +#include +#include +#include +#include +#include +#include +#include + +struct ClassStatistics { + ClassStatistics(); + ClassStatistics(const std::string& classID); + double getMeanIOU() const; + double getAveragePrecision(std::vector recallThrs) const; + std::vector getPrecisionArray() const; + std::vector getPrecisionArrayOp() const; + std::vector getPrecisionForDiffRecallThrs(std::vector recallThrs) const; + std::vector getRecallArray() const; + double getRecall() const; + static double divide(double x, double y); + //void printStats() const; + + + std::string classID; + int numGroundTruthsReg = 0; + int numGroundTruthsIg = 0; + std::vector iou; + std::vector truePositives; + std::vector falsePositives; + std::multiset confScores; + // 2 vectors in same order on with conf score and other with true positive 1/0 + int nSamples; + //int truePositives; + //int falsePositives; + int falseNegatives; + int trueNegatives; //???? evaluar muestra negativa?? + + +}; + + +#endif //SAMPLERGENERATOR_STATISTICS_H diff --git a/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/DarknetInferencer.cpp b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/DarknetInferencer.cpp new file mode 100644 index 00000000..cd8783bb --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/DarknetInferencer.cpp @@ -0,0 +1,124 @@ +// +// Created by frivas on 31/01/17. 
+// +#include +#include +#include +#include +#include +// OpenCV +#include +#include +#include + +#include +#include +#include "DarknetInferencer.h" +#include + + +using namespace std; +using namespace cv; +using namespace dnn; + + +DarknetInferencer::DarknetInferencer(const std::string &netConfig, const std::string &netWeights,const std::string& classNamesFile): netConfig(netConfig),netWeights(netWeights) { + this->classNamesFile=classNamesFile; + this->netConfig=netConfig; + this->netWeights=netWeights; + + std::vector classes = {}; + ifstream ifs(this->classNamesFile.c_str()); + string line; + while (getline(ifs, line)) classes.push_back(line); + this->classes=classes; + + // Load the network + Net net = readNetFromDarknet(this->netConfig, this->netWeights); + + // CPU + //net.setPreferableBackend(DNN_BACKEND_OPENCV); + //net.setPreferableTarget(DNN_TARGET_CPU); + + // GPU + net.setPreferableBackend(DNN_BACKEND_CUDA); + net.setPreferableTarget(DNN_TARGET_CUDA); + + this->net=net; + this->outNames=net.getUnconnectedOutLayersNames(); + this->nmsThreshold = 0.4; +} + +Sample DarknetInferencer::detectImp(const cv::Mat &image, double confThreshold) { + //printf("OpenCV: %s", cv::getBuildInformation().c_str()); + //int inpWidth = (image.cols/32) * 32; + //int inpHeight = (image.rows/32) * 32; + + int inpWidth = 416; + int inpHeight = 416; + + Mat rgbImage; + resize(image, rgbImage, Size(inpWidth, inpHeight), 1, 1); + + Mat blob; + blobFromImage(rgbImage, blob, 1.0, Size(inpWidth, inpHeight), Scalar(), true, false, CV_8U); + net.setInput(blob, "", 0.00392, Scalar()); + cout << "WIDTH: " << inpWidth << endl; + cout << "HEIGHT: " << inpHeight << endl; + // END preprocess + vector outs; + cout << "Starting inference" << endl; + auto start = std::chrono::system_clock::now(); + net.forward(outs, outNames); + cout << "END INFERENCE" < elapsed_seconds = end-start; + cout << "Inference Time: " << elapsed_seconds.count() << " seconds" << endl; + // postprocess + + vector 
classIds; + vector confidences; + vector boxes; + + for (size_t i = 0; i < outs.size(); i++) + { + float* data = (float*)outs[i].data; + for (int j = 0; j < outs[i].rows; ++j, data += outs[i].cols) + { + Mat scores = outs[i].row(j).colRange(5, outs[i].cols); + Point classIdPoint; + double confidence; + minMaxLoc(scores, 0, &confidence, 0, &classIdPoint); + if (confidence > confThreshold) + { + int centerX = (int)(data[0] * image.cols); + int centerY = (int)(data[1] * image.rows); + int width = (int)(data[2] * image.cols); + int height = (int)(data[3] * image.rows); + int left = centerX - width / 2; + int top = centerY - height / 2; + + classIds.push_back(classIdPoint.x); + confidences.push_back((float)confidence); + boxes.push_back(Rect(left, top, width, height)); + } + } + } + cout << "Num Detections: " << classIds.size() << endl; + vector indices; + NMSBoxes(boxes, confidences, confThreshold, nmsThreshold, indices); + + Sample sample; + RectRegionsPtr regions(new RectRegions()); + for (size_t i = 0; i < indices.size(); i++) { + int idx = indices[i]; + Rect box = boxes[idx]; + string label = this->classes[classIds[idx]]; + regions->add(box, label, confidences[idx]); + LOG(INFO)<< label << ": " << confidences[idx] << std::endl; + } + + sample.setColorImage(image); + sample.setRectRegions(regions); + return sample; +} diff --git a/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/DarknetInferencer.h b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/DarknetInferencer.h new file mode 100644 index 00000000..492ddc01 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/DarknetInferencer.h @@ -0,0 +1,34 @@ +// +// Created by frivas on 31/01/17. 
+// + +#ifndef SAMPLERGENERATOR_DARKNETEVALUATOR_H +#define SAMPLERGENERATOR_DARKNETEVALUATOR_H + +#include +#include "FrameworkInferencer.h" +#include +#include +#include + +using namespace std; +using namespace cv; +using namespace dnn; + +class DarknetInferencer: public FrameworkInferencer { +public: + DarknetInferencer(const string& netConfig, const string& netWeights, const string& classNamesFile); + Sample detectImp(const Mat& image, double confidence_threshold); + +private: + string netConfig; + string netWeights; + vector classes; + Net net; + vector outNames; + float nmsThreshold; +}; + +typedef boost::shared_ptr DarknetInferencerPtr; + +#endif //SAMPLERGENERATOR_DARKNETEVALUATOR_H diff --git a/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/DetectionsEvaluator.cpp b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/DetectionsEvaluator.cpp new file mode 100644 index 00000000..17bcb50c --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/DetectionsEvaluator.cpp @@ -0,0 +1,409 @@ +// +// Created by frivas on 1/02/17. +// + +#include +#include +#include "DetectionsEvaluator.h" + + +DetectionsEvaluator::DetectionsEvaluator(DatasetReaderPtr gt, DatasetReaderPtr detections,bool debug): + gt(gt),detections(detections),debug(debug) { + thIOU = 0.5; + + for (int i = 0; i < 101; i++) { + this->recallThrs.push_back(0.01*i); + } // Initializing Recall Thersholds with 101 values starting + // from 0 with a difference of 0.01. 
+ +} + +std::map DetectionsEvaluator::getClassWiseAP() { + return this->classWiseMeanAP; +} + +std::map DetectionsEvaluator::getClassWiseAR() { + return this->classWiseMeanAR; +} + +double DetectionsEvaluator::getOverallmAP() { + return this->ApDiffIou.sum()/10; +} + +double DetectionsEvaluator::getOverallmAR() { + return this->ArDiffIou.sum()/10; +} + +double DetectionsEvaluator::getEvaluationTime() { + return this->timeEvaluation; +} + +double DetectionsEvaluator::getAccumulationTime() { + return this->timeAccumulation; +} + +void DetectionsEvaluator::accumulateResults() { + + int start_s=clock(); + + unsigned int index = 0; + for (auto itr = this->sampleStats.begin(); itr != this->sampleStats.end(); itr++) { + + std::map mystats = itr->second.getStats(); + + int totalCount = 0; + double totalPrecision = 0; + double totalRecall = 0; + + int classCount = 0; + + for (auto iter = mystats.begin(); iter != mystats.end(); iter++) { + + if (iter->second.numGroundTruthsReg == 0) + continue; + + std::vector pr = iter->second.getPrecisionArray(); + std::vector rc = iter->second.getRecallArray(); + + double recall = 0; + + + recall = iter->second.getRecall(); + + this->classWiseMeanAR[iter->first] += recall / 10; // 10 IOU Thresholds, + // mean will be calculated directly + totalRecall += recall; + + + double precision = iter->second.getAveragePrecision(this->recallThrs); + this->classWiseMeanAP[iter->first] += precision / 10; // 10 IOU Thresholds, + // mean will be calculated directly + totalPrecision += precision; + + totalCount++; + + + } + this->ApDiffIou[index] = totalPrecision / totalCount; + this->ArDiffIou[index] = totalRecall / totalCount; + + ++index; + } + + int stop_s=clock(); + this->timeAccumulation = (stop_s-start_s)/double(CLOCKS_PER_SEC); + LOG(INFO) << "Time Taken in Accumulation: " << this->timeAccumulation << " seconds" << std::endl; + LOG(INFO) << std::fixed; + LOG(INFO) << std::setprecision(8); + + + for (int i = 0; i < this->ApDiffIou.size(); i++) { + 
LOG(INFO) << "AP for IOU " << this->iouThrs[i] << ": \t" << this->ApDiffIou[i] << '\n'; + } + + for (int i = 0; i < this->ArDiffIou.size(); i++) { + LOG(INFO) << "AR for IOU " << this->iouThrs[i] << ": \t" << this->ArDiffIou[i] << '\n'; + } + + LOG(INFO) << "AP for IOU 0.5:0.95 \t" << this->ApDiffIou.sum()/10 << '\n'; + + LOG(INFO) << "AR for IOU 0.5:0.95 \t" << this->ArDiffIou.sum()/10 << '\n'; + + cv::destroyAllWindows(); + + LOG(INFO) << "Evaluated Successfully" << '\n'; + +} + +void DetectionsEvaluator::evaluate(bool isIouTypeBbox) { + int counter=-1; + int gtSamples = this->gt->getNumberOfElements(); + int detectionSamples = this->detections->getNumberOfElements(); + + + int start_s=clock(); + + if (gtSamples != detectionSamples){ + LOG(WARNING) << "Both dataset has not the same number of elements"; + } + + ClassTypeMapper classMapper(this->gt->getClassNamesFile()); + this->classMapping = classMapper.mapFile(this->detections->getClassNamesFile()); + + + Sample gtSample; + Sample detectionSample; + + + while (this->gt->getNextSample(gtSample)) { + counter++; + + + this->detections->getNextSample(detectionSample); + + + LOG(INFO) << "Evaluating: " << detectionSample.getSampleID() << "(" << counter << "/" << gtSamples << ")" << std::endl; + + + if (gtSample.getSampleID().compare(detectionSample.getSampleID()) != 0){ + LOG(WARNING) << "No detection sample available, Creating Dummy Sample\n"; + Sample dummy; + dummy.setSampleID(gtSample.getSampleID()); + dummy.setColorImagePath(gtSample.getColorImagePath()); + evaluateSample(gtSample, dummy, isIouTypeBbox); + this->detections->decrementReaderCounter(); + const std::string error="Both dataset has not the same structure ids mismatch from:" + gtSample.getSampleID() + " to " + detectionSample.getSampleID(); + LOG(WARNING) << error; + + } else { + evaluateSample(gtSample,detectionSample, isIouTypeBbox); + } + + /*if (this->debug){ + cv::imshow("GT", gtSample.getSampledColorImage()); + cv::imshow("Detection", 
detectionSample.getSampledColorImage()); + cv::waitKey(10); + }*/ + + } + int stop_s=clock(); + this->timeEvaluation = (stop_s-start_s)/double(CLOCKS_PER_SEC); + LOG(INFO) << "Time Taken in Evaluation: " << this->timeEvaluation << " seconds" << std::endl; + +} + +void DetectionsEvaluator::evaluateSample(Sample gt, Sample detection, bool isIouTypeBbox) { + + Eval::EvalMatrix sampleEvalMatrix; + + StatsUtils::computeIOUMatrix(gt, detection, sampleEvalMatrix, isIouTypeBbox); + + + std::string sampleID = gt.getSampleID(); + + std::map> matchingMap; + std::map> prMap; + std::map gtRegionsClassWiseCount; + + if (isIouTypeBbox) { + + auto gtRegions = gt.getRectRegions()->getRegions(); + auto detectionRegions = detection.getRectRegions()->getRegions(); + + + for (int i = 0; i < 10; i++) { + std::string current_class, previous_class; + int count = 0; + std::map gtIsCrowd; + for (auto itGt = gtRegions.begin(); itGt != gtRegions.end(); itGt++ ) { + + if (!itGt->isCrowd) { + this->sampleStats[this->iouThrs[i]].addGroundTruth(itGt->classID, true); + gtIsCrowd[itGt->uniqObjectID] = false; + + } else { + gtIsCrowd[itGt->uniqObjectID] = true; + this->sampleStats[this->iouThrs[i]].addGroundTruth(itGt->classID, false); + } + + } + + for (auto itDetection = detectionRegions.begin(); itDetection != detectionRegions.end(); itDetection++) { + previous_class = current_class; + current_class = itDetection->classID; + + if (!previous_class.empty()) { + if (current_class == previous_class) { + count++; + } else { + count = 0; + } + } + if (sampleEvalMatrix[current_class].empty()) { + LOG(INFO) << "IOU Matrix for " << sampleID << " and class " << current_class << " is empty for this Detection Ground Truth Pair" << '\n'; + + } + + + double iou = std::min(this->iouThrs[i],1-1e-10); + int m = -1; + bool isCrowd_local; + std::string current_class_gt, previous_class_gt; + int count2 = 0; + + for (auto itGt = gtRegions.begin(); itGt != gtRegions.end(); itGt++) { + + previous_class_gt = 
current_class_gt; + current_class_gt = itGt->classID; + if (current_class_gt != current_class) + continue; + + if (!previous_class_gt.empty()) { + if (current_class_gt == previous_class_gt) { + count2++; + } else { + count2 = 0; + } + } + + if (matchingMap[this->iouThrs[i]].find(itGt->uniqObjectID) != matchingMap[this->iouThrs[i]].end() && !itGt->isCrowd) { + continue; + } + + if (m >-1 && !gtIsCrowd[m] && itGt->isCrowd) + break; + + if (sampleEvalMatrix[current_class][count][count2] < iou) + continue; + + iou=sampleEvalMatrix[current_class][count][count2]; + m=itGt->uniqObjectID; + + } + + + if (m ==-1) { + this->sampleStats[this->iouThrs[i]].addFalsePositive(itDetection->classID, itDetection->confidence_score); + continue; + } + + if (gtIsCrowd[m]) { + this->sampleStats[this->iouThrs[i]].addIgnore(itDetection->classID, itDetection->confidence_score); + continue; + } + + matchingMap[this->iouThrs[i]][m] = itDetection->uniqObjectID; + + + this->sampleStats[this->iouThrs[i]].addTruePositive(itDetection->classID, itDetection->confidence_score); + + } + } + + + } else { + + auto gtRegions = gt.getRleRegions()->getRegions(); + auto detectionRegions = detection.getRleRegions()->getRegions(); + + + for (int i = 0; i < 10; i++) { + std::string current_class, previous_class; + int count = 0; + std::map gtIsCrowd; + for (auto itGt = gtRegions.begin(); itGt != gtRegions.end(); itGt++ ) { + + if (!itGt->isCrowd) { + this->sampleStats[this->iouThrs[i]].addGroundTruth(itGt->classID, true); + gtIsCrowd[itGt->uniqObjectID] = false; + + } else { + gtIsCrowd[itGt->uniqObjectID] = true; + this->sampleStats[this->iouThrs[i]].addGroundTruth(itGt->classID, false); + + } + } + + for (auto itDetection = detectionRegions.begin(); itDetection != detectionRegions.end(); itDetection++) { + previous_class = current_class; + current_class = itDetection->classID; + if (!previous_class.empty()) { + if (current_class == previous_class) { + count++; + } else { + count = 0; + } + } + if 
(sampleEvalMatrix[current_class].empty()) { + LOG(INFO) << "IOU Matrix for " << sampleID << " and class " << current_class << " is empty for this Detection Ground Truth Pair" << '\n'; + + } + + double iou = std::min(this->iouThrs[i],1-1e-10); + int m = -1; + bool isCrowd_local; + std::string current_class_gt, previous_class_gt; + int count2 = 0; + + for (auto itGt = gtRegions.begin(); itGt != gtRegions.end(); itGt++) { + previous_class_gt = current_class_gt; + current_class_gt = itGt->classID; + + if (current_class_gt != current_class) + continue; + + if (!previous_class_gt.empty()) { + if (current_class_gt == previous_class_gt) { + count2++; + } else { + count2 = 0; + } + } + + if (matchingMap[this->iouThrs[i]].find(itGt->uniqObjectID) != matchingMap[this->iouThrs[i]].end() && !itGt->isCrowd) { + continue; + } + + if (m >-1 && !gtIsCrowd[m] && itGt->isCrowd) + break; + + if (sampleEvalMatrix[current_class][count][count2] < iou) + continue; + + iou=sampleEvalMatrix[current_class][count][count2]; + m=itGt->uniqObjectID; + + } + + + if (m ==-1) { + this->sampleStats[this->iouThrs[i]].addFalsePositive(itDetection->classID, itDetection->confidence_score); + continue; + } + if (gtIsCrowd[m]) { + this->sampleStats[this->iouThrs[i]].addIgnore(itDetection->classID, itDetection->confidence_score); + continue; + } + + matchingMap[this->iouThrs[i]][m] = itDetection->uniqObjectID; + + + this->sampleStats[this->iouThrs[i]].addTruePositive(itDetection->classID, itDetection->confidence_score); + + } + } + + + + } + + +} + +void DetectionsEvaluator::printStats() { + //this->stats.printStats(classesToDisplay); +} + + +bool DetectionsEvaluator::sameClass(const std::string class1, const std::string class2) { + + if (class1.compare(class2)==0) + return true; + else{ + if (std::find(validMixClass.begin(), validMixClass.end(), std::make_pair(class1,class2)) != validMixClass.end()) + return true; + if (std::find(validMixClass.begin(), validMixClass.end(), std::make_pair(class2,class1)) 
!= validMixClass.end()) + return true; + } + return false; +} + + +void DetectionsEvaluator::addValidMixClass(const std::string classA, const std::string classB){ + //B is valid by detecting object as A + this->validMixClass.push_back(std::make_pair(classA,classB)); +} + +void DetectionsEvaluator::addClassToDisplay(const std::string &classID) { + this->classesToDisplay.push_back(classID); +} diff --git a/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/DetectionsEvaluator.h b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/DetectionsEvaluator.h new file mode 100644 index 00000000..039c152c --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/DetectionsEvaluator.h @@ -0,0 +1,76 @@ +// +// Created by frivas on 1/02/17. +// + +#ifndef SAMPLERGENERATOR_DETECTIONSEVALUATOR_H +#define SAMPLERGENERATOR_DETECTIONSEVALUATOR_H + +#include +#include +#include "ClassStatistics.h" +#include "GlobalStats.h" +#include +#include +#include +#include + +class DetectionsEvaluator { +public: + DetectionsEvaluator(DatasetReaderPtr gt, DatasetReaderPtr detections, bool debug=false); + void evaluate(bool isIouTypeBbox); + void accumulateResults(); + void addValidMixClass(const std::string classA, const std::string classB); + void addClassToDisplay(const std::string& classID); + std::map getClassWiseAP(); + std::map getClassWiseAR(); + double getOverallmAP(); + double getOverallmAR(); + double getEvaluationTime(); + double getAccumulationTime(); + +private: + DatasetReaderPtr gt; + DatasetReaderPtr detections; + bool debug; + std::vector> validMixClass; + std::unordered_map classMapping; + + //void evaluateSamples(Sample gt, Sample detection); + void evaluateSample(Sample gt, Sample detection, bool isIouTypeBbox); + + void printStats(); + + bool sameClass(const std::string class1, const std::string class2); + + std::vector classesToDisplay; + double thIOU; + std::map sampleStats; + + GlobalStats stats; + + std::map classWiseMeanAP; + std::map 
classWiseMeanAR; + std::valarray ApDiffIou = std::valarray(10); + std::valarray ArDiffIou = std::valarray(10); + + + std::map> areaRng = { {"all", std::make_tuple(0, 10000000000) }, + {"small", std::make_tuple(0, 1024) }, + {"medium", std::make_tuple(1024, 9216) }, + {"large", std::make_tuple(9210, 10000000000)} }; + + double iouThrs[10] = {0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95}; + + std::vector recallThrs; // 101 recall Thrs initialized in constructor + + + double timeEvaluation = 0; + double timeAccumulation = 0; + +}; + + +typedef boost::shared_ptr DetectionsEvaluatorPtr; + + +#endif //SAMPLERGENERATOR_DETECTIONSEVALUATOR_H diff --git a/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/FrameworkInferencer.cpp b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/FrameworkInferencer.cpp new file mode 100644 index 00000000..5242ce82 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/FrameworkInferencer.cpp @@ -0,0 +1,51 @@ +// +// Created by frivas on 24/02/17. +// + + +#include "FrameworkInferencer.h" +#include + + +Sample FrameworkInferencer::detect(const cv::Mat &image, double confidence_threshold) { + // Timestamp just before we start our detection + boost::posix_time::ptime startTime = boost::posix_time::microsec_clock::local_time(); + // Start detection + Sample s = detectImp(image, confidence_threshold); + // Timestamp after we finish our detection. + boost::posix_time::ptime endTime = boost::posix_time::microsec_clock::local_time(); + // total duration(for one detection): Time after the process is completed - Time before the process is completed. + boost::posix_time::time_duration duration = endTime-startTime; + // convert the above duration into total milliseconds taken. + long elapsedTime = duration.total_milliseconds(); + // Store the elapsedTime in a vector which will later be used to calculate mean time. + durationVector.push_back(elapsedTime); + // return the Sample. 
+ return s; +} + +int FrameworkInferencer::getMeanDurationTime() { + // stores the total time taken + int accumulate=0; + // iterate over the entire duration vector. + for (auto it = this->durationVector.begin(), end = this->durationVector.end(); it != end; ++it) + accumulate+=(int)(*it); + // If the duration vector is empty return 0. + if (this->durationVector.size() ==0) + return 0; + // Else return the average time taken. + else + return accumulate/(int)this->durationVector.size(); +} + +FrameworkInferencer::FrameworkInferencer() { + +} + +/* + After inferencing log the information regarding the mean time taken and + the inferencer. +*/ +FrameworkInferencer::~FrameworkInferencer() { + LOG(INFO) << "Mean inference time: " << this->getMeanDurationTime() << "(ms)" << std::endl; +} diff --git a/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/FrameworkInferencer.h b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/FrameworkInferencer.h new file mode 100644 index 00000000..81b98127 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/FrameworkInferencer.h @@ -0,0 +1,43 @@ +// +// Created by frivas on 29/01/17. +// + +#ifndef SAMPLERGENERATOR_FRAMEWORKEVALUATOR_H +#define SAMPLERGENERATOR_FRAMEWORKEVALUATOR_H + +#include +#include +#include +#include +#include +#include + +class FrameworkInferencer{ +public: + // Constructor function + FrameworkInferencer(); + // Destructor function + ~ FrameworkInferencer(); + // Detect objects in a image and return the information stored in a sample. + Sample detect(const cv::Mat& image, double confidence_threshold); + // Get the total time taken for inferencing different objects. + int getMeanDurationTime(); + // Below one will be defined by the child class which inherits this as parent. + virtual Sample detectImp(const cv::Mat& image, double confidence_threshold) =0; + +protected: + // Path where the class names are stored. 
+ std::string classNamesFile; + +private: + // This vector stores the time taken to detect an object in an image. + std::vector durationVector; +}; + + + + +typedef boost::shared_ptr FrameworkInferencerPtr; + + +#endif //SAMPLERGENERATOR_FRAMEWORKEVALUATOR_H diff --git a/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/GenericInferencer.cpp b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/GenericInferencer.cpp new file mode 100644 index 00000000..e5870686 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/GenericInferencer.cpp @@ -0,0 +1,123 @@ +// +// Created by frivas on 4/02/17. +// + +#include +#include "GenericInferencer.h" + +// Process the image using the selected inferener. +GenericInferencer::GenericInferencer(const std::string &netConfig, const std::string &netWeights,const std::string& classNames, + const std::string &implementation, std::map* inferencerParamsMap) { + // Get all the available inferencers which are present at the user's end. + configureAvailablesImplementations(this->availableImplementations); + + // Check if the selected inferencer is available or not + if (std::find(this->availableImplementations.begin(), this->availableImplementations.end(), implementation) != this->availableImplementations.end()){ + // If available , get the inferencer implementation and store it in imp. + imp = getImplementation(implementation); + // Inference the image image using the selected inferencer(Currently supports 4 different inferencers). 
+ switch (imp) { + case INF_YOLO: + this->darknetInferencerPtr = DarknetInferencerPtr( new DarknetInferencer(netConfig, netWeights,classNames)); + break; + case INF_TENSORFLOW: + this->tensorFlowInferencerPtr = TensorFlowInferencerPtr( new TensorFlowInferencer(netConfig, netWeights,classNames)); + break; + case INF_KERAS: + this->kerasInferencerPtr = KerasInferencerPtr( new KerasInferencer(netConfig, netWeights,classNames)); + break; +#ifdef ENABLE_DNN_CAFFE + case INF_CAFFE: + this->caffeInferencerPtr = CaffeInferencerPtr (new CaffeInferencer(netConfig, netWeights, classNames, inferencerParamsMap)); + break; +#endif + case INF_PYTORCH: + this->pyTorchInferencerPtr = PyTorchInferencerPtr (new PyTorchInferencer(netConfig, netWeights, classNames)); + break; + // If it does not belong to any of the 4 supported inferencers, log warning and break. + default: + LOG(WARNING)<& data) { + data.push_back("yolo"); + // Push tensorflow and keras, as they are neccessary dependencies to use this tool. + // If they don't exist an error should have popped up while building the tool. + data.push_back("tensorflow"); + data.push_back("keras"); + data.push_back("pytorch"); +// If Caffe exists push "caffe" +#ifdef ENABLE_DNN_CAFFE + data.push_back("caffe"); +#endif +} + +/* + Returns the INFERENCER_IMPLEMENTATIONS by comparing the inferencer string + with different available implementations. +*/ +INFERENCER_IMPLEMENTATIONS GenericInferencer::getImplementation(const std::string &inferencerImplementation) { + // Check is the selected inferencer is yolo, if it matches exactly return YOLO_INF. + if (inferencerImplementation.compare("yolo")==0){ + return INF_YOLO; + } + // Check is the selected inferencer is tensorflow, if it matches exactly return INF_TENSORFLOW. + if (inferencerImplementation.compare("tensorflow")==0){ + return INF_TENSORFLOW; + } + // Check is the selected inferencer is keras, if it matches exactly return INF_KERAS. 
+ if (inferencerImplementation.compare("keras")==0){ + return INF_KERAS; + } + // Check is the selected inferencer is caffe, if it matches exactly return INF_CAFFE. + if (inferencerImplementation.compare("caffe")==0){ + return INF_CAFFE; + } + // Check is the selected inferencer is PyTorch, if it matches exactly return INF_PYTORCH. + if (inferencerImplementation.compare("pytorch")==0){ + return INF_PYTORCH; + } +} + +/* + Return's the inferencer pointer using the data obtained from "getImplementation". +*/ +FrameworkInferencerPtr GenericInferencer::getInferencer() { + switch (imp) { + case INF_YOLO: + return this->darknetInferencerPtr; + case INF_TENSORFLOW: + return this->tensorFlowInferencerPtr; + case INF_KERAS: + return this->kerasInferencerPtr; +// If caffe is selected, return caffe pointer. +#ifdef ENABLE_DNN_CAFFE + case INF_CAFFE: + return this->caffeInferencerPtr; +#endif + case INF_PYTORCH: + return this->pyTorchInferencerPtr; + default: + LOG(WARNING)< GenericInferencer::getAvailableImplementations() { + std::vector data; + configureAvailablesImplementations(data); + return data; +} diff --git a/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/GenericInferencer.h b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/GenericInferencer.h new file mode 100644 index 00000000..f9fb1859 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/GenericInferencer.h @@ -0,0 +1,54 @@ +// +// Created by frivas on 4/02/17. +// + +// A generic framework which encapsulates all other frameworks in this. +#ifndef SAMPLERGENERATOR_GENERICINFERENCER_H +#define SAMPLERGENERATOR_GENERICINFERENCER_H + +#include "FrameworkInferencer.h" + +// Include the header files of tensorflow, keras and darknet inferencers. +#include +#include +#include +#include +// If Caffe is present include it's header files as well. +#ifdef ENABLE_DNN_CAFFE +#include +#endif + +// Inferencer can be implemented using any one of the following frameworks. 
+enum INFERENCER_IMPLEMENTATIONS{INF_YOLO, INF_TENSORFLOW, INF_KERAS, INF_CAFFE, INF_PYTORCH}; + + +class GenericInferencer { +public: + // Constructor function. + GenericInferencer(const std::string& netConfig, const std::string& netWeights, const std::string& classNames, const std::string& implementation, std::map* inferencerParamsMap = NULL); + // Get the inferencer which we have selected to use. + FrameworkInferencerPtr getInferencer(); + // Get all the availableImplementations. + static std::vector getAvailableImplementations(); + +private: + INFERENCER_IMPLEMENTATIONS imp; + DarknetInferencerPtr darknetInferencerPtr; + TensorFlowInferencerPtr tensorFlowInferencerPtr; + KerasInferencerPtr kerasInferencerPtr; + PyTorchInferencerPtr pyTorchInferencerPtr; + +#ifdef ENABLE_DNN_CAFFE + CaffeInferencerPtr caffeInferencerPtr; +#endif + + std::vector availableImplementations; + + static void configureAvailablesImplementations(std::vector& data); + INFERENCER_IMPLEMENTATIONS getImplementation(const std::string& inferencerImplementation); +}; + +typedef boost::shared_ptr GenericInferencerPtr; + + +#endif //SAMPLERGENERATOR_GENERICINFERENCER_H diff --git a/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/GlobalStats.cpp b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/GlobalStats.cpp new file mode 100644 index 00000000..3d1a9b9b --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/GlobalStats.cpp @@ -0,0 +1,107 @@ +// +// Created by frivas on 22/07/17. 
+// + +#include "GlobalStats.h" +#include + +GlobalStats::GlobalStats() = default; + +void GlobalStats::addIgnore(const std::string &classID, double confScore ) { + //std::cout << "Ignoring " << classID << "keihfiewoaiasssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss" << '\n'; + if (this->statsMap.count(classID)){ + auto it = this->statsMap[classID].confScores.insert(-confScore); + unsigned int index = std::distance(this->statsMap[classID].confScores.begin(), it); + auto itr = this->statsMap[classID].truePositives.begin(); + this->statsMap[classID].truePositives.insert(itr + index, 0); + itr = this->statsMap[classID].falsePositives.begin(); + this->statsMap[classID].falsePositives.insert(itr + index, 0); + } + else{ + ClassStatistics s(classID); + s.truePositives.push_back(0); + s.falsePositives.push_back(0); + s.confScores.insert(-confScore); + this->statsMap[classID]=s; + } +} + +void GlobalStats::addGroundTruth(const std::string &classID, bool isRegular) { + if (this->statsMap.count(classID)) { + if (isRegular) + this->statsMap[classID].numGroundTruthsReg++; + else + this->statsMap[classID].numGroundTruthsIg++; + } else { + ClassStatistics s(classID); + if (isRegular) + s.numGroundTruthsReg++; + else + s.numGroundTruthsIg++; + this->statsMap[classID] = s; + } +} + +void GlobalStats::addTruePositive(const std::string &classID, double confScore) { + if (this->statsMap.count(classID)){ + auto it = this->statsMap[classID].confScores.insert(-confScore); + unsigned int index = std::distance(this->statsMap[classID].confScores.begin(), it); + auto itr = this->statsMap[classID].truePositives.begin(); + this->statsMap[classID].truePositives.insert(itr + index, 1); + itr = this->statsMap[classID].falsePositives.begin(); + this->statsMap[classID].falsePositives.insert(itr + index, 0); + } + else{ + ClassStatistics s(classID); + s.truePositives.push_back(1); + s.falsePositives.push_back(0); + s.confScores.insert(-confScore); + this->statsMap[classID]=s; + } +} 
+ +void GlobalStats::addFalsePositive(const std::string &classID, double confScore) { + //std::cout << "Adding False positive: " << classID << " " << confScore <<'\n'; + if (this->statsMap.count(classID)){ + + auto it = this->statsMap[classID].confScores.insert(-confScore); + unsigned int index = std::distance(this->statsMap[classID].confScores.begin(), it); + auto itr = this->statsMap[classID].truePositives.begin(); + this->statsMap[classID].truePositives.insert(itr + index, 0); + itr = this->statsMap[classID].falsePositives.begin(); + this->statsMap[classID].falsePositives.insert(itr + index, 1); + } + else{ + ClassStatistics s(classID); + s.truePositives.push_back(0); + s.falsePositives.push_back(1); + s.confScores.insert(-confScore); + this->statsMap[classID]=s; + } +} + +void GlobalStats::addFalseNegative(const std::string &classID) { + if (this->statsMap.count(classID)){ + this->statsMap[classID].falseNegatives = this->statsMap[classID].falseNegatives+1; + } + else{ + ClassStatistics s(classID); + s.falseNegatives = s.falseNegatives+1; + this->statsMap[classID]=s; + } +} + +void GlobalStats::addIOU(const std::string &classID, double value) { + if (this->statsMap.count(classID)){ + this->statsMap[classID].iou.push_back(value); + } + else{ + ClassStatistics s(classID); + s.iou.push_back(value); + this->statsMap[classID]=s; + } +} + +std::map GlobalStats::getStats() const{ + return statsMap; +} diff --git a/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/GlobalStats.h b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/GlobalStats.h new file mode 100644 index 00000000..0fe26621 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/GlobalStats.h @@ -0,0 +1,36 @@ +// +// Created by frivas on 22/07/17. 
+// + +#ifndef SAMPLERGENERATOR_GLOBALSTATS_H +#define SAMPLERGENERATOR_GLOBALSTATS_H + +#include +#include "ClassStatistics.h" + +class GlobalStats { +public: + GlobalStats(); + void addTruePositive(const std::string &classID, double confScore); + + void addFalsePositive(const std::string &classID, double confScore); + + void addFalseNegative(const std::string &classID); + + void addGroundTruth(const std::string &classID, bool isRegular); + + void addIgnore(const std::string &classID, double confScore); + + void addIOU(const std::string &classID, double value); + + //void printStats(const std::vector& classesToDisplay) const; + + std::map getStats() const; + +private: + std::map statsMap; + +}; + + +#endif //SAMPLERGENERATOR_GLOBALSTATS_H diff --git a/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/KerasInferencer.cpp b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/KerasInferencer.cpp new file mode 100644 index 00000000..22622b73 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/KerasInferencer.cpp @@ -0,0 +1,224 @@ +#include +#include +#include "KerasInferencer.h" +#include +KerasInferencer::KerasInferencer(const std::string &netConfig, const std::string &netWeights,const std::string& classNamesFile): netConfig(netConfig),netWeights(netWeights) { + + this->classNamesFile=classNamesFile; + + /* Code below adds path of python models to sys.path so as to enable python + interpreter to import custom python modules from the path mentioned. This will + prevent adding python path manually. 
+ */ + + std::string file_path = __FILE__; + std::string dir_path = file_path.substr(0, file_path.rfind("/")); + dir_path = dir_path + "/../python_modules"; + + std::string string_to_run = "import sys\nsys.path.append('" + dir_path + "')\n"; + + Py_Initialize(); + + PyRun_SimpleString(string_to_run.c_str()); + + + init(); + + LOG(INFO) << "InterPreter Initailized" << '\n'; + + pName = PyUnicode_FromString("keras_detect"); + + + pModule = PyImport_Import(pName); + Py_DECREF(pName); + + LOG(INFO) << "Loading Keras Model" << '\n'; + + if (pModule != NULL) { + pClass = PyObject_GetAttrString(pModule, "KerasDetector"); + + pArgs = PyTuple_New(1); + + pmodel = PyUnicode_FromString(netWeights.c_str()); + + + /* pValue reference stolen here: */ + PyTuple_SetItem(pArgs, 0, pmodel); + /* pFunc is a new reference */ + pInstance = PyObject_CallObject(pClass, pArgs); + + if (pInstance == NULL) + { + Py_DECREF(pArgs); + PyErr_Print(); + } + + } else { + if (PyErr_Occurred()) + PyErr_Print(); + fprintf(stderr, "Cannot find function \"keras_detect\"\n"); + } + + LOG(INFO) << "Loaded Keras Model" << '\n'; + +} + +#if PY_MAJOR_VERSION >= 3 +int* +#else +void +#endif +KerasInferencer::init() +{ + import_array(); +} + +Sample KerasInferencer::detectImp(const cv::Mat &image, double confidence_threshold) { + + if(PyErr_CheckSignals() == -1) { + throw std::runtime_error("Keyboard Interrupt"); + } + + cv::Mat rgbImage; + cv::cvtColor(image,rgbImage,cv::COLOR_BGR2RGB); + + this->detections.clear(); //remove previous detections + + int result = getKerasInferences(rgbImage, confidence_threshold); + + if (result == 0) { + LOG(ERROR) << "Error Occured during getting inferences" << '\n'; + } + + Sample sample; + RectRegionsPtr regions(new RectRegions()); + ClassTypeGeneric typeConverter(classNamesFile); + + for (auto it = detections.begin(), end=detections.end(); it !=end; ++it){ + + typeConverter.setId(it->classId); + regions->add(it->boundingBox,typeConverter.getClassString(), 
it->probability); + //std::cout<< it->boundingBox.x << " " << it->boundingBox.y << " " << it->boundingBox.height << " " << it->boundingBox.width << std::endl; + LOG(INFO)<< typeConverter.getClassString() << ": " << it->probability << std::endl; + } + + sample.setColorImage(image); + sample.setRectRegions(regions); + return sample; +} + +/* +This function converts the output from python scripts into a fromat compatible +DetectionMetrics to read bounding boxes, classes and detection scores, which are +drawn on the image to show detections. +*/ + +void KerasInferencer::output_result(PyObject* result, int sizes[]) +{ + + int* dims; + + if( PyArray_Check(result)) { + + + PyArrayObject* result_cont = PyArray_GETCONTIGUOUS( (PyArrayObject*) result ); + + float* result_data = (float*) result_cont->data; // not copying data + + dims = (int*) PyArray_SHAPE(result_cont); + + int i; + int k = 0; + + + for( i=0; idetections.size() << '\n'; + Py_DECREF(pValue); + } + else { + Py_DECREF(pClass); + Py_DECREF(pModule); + PyErr_Print(); + fprintf(stderr,"Call failed\n"); + + return 0; + } + + + return 1; +} diff --git a/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/KerasInferencer.h b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/KerasInferencer.h new file mode 100644 index 00000000..c7838ea7 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/KerasInferencer.h @@ -0,0 +1,39 @@ +#include "pythonWrap.h" +//#include +#include "FrameworkInferencer.h" +#include +//#include +#include +#include + +class KerasInferencer: public FrameworkInferencer { +public: + KerasInferencer(const std::string& netConfig, const std::string& netWeights, const std::string& classNamesFile); + Sample detectImp(const cv::Mat& image, double confidence_threshold); + int getKerasInferences(const cv::Mat& image, double confidence_threshold); + void output_result(PyObject* result, int sizes[] ); + static + #if PY_MAJOR_VERSION >= 3 + int* + #else + void + #endif + init(); 
+private: + std::string netConfig; + std::string netWeights; + struct detection { + cv::Rect boundingBox; + float probability; + int classId; + }; + + PyObject *pName, *pModule, *pClass, *pInstance; + PyObject *pArgs, *pValue, *pmodel; + + std::vector detections; + +}; + + +typedef boost::shared_ptr KerasInferencerPtr; diff --git a/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/Labelling.cpp b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/Labelling.cpp new file mode 100644 index 00000000..6ee4af11 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/Labelling.cpp @@ -0,0 +1,274 @@ +#include +#include +#include +#include "Labelling.h" + +Labelling::Labelling(DatasetReaderPtr reader, FrameworkInferencerPtr inferencer, const std::string& resultsPath, bool debug) + : Labelling::Labelling(reader, inferencer, resultsPath, NULL, debug) { + this->detections = new std::vector(); + this->Drawing = false; + this->Adjusting = false; + } // Delegating Constructor + +Labelling::Labelling(DatasetReaderPtr reader, FrameworkInferencerPtr inferencer, bool debug) + : Labelling::Labelling(reader, inferencer, NULL, debug) { + this->detections = new std::vector(); + this->Drawing = false; + this->Adjusting = false; + } // Delegating Constructor + +Labelling::Labelling(DatasetReaderPtr reader, FrameworkInferencerPtr inferencer, + const std::string &resultsPath,double* confidence_threshold, bool debug): reader(reader), inferencer(inferencer), resultsPath(resultsPath),confidence_threshold(confidence_threshold),debug(debug) +{ + + if (resultsPath.empty()) + saveOutput = false; + else + saveOutput = true; + alreadyProcessed=0; + this->detections = new std::vector(); + this->Drawing = false; + this->Adjusting = false; + int time=0; + time = reader->IsVideo() ? 
reader->TotalFrames() : 1 ; + this->playback.AddTrackbar(time); + LOG(INFO) << reader->getClassNamesFile() << std::endl; + if (!resultsPath.empty()) { + auto boostPath= boost::filesystem::path(this->resultsPath); + if (!boost::filesystem::exists(boostPath)){ + boost::filesystem::create_directories(boostPath); + } + else{ + LOG(WARNING)<<"Output directory already exists"; + LOG(WARNING)<<"Continuing detecting"; + boost::filesystem::directory_iterator end_itr; + + for (boost::filesystem::directory_iterator itr(boostPath); itr!=end_itr; ++itr) + { + if ((is_regular_file(itr->status()) && itr->path().extension()==".png") && (itr->path().string().find("-depth") == std::string::npos)) { + alreadyProcessed++; + } + + } + } + } +} + +Labelling::Labelling(DatasetReaderPtr reader, FrameworkInferencerPtr inferencer, + const std::string &resultsPath, bool* stopDeployer,double* confidence_threshold, bool debug): reader(reader), inferencer(inferencer), resultsPath(resultsPath),debug(debug),stopDeployer(stopDeployer),confidence_threshold(confidence_threshold) +{ + + if (resultsPath.empty()) + saveOutput = false; + else + saveOutput = true; + + this->detections = new std::vector(); + this->Drawing = false; + this->Adjusting = false; + LOG(INFO) << reader->getClassNamesFile() << std::endl; + int time=0; + time = reader->IsVideo() ? 
reader->TotalFrames() : 1 ; + this->playback.AddTrackbar(time); + alreadyProcessed=0; + if (!resultsPath.empty()) { + auto boostPath= boost::filesystem::path(this->resultsPath); + if (!boost::filesystem::exists(boostPath)){ + boost::filesystem::create_directories(boostPath); + } + else{ + LOG(WARNING)<<"Output directory already exists"; + LOG(WARNING)<<"Files might be overwritten, if present in the directory"; + boost::filesystem::directory_iterator end_itr; + } + } +} + +Labelling::Labelling(DatasetReaderPtr reader, FrameworkInferencerPtr inferencer, double* confidence_threshold, bool debug): reader(reader), inferencer(inferencer), confidence_threshold(confidence_threshold), debug(debug) +{ + //Constructor to avoid writing results to outputPath + saveOutput = false; + alreadyProcessed=0; + LOG(INFO) << reader->getClassNamesFile() << std::endl; + this->detections = new std::vector(); + this->Drawing = false; + this->Adjusting = false; + int time=0; + time = reader->IsVideo() ? reader->TotalFrames() : 1 ; + this->playback.AddTrackbar(time); +} + +void DrawRectangle(cv::Mat& img, cv::Rect &box){ + cv::rectangle(img,box.tl(), box.br(),cv::Scalar(0,0,0)); + cv::imshow("Detection", img); +} + + +void Labelling::BorderChange(int event, int x, int y, int flags, void* userdata){ + int currFrame = ((Labelling *)(userdata))->playback.currentFrame(); + bool changed = false; + cv::Mat imager; + switch (event) { + case cv::EVENT_MBUTTONDOWN:{ + (((Labelling *)(userdata))->detections)->at(currFrame-1).SetClassy(x,y,((Labelling *)(userdata))->reader->getClassNames()); + changed = true; + imager = (((Labelling *)(userdata))->detections)->at(currFrame-1).getSampledColorImage(); + }break; + + case cv::EVENT_LBUTTONDOWN :{ + if( (((Labelling *)(userdata))->detections)->at(currFrame-1).AdjustBox(x,y)){ + changed = true; + imager = (((Labelling *)(userdata))->detections)->at(currFrame-1).getSampledColorImage(); + ((Labelling *)(userdata))->Adjusting = true; + } + else{ + ((Labelling 
*)(userdata))->Drawing = true; + ((Labelling *)(userdata))->g_rectangle = cv::Rect(x, y, 0, 0); + } + }break; + + case cv::EVENT_LBUTTONUP :{ + if( ((Labelling *)(userdata))->Adjusting ){ + (((Labelling *)(userdata))->detections)->at(currFrame-1).AdjustBox(x,y); + changed = true; + imager = (((Labelling *)(userdata))->detections)->at(currFrame-1).getSampledColorImage(); + ((Labelling *)(userdata))->Adjusting = false; + } + else if(((Labelling *)(userdata))->Drawing){ + ((Labelling *)(userdata))->Drawing = false; + if (((Labelling *)(userdata))->g_rectangle.width < 0) { + ((Labelling *)(userdata))->g_rectangle.x += ((Labelling *)(userdata))->g_rectangle.width; + ((Labelling *)(userdata))->g_rectangle.width *= -1; + } + + if (((Labelling *)(userdata))->g_rectangle.height < 0) { + ((Labelling *)(userdata))->g_rectangle.y += ((Labelling *)(userdata))->g_rectangle.height; + ((Labelling *)(userdata))->g_rectangle.height *= -1; + } + + (((Labelling *)(userdata))->detections)->at(currFrame-1).AddDetection(((Labelling *)(userdata))->g_rectangle,((Labelling *)(userdata))->reader->getClassNames()); + imager = (((Labelling *)(userdata))->detections)->at(currFrame-1).getSampledColorImage(); + changed=true; + } + }break; + + case cv::EVENT_MOUSEMOVE: { // When mouse moves, get the current rectangle's width and height + if (((Labelling *)(userdata))->Drawing) { + ((Labelling *)(userdata))->g_rectangle.width = x - ((Labelling *)(userdata))->g_rectangle.x; + ((Labelling *)(userdata))->g_rectangle.height = y - ((Labelling *)(userdata))->g_rectangle.y; + imager = (((Labelling *)(userdata))->detections)->at(currFrame-1).getSampledColorImage(); + DrawRectangle(imager, ((Labelling *)(userdata))->g_rectangle); + } + }break; + + } + + if(changed){ + ((Labelling *)(userdata))->playback.updateFrame(currFrame-1,&imager); + cv::imshow("Detection", imager); + if (((Labelling *)(userdata))->saveOutput) + (((Labelling *)(userdata))->detections)->at(currFrame-1).save(((Labelling 
*)(userdata))->resultsPath); + LOG(INFO) << "Updated\n"; + } +} + +void Labelling::IsProcessed(Sample *sample, int *counter , int *nsamples){ + while (alreadyProcessed>0){ + LOG(INFO) << "Already evaluated: " << sample->getSampleID() << "(" << *counter << "/" << *nsamples << ")" << std::endl; + this->reader->getNextSample(*sample); + *counter++; + alreadyProcessed--; + } +} + +void Labelling::Shower(Sample *sample, Sample *detection,cv::Mat *image2detect, bool &useDepthImages){ + if (this->debug) { + cv::Mat image =sample->getSampledColorImage(); + Sample detectionWithImage=*detection; + + if (useDepthImages) + detectionWithImage.setColorImage(sample->getDepthColorMapImage()); + else + detectionWithImage.setColorImage(sample->getColorImage()); + + if (useDepthImages){ + cv::imshow("GT on Depth", sample->getSampledDepthColorMapImage()); + cv::imshow("Input", *image2detect); + } + char keystroke=cv::waitKey(1); + if(reader->IsValidFrame() && reader->IsVideo()) + this->playback.GetInput(keystroke,detectionWithImage.getSampledColorImage(),image); + else{ + cv::imshow("GT on RGB", image); + cv::imshow("Detection", detectionWithImage.getSampledColorImage()); + cv::waitKey(100); + } + } +} + +void Labelling::finder(Sample *sample , Sample *detection, cv::Mat *image2detect ,bool &useDepthImages, int *counter , int *nsamples){ + *counter+=1; + if (this->stopDeployer != NULL && *(this->stopDeployer)) { + LOG(INFO) << "Deployer Process Stopped" << "\n"; + return; + } + + LOG(INFO) << "Evaluating : " << sample->getSampleID() << "(" << *counter << "/" << *nsamples << ")" << std::endl; + + if (useDepthImages) + *image2detect = sample->getDepthColorMapImage(); + else { + *image2detect = sample->getColorImage(); + } + + double thresh = this->confidence_threshold == NULL ? 
this->default_confidence_threshold + : *(this->confidence_threshold); + + try { + *detection=this->inferencer->detect(*image2detect, thresh); + } + catch(const std::runtime_error& error) { + LOG(ERROR) << "Error Occured: " << error.what() << '\n'; + exit(1); + } + + detection->setSampleID(sample->getSampleID()); + + if (saveOutput) + detection->save(this->resultsPath); +} + +void Labelling::process(bool useDepthImages, DatasetReaderPtr readerDetection) { + Sample sample; + int counter=0; + int nsamples = this->reader->getNumberOfElements(); + + Labelling::IsProcessed(&sample,&counter,&nsamples); + cv::Mat image2detect; + static Sample detection; + cv::setMouseCallback("Detection", Labelling::BorderChange ,this); + bool read_succesful = true; + while (read_succesful){ + if(!detection.GetMousy()){ + read_succesful=this->reader->getNextSample(sample); + Labelling::finder(&sample,&detection,&image2detect,useDepthImages,&counter,&nsamples); + Labelling::Shower(&sample,&detection,&image2detect,useDepthImages); + this->detections->push_back(detection); + } + detection.clearColorImage(); + detection.clearDepthImage(); + detection.SetMousy(false); + if (readerDetection != NULL) + readerDetection->addSample(detection); + } + + if(!this->reader->IsValidFrame()){ + this->playback.completeShow(); + cv::destroyAllWindows(); + LOG(INFO) << "Mean inference time: " << this->inferencer->getMeanDurationTime() << "(ms)" << std::endl; + } +} + +FrameworkInferencerPtr Labelling::getInferencer() const { + return this->inferencer; +} diff --git a/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/Labelling.h b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/Labelling.h new file mode 100644 index 00000000..e2c66671 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/Labelling.h @@ -0,0 +1,44 @@ + + +#ifndef SAMPLERGENERATOR_LABELLING_H +#define SAMPLERGENERATOR_LABELLING_H + +#include +#include +#include +#include "Utils/Playback.hpp" + +class Labelling 
{ +public: + Labelling(DatasetReaderPtr reader, FrameworkInferencerPtr inferencer, const std::string& resultsPath, double* confidence_threshold = NULL, bool debug=true); + Labelling(DatasetReaderPtr reader, FrameworkInferencerPtr inferencer, const std::string& resultsPath, bool debug=true); + Labelling(DatasetReaderPtr reader, FrameworkInferencerPtr inferencer, const std::string &resultsPath, bool* stopDeployer, double* confidence_threshold = NULL, bool debug=true); + Labelling(DatasetReaderPtr reader, FrameworkInferencerPtr inferencer, double* confidence_threshold = NULL, bool debug=true); + Labelling(DatasetReaderPtr reader, FrameworkInferencerPtr inferencer, bool debug=true); + void process(bool writeImages, DatasetReaderPtr readerDetection = NULL); + FrameworkInferencerPtr getInferencer() const; + static void BorderChange(int event, int x, int y, int flags, void* userdata); + void IsProcessed(Sample *sample, int *counter , int *nsamples); + void Shower(Sample *sample, Sample *detection,cv::Mat *image2detect, bool &useDepthImages); + void finder(Sample *sample, Sample *detection,cv::Mat *image2detect, bool &useDepthImages, int *counter , int *nsamples); +private: + DatasetReaderPtr reader; + FrameworkInferencerPtr inferencer; + std::string resultsPath; + bool debug; + bool saveOutput; + int alreadyProcessed; + bool* stopDeployer = NULL; + double* confidence_threshold = NULL; + double default_confidence_threshold = 0.2; + Playback playback; + std::vector *detections; + bool mousy; + cv::Rect g_rectangle; + bool Drawing ; + bool Adjusting ; +}; + + + +#endif //SAMPLERGENERATOR_MASSINFERENCER_H diff --git a/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/MassBatchInferencer.cpp b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/MassBatchInferencer.cpp new file mode 100644 index 00000000..137e4945 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/MassBatchInferencer.cpp @@ -0,0 +1,127 @@ +#include +#include +#include 
+#include "MassBatchInferencer.h" + +MassBatchInferencer::MassBatchInferencer(DatasetReaderPtr reader, FrameworkInferencerPtr inferencer, + const std::string &resultsPath,double confidence_threshold, + bool debug): reader(reader), inferencer(inferencer), + resultsPath(resultsPath),confidence_threshold(confidence_threshold), + debug(debug) +{ + if (resultsPath.empty()) { + saveOutput = false; + + } else { + saveOutput = true; + alreadyProcessed=0; + + auto boostPath= boost::filesystem::path(this->resultsPath); + if (!boost::filesystem::exists(boostPath)){ + boost::filesystem::create_directories(boostPath); + } + else{ + LOG(WARNING)<<"Output directory already exists"; + LOG(WARNING)<<"Continuing detecting"; + boost::filesystem::directory_iterator end_itr; + + for (boost::filesystem::directory_iterator itr(boostPath); itr!=end_itr; ++itr) + { + if ((is_regular_file(itr->status()) && itr->path().extension()==".png") + && (itr->path().string().find("-depth") == std::string::npos)) { + alreadyProcessed++; + } + + } + + } + + } +} + + +MassBatchInferencer::MassBatchInferencer(DatasetReaderPtr reader, FrameworkInferencerPtr inferencer, + double confidence_threshold, bool debug): reader(reader), + inferencer(inferencer), confidence_threshold(confidence_threshold), + debug(debug) +{ + //Constructor to avoid writing results to outputPath + saveOutput = false; + alreadyProcessed=0; +} + +void MassBatchInferencer::process(const int batchSize, bool useDepthImages, DatasetReaderPtr readerDetection) { + + Sample sample; + int counter=0; + int nsamples = this->reader->getNumberOfElements(); + while (alreadyProcessed>0){ + LOG(INFO) << "Already evaluated: " << sample.getSampleID() << "(" << counter << "/" << nsamples << ")" << std::endl; + this->reader->getNextSample(sample); + counter++; + alreadyProcessed--; + } + + + while (this->reader->getNextSample(sample)){ + counter++; + + LOG(INFO) << "Evaluating : " << sample.gedtSampleID() << "(" << counter << "/" << nsamples << ")" << 
std::endl; + + cv::Mat image2detect; + if (useDepthImages) + image2detect = sample.getDepthColorMapImage(); + else { + image2detect = sample.getColorImage(); + } + + Sample detection; + + double thresh = this->confidence_threshold == NULL ? this->default_confidence_threshold + : *(this->confidence_threshold); + + try { + + detection=this->inferencer->detect(image2detect, thresh); + + } catch(const std::runtime_error& error) { + LOG(ERROR) << "Error Occured: " << error.what() << '\n'; + exit(1); + } + + detection.setSampleID(sample.getSampleID()); + + if (saveOutput) + detection.save(this->resultsPath); + + if (this->debug) { + cv::Mat image =sample.getSampledColorImage(); + Sample detectionWithImage; + detectionWithImage=detection; + if (useDepthImages) + detectionWithImage.setColorImage(sample.getDepthColorMapImage()); + else + detectionWithImage.setColorImage(sample.getColorImage()); + cv::imshow("GT on RGB", image); + if (useDepthImages){ + cv::imshow("GT on Depth", sample.getSampledDepthColorMapImage()); + cv::imshow("Input", image2detect); + } + cv::imshow("Detection", detectionWithImage.getSampledColorImage()); + cv::waitKey(100); + } + + detection.clearColorImage(); + detection.clearDepthImage(); + + if (readerDetection != NULL) { + readerDetection->addSample(detection); + //samples->push_back(detection); + } + + } + cv::destroyAllWindows(); + LOG(INFO) << "Mean inference time: " << this->inferencer->getMeanDurationTime() << "(ms)" << std::endl; + + +} diff --git a/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/MassBatchInferencer.h b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/MassBatchInferencer.h new file mode 100644 index 00000000..ef69f9a7 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/MassBatchInferencer.h @@ -0,0 +1,25 @@ +#ifndef SAMPLERGENERATOR_MASSBATCHINFERENCER_H +#define SAMPLERGENERATOR_MASSBATCHINFERENCER_H + +#include +#include + +class MassBatchInferencer { +public: + 
MassBatchInferencer(DatasetReaderPtr reader, FrameworkInferencerPtr inferencer, const std::string& resultsPath, double confidence_threshold = 0.2, bool debug=true); + MassBatchInferencer(DatasetReaderPtr reader, FrameworkInferencerPtr inferencer, double confidence_threshold = 0.2, bool debug=true); + void process(const int batchSize, bool writeImages, DatasetReaderPtr readerDetection = NULL); + +private: + DatasetReaderPtr reader; + FrameworkInferencerPtr inferencer; + std::string resultsPath; + bool debug; + bool saveOutput; + int alreadyProcessed; + double confidence_threshold; + +}; + + +#endif //SAMPLERGENERATOR_MASSINFERENCER_H diff --git a/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/MassInferencer.cpp b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/MassInferencer.cpp new file mode 100644 index 00000000..0fca781d --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/MassInferencer.cpp @@ -0,0 +1,230 @@ +#include +#include +#include +#include "MassInferencer.h" + +MassInferencer::MassInferencer(DatasetReaderPtr reader, FrameworkInferencerPtr inferencer, const std::string& resultsPath, bool debug) + : MassInferencer::MassInferencer(reader, inferencer, resultsPath, NULL, debug) {} // Delegating Constructor + +MassInferencer::MassInferencer(DatasetReaderPtr reader, FrameworkInferencerPtr inferencer, bool debug) + : MassInferencer::MassInferencer(reader, inferencer, NULL, debug) {} // Delegating Constructor + +MassInferencer::MassInferencer(DatasetReaderPtr reader, FrameworkInferencerPtr inferencer, + const std::string &resultsPath,double* confidence_threshold, bool debug): reader(reader), inferencer(inferencer), resultsPath(resultsPath),confidence_threshold(confidence_threshold),debug(debug) +{ + if (resultsPath.empty()) + saveOutput = false; + else + saveOutput = true; + alreadyProcessed=0; + int time=0; + time = reader->IsVideo() ? 
reader->TotalFrames() : 1 ; + this->playback.AddTrackbar(time); + if (!resultsPath.empty()) { + auto boostPath= boost::filesystem::path(this->resultsPath); + if (!boost::filesystem::exists(boostPath)){ + boost::filesystem::create_directories(boostPath); + } + else{ + LOG(WARNING)<<"Output directory already exists"; + LOG(WARNING)<<"Continuing detecting"; + boost::filesystem::directory_iterator end_itr; + + for (boost::filesystem::directory_iterator itr(boostPath); itr!=end_itr; ++itr) + { + if ((is_regular_file(itr->status()) && itr->path().extension()==".png") && (itr->path().string().find("-depth") == std::string::npos)) { + alreadyProcessed++; + } + + } + //exit(-1); + } + } +} + +MassInferencer::MassInferencer(DatasetReaderPtr reader, FrameworkInferencerPtr inferencer, + const std::string &resultsPath, bool* stopDeployer,double* confidence_threshold, bool debug): reader(reader), inferencer(inferencer), resultsPath(resultsPath),debug(debug),stopDeployer(stopDeployer),confidence_threshold(confidence_threshold) +{ + + if (resultsPath.empty()) + saveOutput = false; + else + saveOutput = true; + int time=0; + time = reader->IsVideo() ? 
reader->TotalFrames() : 1 ; + this->playback.AddTrackbar(time); + alreadyProcessed=0; + if (!resultsPath.empty()) { + auto boostPath= boost::filesystem::path(this->resultsPath); + if (!boost::filesystem::exists(boostPath)){ + boost::filesystem::create_directories(boostPath); + } + else{ + LOG(WARNING)<<"Output directory already exists"; + LOG(WARNING)<<"Files might be overwritten, if present in the directory"; + boost::filesystem::directory_iterator end_itr; + + + } + + } + +} + +MassInferencer::MassInferencer(DatasetReaderPtr reader, FrameworkInferencerPtr inferencer, double* confidence_threshold, bool debug): reader(reader), inferencer(inferencer), confidence_threshold(confidence_threshold), debug(debug) +{ + //Constructor to avoid writing results to outputPath + saveOutput = false; + alreadyProcessed=0; + int time=0; + time = reader->IsVideo() ? reader->TotalFrames() : 1 ; + this->playback.AddTrackbar(time); +} + +MassInferencer::MassInferencer(FrameworkInferencerPtr inferencer, const std::string &resultsPath, double* confidence_threshold = NULL, bool debug=true):inferencer(inferencer), confidence_threshold(confidence_threshold), debug(debug), resultsPath(resultsPath){ + saveOutput = resultsPath.empty() ? 
false : true ; + alreadyProcessed=0; + + if (!resultsPath.empty()) { + auto boostPath= boost::filesystem::path(this->resultsPath); + if (!boost::filesystem::exists(boostPath)){ + boost::filesystem::create_directories(boostPath); + } + else{ + LOG(WARNING)<<"Output directory already exists"; + LOG(WARNING)<<"Files might be overwritten, if present in the directory"; + boost::filesystem::directory_iterator end_itr; + } + } + +} + +void MassInferencer::process(bool useDepthImages, DatasetReaderPtr readerDetection) { + + Sample sample; + int counter=0; + int nsamples = this->reader->getNumberOfElements(); + while (alreadyProcessed>0){ + LOG(INFO) << "Already evaluated: " << sample.getSampleID() << "(" << counter << "/" << nsamples << ")" << std::endl; + this->reader->getNextSample(sample); + counter++; + alreadyProcessed--; + } + + + while (this->reader->getNextSample(sample)){ + counter++; + if (this->stopDeployer != NULL && *(this->stopDeployer)) { + LOG(INFO) << "Deployer Process Stopped" << "\n"; + return; + } + + LOG(INFO) << "Evaluating : " << sample.getSampleID() << "(" << counter << "/" << nsamples << ")" << std::endl; + + cv::Mat image2detect; + if (useDepthImages) + image2detect = sample.getDepthColorMapImage(); + else { + image2detect = sample.getColorImage(); + } + + Sample detection; + + double thresh = this->confidence_threshold == NULL ? 
this->default_confidence_threshold + : *(this->confidence_threshold); + + try { + + detection=this->inferencer->detect(image2detect, thresh); + + } catch(const std::runtime_error& error) { + LOG(ERROR) << "Error Occured: " << error.what() << '\n'; + exit(1); + } + + detection.setSampleID(sample.getSampleID()); + detection.setColorImagePath(sample.getColorImagePath()); + + if (saveOutput) + detection.save(this->resultsPath); + + if (this->debug) { + cv::Mat image =sample.getSampledColorImage(); + Sample detectionWithImage; + detectionWithImage=detection; + if (useDepthImages) + detectionWithImage.setColorImage(sample.getDepthColorMapImage()); + else + detectionWithImage.setColorImage(sample.getColorImage()); + // cv::imshow("GT on RGB", image); + if (useDepthImages){ + cv::imshow("GT on Depth", sample.getSampledDepthColorMapImage()); + cv::imshow("Input", image2detect); + } + // cv::imshow("Detection", detectionWithImage.getSampledColorImage()); + // cv::waitKey(100); + char keystroke=cv::waitKey(1); + if(reader->IsValidFrame() && reader->IsVideo()) + this->playback.GetInput(keystroke,detectionWithImage.getSampledColorImage(),image); + else{ + cv::imshow("GT on RGB", image); + cv::imshow("Detection", detectionWithImage.getSampledColorImage()); + cv::waitKey(100); + + } + } + + detection.clearColorImage(); + detection.clearDepthImage(); + + if (readerDetection != NULL) { + readerDetection->addSample(detection); + //samples->push_back(detection); + } + + } + if(!reader->IsValidFrame()){ + this->playback.completeShow(); + cv::destroyAllWindows(); + LOG(INFO) << "Mean inference time: " << this->inferencer->getMeanDurationTime() << "(ms)" << std::endl; + } + + +} + +void MassInferencer::process(bool useDepthImages, cv::Mat image2detect){ + + Sample detection; + double thresh = this->confidence_threshold == NULL ? 
this->default_confidence_threshold + : *(this->confidence_threshold); + + try { + detection=this->inferencer->detect(image2detect, thresh); + } + catch(const std::runtime_error& error) { + LOG(ERROR) << "Error Occured: " << error.what() << '\n'; + exit(1); + } + + if (saveOutput) + detection.save(this->resultsPath); + + // cv::imshow("GT on RGB", image2detect); + cv::imshow("Detection", detection.getSampledColorImage()); + // cv::waitKey(100); + + this->CurrFrame = detection; + detection.clearColorImage(); + detection.clearDepthImage(); +} + +FrameworkInferencerPtr MassInferencer::getInferencer() const { + return this->inferencer; +} +RectRegionsPtr MassInferencer::detections(){ + return this->CurrFrame.getRectRegions(); +} + +Sample MassInferencer::getSample(){ + return this->CurrFrame; +} diff --git a/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/MassInferencer.h b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/MassInferencer.h new file mode 100644 index 00000000..7e3e1e8f --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/MassInferencer.h @@ -0,0 +1,43 @@ +// +// Created by frivas on 1/02/17. 
+// + +#ifndef SAMPLERGENERATOR_MASSINFERENCER_H +#define SAMPLERGENERATOR_MASSINFERENCER_H + +#include +#include +#include "Utils/Playback.hpp" + +class MassInferencer { +public: + MassInferencer(DatasetReaderPtr reader, FrameworkInferencerPtr inferencer, const std::string& resultsPath, double* confidence_threshold = NULL, bool debug=true); + MassInferencer(DatasetReaderPtr reader, FrameworkInferencerPtr inferencer, const std::string& resultsPath, bool debug=true); + MassInferencer(DatasetReaderPtr reader, FrameworkInferencerPtr inferencer, const std::string &resultsPath, bool* stopDeployer, double* confidence_threshold = NULL, bool debug=true); + MassInferencer(DatasetReaderPtr reader, FrameworkInferencerPtr inferencer, double* confidence_threshold = NULL, bool debug=true); + MassInferencer(DatasetReaderPtr reader, FrameworkInferencerPtr inferencer, bool debug=true); + MassInferencer(FrameworkInferencerPtr inferencer, const std::string &resultsPath, double* confidence_threshold , bool debug); + + void process(bool writeImages, DatasetReaderPtr readerDetection = NULL); + void process(bool writeImages, cv::Mat image2detect); + + FrameworkInferencerPtr getInferencer() const; + RectRegionsPtr detections(); + Sample getSample(); +private: + DatasetReaderPtr reader; + FrameworkInferencerPtr inferencer; + std::string resultsPath; + bool debug; + bool saveOutput; + int alreadyProcessed; + bool* stopDeployer = NULL; + double* confidence_threshold = NULL; + double default_confidence_threshold = 0.2; + Playback playback; + Sample CurrFrame; +}; + + + +#endif //SAMPLERGENERATOR_MASSINFERENCER_H diff --git a/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/PyTorchInferencer.cpp b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/PyTorchInferencer.cpp new file mode 100644 index 00000000..50d7e8a2 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/PyTorchInferencer.cpp @@ -0,0 +1,223 @@ +#include +#include +#include "PyTorchInferencer.h" 
+#include + +void PyTorchInferencer::CallBackFunc(int event, int x, int y, int flags, void* userdata){ + ((PyTorchInferencer *)(userdata))->mousy = true; + for(auto itr = ((PyTorchInferencer *)(userdata))->detections.begin(); itr != ((PyTorchInferencer *)(userdata))->detections.end() ; itr++){ + itr->boundingBox.x = x; + itr->boundingBox.y = y; + } +} + + + +PyTorchInferencer::PyTorchInferencer( const std::string &netConfig, const std::string &netWeights,const std::string& classNamesFile): netConfig(netConfig),netWeights(netWeights) { + LOG(INFO) << "PyTorch Constructor" << '\n'; + this->classNamesFile=classNamesFile; + this->mousy = false; + /* + * Code below adds path of python models to sys.path so as to enable python + * interpreter to import custom python modules from the path mentioned. This will + * prevent adding python path manually. + */ + + std::string file_path = __FILE__; + std::string dir_path = file_path.substr(0, file_path.rfind("/")); + dir_path = dir_path + "/../python_modules"; + std::string string_to_run = "import sys\nsys.path.append('" + dir_path + "')\n"; + + /* Initialize the python interpreter.Neccesary step to later call + * the python interpreter from any part of the application. 
+ */ + + Py_Initialize(); + PyRun_SimpleString(string_to_run.c_str()); + init(); + LOG(INFO) << "Interpreter Initialized" << '\n'; + pName = PyUnicode_FromString("pytorch_detect"); + pModule = PyImport_Import(pName); + Py_DECREF(pName); + + LOG(INFO) << "Loading Detection Graph" << '\n'; + if (pModule != NULL) { + pClass = PyObject_GetAttrString(pModule, "PyTorchDetector"); + pArgs = PyTuple_New(2); + pmodel = PyUnicode_FromString(netWeights.c_str()); + pconfig = PyUnicode_FromString(netConfig.c_str()); + + PyTuple_SetItem(pArgs, 0, pmodel); + PyTuple_SetItem(pArgs, 1, pconfig); + pInstance = PyObject_CallObject(pClass, pArgs); + if (pInstance == NULL) { + Py_DECREF(pArgs); + PyErr_Print(); + } + } else { + if (PyErr_Occurred()) + PyErr_Print(); + fprintf(stderr, "Cannot find function \"pytorch_detect\"\n"); + } + LOG(INFO) << "Detection Graph Loaded" << '\n'; +} + +#if PY_MAJOR_VERSION >= 3 +int* +#else +void +#endif +PyTorchInferencer::init() { + import_array(); +} + +Sample PyTorchInferencer::detectImp(const cv::Mat &image, double confidence_threshold) { + LOG(ERROR) << "DETECT IMP" << "\n"; + if(PyErr_CheckSignals() == -1) { + throw std::runtime_error("Keyboard Interrupt"); + } + cv::Mat rgbImage; + cv::cvtColor(image,rgbImage,cv::COLOR_BGR2RGB); + if(!this->mousy){ + LOG(ERROR) << "DETECT IMP 2" << "\n"; + this->detections.clear(); + int result = gettfInferences(rgbImage, confidence_threshold); + if (result == 0) { + LOG(ERROR) << "Error Occured during getting inferences" << '\n'; + } + } + Sample sample; + RectRegionsPtr regions(new RectRegions()); + RleRegionsPtr rleRegions(new RleRegions()); + ClassTypeGeneric typeConverter(classNamesFile); + for (auto it = detections.begin(), end=detections.end(); it !=end; ++it){ + typeConverter.setId(it->classId); + regions->add(it->boundingBox,typeConverter.getClassString(),it->probability); + if (this->hasMasks) + rleRegions->add(it->rleRegion, typeConverter.getClassString(), it->probability); + + LOG(INFO)<< 
typeConverter.getClassString() << ": " << it->probability << std::endl; + } + + + sample.setColorImage(image); + sample.setRectRegions(regions); + sample.setRleRegions(rleRegions); + sample.SetMousy(this->mousy); + this->mousy=false; + return sample; +} + +void PyTorchInferencer::output_result(int num_detections, int width, int height, PyObject* bounding_boxes, PyObject* detection_scores, PyObject* classIds, PyObject* pDetection_masks ) { + this->hasMasks = false; + int mask_dims; + long long int* mask_shape; + if( PyArray_Check(bounding_boxes) && PyArray_Check(detection_scores) && PyArray_Check(classIds) ) { + PyArrayObject* detection_masks_cont = NULL; + + if (pDetection_masks != NULL && PyArray_Check(pDetection_masks)) { + detection_masks_cont = PyArray_GETCONTIGUOUS( (PyArrayObject*) pDetection_masks ); + this->hasMasks = true; + mask_dims = PyArray_NDIM(detection_masks_cont); + if (mask_dims != 3) { + throw std::invalid_argument("Returned Mask by pytorch doesn't have 2 dimensions"); + } + mask_shape = (long long int*) PyArray_SHAPE(detection_masks_cont); + } + PyArrayObject* bounding_boxes_cont = PyArray_GETCONTIGUOUS( (PyArrayObject*) bounding_boxes ); + PyArrayObject* detection_scores_cont = PyArray_GETCONTIGUOUS( (PyArrayObject*) detection_scores ); + PyArrayObject* classIds_cont = PyArray_GETCONTIGUOUS( (PyArrayObject*) classIds ); + float* bounding_box_data = (float*) bounding_boxes_cont->data; + float* detection_scores_data = (float*) detection_scores_cont->data; + unsigned char* classIds_data = (unsigned char*) classIds_cont->data; + float* detection_masks_data; + if (this->hasMasks) { + detection_masks_data = (float*) detection_masks_cont->data; + } + int i; + int boxes = 0, scores = 0, classes = 0, masks = 0; + + for( i=0; ihasMasks) { + cv::Mat image_mask(height, width, CV_8UC1, cv::Scalar(0)); + cv::Mat mask = cv::Mat(mask_shape[1], mask_shape[2], CV_32F, detection_masks_data + i*mask_shape[1]*mask_shape[2]); + cv::Mat mask_r; + cv::resize(mask, 
mask_r, cv::Size(detections[i].boundingBox.width, detections[i].boundingBox.height)); + cv::Mat mask_char; + mask_r.convertTo(mask_char, CV_8U, 255); + cv::threshold(mask_char, mask_char, 127, 255, cv::THRESH_BINARY); + mask_char.copyTo(image_mask(cv::Rect(detections[i].boundingBox.x,detections[i].boundingBox.y,detections[i].boundingBox.width, detections[i].boundingBox.height))); + RLE forMask; + cv::Mat t_mask = image_mask.t(); + rleEncode( &forMask, t_mask.data, t_mask.cols, t_mask.rows, 1 ); + detections[i].rleRegion = forMask; + } + } + Py_XDECREF(bounding_boxes); + Py_XDECREF(detection_scores); + Py_XDECREF(classIds); + } +} + + +int PyTorchInferencer::gettfInferences(const cv::Mat& image, double confidence_threshold) { + int i, num_detections, dims, sizes[3]; + if (image.channels() == 3) { + dims = 3; + sizes[0] = image.rows; + sizes[1] = image.cols; + sizes[2] = image.channels(); + } else if (image.channels() == 1) { + dims = 2; + sizes[0] = image.rows; + sizes[1] = image.cols; + } else { + LOG(ERROR) << "Invalid Image Passed" << '\n'; + return 0; + } + + npy_intp _sizes[4]; + for( i = 0; i < dims; i++ ) { + _sizes[i] = sizes[i]; + } + PyObject* mynparr = PyArray_SimpleNewFromData(dims, _sizes, NPY_UBYTE, image.data); + PyObject* conf = PyFloat_FromDouble(confidence_threshold); + if (!mynparr || !conf) { + Py_DECREF(pArgs); + Py_DECREF(pModule); + fprintf(stderr, "Cannot convert argument\n"); + return 0; + } + pValue = PyObject_CallMethodObjArgs(pInstance, PyUnicode_FromString("detect"), mynparr, conf, NULL); + Py_DECREF(pArgs); + if (pValue != NULL) { + num_detections = _PyLong_AsInt( PyDict_GetItem(pValue, PyUnicode_FromString("num_detections") ) ); + printf("Num Detections: %d\n", num_detections ); + PyObject* pBounding_boxes = PyDict_GetItem(pValue, PyUnicode_FromString("detection_boxes") ); + PyObject* pDetection_scores = PyDict_GetItem(pValue, PyUnicode_FromString("detection_scores") ); + PyObject* classIds = PyDict_GetItem(pValue, 
PyUnicode_FromString("detection_classes") ); + PyObject* key = PyUnicode_FromString("detection_masks"); + if (PyDict_Contains(pValue, key)) { + PyObject* pDetection_masks = PyDict_GetItem(pValue, PyUnicode_FromString("detection_masks") ); + output_result(num_detections, image.cols, image.rows, pBounding_boxes, pDetection_scores, classIds, pDetection_masks); + } else { + output_result(num_detections, image.cols, image.rows, pBounding_boxes, pDetection_scores, classIds); + } + Py_DECREF(pValue); + } else { + Py_DECREF(pClass); + Py_DECREF(pModule); + PyErr_Print(); + fprintf(stderr,"Call failed\n"); + return 0; + } + return 1; +} + diff --git a/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/PyTorchInferencer.h b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/PyTorchInferencer.h new file mode 100644 index 00000000..91e49a12 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/PyTorchInferencer.h @@ -0,0 +1,42 @@ +#include "pythonWrap.h" +#include "FrameworkInferencer.h" +#include +//#include +#include +#include + +class PyTorchInferencer: public FrameworkInferencer { +public: + PyTorchInferencer(const std::string& netConfig, const std::string& netWeights, const std::string& classNamesFile); + Sample detectImp(const cv::Mat& image, double confidence_threshold); + int gettfInferences(const cv::Mat& image, double confidence_threshold); + void output_result(int num_detections, int width, int height, PyObject* bounding_boxes, PyObject* detection_scores, PyObject* classIds, PyObject* detections_masks=NULL ); + static + #if PY_MAJOR_VERSION >= 3 + int* + #else + void + #endif + init(); + static void CallBackFunc(int event, int x, int y, int flags, void* userdata); +private: + std::string netConfig; + std::string netWeights; + struct detection { + cv::Rect boundingBox; + RLE rleRegion; + float probability; + int classId; + }; + bool mousy; + PyObject *pName, *pModule, *pClass, *pInstance; + PyObject *pArgs, *pValue, *pmodel, *pconfig; 
+ + std::vector detections; + bool hasMasks; + +}; + + +typedef boost::shared_ptr PyTorchInferencerPtr; + diff --git a/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/StatsWriter.cpp b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/StatsWriter.cpp new file mode 100644 index 00000000..ccd5d26c --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/StatsWriter.cpp @@ -0,0 +1,71 @@ +#include "StatsWriter.h" +#include +StatsWriter::StatsWriter(DatasetReaderPtr dataset, std::string& writerFile) { + + this->writerFile = writerFile; + this->writer = std::ofstream(writerFile); + + std::ifstream classNamesReader(dataset->getClassNamesFile()); + + std::string className; + + int counter = 0; + while(getline(classNamesReader, className)) { + + if (className.empty()) + continue; + if (counter == 0) + this->writer << ", " << className; + else + this->writer << ",," << className; + counter++; + this->classNamesinOrder.push_back(className); + } + + this->writer << "\n"; + + for (int i = 0; i< counter; i++) { + this->writer << ", mAP(IOU=0.5:0.95), mAR(IOU=0.5:0.95)"; + } + + this->writer << ", mAP(Overall)(IOU=0.5:0.95), mAR(Overall)(IOU=0.5:0.95)"; + this->writer << " , Mean inference time(ms) , Time Taken in Evaluation (second), Time Taken in Accumulation (second)"; + + this->writer << "\n"; + +} + +void StatsWriter::writeInferencerResults(std::string inferencerName, DetectionsEvaluatorPtr evaluator, unsigned int mean_inference_time) { + + this->writer << inferencerName; + std::map meanAP = evaluator->getClassWiseAP(); + std::map meanAR = evaluator->getClassWiseAR(); + + std::map::const_iterator iter; + + for (std::vector::iterator it = this->classNamesinOrder.begin(); it != this->classNamesinOrder.end(); it++) { + if ((*it).empty()) + continue; + if (meanAP.count(*it)) { + double AP = meanAP.at(*it); + double AR = meanAR.at(*it); + this->writer << ", " << AP << ", " << AR; + } else { + LOG(INFO) << "Class " << *it << " not present!!" 
<< " Skipping"; + this->writer << ",,"; + } + + } + this->writer << ", " << evaluator->getOverallmAP() << ", " << evaluator->getOverallmAR(); + this->writer << ", " << mean_inference_time << ", "<< evaluator->getEvaluationTime() << ", " << evaluator->getAccumulationTime(); + this->writer << "\n"; + LOG(INFO) << "Inference Results Written Successfully"; + + this->writer.flush(); // Update File contents + +} + +void StatsWriter::saveFile() { + this->writer.close(); + LOG(INFO) << "File " << this->writerFile << " Saved Successfully" << '\n'; +} diff --git a/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/StatsWriter.h b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/StatsWriter.h new file mode 100644 index 00000000..808ab3a4 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/StatsWriter.h @@ -0,0 +1,21 @@ +#ifndef SAMPLERGENERATOR_STATSWRITER_H +#define SAMPLERGENERATOR_STATSWRITER_H + +#include +#include +#include + +class StatsWriter { +public: + StatsWriter(DatasetReaderPtr dataset, std::string& writerFile); + void writeInferencerResults(std::string inferencerName, DetectionsEvaluatorPtr evaluator, unsigned int mean_inference_time = 0); + void saveFile(); + +private: + std::ofstream writer; + std::string writerFile; + std::vector classNamesinOrder; +}; + + +#endif //SAMPLERGENERATOR_STATSWRITER_H diff --git a/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/TensorFlowInferencer.cpp b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/TensorFlowInferencer.cpp new file mode 100644 index 00000000..07742b94 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/TensorFlowInferencer.cpp @@ -0,0 +1,268 @@ +#include +#include +#include "TensorFlowInferencer.h" +#include + +// Construcor function for the tensorflow Inferencer +void TensorFlowInferencer::CallBackFunc(int event, int x, int y, int flags, void* userdata) { + ((TensorFlowInferencer *)(userdata))->mousy = true; + for(auto itr = 
((TensorFlowInferencer *)(userdata))->detections.begin(); itr !=((TensorFlowInferencer *)(userdata))->detections.end(); itr++) { + itr->boundingBox.x = x; + itr->boundingBox.y = y; + } +} + +TensorFlowInferencer::TensorFlowInferencer(const std::string &netConfig, const std::string &netWeights,const std::string& classNamesFile): netConfig(netConfig),netWeights(netWeights) { + LOG(INFO) << "in tensorflow constructor" << '\n'; + this->classNamesFile=classNamesFile; + this->mousy = false; + /* Code below adds path of python models to sys.path so as to enable python + interpreter to import custom python modules from the path mentioned. This will + prevent adding python path manually. + */ + + std::string file_path = __FILE__; + std::string dir_path = file_path.substr(0, file_path.rfind("/")); + dir_path = dir_path + "/../python_modules"; + + std::string string_to_run = "import sys\nsys.path.append('" + dir_path + "')\n"; + + /* Initialize the python interpreter.Neccesary step to later call + * the python interpreter from any part of the application. + * */ + Py_Initialize(); + + /* Any python code is run in this format. 
+ str = "print('Hello World')" + PyRun_SimpleString(str) + */ + PyRun_SimpleString(string_to_run.c_str()); + init(); + LOG(INFO) << "InterPreter Initailized" << '\n'; + + pName = PyUnicode_FromString("tensorflow_detect"); + pModule = PyImport_Import(pName); + Py_DECREF(pName); + + LOG(INFO) << "Loading Detection Graph" << '\n'; + + if (pModule != NULL) { + pClass = PyObject_GetAttrString(pModule, "TensorFlowDetector"); + pArgs = PyTuple_New(1); + pmodel = PyUnicode_FromString(netWeights.c_str()); + /* pValue reference stolen here: */ + PyTuple_SetItem(pArgs, 0, pmodel); + /* pFunc is a new reference */ + pInstance = PyObject_CallObject(pClass, pArgs); + + if (pInstance == NULL) { + Py_DECREF(pArgs); + PyErr_Print(); + } + } else { + if (PyErr_Occurred()) { + PyErr_Print(); + } + fprintf(stderr, "Cannot find function \"tensorflow_detect\"\n"); + } + LOG(INFO) << "Detection Graph Loaded" << '\n'; +} + +#if PY_MAJOR_VERSION >= 3 +int* +#else +void +#endif +TensorFlowInferencer::init() { + import_array(); +} + +Sample TensorFlowInferencer::detectImp(const cv::Mat &image, double confidence_threshold) { + if(PyErr_CheckSignals() == -1) { + throw std::runtime_error("Keyboard Interrupt"); + } + + /* + Initialize a matrix to store the image in RGB format. + Currently the format is BGR,below function cvtColor takes the image and stores the new RGB format in rgbImage. + */ + cv::Mat rgbImage; + cv::cvtColor(image,rgbImage,cv::COLOR_BGR2RGB); + if(!this->mousy) { + this->detections.clear(); //remove previous detections + + /* + Get the tensorflow inferences of an image/frame provided the image + and the confidence threshold, and store them in result. 
+ */ + int result = gettfInferences(rgbImage, confidence_threshold); + if (result == 0) { + LOG(ERROR) << "Error Occured during getting inferences" << '\n'; + } + } + + Sample sample; + RectRegionsPtr regions(new RectRegions()); + RleRegionsPtr rleRegions(new RleRegions()); + ClassTypeGeneric typeConverter(classNamesFile); + + // Loop through all the new detections + for (auto it = detections.begin(), end=detections.end(); it !=end; ++it) { + // Set the classID of the detected object. + typeConverter.setId(it->classId); + // Store the bounding boxes,class type,and its probability in regions + regions->add(it->boundingBox,typeConverter.getClassString(),it->probability); + // If masks are also available, store them aswell. + if (this->hasMasks) + rleRegions->add(it->rleRegion, typeConverter.getClassString(), it->probability); + //std::cout<< it->boundingBox.x << " " << it->boundingBox.y << " " << it->boundingBox.height << " " << it->boundingBox.width << std::endl; + LOG(INFO)<< typeConverter.getClassString() << ": " << it->probability << std::endl; + } + + /* + Store the image on which the detection is done, + the set of detections(both the bounding boxes and masks) + in the variable Sample. + */ + sample.setColorImage(image); + sample.setRectRegions(regions); + sample.setRleRegions(rleRegions); + sample.SetMousy(this->mousy); + this->mousy=false; + return sample; +} + +/* +This function converts the output from python scripts into a fromat compatible +DetectionMetrics to read bounding boxes, classes and detection scores, which are +drawn on the image to show detections. 
+*/ + +void TensorFlowInferencer::output_result(int num_detections, int width, int height, PyObject* bounding_boxes, PyObject* detection_scores, PyObject* classIds, PyObject* pDetection_masks) { + this->hasMasks = false; + int mask_dims; + long long int* mask_shape; + if(PyArray_Check(bounding_boxes) && PyArray_Check(detection_scores) && PyArray_Check(classIds)) { + PyArrayObject* detection_masks_cont = NULL; + if (pDetection_masks != NULL && PyArray_Check(pDetection_masks)) { + detection_masks_cont = PyArray_GETCONTIGUOUS((PyArrayObject*) pDetection_masks); + this->hasMasks = true; + mask_dims = PyArray_NDIM(detection_masks_cont); + if (mask_dims != 3) { + throw std::invalid_argument("Returned Mask by tensorflow doesn't have 2 dimensions"); + } + mask_shape = (long long int*) PyArray_SHAPE(detection_masks_cont); + } + PyArrayObject* bounding_boxes_cont = PyArray_GETCONTIGUOUS((PyArrayObject*) bounding_boxes); + PyArrayObject* detection_scores_cont = PyArray_GETCONTIGUOUS((PyArrayObject*) detection_scores); + PyArrayObject* classIds_cont = PyArray_GETCONTIGUOUS((PyArrayObject*) classIds); + float* bounding_box_data = (float*) bounding_boxes_cont->data; // not copying data + float* detection_scores_data = (float*) detection_scores_cont->data; + unsigned char* classIds_data = (unsigned char*) classIds_cont->data; + float* detection_masks_data; + if (this->hasMasks) { + detection_masks_data = (float*) detection_masks_cont->data; + } + int i; + int boxes = 0, scores = 0, classes = 0, masks = 0; + + for(i=0; ihasMasks) { + cv::Mat image_mask(height, width, CV_8UC1, cv::Scalar(0)); + cv::Mat mask = cv::Mat(mask_shape[1], mask_shape[2], CV_32F, detection_masks_data + i*mask_shape[1]*mask_shape[2]); + cv::Mat mask_r; + cv::resize(mask, mask_r, cv::Size(detections[i].boundingBox.width, detections[i].boundingBox.height)); + cv::Mat mask_char; + mask_r.convertTo(mask_char, CV_8U, 255); + cv::threshold(mask_char, mask_char, 127, 255, cv::THRESH_BINARY); + 
mask_char.copyTo(image_mask(cv::Rect(detections[i].boundingBox.x,detections[i].boundingBox.y,detections[i].boundingBox.width, detections[i].boundingBox.height))); + RLE forMask; + cv::Mat t_mask = image_mask.t(); + rleEncode( &forMask, t_mask.data, t_mask.cols, t_mask.rows, 1 ); + detections[i].rleRegion = forMask; + } + } + // clean + Py_XDECREF(bounding_boxes); + Py_XDECREF(detection_scores); + Py_XDECREF(classIds); + } +} + + +/* This function gets inferences from the Python script by calling coressponding +function and the uses output_result() to convert it into a DetectionMetrics C++ +readble format. +*/ + +int TensorFlowInferencer::gettfInferences(const cv::Mat& image, double confidence_threshold) { + int i, num_detections, dims, sizes[3]; + // Check if it is RGB image and store the dimensions of the image in "sizes" + if (image.channels() == 3) { + dims = 3; + sizes[0] = image.rows; + sizes[1] = image.cols; + sizes[2] = image.channels(); + + // Check if it is a monochromatic image and repeat the above. 
+ } else if (image.channels() == 1) { + dims = 2; + sizes[0] = image.rows; + sizes[1] = image.cols; + } else { + LOG(ERROR) << "Invalid Image Passed" << '\n'; + return 0; + } + + npy_intp _sizes[4]; + + for(i = 0; i < dims; i++) { + _sizes[i] = sizes[i]; + } + + PyObject* mynparr = PyArray_SimpleNewFromData(dims, _sizes, NPY_UBYTE, image.data); + PyObject* conf = PyFloat_FromDouble(confidence_threshold); + + if (!mynparr || !conf) { + Py_DECREF(pArgs); + Py_DECREF(pModule); + fprintf(stderr, "Cannot convert argument\n"); + return 0; + } + + //pValue = PyObject_CallObject(pFunc, pArgs); + pValue = PyObject_CallMethodObjArgs(pInstance, PyUnicode_FromString("detect"), mynparr, conf, NULL); + + Py_DECREF(pArgs); + if (pValue != NULL) { + num_detections = _PyLong_AsInt( PyDict_GetItem(pValue, PyUnicode_FromString("num_detections") ) ); + printf("Num Detections: %d\n", num_detections ); + PyObject* pBounding_boxes = PyDict_GetItem(pValue, PyUnicode_FromString("detection_boxes") ); + PyObject* pDetection_scores = PyDict_GetItem(pValue, PyUnicode_FromString("detection_scores") ); + PyObject* classIds = PyDict_GetItem(pValue, PyUnicode_FromString("detection_classes") ); + PyObject* key = PyUnicode_FromString("detection_masks"); + if (PyDict_Contains(pValue, key)) { + PyObject* pDetection_masks = PyDict_GetItem(pValue, PyUnicode_FromString("detection_masks") ); + output_result(num_detections, image.cols, image.rows, pBounding_boxes, pDetection_scores, classIds, pDetection_masks); + } else { + output_result(num_detections, image.cols, image.rows, pBounding_boxes, pDetection_scores, classIds); + } + Py_DECREF(pValue); + } else { + Py_DECREF(pClass); + Py_DECREF(pModule); + PyErr_Print(); + fprintf(stderr,"Call failed\n"); + + return 0; + } + return 1; +} diff --git a/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/TensorFlowInferencer.h b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/TensorFlowInferencer.h new file mode 100644 index 00000000..d17e44c3 --- 
/dev/null +++ b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/TensorFlowInferencer.h @@ -0,0 +1,42 @@ +//#include +#include "pythonWrap.h" +#include "FrameworkInferencer.h" +#include +//#include +#include +#include + +class TensorFlowInferencer: public FrameworkInferencer { +public: + TensorFlowInferencer(const std::string& netConfig, const std::string& netWeights, const std::string& classNamesFile); + Sample detectImp(const cv::Mat& image, double confidence_threshold); + int gettfInferences(const cv::Mat& image, double confidence_threshold); + void output_result(int num_detections, int width, int height, PyObject* bounding_boxes, PyObject* detection_scores, PyObject* classIds, PyObject* detections_masks=NULL ); + static + #if PY_MAJOR_VERSION >= 3 + int* + #else + void + #endif + init(); + static void CallBackFunc(int event, int x, int y, int flags, void* userdata); +private: + std::string netConfig; + std::string netWeights; + struct detection { + cv::Rect boundingBox; + RLE rleRegion; + float probability; + int classId; + }; + bool mousy; + PyObject *pName, *pModule, *pClass, *pInstance; + PyObject *pArgs, *pValue, *pmodel; + + std::vector detections; + bool hasMasks; + +}; + + +typedef boost::shared_ptr TensorFlowInferencerPtr; diff --git a/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/pythonWrap.h b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/pythonWrap.h new file mode 100644 index 00000000..c5022f07 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/FrameworkEvaluator/pythonWrap.h @@ -0,0 +1,4 @@ +#pragma push_macro("slots") +#undef slots +#include "Python.h" +#pragma pop_macro("slots") diff --git a/DetectionMetrics/DetectionMetricsLib/GenerationUtils/BoundingRectGuiMover.cpp b/DetectionMetrics/DetectionMetricsLib/GenerationUtils/BoundingRectGuiMover.cpp new file mode 100644 index 00000000..c18d5ebe --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/GenerationUtils/BoundingRectGuiMover.cpp @@ -0,0 +1,90 @@ +// +// 
Created by frivas on 22/11/16. +// + +#include "BoundingRectGuiMover.h" + +BoundingRectGuiMover::BoundingRectGuiMover(const std::vector &points):points(points) { + +} + +BoundingRectGuiMover::BoundingRectGuiMover(const cv::Rect_ &rectangle) { + points.push_back(cv::Point(rectangle.x,rectangle.y)); + points.push_back(cv::Point(rectangle.x + rectangle.width,rectangle.y)); + points.push_back(cv::Point(rectangle.x + rectangle.width,rectangle.y + rectangle.height)); + points.push_back(cv::Point(rectangle.x,rectangle.y + rectangle.height)); +} + + + + +std::vector BoundingRectGuiMover::getPoints() { + return points; +} + +void BoundingRectGuiMover::move(const cv::Point &from, const cv::Point &to,const MovementType& type) { + if (type == LOCAL_MOVEMENT) { + unsigned int idx1, idx2; + + getClosestsLinePoints(from, idx1, idx2); + + cv::Point &p1 = points[idx1]; + cv::Point &p2 = points[idx2]; + + + // std::cout << "closest points: " << p1 << ", " << p2<< std::endl; + cv::Point movement; + if (isVertical(p1, p2)) { + movement = cv::Point(to.x - from.x, 0); + } else { + movement = cv::Point(0, to.y - from.y); + } + + // std::cout << "Vertical: " << isVertical(p1,p2) << " Movement: " << movement << std::endl; + // std::cout << "Previous : " << p1 << ", " << p2 << std::endl; + p1 = p1 + movement; + p2 = p2 + movement; + } + else if (type == GLOBAL_MOVEMENT){ + cv::Point movement= to - from; + for (auto it = points.begin(), end= points.end(); it != end; ++it){ + cv::Point &point = *it; + point= point + movement; + } + } + + +} + +void BoundingRectGuiMover::getClosestsLinePoints(const cv::Point &from, unsigned int &p1, unsigned int &p2) { + + double minDistance=999999999; + +// std::cout << "number of points: " << points.size() << std::endl; + + for (unsigned int i=0; i < points.size(); i++){ + int j=i+1; + if (j >= points.size()) + j=0; + + cv::Point midpoint = (points[i] + points[j])*0.5; + double distance = cv::norm(midpoint-from); +// std::cout << "distance:" << distance << 
std::endl; + if (distance < minDistance){ + minDistance = distance; + p1 = i; + p2 = j; + } + } +} + +bool BoundingRectGuiMover::isVertical(const cv::Point &from, const cv::Point &to) { + int xMovement = abs(from.x-to.x); + int yMovement = abs(from.y-to.y); +// std::cout << "Xmov: " << xMovement << ", ymov: " << yMovement << std::endl; + return abs(from.x-to.x) < abs(from.y-to.y); +} + +cv::Rect_ BoundingRectGuiMover::getRect(const double scale) { + return cv::Rect_(points[0].x/scale, points[0].y/scale,(points[1].x - points[0].x)/scale,(points[2].y - points[1].y)/scale); +} diff --git a/DetectionMetrics/DetectionMetricsLib/GenerationUtils/BoundingRectGuiMover.h b/DetectionMetrics/DetectionMetricsLib/GenerationUtils/BoundingRectGuiMover.h new file mode 100644 index 00000000..f1d7c208 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/GenerationUtils/BoundingRectGuiMover.h @@ -0,0 +1,31 @@ +// +// Created by frivas on 22/11/16. +// + +#ifndef SAMPLERGENERATOR_BOUDINGRECT_H +#define SAMPLERGENERATOR_BOUDINGRECT_H + +#include + +struct BoundingRectGuiMover { +public: + enum MovementType{ LOCAL_MOVEMENT, GLOBAL_MOVEMENT, NONE}; + BoundingRectGuiMover(const std::vector& points); + BoundingRectGuiMover(const cv::Rect_& rectangle); + + std::vector getPoints(); + void move(const cv::Point& from, const cv::Point& to, const MovementType& type); + cv::Rect_ getRect(const double scale=1); + + +private: + std::vector points; + + + void getClosestsLinePoints(const cv::Point &from,unsigned int& p1, unsigned int& p2); + bool isVertical(const cv::Point& from, const cv::Point& to); + +}; + + +#endif //SAMPLERGENERATOR_BOUDINGRECT_H diff --git a/DetectionMetrics/DetectionMetricsLib/GenerationUtils/BoundingValidator.cpp b/DetectionMetrics/DetectionMetricsLib/GenerationUtils/BoundingValidator.cpp new file mode 100644 index 00000000..e25abfa9 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/GenerationUtils/BoundingValidator.cpp @@ -0,0 +1,196 @@ +// +// Created by frivas on 
22/11/16. +// + +#include "BoundingValidator.h" + +BoundingValidator::BoundingValidator(const cv::Mat &image_in,double scale) { + this->image=image_in.clone(); + this->scale=scale; +// cv::cvtColor(this->image, this->image, CV_RGB2BGR); + cv::resize(this->image,this->image,cv::Size(), scale,scale); + clicked=false; + movementType = BoundingRectGuiMover::NONE; + +} + +bool BoundingValidator::validate(std::vector &bounding, cv::Rect_& validatedBound, int& key) { + + cv::Rect_ boundingRectangle = cv::boundingRect(bounding); + validate(boundingRectangle,validatedBound,key); + + +} + + + + + +void BoundingValidator::CallBackFunc(int event, int x, int y, int flags, void* userdata) +{ + auto rect = (BoundingRectGuiMover*)userdata; + if ( event == cv::EVENT_LBUTTONDOWN ) + { + if (!clicked) { + movementType = BoundingRectGuiMover::LOCAL_MOVEMENT; + clicked = true; + from=cv::Point(x,y); + tempFrom=cv::Point(x,y); + + } + } + else if ( event == cv::EVENT_LBUTTONUP ) + { + if (clicked) { + movementType = BoundingRectGuiMover::NONE; + clicked = false; + /*to=cv::Point(x,y); + std::cout << "moving from: " << from << ", to: " << to << std::endl; + rect->move(from,to);*/ + } + } + else if ( event == cv::EVENT_MOUSEMOVE ) + { + if (clicked) { + to=cv::Point(x,y); +// std::cout << "moving from: " << from << ", to: " << to << std::endl; + rect->move(tempFrom,to,movementType); + tempFrom=cv::Point(x,y); + + + } + } + else if (event == cv::EVENT_MBUTTONDOWN){ + if (!clicked) { + movementType = BoundingRectGuiMover::GLOBAL_MOVEMENT; + clicked = true; + from=cv::Point(x,y); + tempFrom=cv::Point(x,y); + + } + } + else if (event == cv::EVENT_MBUTTONUP){ + movementType = BoundingRectGuiMover::NONE; + if (clicked) { + clicked = false; + } + } +} + +bool BoundingValidator::validate(const cv::Rect_ &bounding, cv::Rect_ &validatedBound, int &key) { + + + cv::Rect_ scaledBounding((int)(bounding.x * this->scale), + (int)(bounding.y*this->scale), + (int)(bounding.width * this->scale), + 
(int)(bounding.height*this->scale)); + + BoundingRectGuiMover rect(scaledBounding); + std::vector validationKeys; + validationKeys.push_back('1'); + validationKeys.push_back('2'); + validationKeys.push_back('3'); + char rejectionKey='n'; + key='0'; + while (char(key) != rejectionKey and (std::find(validationKeys.begin(), validationKeys.end(), char(key))== validationKeys.end() )) { + cv::Mat image2show= this->image.clone(); + cv::rectangle(image2show, rect.getRect(), cv::Scalar(255, 0, 0), 3); + std::string windowName="Validation window"; + cv::namedWindow(windowName, 1); + cv::setMouseCallback(windowName, CallBackFunc, &rect); + cv::imshow(windowName, image2show); + key=cv::waitKey(1); + //std::cout << "key: " << char(key) << std::endl; + } + validatedBound=rect.getRect(this->scale); + + + return std::find(validationKeys.begin(), validationKeys.end(), char(key))!= validationKeys.end(); +} + +bool BoundingValidator::validateNDetections(std::vector ®ions) { + auto imageInputRects=updateRegionsImage(regions); + + std::string windowName="Validate number of detecions"; + + char key='p'; + cv::Rect_ rect; + char rejectionKey='q'; + while ((key != ' ') and (key != rejectionKey)){ + cv::Mat image2show= imageInputRects.clone(); + + cv::setMouseCallback(windowName, CallBackFuncNumberDetections, &rect); + if (rect != cv::Rect_()){ + cv::rectangle(image2show,rect,cv::Scalar(0,255,0),int(this->scale)); + if (!clicked){ + cv::Rect_ newScaledRect=cv::Rect_(int(rect.x/scale), + int(rect.y/scale), + int(rect.width/scale), + int(rect.height/scale)); + RectRegion newRegion(newScaledRect,"person"); + regions.push_back(newRegion); + imageInputRects=updateRegionsImage(regions); + rect=cv::Rect_(); + } + } + cv::imshow(windowName,image2show); + key=(char)cv::waitKey(100); + } + if (key == 'q'){ + regions.clear(); + } + cv::destroyWindow(windowName); + return false; +} + + + +void BoundingValidator::CallBackFuncNumberDetections(int event, int x, int y, int flags, void* userdata) +{ + auto 
rect = (cv::Rect_*)userdata; + if ( event == cv::EVENT_LBUTTONDOWN ) + { + if (!clicked) { + movementType = BoundingRectGuiMover::LOCAL_MOVEMENT; + clicked = true; + from=cv::Point(x,y); + tempFrom=cv::Point(x,y); + rect->x=x; + rect->y=y; + rect->width=1; + rect->height=1; + + } + } + else if ( event == cv::EVENT_LBUTTONUP ) + { + if (clicked) { + movementType = BoundingRectGuiMover::NONE; + clicked = false; + rect->width=to.x - from.x; + rect->height=to.y - from.y; + } + } + else if ( event == cv::EVENT_MOUSEMOVE ) + { + if (clicked) { + to=cv::Point(x,y); + rect->width=to.x - from.x; + rect->height=to.y - from.y; + } + } + +} + +cv::Mat BoundingValidator::updateRegionsImage(const std::vector ®ions) { + cv::Mat imageInputRects= this->image.clone(); + for (auto it:regions) { + auto bounding=it.region; + cv::Rect_ scaledBounding(int(bounding.x * this->scale), + int(bounding.y*this->scale), + int(bounding.width * this->scale), + int(bounding.height*this->scale)); + cv::rectangle(imageInputRects, scaledBounding, cv::Scalar(0, 255, 255), 3); + } + return imageInputRects; +} diff --git a/DetectionMetrics/DetectionMetricsLib/GenerationUtils/BoundingValidator.h b/DetectionMetrics/DetectionMetricsLib/GenerationUtils/BoundingValidator.h new file mode 100644 index 00000000..c1ad0e5a --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/GenerationUtils/BoundingValidator.h @@ -0,0 +1,44 @@ +// +// Created by frivas on 22/11/16. 
+// + +#ifndef SAMPLERGENERATOR_BOUNDINGVALIDATOR_H +#define SAMPLERGENERATOR_BOUNDINGVALIDATOR_H + + +#include +#include +#include "BoundingRectGuiMover.h" + +static bool clicked; +static cv::Point from, to; +static cv::Point tempFrom; +static BoundingRectGuiMover::MovementType movementType; + +class BoundingValidator { +public: + explicit BoundingValidator(const cv::Mat& image_in, double scale=3); + bool validate(std::vector& bounding,cv::Rect_& validatedBound, int& key); + bool validate(const cv::Rect_& bounding,cv::Rect_& validatedBound, int& key); + bool validateNDetections(std::vector& regions); + + +private: + double scale; + cv::Mat image; + + + static void CallBackFunc(int event, int x, int y, int flags, void* userdata); + static void CallBackFuncNumberDetections(int event, int x, int y, int flags, void* userdata); + + + + + + cv::Mat updateRegionsImage(const std::vector& regions); + + +}; + + +#endif //SAMPLERGENERATOR_BOUNDINGVALIDATOR_H diff --git a/DetectionMetrics/DetectionMetricsLib/GenerationUtils/CMakeLists.txt b/DetectionMetrics/DetectionMetricsLib/GenerationUtils/CMakeLists.txt new file mode 100644 index 00000000..d9b86b39 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/GenerationUtils/CMakeLists.txt @@ -0,0 +1,21 @@ +SET(GenerationUtils_SOURCE_FILES + DepthForegroundSegmentator + BoundingValidator + BoundingRectGuiMover + DetectionsValidator +) + +ADD_LIBRARY(DetectionMetrics_GenerationUtils OBJECT ${GenerationUtils_SOURCE_FILES}) + + + +TARGET_INCLUDE_DIRECTORIES (DetectionMetrics_GenerationUtils PUBLIC + ${OpenCV_INCLUDE_DIRS} + ${Boost_INCLUDE_DIR} + ${GLOG_INCLUDE_DIRS} + ${depthLib_INCLUDE_DIRS} + ${utils_INCLUDE_DIRS} + ${CMAKE_CURRENT_LIST_DIR} + ${DetectionMetrics_INCLUDE_DIR} + ${QT_INCLUDE_DIRS} +) diff --git a/DetectionMetrics/DetectionMetricsLib/GenerationUtils/DepthForegroundSegmentator.cpp b/DetectionMetrics/DetectionMetricsLib/GenerationUtils/DepthForegroundSegmentator.cpp new file mode 100644 index 00000000..855c0ef3 --- 
/dev/null +++ b/DetectionMetrics/DetectionMetricsLib/GenerationUtils/DepthForegroundSegmentator.cpp @@ -0,0 +1,214 @@ +// +// Created by frivas on 17/11/16. +// + +#include "DepthForegroundSegmentator.h" + + + +DepthForegroundSegmentator::DepthForegroundSegmentator(bool filterActive):filterActive(filterActive) { + +#if CV_MAJOR_VERSION == 3 || CV_MAJOR_VERSION == 4 + std::cerr << "OpenCV 3 is not working with doubles foreground segmentation" << std::endl; + throw "Opencv v3 is not supported"; +#else +#endif + + if (this->filterActive){ + this->filter= boost::shared_ptr(new jderobot::DepthFilter()); + } + + defaultLearningRate=0.0001; + minBlobArea=400; +} + + +std::vector> DepthForegroundSegmentator::process(const cv::Mat &image) { + cv::Mat localImage = image.clone(); + + cv::imshow("localImage", localImage); + if (this->filterActive){ + cv::Mat temp; + this->filter->update(localImage,temp); + temp.copyTo(localImage); + } + + std::vector layers; + cv::split(localImage, layers); + + cv::Mat distance(localImage.rows, localImage.cols, CV_32FC1,cv::Scalar(0,0,0)); // muestreada + cv::Mat realDistance(localImage.rows, localImage.cols, CV_32FC1); //distancia real + + //discretizamos la imagen de profundidad + + int val=0; + for (int x=0; x< layers[1].cols ; x++){ + for (int y=0; y(y,x)<<8)|(int)layers[2].at(y,x); + distance.at(y,x) = float(floor((pow(d,1./4.)/10)*1600)); + realDistance.at(y,x) = d; + val++; + } + } + cv::Mat normDistanceFloat; + cv::normalize(distance,normDistanceFloat,0, 255, cv::NORM_MINMAX, CV_32F); + cv::Mat normDistance; + normDistanceFloat.convertTo(normDistance,CV_8UC1); + + + cv::Mat normRealDistanceFloat; + cv::normalize(realDistance,normRealDistanceFloat,0, 255, cv::NORM_MINMAX, CV_32F); + cv::Mat normRealDistance; + normRealDistanceFloat.convertTo(normRealDistance,CV_8UC1); + + cv::imshow("distance", normDistance); + cv::imshow("realDistance", normRealDistance); + + + + if (!this->bg){ +#if CV_MAJOR_VERSION == 3 || CV_MAJOR_VERSION == 4 + 
this->bg= cv::createBackgroundSubtractorMOG2(); +// this->bg= cv::createBackgroundSubtractorKNN(500,20000); + bg->apply(distance,fore,defaultLearningRate); +#else + this->bg=new cv::BackgroundSubtractorMOG2(); + bg->operator()(distance,fore,defaultLearningRate); +#endif + + } +#if CV_MAJOR_VERSION == 3 || CV_MAJOR_VERSION == 4 + bg->apply(distance,fore,defaultLearningRate); +#else + bg->operator()(distance,fore,defaultLearningRate); +#endif + + + cv::imshow("fore",fore); + cv::Mat back(240,320,CV_8UC3,cv::Scalar(0,0,0)); + cv::erode(fore,fore,cv::Mat()); + cv::dilate(fore,fore,cv::Mat()); + std::vector> contours; + std::vector hierarchy; + cv::findContours(fore,contours, hierarchy,cv::RETR_TREE,cv::CHAIN_APPROX_SIMPLE); + + + + + cv::Mat dst1=cv::Mat(distance.size(), CV_8UC1,cv::Scalar(0)); + + std::vector> goodContours; + int cCounter1=0; + int idx = 0; + if (contours.size() != 0){ + for( ; idx >= 0; idx = hierarchy[idx][0] ) + { + //area minima!!! + double area0 = contourArea(contours[idx]); + //UMBRAL DE AREA + if (area0< this->minBlobArea) + continue; + std::vector a = contours[idx]; + goodContours.push_back(a); + /*cv::Scalar color( 255); + cv::drawContours( dst1, contours, idx, color, CV_FILLED, 8, hierarchy ); + std::cout << "something detected" << std::endl;*/ + } + } + + return goodContours; + +} + + + +cv::Mat DepthForegroundSegmentator::process2(const cv::Mat &image) { + /* cv::Mat localImage = image.clone(); + + std::cout << "size: " << localImage.size() << std::endl; + cv::imshow("localImage", localImage); + if (this->filterActive){ + cv::Mat temp; + this->filter->update(localImage,temp); + temp.copyTo(localImage); + } + + std::vector layers; + cv::split(localImage, layers); + + cv::Mat distance(localImage.rows, localImage.cols, CV_32FC1,cv::Scalar(0,0,0)); // muestreada + cv::Mat realDistance(localImage.rows, localImage.cols, CV_32FC1); //distancia real + + //discretizamos la imagen de profundidad + + std::cout << "size: " << layers[1].cols << ", " << 
layers[1].rows << std::endl; + int val=0; + for (int x=0; x< layers[1].cols ; x++){ + for (int y=0; y(y,x)<<8)|(int)layers[2].at(y,x); + distance.at(y,x) = float(floor((pow(d,1./4.)/10)*1600)); + realDistance.at(y,x) = d; + val++; + } + } + cv::Mat normDistanceFloat; + cv::normalize(distance,normDistanceFloat,0, 255, cv::NORM_MINMAX, CV_32F); + cv::Mat normDistance; + normDistanceFloat.convertTo(normDistance,CV_8UC1); + + + cv::Mat normRealDistanceFloat; + cv::normalize(realDistance,normRealDistanceFloat,0, 255, cv::NORM_MINMAX, CV_32F); + cv::Mat normRealDistance; + normRealDistanceFloat.convertTo(normRealDistance,CV_8UC1); + + cv::imshow("distance", normDistance); + cv::imshow("realDistance", normRealDistance); + + + + if (!this->bg){ + this->bg= cv::createBackgroundSubtractorMOG2(); + //this->bg= cv::createBackgroundSubtractorKNN(500,20000); + bg->apply(distance,fore,defaultLearningRate); + } + bg->apply(normRealDistance,fore,defaultLearningRate); + + std::cout << "size: " << fore.size() << std::endl; + + + cv::imshow("fore",fore); + cv::waitKey(1); + + cv::Mat back(240,320,CV_8UC3,cv::Scalar(0,0,0)); + + cv::erode(fore,fore,cv::Mat()); + cv::dilate(fore,fore,cv::Mat()); + std::vector > contours; + std::vector hierarchy; + cv::findContours(fore,contours, hierarchy,CV_RETR_TREE,CV_CHAIN_APPROX_SIMPLE); + + + + + cv::Mat dst1=cv::Mat(distance.size(), CV_8UC1,cv::Scalar(0)); + std::vector mc1( contours.size() ); + int cCounter1=0; + int idx = 0; + if (contours.size() != 0){ + for( ; idx >= 0; idx = hierarchy[idx][0] ) + { + //area minima!!! 
+ double area0 = contourArea(contours[idx]); + //UMBRAL DE AREA + if (area0< this->minBlobArea) + continue; + cv::Scalar color( 255); + cv::drawContours( dst1, contours, idx, color, CV_FILLED, 8, hierarchy ); + std::cout << "something detected" << std::endl; + } + } + + return dst1;*/ + +} diff --git a/DetectionMetrics/DetectionMetricsLib/GenerationUtils/DepthForegroundSegmentator.h b/DetectionMetrics/DetectionMetricsLib/GenerationUtils/DepthForegroundSegmentator.h new file mode 100644 index 00000000..c1a8f89a --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/GenerationUtils/DepthForegroundSegmentator.h @@ -0,0 +1,34 @@ +// +// Created by frivas on 17/11/16. +// + +#ifndef SAMPLERGENERATOR_DEPTHFOREGROUNDSEGMENTADOR_H +#define SAMPLERGENERATOR_DEPTHFOREGROUNDSEGMENTADOR_H +#include +#include +#include +#include + +class DepthForegroundSegmentator { +public: + DepthForegroundSegmentator(bool filterActive=true); + std::vector>process(const cv::Mat& image); + cv::Mat process2(const cv::Mat& image); + + + +private: +#if CV_MAJOR_VERSION == 3 + cv::BackgroundSubtractorMOG2* bg; +#else + cv::Ptr bg; +#endif + cv::Mat fore; + bool filterActive; + boost::shared_ptr filter; + double defaultLearningRate; + double minBlobArea; +}; + + +#endif //SAMPLERGENERATOR_DEPTHFOREGROUNDSEGMENTADOR_H diff --git a/DetectionMetrics/DetectionMetricsLib/GenerationUtils/DetectionsValidator.cpp b/DetectionMetrics/DetectionMetricsLib/GenerationUtils/DetectionsValidator.cpp new file mode 100644 index 00000000..020710d2 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/GenerationUtils/DetectionsValidator.cpp @@ -0,0 +1,192 @@ +// +// Created by frivas on 20/01/17. 
+// + +#include +#include +#include "DetectionsValidator.h" +#include "BoundingValidator.h" +#include +#include +#include + + + +DetectionsValidator::DetectionsValidator(const std::string& pathToSave,double scale):validationCounter(0), path(pathToSave),scale(scale) { + auto boostPath= boost::filesystem::path(this->path); + if (!boost::filesystem::exists(boostPath)){ + boost::filesystem::create_directories(boostPath); + } + else{ + boost::filesystem::directory_iterator end_itr; + + for (boost::filesystem::directory_iterator itr(boostPath); itr!=end_itr; ++itr) + { + if ((is_regular_file(itr->status()) && itr->path().extension()==".png") && (itr->path().string().find("-depth") == std::string::npos)) { + validationCounter++; + } + + } + if (this->validationCounter != 0) { + LOG(WARNING) << "Including samples to an existing dataset, starting with: " + + std::to_string(this->validationCounter); + char confirmation = 'a'; + while (confirmation != 'y' && confirmation != 'n') { + LOG(INFO) << "Do you want to continue? 
(y/n) \n"; + std::cin >> confirmation; + } + if (confirmation == 'n') { + LOG(WARNING) << "Exiting"; + exit(1); + } + } + } + +} + +DetectionsValidator::~DetectionsValidator()=default; + + + +void DetectionsValidator::validate(const cv::Mat& colorImage,const cv::Mat& depthImage, std::vector>& detections){ + + cv::Mat mask=cv::Mat(colorImage.size(), CV_8UC1,cv::Scalar(0)); + + for (auto it= detections.begin(), end = detections.end(); it != end; ++it){ + int idx= (int)std::distance(detections.begin(),it); + cv::Scalar color( 150); + cv::drawContours( mask, detections, idx, color, -1, 8); + } + + + std::vector channels; + cv::split(colorImage,channels); + cv::Mat colorMask(colorImage.size(),CV_8UC1,cv::Scalar(150)); + colorMask.copyTo(channels[0],mask); + cv::Mat image2show; + cv::merge(channels,image2show); + + + RectRegionsPtr regions(new RectRegions()); + ContourRegionsPtr cRegions(new ContourRegions()); + + BoundingValidator validator(image2show,scale); + for (auto it : detections){ + cv::Rect_ validatedRect; + int classVal; + if (validator.validate(it,validatedRect,classVal)){ + std::string validationID; + if (char(classVal)=='1') { + validationID = "person"; + } + else if (char(classVal)=='2') + validationID="person-falling"; + else if (char(classVal)=='3') + validationID="person-fall"; + + + fillRectIntoImageDimensions(validatedRect,colorImage.size()); + LOG(INFO)<<"Validated"; + regions->add(validatedRect,validationID); + cRegions->add(it,validationID); + } + else{ + LOG(INFO)<<"Discarded"; + } + } + + if (not regions->getRegions().empty()){ + Sample sample(colorImage,depthImage,regions,cRegions); + sample.save(this->path,this->validationCounter); + this->validationCounter++; + } +} + +void DetectionsValidator::fillRectIntoImageDimensions(cv::Rect_ &rect, const cv::Size size) { + //check the format x,y -> w.h + + if (rect.width < 0){ + rect.x=rect.x-rect.width; + rect.width*=-1; + } + if (rect.height < 0){ + rect.y=rect.y-rect.height; + rect.height*=-1; + } + 
+ if (rect.x + rect.width > size.width){ + rect.width = size.width - rect.x - 1; + } + if (rect.y + rect.height > size.height){ + rect.height = size.height - rect.y -1; + } + + if (rect.x < 0){ + rect.width=rect.width + rect.x; + rect.x=0; + } + if (rect.y <0){ + rect.height = rect.height + rect.y; + rect.y=0; + } + +} + +void DetectionsValidator::validate(const Sample &inputSample) { + auto rectDetections = inputSample.getRectRegions()->getRegions(); + RectRegionsPtr validatedRegions(new RectRegions()); + cv::Mat initialImage=inputSample.getColorImage().clone(); + cv::imshow("Source Image", initialImage); + cv::waitKey(100); + LOG(INFO) << "Number of detections: " << rectDetections.size() << std::endl; + + BoundingValidator validatorNumber(initialImage, this->scale); + + + validatorNumber.validateNDetections(rectDetections); + + + for (auto it= rectDetections.begin(), end=rectDetections.end(); it != end; ++it){ + //draw all detections + cv::Mat currentTestImage=initialImage.clone(); + + for (auto it2= rectDetections.begin(), end2=rectDetections.end(); it2 != end2; ++it2) { + if (it2== it) + continue; + cv::rectangle(currentTestImage,it2->region,cv::Scalar(255,255,0)); + cv::imshow("Source Image", currentTestImage); + cv::waitKey(100); + + } + BoundingValidator validator(currentTestImage, this->scale); + + + + cv::Rect_ validatedRect; + int classVal; + if (validator.validate(it->region,validatedRect,classVal)){ + std::string validationID; + if (char(classVal)=='1') { + validationID = "person"; + } + else if (char(classVal)=='2') + validationID="person-falling"; + else if (char(classVal)=='3') + validationID="person-fall"; + + + fillRectIntoImageDimensions(validatedRect,inputSample.getColorImage().size()); + LOG(INFO)<<"Validated"; + validatedRegions->add(validatedRect,validationID); + } + else{ + LOG(INFO)<<"Discarded"; + } + } + + if (not validatedRegions->getRegions().empty()){ + Sample 
sample(inputSample.getColorImage(),inputSample.getDepthImage(),validatedRegions); + sample.save(this->path,this->validationCounter); + this->validationCounter++; + } +} diff --git a/DetectionMetrics/DetectionMetricsLib/GenerationUtils/DetectionsValidator.h b/DetectionMetrics/DetectionMetricsLib/GenerationUtils/DetectionsValidator.h new file mode 100644 index 00000000..8ad0b27c --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/GenerationUtils/DetectionsValidator.h @@ -0,0 +1,28 @@ +// +// Created by frivas on 20/01/17. +// + +#ifndef SAMPLERGENERATOR_DETECTIONSVALIDATOR_H +#define SAMPLERGENERATOR_DETECTIONSVALIDATOR_H + + +#include +#include + +class DetectionsValidator { +public: + DetectionsValidator(const std::string& pathToSave, double scale=3); + ~DetectionsValidator(); + void validate(const cv::Mat& colorImage,const cv::Mat& depthImage, std::vector>& detections); + void validate(const Sample& inputSample); + +private: + int validationCounter; + std::string path; + double scale; + + void fillRectIntoImageDimensions(cv::Rect_& rect, const cv::Size size); +}; + + +#endif //SAMPLERGENERATOR_DETECTIONSVALIDATOR_H diff --git a/DetectionMetrics/DetectionMetricsLib/Regions/CMakeLists.txt b/DetectionMetrics/DetectionMetricsLib/Regions/CMakeLists.txt new file mode 100644 index 00000000..97e84a7c --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Regions/CMakeLists.txt @@ -0,0 +1,26 @@ +SET(Regions_SOURCE_FILES + RectRegions + RleRegions + ContourRegions + Regions.h + Region + RectRegion + RleRegion + ContourRegion + maskApi +) + +ADD_LIBRARY(DetectionMetrics_Regions OBJECT ${Regions_SOURCE_FILES}) + + + +TARGET_INCLUDE_DIRECTORIES ( DetectionMetrics_Regions PUBLIC + ${OpenCV_INCLUDE_DIRS} + ${Boost_INCLUDE_DIR} + ${GLOG_INCLUDE_DIRS} + ${utils_INCLUDE_DIRS} + ${CMAKE_CURRENT_LIST_DIR} + ${QT_INCLUDE_DIRS} + ${DetectionMetrics_INCLUDE_DIR} + +) diff --git a/DetectionMetrics/DetectionMetricsLib/Regions/ContourRegion.cpp 
b/DetectionMetrics/DetectionMetricsLib/Regions/ContourRegion.cpp new file mode 100644 index 00000000..f1e81bb9 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Regions/ContourRegion.cpp @@ -0,0 +1,14 @@ +// +// Created by frivas on 26/01/17. +// + +#include "ContourRegion.h" + +ContourRegion::ContourRegion(const ContourRegion &other) { + this->classID=other.classID; + if (other.region.size()) { + this->region.resize(other.region.size()); + std::copy(other.region.begin(), other.region.end(), this->region.begin()); + } + this->valid=other.valid; +} diff --git a/DetectionMetrics/DetectionMetricsLib/Regions/ContourRegion.h b/DetectionMetrics/DetectionMetricsLib/Regions/ContourRegion.h new file mode 100644 index 00000000..0703e436 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Regions/ContourRegion.h @@ -0,0 +1,55 @@ +// +// Created by frivas on 26/01/17. +// + +#ifndef SAMPLERGENERATOR_CONTOURREGION_H +#define SAMPLERGENERATOR_CONTOURREGION_H + +#include +#include + +struct ContourRegion { + ContourRegion():valid(false){}; + ContourRegion(const ContourRegion& other); + ContourRegion(const std::vector& region, std::string classID, + bool isCrowd = false):region(region),classID(classID),isCrowd(isCrowd),valid(true){}; //person by default + ContourRegion(const std::vector& region, std::string classID, + double confidence_score, bool isCrowd = false):region(region),classID(classID),confidence_score(confidence_score),isCrowd(isCrowd),valid(true){}; + + + bool operator < (const ContourRegion &obj) const { + + if (classID.empty() || obj.classID.empty()) { + throw std::invalid_argument("One of the ContourRegions passed for comparision were not initialized"); + } + + if (classID != obj.classID) { + //std::cout << "returning not equal class" << '\n'; + return classID < obj.classID; + + } else { + //std::cout << "came here" << '\n'; + if (isCrowd || obj.isCrowd) { + return (isCrowd ^ obj.isCrowd) & (!isCrowd); + } + return confidence_score > 
obj.confidence_score; //Reverse Sorting of Confidence Scores + } + + } + + std::vectorregion; + + std::string classID; + bool isCrowd = false; // Can be substantial for COCO dataset, which ignores iscrowd in evaluations + long double area; // This can be either Bounding Box area or Contour Area, necessary for + // determining area Range in evaluations, and may be directly read from + // dataset like COCO. + int uniqObjectID; + double confidence_score = -1; + bool valid; + + +}; + + +#endif //SAMPLERGENERATOR_CONTOURREGION_H diff --git a/DetectionMetrics/DetectionMetricsLib/Regions/ContourRegions.cpp b/DetectionMetrics/DetectionMetricsLib/Regions/ContourRegions.cpp new file mode 100644 index 00000000..3bbefa7f --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Regions/ContourRegions.cpp @@ -0,0 +1,145 @@ +// +// Created by frivas on 21/01/17. +// + +#include "ContourRegions.h" +#include +#include +#include +#include +#include +#include "rapidjson/filereadstream.h" +#include +ContourRegions::ContourRegions(const std::string &jsonPath) { + FILE* fp = fopen(jsonPath.c_str(), "rb"); // non-Windows use "r" + char readBuffer[65536]; + rapidjson::FileReadStream is(fp, readBuffer, sizeof(readBuffer)); + rapidjson::Document d; + d.ParseStream(is); + fclose(fp); + this->regions.clear(); + for (auto it = d.Begin(), end= d.End(); it != end; ++it){ + std::vector detection; + for (auto it2= (*it)["region"].Begin(), end2=(*it)["region"].End(); it2!= end2; ++it2) { + cv::Point point; + point.x = (*it2)["x"].GetInt(); + point.y = (*it2)["y"].GetInt(); + detection.push_back(point); + } + std::string id = (*it)["id"].GetString(); + this->regions.push_back(ContourRegion(detection,id)); + } +} + +ContourRegions::ContourRegions(){ + +} + +void ContourRegions::add(const std::vector &detections, const std::string& classId, const bool isCrowd) { + ContourRegion regionToInsert(detections, classId, isCrowd); + auto itr = std::upper_bound(regions.begin(), regions.end(), regionToInsert); 
+ regionToInsert.uniqObjectID = regions.size(); + regions.insert(itr, regionToInsert); +} + +void ContourRegions::add(const std::vector& detections, const std::string& classId, const double confidence_score, const bool isCrowd) { + ContourRegion regionToInsert(detections, classId, confidence_score, isCrowd); + auto itr = std::upper_bound(regions.begin(), regions.end(), regionToInsert); + regionToInsert.uniqObjectID = regions.size(); + regions.insert(itr, regionToInsert); + //regions.push_back(RectRegion(rect, cla +} + + +void ContourRegions::saveJson(const std::string &outPath) { + rapidjson::Document d; + d.SetObject(); + d.SetArray(); + for (auto it = this->regions.begin(), end=this->regions.end(); it != end; it++){ + rapidjson::Value detection; + detection.SetObject(); + rapidjson::Value idValue(it->classID.c_str(),d.GetAllocator()); + detection.AddMember("classID",idValue,d.GetAllocator()); + + rapidjson::Value regionValue; + regionValue.SetArray(); + + for (auto it2=it->region.begin(), end2= it->region.end(); it2 != end2; ++it2) { + rapidjson::Value point; + point.SetObject(); + rapidjson::Value xValue(it2->x); + point.AddMember("x", xValue, d.GetAllocator()); + + rapidjson::Value yValue(it2->y); + point.AddMember("y", yValue, d.GetAllocator()); + + regionValue.PushBack(point, d.GetAllocator()); + } + detection.AddMember("region",regionValue,d.GetAllocator()); + d.PushBack(detection,d.GetAllocator()); + } + + rapidjson::StringBuffer buffer; + + buffer.Clear(); + + rapidjson::Writer writer(buffer); + d.Accept(writer); + + std::ofstream outFile(outPath); + outFile << buffer.GetString() << std::endl; + outFile.close(); +} + + + + +ContourRegion ContourRegions::getRegion(int idx) { + if (this->regions.size() -1 >= idx) + return this->regions[idx]; + else + return ContourRegion(); +} + +void ContourRegions::drawRegions(cv::Mat &image) { + for (auto it = regions.begin(), end= regions.end(); it != end; ++it) { + cv::Mat mask = cv::Mat(image.size(), CV_8UC1, 
cv::Scalar(0)); + cv::Scalar color(255); + std::vector> contours; + contours.push_back(it->region); + cv::drawContours(mask, contours, 0, color, -1, 8); + std::vector channels; + cv::split(image, channels); + cv::Mat colorMask(image.size(), CV_8UC1, cv::Scalar(255)); + colorMask.copyTo(channels[0], mask); + colorMask.copyTo(channels[1], mask); + cv::Mat image2show; + cv::merge(channels, image2show); + image2show.copyTo(image); + + } + +} + +std::vector ContourRegions::getRegions() { + return this->regions; +} + +void ContourRegions::filterSamplesByID(std::vector filteredIDS) { + std::vector oldRegions(this->regions); + this->regions.clear(); + for(auto it = oldRegions.begin(), end=oldRegions.end(); it != end; ++it) { + if (std::find(filteredIDS.begin(), filteredIDS.end(), it->classID) != filteredIDS.end()) { + this->regions.push_back(*it); + } + } +} + +bool ContourRegions::empty() { + return (this->regions.size()==0); +} + +void ContourRegions::print() { + //todo + LOG(ERROR) << "Not yet implemented" << std::endl; +} diff --git a/DetectionMetrics/DetectionMetricsLib/Regions/ContourRegions.h b/DetectionMetrics/DetectionMetricsLib/Regions/ContourRegions.h new file mode 100644 index 00000000..3b94093b --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Regions/ContourRegions.h @@ -0,0 +1,31 @@ +// +// Created by frivas on 21/01/17. 
+// + +#ifndef SAMPLERGENERATOR_CONTOURREGIONS_H +#define SAMPLERGENERATOR_CONTOURREGIONS_H + +#include +#include "Regions.h" +#include "ContourRegion.h" + + +struct ContourRegions:Regions { + ContourRegions(const std::string& jsonPath); + ContourRegions(); + void saveJson(const std::string& outPath); + void add(const std::vector& detections, const std::string& classId, const bool isCrowd = false); + void add(const std::vector& detections, const std::string& classId, const double confidence_score, const bool isCrowd = false); + ContourRegion getRegion(int idx); + std::vector getRegions(); + void drawRegions(cv::Mat& image); + void filterSamplesByID(std::vector filteredIDS); + bool empty(); + void print(); + std::vector regions; +}; + +typedef boost::shared_ptr ContourRegionsPtr; + + +#endif //SAMPLERGENERATOR_CONTOURREGIONS_H diff --git a/DetectionMetrics/DetectionMetricsLib/Regions/RectRegion.cpp b/DetectionMetrics/DetectionMetricsLib/Regions/RectRegion.cpp new file mode 100644 index 00000000..324465b1 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Regions/RectRegion.cpp @@ -0,0 +1,5 @@ +// +// Created by frivas on 25/01/17. +// + +#include "RectRegion.h" diff --git a/DetectionMetrics/DetectionMetricsLib/Regions/RectRegion.h b/DetectionMetrics/DetectionMetricsLib/Regions/RectRegion.h new file mode 100644 index 00000000..b8d685c2 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Regions/RectRegion.h @@ -0,0 +1,50 @@ +// +// Created by frivas on 25/01/17. 
+// + +#ifndef SAMPLERGENERATOR_RECTREGION_H +#define SAMPLERGENERATOR_RECTREGION_H + +#include + +struct RectRegion { + + RectRegion():valid(false){}; + RectRegion(const cv::Rect_& region, const std::string& classID, const bool isCrowd = false):region(region),classID(classID),valid(true),isCrowd(isCrowd){}; + RectRegion(const cv::Rect_& region, const std::string& classID, const double confidence_score, const bool isCrowd = false):region(region),classID(classID),confidence_score(confidence_score),valid(true),isCrowd(isCrowd){}; + + bool operator < (const RectRegion &obj) const { + + if (classID.empty() || obj.classID.empty()) { + throw std::invalid_argument("One of the RectRegions passed for comparision were not initialized, ClassID found empty"); + } + + if (classID != obj.classID) { + //std::cout << "returning not equal class" << '\n'; + return classID < obj.classID; + + } else { + //std::cout << "came here" << '\n'; + if (isCrowd || obj.isCrowd) { + return (isCrowd ^ obj.isCrowd) & (!isCrowd); + } + return confidence_score > obj.confidence_score; //Reverse Sorting of Confidence Scores + } + + } + + cv::Rect_ region; + cv::Rect_ nameRect; + std::string classID; + bool isCrowd = false; // Can be substantial for COCO dataset, which ignores iscrowd in evaluations + long double area; // This can be either Bounding Box area or Contour Area, necessary for + // determining area Range in evaluations, and may be directly read from + // dataset like COCO. + int uniqObjectID; + double confidence_score = -1; + bool valid; + +}; + + +#endif //SAMPLERGENERATOR_RECTREGION_H diff --git a/DetectionMetrics/DetectionMetricsLib/Regions/RectRegions.cpp b/DetectionMetrics/DetectionMetricsLib/Regions/RectRegions.cpp new file mode 100644 index 00000000..4ca9216d --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Regions/RectRegions.cpp @@ -0,0 +1,173 @@ +// +// Created by frivas on 21/01/17. 
+// + +#include "RectRegions.h" +#include +#include +#include +#include +#include +#include "rapidjson/filereadstream.h" +#include +#include + + +RectRegions::RectRegions(const std::string &jsonPath) { + FILE* fp = fopen(jsonPath.c_str(), "rb"); // non-Windows use "r" + char readBuffer[65536]; + rapidjson::FileReadStream is(fp, readBuffer, sizeof(readBuffer)); + rapidjson::Document d; + d.ParseStream(is); + fclose(fp); + for (auto it=d.Begin(), end = d.End(); it != end; ++it){ + cv::Rect reg((*it)["x"].GetInt(),(*it)["y"].GetInt(),(*it)["w"].GetInt(),(*it)["h"].GetInt()); + std::string id; + id= (*it)["id"].GetString(); + regions.push_back(RectRegion(reg,id)); + } +} +RectRegions::RectRegions() { +} + + +void RectRegions::saveJson(const std::string &outPath) { + rapidjson::Document d; + d.SetArray(); + for (auto it = regions.begin(), end= regions.end(); it != end; ++it){ + rapidjson::Value node; + node.SetObject(); + rapidjson::Value xValue(it->region.x); + node.AddMember("x",xValue,d.GetAllocator()); + + rapidjson::Value yValue(it->region.y); + node.AddMember("y",yValue,d.GetAllocator()); + + rapidjson::Value wValue(it->region.width); + node.AddMember("w",wValue,d.GetAllocator()); + + rapidjson::Value hValue(it->region.height); + node.AddMember("h",hValue,d.GetAllocator()); + + rapidjson::Value confValue(it->confidence_score); + node.AddMember("score",confValue,d.GetAllocator()); + + rapidjson::Value idValue(it->classID.c_str(), d.GetAllocator()); + node.AddMember("id",idValue,d.GetAllocator()); + + d.PushBack(node,d.GetAllocator()); + } + + + + rapidjson::StringBuffer buffer; + + buffer.Clear(); + + rapidjson::Writer writer(buffer); + d.Accept(writer); + + std::ofstream outFile(outPath); + outFile << buffer.GetString() << std::endl; + outFile.close(); +} + +void RectRegions::add(const cv::Rect_ rect,const std::string classId, bool isCrowd){ + RectRegion regionToInsert(rect, classId, isCrowd); + auto itr = std::upper_bound(regions.begin(), regions.end(), 
regionToInsert); + regionToInsert.uniqObjectID = regions.size(); + regions.insert(itr, regionToInsert); + +} + +void RectRegions::add(const cv::Rect_ rect, const std::string classId, double confidence_score, bool isCrowd) { + RectRegion regionToInsert(rect, classId, confidence_score, isCrowd); + auto itr = std::upper_bound(regions.begin(), regions.end(), regionToInsert); + regionToInsert.uniqObjectID = regions.size(); + regions.insert(itr, regionToInsert); +} + + +void RectRegions::add(const std::vector> &detections,const std::string classId, const bool isCrowd) { + regions.push_back(RectRegion(cv::boundingRect(detections),classId, isCrowd)); +} + +void RectRegions::add(double x, double y, double w, double h,const std::string classId, const bool isCrowd) { + RectRegion regionToInsert(cv::Rect_(x,y,w,h), classId, isCrowd); + auto itr = std::upper_bound(regions.begin(), regions.end(), regionToInsert); + regionToInsert.uniqObjectID = regions.size(); + regions.insert(itr, regionToInsert); +} + +void RectRegions::add(double x, double y, double w, double h, const std::string classId, const double confidence_score, const bool isCrowd) { + RectRegion regionToInsert(cv::Rect_(x,y,w,h), classId, confidence_score, isCrowd); + auto itr = std::upper_bound(regions.begin(), regions.end(), regionToInsert); + regionToInsert.uniqObjectID = regions.size(); + regions.insert(itr, regionToInsert); +} + +RectRegion RectRegions::getRegion(int id) { + if (this->regions.size() -1 >= id) + return this->regions[id]; + else + return RectRegion(); +} + + +void RectRegions::drawRegions(cv::Mat &image) { + if (!image.empty()) + for (auto it = regions.begin(), end=regions.end(); it != end; ++it) { + ClassTypeOwn classType(it->classID); + cv::rectangle(image, it->region, classType.getColor(), 2); + cv::Size rectSize(80,20); + cv::Rect nameRectangle(it->region.x, it->region.y - rectSize.height, rectSize.width,rectSize.height); + if (nameRectangle.y < 0){ + nameRectangle.y=it->region.y; + } + if 
(nameRectangle.x + nameRectangle.width > image.size().width){ + nameRectangle.x = image.size().width - nameRectangle.width -1; + } + if (nameRectangle.y + nameRectangle.height > image.size().height){ + nameRectangle.y = image.size().height - nameRectangle.height -1; + } + + if (nameRectangle.x<0) + nameRectangle.x=0; + if (nameRectangle.y<0) + nameRectangle.y=0; + it->nameRect = nameRectangle; + image(nameRectangle)=cv::Scalar(classType.getColor()); + cv::putText(image, classType.getClassString(),cv::Point(nameRectangle.x - nameRectangle.height/4 + 5 ,nameRectangle.y + nameRectangle.height - 5),cv::FONT_HERSHEY_TRIPLEX,0.4,cv::Scalar(0,0,0),1); + } +} + +std::vector RectRegions::getRegions() { + return this->regions; +} + +void RectRegions::setRegions(std::vector &ans){ + this->regions = ans; +} + +void RectRegions::filterSamplesByID(std::vector filteredIDS) { + std::vector oldRegions(this->regions); + this->regions.clear(); + for(auto it = oldRegions.begin(), end=oldRegions.end(); it != end; ++it) { + if (std::find(filteredIDS.begin(), filteredIDS.end(), it->classID) != filteredIDS.end()) { + this->regions.push_back(*it); + } + } +} + +bool RectRegions::empty() { + return (this->regions.size()==0); +} + +void RectRegions::print() { + LOG(INFO) << "-------------------" << std::endl; + for (auto it = this->regions.begin(), end = this->regions.end(); it != end; ++it){ + int idx = std::distance(this->regions.begin(),it); + LOG(INFO) << "[" << idx << "]: " << it->region << " (" << it->classID << ")" << std::endl; + } + LOG(INFO) << "-------------------" << std::endl; +} diff --git a/DetectionMetrics/DetectionMetricsLib/Regions/RectRegions.h b/DetectionMetrics/DetectionMetricsLib/Regions/RectRegions.h new file mode 100644 index 00000000..d2df017d --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Regions/RectRegions.h @@ -0,0 +1,47 @@ +// +// Created by frivas on 21/01/17. 
+// + +#ifndef SAMPLERGENERATOR_RECTREGIONS_H +#define SAMPLERGENERATOR_RECTREGIONS_H + +#include +#include "Regions.h" +#include "RectRegion.h" +#include + +struct RectRegions:Regions { + RectRegions(const std::string& jsonPath); + RectRegions(); + + void add(const cv::Rect_ rect, const std::string classId, const bool isCrowd = false); + void add(const cv::Rect_ rect, const std::string classId, const double confidence_score, const bool isCrowd = false); + void add(const std::vector>& detections, const std::string classId, const bool isCrowd = false); + void add(double x, double y, double w, double h, const std::string classId, const bool isCrowd = false); + void add(double x, double y, double w, double h, const std::string classId, const double confidence_score, const bool isCrowd = false); + // void CallBackFunc(int event, int x, int y, int flags, void* userdata); + void saveJson(const std::string& outPath); + RectRegion getRegion(int id); + std::vector getRegions(); + void setRegions(std::vector &data); + // cv::Mat* getImage(); + void drawRegions(cv::Mat& image); + // void doit(cv::Mat& image); + void filterSamplesByID(std::vector filteredIDS); + bool empty(); + void print(); + // static void CallBackFunc(int event, int x, int y, int flags, void* userdata); + // void SetPoints(int x, int y); + // void Draw(); + // void img_pointer(); + + std::vector regions; + // cv::Mat img; +}; + + + +typedef boost::shared_ptr RectRegionsPtr; + + +#endif //SAMPLERGENERATOR_RECTREGIONS_H diff --git a/DetectionMetrics/DetectionMetricsLib/Regions/Region.h b/DetectionMetrics/DetectionMetricsLib/Regions/Region.h new file mode 100644 index 00000000..6a610b57 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Regions/Region.h @@ -0,0 +1,8 @@ +// +// Created by frivas on 25/01/17. 
+// + +#ifndef SAMPLERGENERATOR_REGION_H +#define SAMPLERGENERATOR_REGION_H + +#endif //SAMPLERGENERATOR_REGION_H diff --git a/DetectionMetrics/DetectionMetricsLib/Regions/Regions.h b/DetectionMetrics/DetectionMetricsLib/Regions/Regions.h new file mode 100644 index 00000000..3e995b70 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Regions/Regions.h @@ -0,0 +1,26 @@ +// +// Created by frivas on 21/01/17. +// + +#ifndef SAMPLERGENERATOR_REGION_H +#define SAMPLERGENERATOR_REGION_H + + +#include +#include + + +struct Regions{ + Regions(){}; + // Saves the detections at the path specified in JSON format + virtual void saveJson(const std::string& outPath)=0; + virtual void drawRegions(cv::Mat& image)=0; + virtual void filterSamplesByID(std::vector filteredIDS)=0; + virtual bool empty()=0; + virtual void print()=0; + +}; + +typedef boost::shared_ptr RegionsPtr; + +#endif //SAMPLERGENERATOR_REGION_H diff --git a/DetectionMetrics/DetectionMetricsLib/Regions/RleRegion.cpp b/DetectionMetrics/DetectionMetricsLib/Regions/RleRegion.cpp new file mode 100644 index 00000000..6e534343 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Regions/RleRegion.cpp @@ -0,0 +1 @@ +#include "RleRegion.h" diff --git a/DetectionMetrics/DetectionMetricsLib/Regions/RleRegion.h b/DetectionMetrics/DetectionMetricsLib/Regions/RleRegion.h new file mode 100644 index 00000000..2100fa21 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Regions/RleRegion.h @@ -0,0 +1,50 @@ +#ifndef SAMPLERGENERATOR_RLEREGION_H +#define SAMPLERGENERATOR_RLEREGION_H + +#include +#include "maskApi.h" + +struct RleRegion { + RleRegion():valid(false){}; + RleRegion(const RLE region, std::string classID, + bool isCrowd = false):region(region),classID(classID),isCrowd(isCrowd),valid(true){}; //person by default + RleRegion(const RLE region, std::string classID, + double confidence_score, bool isCrowd = false):region(region),classID(classID),confidence_score(confidence_score),isCrowd(isCrowd),valid(true){}; 
+ + + bool operator < (const RleRegion &obj) const { + + if (classID.empty() || obj.classID.empty()) { + throw std::invalid_argument("One of the ContourRegions passed for comparision were not initialized"); + } + + if (classID != obj.classID) { + //std::cout << "returning not equal class" << '\n'; + return classID < obj.classID; + + } else { + //std::cout << "came here" << '\n'; + if (isCrowd || obj.isCrowd) { + return (isCrowd ^ obj.isCrowd) & (!isCrowd); + } + return confidence_score > obj.confidence_score; //Reverse Sorting of Confidence Scores + } + + } + + RLE region; + + std::string classID; + bool isCrowd = false; // Can be substantial for COCO dataset, which ignores iscrowd in evaluations + long double area; // This can be either Bounding Box area or Contour Area, necessary for + // determining area Range in evaluations, and may be directly read from + // dataset like COCO. + int uniqObjectID; + double confidence_score = -1; + bool valid; + + +}; + + +#endif //SAMPLERGENERATOR_RLEREGION_H diff --git a/DetectionMetrics/DetectionMetricsLib/Regions/RleRegions.cpp b/DetectionMetrics/DetectionMetricsLib/Regions/RleRegions.cpp new file mode 100644 index 00000000..0a1bbe5b --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Regions/RleRegions.cpp @@ -0,0 +1,100 @@ +#include "RleRegions.h" +#include +#include +#include +#include +#include +#include +#include "rapidjson/filereadstream.h" +#include + +RleRegions::RleRegions(){ + +} + +void RleRegions::add(RLE region, const std::string& classId, const bool isCrowd) { + RleRegion regionToInsert(region, classId, isCrowd); + auto itr = std::upper_bound(regions.begin(), regions.end(), regionToInsert); + regionToInsert.uniqObjectID = regions.size(); + regions.insert(itr, regionToInsert); +} + +void RleRegions::add(RLE region, const std::string& classId, const double confidence_score, const bool isCrowd) { + RleRegion regionToInsert(region, classId, confidence_score, isCrowd); + auto itr = 
std::upper_bound(regions.begin(), regions.end(), regionToInsert); + regionToInsert.uniqObjectID = regions.size(); + regions.insert(itr, regionToInsert); + //regions.push_back(RectRegion(rect, cla +} + + +void RleRegions::saveJson(const std::string &outPath) { + rapidjson::Document d; +} + + + + +RleRegion RleRegions::getRegion(int idx) { + if (this->regions.size() -1 >= idx) + return this->regions[idx]; + else + return RleRegion(); +} + +void RleRegions::drawRegions(cv::Mat &image) { + std::default_random_engine generator; + std::uniform_real_distribution distribution(0.0,1.0); + + for (auto it = regions.begin(), end= regions.end(); it != end; ++it) { + + cv::Mat mask = cv::Mat(it->region.w, it->region.h, CV_8UC1, cv::Scalar(0)); + + rleDecode(&(it->region), mask.data , 1); + mask = mask * 255; + cv::Mat rotatedMask = mask.t(); + + cv::Scalar color; + std::vector > contours; + if (it->isCrowd) { + color = cv::Scalar(2,166,101); + } else { + color = cv::Scalar((unsigned int)(distribution(generator)*170), (unsigned int)(distribution(generator)*170), (unsigned int)(distribution(generator)*170)); + cv::findContours( rotatedMask.clone(), contours, cv::RETR_CCOMP, cv::CHAIN_APPROX_SIMPLE, cv::Point(0, 0) ); + + } + + cv::Mat colorMask(image.size(), CV_8UC3, color); + + cv::Mat output(colorMask.size(), CV_8UC3, cv::Scalar(0)); + colorMask.copyTo(output, rotatedMask); + + image = image.mul((( 255 - output )/255 )) + output; + cv::drawContours(image, contours, -1, color, 2, 8); + + } + +} + +std::vector RleRegions::getRegions() { + return this->regions; +} + +void RleRegions::filterSamplesByID(std::vector filteredIDS) { + std::vector oldRegions(this->regions); + this->regions.clear(); + for(auto it = oldRegions.begin(), end=oldRegions.end(); it != end; ++it) { + if (std::find(filteredIDS.begin(), filteredIDS.end(), it->classID) != filteredIDS.end()) { + this->regions.push_back(*it); + } + } +} + +bool RleRegions::empty() { + return (this->regions.size()==0); +} + +void 
RleRegions::print() { + //todo + LOG(ERROR) << "Not yet implemented" << std::endl; +} diff --git a/DetectionMetrics/DetectionMetricsLib/Regions/RleRegions.h b/DetectionMetrics/DetectionMetricsLib/Regions/RleRegions.h new file mode 100644 index 00000000..dcf6fd77 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Regions/RleRegions.h @@ -0,0 +1,26 @@ +#ifndef SAMPLERGENERATOR_RLEREGIONS_H +#define SAMPLERGENERATOR_RLEREGIONS_H + +#include +#include "Regions.h" +#include "RleRegion.h" + + +struct RleRegions:Regions { + RleRegions(); + void saveJson(const std::string& outPath); + void add(RLE region, const std::string& classId, const bool isCrowd = false); + void add(RLE region, const std::string& classId, const double confidence_score, const bool isCrowd = false); + RleRegion getRegion(int idx); + std::vector getRegions(); + void drawRegions(cv::Mat& image); + void filterSamplesByID(std::vector filteredIDS); + bool empty(); + void print(); + std::vector regions; +}; + +typedef boost::shared_ptr RleRegionsPtr; + + +#endif //SAMPLERGENERATOR_RLEREGIONS_H diff --git a/DetectionMetrics/DetectionMetricsLib/Regions/maskApi.cpp b/DetectionMetrics/DetectionMetricsLib/Regions/maskApi.cpp new file mode 100644 index 00000000..ea15325b --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Regions/maskApi.cpp @@ -0,0 +1,227 @@ +#include "maskApi.h" +#include +#include +#include + +uint umin( uint a, uint b ) { return (ab) ? 
a : b; } + +void rleInit( RLE *R, siz h, siz w, siz m, uint *cnts ) { + R->h=h; R->w=w; R->m=m; R->cnts=(m==0)?0:(uint*)malloc(sizeof(uint)*m); + siz j; if(cnts) for(j=0; jcnts[j]=cnts[j]; +} + +void rleFree( RLE *R ) { + free(R->cnts); R->cnts=0; +} + +void rlesInit( RLE **R, siz n ) { + siz i; *R = (RLE*) malloc(sizeof(RLE)*n); + for(i=0; i0 ) { + c=umin(ca,cb); cc+=c; ct=0; + ca-=c; if(!ca && a0) { + crowd=iscrowd!=NULL && iscrowd[g]; + if(dt[d].h!=gt[g].h || dt[d].w!=gt[g].w) { o[g*m+d]=-1; continue; } + siz ka, kb, a, b; uint c, ca, cb, ct, i, u; int va, vb; + ca=dt[d].cnts[0]; ka=dt[d].m; va=vb=0; + cb=gt[g].cnts[0]; kb=gt[g].m; a=b=1; i=u=0; ct=1; + while( ct>0 ) { + c=umin(ca,cb); if(va||vb) { u+=c; if(va&&vb) i+=c; } ct=0; + ca-=c; if(!ca && athr) keep[j]=0; + } + } +} + +void bbIou( BB dt, BB gt, siz m, siz n, byte *iscrowd, double *o ) { + double h, w, i, u, ga, da; siz g, d; int crowd; + for( g=0; gthr) keep[j]=0; + } + } +} + +void rleToBbox( const RLE *R, BB bb, siz n ) { + siz i; for( i=0; id?1:c=dy && xs>xe) || (dxye); + if(flip) { t=xs; xs=xe; xe=t; t=ys; ys=ye; ye=t; } + s = dx>=dy ? (double)(ye-ys)/dx : (double)(xe-xs)/dy; + if(dx>=dy) for( d=0; d<=dx; d++ ) { + t=flip?dx-d:d; u[m]=t+xs; v[m]=(int)(ys+s*t+.5); m++; + } else for( d=0; d<=dy; d++ ) { + t=flip?dy-d:d; v[m]=t+ys; u[m]=(int)(xs+s*t+.5); m++; + } + } + /* get points along y-boundary and downsample */ + free(x); free(y); k=m; m=0; double xd, yd; + x=(int*)malloc(sizeof(int)*k); y=(int*)malloc(sizeof(int)*k); + for( j=1; jw-1 ) continue; + yd=(double)(v[j]h) yd=h; yd=ceil(yd); + x[m]=(int) xd; y[m]=(int) yd; m++; + } + /* compute rle encoding given y-boundary points */ + k=m; a=(uint*)malloc(sizeof(uint)*(k+1)); + for( j=0; j0) b[m++]=a[j++]; else { + j++; if(jm, p=0; long x; int more; + char *s=(char*)malloc(sizeof(char)*m*6); + for( i=0; icnts[i]; if(i>2) x-=(long) R->cnts[i-2]; more=1; + while( more ) { + char c=x & 0x1f; x >>= 5; more=(c & 0x10) ? 
x!=-1 : x!=0; + if(more) c |= 0x20; c+=48; s[p++]=c; + } + } + s[p]=0; return s; +} + +void rleFrString( RLE *R, char *s, siz h, siz w ) { + siz m=0, p=0, k; long x; int more; uint *cnts; + while( s[m] ) m++; cnts=(uint*)malloc(sizeof(uint)*m); m=0; + while( s[p] ) { + x=0; k=0; more=1; + while( more ) { + char c=s[p]-48; x |= (c & 0x1f) << 5*k; + more = c & 0x20; p++; k++; + if(!more && (c & 0x10)) x |= -1 << 5*k; + } + if(m>2) x+=(long) cnts[m-2]; cnts[m++]=(uint) x; + } + rleInit(R,h,w,m,cnts); free(cnts); +} diff --git a/DetectionMetrics/DetectionMetricsLib/Regions/maskApi.h b/DetectionMetrics/DetectionMetricsLib/Regions/maskApi.h new file mode 100644 index 00000000..bc312d6b --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Regions/maskApi.h @@ -0,0 +1,53 @@ + +typedef unsigned int uint; +typedef unsigned long siz; +typedef unsigned char byte; +typedef double* BB; +typedef struct { siz h, w, m; uint *cnts; } RLE; + +/* Initialize/destroy RLE. */ +void rleInit( RLE *R, siz h, siz w, siz m, uint *cnts ); +void rleFree( RLE *R ); + +/* Initialize/destroy RLE array. */ +void rlesInit( RLE **R, siz n ); +void rlesFree( RLE **R, siz n ); + +/* Encode binary masks using RLE. */ +void rleEncode( RLE *R, const byte *mask, siz h, siz w, siz n ); + +/* Decode binary masks encoded via RLE. */ +void rleDecode( const RLE *R, byte *mask, siz n ); + +/* Compute union or intersection of encoded masks. */ +void rleMerge( const RLE *R, RLE *M, siz n, int intersect ); + +/* Compute area of encoded masks. */ +void rleArea( const RLE *R, siz n, uint *a ); + +/* Compute intersection over union between masks. */ +void rleIou( RLE *dt, RLE *gt, siz m, siz n, byte *iscrowd, double *o ); + +/* Compute non-maximum suppression between bounding masks */ +void rleNms( RLE *dt, siz n, uint *keep, double thr ); + +/* Compute intersection over union between bounding boxes. 
*/ +void bbIou( BB dt, BB gt, siz m, siz n, byte *iscrowd, double *o ); + +/* Compute non-maximum suppression between bounding boxes */ +void bbNms( BB dt, siz n, uint *keep, double thr ); + +/* Get bounding boxes surrounding encoded masks. */ +void rleToBbox( const RLE *R, BB bb, siz n ); + +/* Convert bounding boxes to encoded masks. */ +void rleFrBbox( RLE *R, const BB bb, siz h, siz w, siz n ); + +/* Convert polygon to encoded mask. */ +void rleFrPoly( RLE *R, const double *xy, siz k, siz h, siz w ); + +/* Get compressed string representation of encoded mask. */ +char* rleToString( const RLE *R ); + +/* Convert from compressed string representation of encoded mask. */ +void rleFrString( RLE *R, char *s, siz h, siz w ); diff --git a/DetectionMetrics/DetectionMetricsLib/Utils/CMakeLists.txt b/DetectionMetrics/DetectionMetricsLib/Utils/CMakeLists.txt new file mode 100644 index 00000000..c4718f01 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Utils/CMakeLists.txt @@ -0,0 +1,53 @@ +set(CMAKE_AUTOMOC ON) +set(CMAKE_AUTOUIC ON) +set(CMAKE_INCLUDE_CURRENT_DIR ON) + + +# MESSAGE(${QT_INCLUDE_DIRS}) + +SET(Utils_SOURCE_FILES + StringHandler.h + Normalizations + Configuration + Key + SampleGenerationApp + StatsUtils + JsonHelper.h + DepthUtils + PathHelper + Playback + pop_up + setclass + addclass +) + +include_directories( + ${OpenCV_INCLUDE_DIRS} + ${config_INCLUDE_DIRS} + ${comm_INCLUDE_DIRS} + ${utils_INCLUDE_DIRS} + ${ros_INCLUDE_DIRS} + ${PYTHON_INCLUDE_DIRS} + ${INTERFACES_CPP_DIR} + ${jderobottypes_INCLUDE_DIRS} + ${GLOG_INCLUDE_DIRS} + ${Boost_INCLUDE_DIR} + ${QT_INCLUDE_DIRS} + ${CMAKE_CURRENT_LIST_DIR} + ${DetectionMetrics_INCLUDE_DIR} +) + +ADD_LIBRARY(DetectionMetrics_Utils OBJECT ${Utils_SOURCE_FILES}) + + +TARGET_INCLUDE_DIRECTORIES (DetectionMetrics_Utils PUBLIC + ${OpenCV_INCLUDE_DIRS} + ${Boost_INCLUDE_DIR} + ${config_INCLUDE_DIRS} + ${depthLib_INCLUDE_DIRS} + ${GLOG_INCLUDE_DIRS} + ${utils_INCLUDE_DIRS} + ${CMAKE_CURRENT_LIST_DIR} + 
${DetectionMetrics_INCLUDE_DIR} + ${QT_INCLUDE_DIRS} +) diff --git a/DetectionMetrics/DetectionMetricsLib/Utils/Configuration.cpp b/DetectionMetrics/DetectionMetricsLib/Utils/Configuration.cpp new file mode 100644 index 00000000..b81d46b3 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Utils/Configuration.cpp @@ -0,0 +1,82 @@ +// +// Created by frivas on 4/02/17. +// + +#include +#include +#include +#include +#include +#include "Configuration.h" +#include +#include + +Configuration::Configuration(const std::string &configFilePath):configFilePath(configFilePath) { + boost::filesystem::path boostPath(configFilePath); + if (boost::filesystem::exists(boostPath)){ + std::string line; + std::ifstream inFile(configFilePath); + std::string key; + while (getline(inFile,line)) { + if (boost::starts_with(line, "#")){ + continue; + } + if (line.empty()) + continue; + if (boost::starts_with(line, "--")){ + key = line.erase(0,2); + if (this->config.count(key)){ + LOG(ERROR) << "Duplicated key in configuration file: " + key; + exit(1); + } + else { + Key keyConfig(key); + this->config[key] = keyConfig; + } + } + else{ + if (key.empty()){ + LOG(WARNING) << "Error no key detected for " + line + " value"; + } + else{ + this->config[key].addValue(line); + } + + } + + } + + } + showConfig(); +} + +void Configuration::showConfig() { + LOG(INFO) << "------------------------------------------------------------------" << std::endl; + LOG(INFO) << "------------------------------------------------------------------" << std::endl; + for (auto it = this->config.begin(), end = this->config.end(); it != end; ++it){ + LOG(INFO) << it->first << std::endl; + int nElements= it->second.getNValues(); + for (int i=0; i < nElements; i++) { + LOG(INFO) << " " << it->second.getValue(i) << std::endl; + } + } + LOG(INFO) << "------------------------------------------------------------------" << std::endl; + LOG(INFO) << "------------------------------------------------------------------" << 
std::endl; +} + +Configuration::Configuration() { + +} + +Key Configuration::getKey(const std::string &key) { + if (this->config.count(key)==0) { + LOG(ERROR) << "Key: " + key + " does not exists inside the configuration"; + exit(1); + } + else + return this->config[key]; +} + +bool Configuration::keyExists(const std::string& key) { + return this->config.count(key)!=0; +} diff --git a/DetectionMetrics/DetectionMetricsLib/Utils/Configuration.h b/DetectionMetrics/DetectionMetricsLib/Utils/Configuration.h new file mode 100644 index 00000000..d7f2ad41 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Utils/Configuration.h @@ -0,0 +1,28 @@ +// +// Created by frivas on 4/02/17. +// + +#ifndef SAMPLERGENERATOR_CONFIGURATION_H +#define SAMPLERGENERATOR_CONFIGURATION_H + +#include +#include +#include +#include "Key.h" + +class Configuration { +public: + Configuration(); + Configuration(const std::string& configFilePath); + void showConfig(); + Key getKey(const std::string& key); + bool keyExists(const std::string& key); + +private: + std::string configFilePath; + std::map config; +}; + +typedef boost::shared_ptr ConfigurationPtr; + +#endif //SAMPLERGENERATOR_CONFIGURATION_H diff --git a/DetectionMetrics/DetectionMetricsLib/Utils/DepthUtils.cpp b/DetectionMetrics/DetectionMetricsLib/Utils/DepthUtils.cpp new file mode 100644 index 00000000..29335d9c --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Utils/DepthUtils.cpp @@ -0,0 +1,69 @@ +// +// Created by frivas on 30/07/17. 
+// + +#include +#include "DepthUtils.h" +#include +void DepthUtils::mat16_to_ownFormat(const cv::Mat &inputImage, cv::Mat& outImage) { + + double MAX_LENGHT=10000; + + auto imageSize = inputImage.size(); + outImage=cv::Mat(imageSize,CV_8UC3,cv::Scalar(0,0,0)); + + + cv::Mat evalImage= inputImage.clone(); + evalImage=cv::Scalar(0,0,0); + + for (int y = 0; y < imageSize.height; ++y) { + for (int x = 0; x < imageSize.width; ++x) { + uint16_t value = inputImage.at(y, x); + value= (value >> 3); + evalImage.at(y, x) = value; + outImage.data[(y*imageSize.width+ x)*3+0] = (float(value)/(float)MAX_LENGHT)*255.; + outImage.data[(y*imageSize.width+ x)*3+1] = (value)>>8; + outImage.data[(y*imageSize.width+ x)*3+2] = (value)&0xff; + } + } + + + double min,max; + cv::minMaxLoc(inputImage,&min,&max); + + LOG(INFO) << "min: " << min << std::endl; + LOG(INFO) << "max: " << max << std::endl; + + cv::minMaxLoc(evalImage,&min,&max); + + LOG(INFO) << "min: " << min << std::endl; + LOG(INFO) << "max: " << max << std::endl; + + + +} + +void DepthUtils::spinello_mat16_to_viewable(const cv::Mat &inputImage, cv::Mat& outImage) { + + double min,max; + cv::minMaxLoc(inputImage,&min,&max); + + + if (max > 10000) { // Swapping bytes + + cv::Mat toswap(inputImage.rows, inputImage.cols, CV_8UC2, inputImage.data); + cv::Mat merged; + + std::vector channels(2); + cv::split(toswap, channels); + std::reverse(channels.begin(), channels.end()); + cv::merge(&channels[0], 2, merged); + + merged.addref(); + outImage = cv::Mat(toswap.rows, toswap.cols, CV_16UC1, merged.data); + + } else { + outImage = inputImage; + } + +} diff --git a/DetectionMetrics/DetectionMetricsLib/Utils/DepthUtils.h b/DetectionMetrics/DetectionMetricsLib/Utils/DepthUtils.h new file mode 100644 index 00000000..04c7d332 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Utils/DepthUtils.h @@ -0,0 +1,17 @@ +// +// Created by frivas on 30/07/17. 
+// + +#ifndef SAMPLERGENERATOR_DEPTHUTILS_H +#define SAMPLERGENERATOR_DEPTHUTILS_H + +#include + +class DepthUtils { +public: + static void mat16_to_ownFormat(const cv::Mat& inputImage, cv::Mat& outImage); + static void spinello_mat16_to_viewable(const cv::Mat &inputImage, cv::Mat& outImage); +}; + + +#endif //SAMPLERGENERATOR_DEPTHUTILS_H diff --git a/DetectionMetrics/DetectionMetricsLib/Utils/JsonHelper.h b/DetectionMetrics/DetectionMetricsLib/Utils/JsonHelper.h new file mode 100644 index 00000000..e16dc47e --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Utils/JsonHelper.h @@ -0,0 +1,24 @@ +// +// Created by frivas on 30/07/17. +// + +#ifndef SAMPLERGENERATOR_JSONHELPER_H +#define SAMPLERGENERATOR_JSONHELPER_H + + +#include +#include +#include + +class JsonHelper{ +public: + template + static std::vector as_vector(boost::property_tree::ptree const& pt, boost::property_tree::ptree::key_type const& key) + { + std::vector r; + for (auto& item : pt.get_child(key)) + r.push_back(item.second.get_value()); + return r; + } +}; +#endif //SAMPLERGENERATOR_JSONHELPER_H diff --git a/DetectionMetrics/DetectionMetricsLib/Utils/Key.cpp b/DetectionMetrics/DetectionMetricsLib/Utils/Key.cpp new file mode 100644 index 00000000..03cdbd73 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Utils/Key.cpp @@ -0,0 +1,71 @@ +// +// Created by frivas on 4/02/17. 
+// + +#include +#include "Key.h" +#include + +Key::Key(const std::string &key):key(key) { + +} + + +bool Key::isVector() { + return this->values.size()>1; +} + +void Key::addValue(const std::string &value) { + this->values.push_back(value); +} + +std::string Key::getValue() { + if (this->values.size()==1) + return this->values[0]; + else { + const std::string ErrorMsg="Key [" + this->key + "] is an array not value"; + LOG(WARNING)<key; +} + +std::string Key::getValue(int id) { + if (this->values.size()> id) + { + return this->values[id]; + } +} + +std::string Key::getValueOrLast(int id) { + return this->values[ this->values.size()> id ? id : this->values.size() - 1 ]; + // if id overflows return the last element of the array +} + +std::vector Key::getValues() { + return this->values; +} + +Key::Key() { + +} + +int Key::getNValues() { + return this->values.size(); +} + +int Key::getValueAsInt() { + if (this->values.size() != 1) { + LOG(ERROR)<<"Cannot extract int from array type. Key=" + this->key; + exit(1); + } + else{ + int value; + std::istringstream iss(this->values[0]); + iss >> value; + return value; + } +} diff --git a/DetectionMetrics/DetectionMetricsLib/Utils/Key.h b/DetectionMetrics/DetectionMetricsLib/Utils/Key.h new file mode 100644 index 00000000..af0e9645 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Utils/Key.h @@ -0,0 +1,35 @@ +// +// Created by frivas on 4/02/17. 
+// + +#ifndef SAMPLERGENERATOR_KEY_H +#define SAMPLERGENERATOR_KEY_H + +#include +#include + +struct Key { + Key(); + explicit Key(const std::string& key); + + + + bool isVector(); + void addValue(const std::string& value); + std::string getValue(); + std::string getKey(); + std::string getValue(int id); + std::string getValueOrLast(int id); //return last value if id overflows + std::vector getValues(); + int getValueAsInt(); + + int getNValues(); + +private: + std::string key; + std::vector values; + bool valid; +}; + + +#endif //SAMPLERGENERATOR_KEY_H diff --git a/DetectionMetrics/DetectionMetricsLib/Utils/Normalizations.cpp b/DetectionMetrics/DetectionMetricsLib/Utils/Normalizations.cpp new file mode 100644 index 00000000..ca14e099 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Utils/Normalizations.cpp @@ -0,0 +1,28 @@ +// +// Created by frivas on 30/01/17. +// + +#include "Normalizations.h" + + + + +void Normalizations::normalizeRect(cv::Rect ®ion, cv::Size size) { + normalizeLower(region.x); + normalizeLower(region.y); + normalizeUpper(region.x, region.width, size.width); + normalizeUpper(region.y, region.height,size.height); + +} + +void Normalizations::normalizeLower(int &value, int min) { + if (value < min){ + value=min; + } +} + +void Normalizations::normalizeUpper(int pos, int& size, int max) { + if (pos + size >= max){ + size= (max - pos - 1); + } +} diff --git a/DetectionMetrics/DetectionMetricsLib/Utils/Normalizations.h b/DetectionMetrics/DetectionMetricsLib/Utils/Normalizations.h new file mode 100644 index 00000000..f4260665 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Utils/Normalizations.h @@ -0,0 +1,20 @@ +// +// Created by frivas on 30/01/17. 
+// + +#ifndef SAMPLERGENERATOR_NORMALIZATIONS_H +#define SAMPLERGENERATOR_NORMALIZATIONS_H + +#include + +class Normalizations { +public: + static void normalizeRect(cv::Rect& region, cv::Size size); + +private: + static void normalizeLower(int& value, int min=0); + static void normalizeUpper(int pos, int& size, int max); +}; + + +#endif //SAMPLERGENERATOR_NORMALIZATIONS_H diff --git a/DetectionMetrics/DetectionMetricsLib/Utils/PathHelper.cpp b/DetectionMetrics/DetectionMetricsLib/Utils/PathHelper.cpp new file mode 100644 index 00000000..d3d438ae --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Utils/PathHelper.cpp @@ -0,0 +1,23 @@ +// +// Created by frivas on 19/07/17. +// + +#include +#include "PathHelper.h" + +std::string PathHelper::concatPaths(const std::string &p1, const std::string &p2) { + if (boost::algorithm::ends_with(p1, getPathSeparator())){ + return p1 + p2; + } + else{ + return p1 + getPathSeparator() + p2; + } +} + +std::string PathHelper::getPathSeparator() { +#ifdef __linux__ + return std::string("/"); +#else + return std::string("\\"); +#endif +} diff --git a/DetectionMetrics/DetectionMetricsLib/Utils/PathHelper.h b/DetectionMetrics/DetectionMetricsLib/Utils/PathHelper.h new file mode 100644 index 00000000..0154fdf7 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Utils/PathHelper.h @@ -0,0 +1,19 @@ +// +// Created by frivas on 19/07/17. 
+// + +#ifndef SAMPLERGENERATOR_PATHHELPER_H +#define SAMPLERGENERATOR_PATHHELPER_H + +#include + + +class PathHelper { + public: + static std::string concatPaths(const std::string& p1, const std::string& p2); + static std::string getPathSeparator(); + +}; + + +#endif //SAMPLERGENERATOR_PATHHELPER_H diff --git a/DetectionMetrics/DetectionMetricsLib/Utils/Playback.cpp b/DetectionMetrics/DetectionMetricsLib/Utils/Playback.cpp new file mode 100644 index 00000000..dc1999a7 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Utils/Playback.cpp @@ -0,0 +1,121 @@ +#include "Playback.hpp" + +// Callback function that is triggered when someone uses the slidebar +void Playback::onSlide(int currentPos,void *frame){ + // updateFrame the frame to currentPos of slidebar + *(int *)(frame) = currentPos; +} + +// Constructor which is called if the frames count is known before hand +Playback::Playback(long long int framesCount):framesCount(framesCount),pause(false),speed(1),frameId(0),inferences(0),groundTruth(0){ + int *frame = &this->frameId; + cv::namedWindow("Detection",cv::WINDOW_NORMAL); + cv::namedWindow("GT on RGB",cv::WINDOW_NORMAL); + cv::createTrackbar("Frames", "Detection",frame,this->framesCount,&Playback::onSlide,frame); + cv::createTrackbar("Frames", "GT on RGB",frame,this->framesCount,&Playback::onSlide,frame); +} + +// Constructor if the framesCount is not known beforehand +Playback::Playback():framesCount(0),pause(false),speed(1),frameId(0),inferences(0),groundTruth(0){ + cv::namedWindow("Detection",cv::WINDOW_NORMAL); + cv::namedWindow("GT on RGB",cv::WINDOW_NORMAL); +} + +// Adds trackbar to the Detection and undetected window +void Playback::AddTrackbar(long long int framesCount){ + int *frame = &this->frameId; + this->framesCount = framesCount; + cv::createTrackbar("Frames", "Detection",frame,this->framesCount,&Playback::onSlide,frame); + cv::createTrackbar("Frames", "GT on RGB",frame,this->framesCount,&Playback::onSlide,frame); +} + +// Check if paused 
+bool Playback::IsPaused(){ + return this->pause; +} + +// Store the new frames into a vector , so that we can slide across them later +void Playback::GetInput(char keypressed , cv::Mat inference, cv::Mat groundTruth){ + this->inferences.push_back(inference); + this->groundTruth.push_back(groundTruth); + // Upate the keystroke with the new stroke + this->keystroke = keypressed; + // Call process + Playback::process(); +} + +void Playback::GetInput(char keypressed ){ + this->keystroke = keypressed; + Playback::process(); +} + +// After updating the keystroke , perform the actions accordingly +void Playback::process(){ + switch (this->keystroke) { + // "Space" , "p" , "k" to pause the video + case ' ' : + case 'p' : + case 'k' : this->pause = !this->pause; + std::cout << "Keystroke : " << this->keystroke << std::endl; + break; + // '-' to reduce the video playback rate + case '-' : this->speed += 2; + std::cout << "Keystroke : " << this->keystroke << std::endl; + break; + // '-' to increase the video playback rate + case '+' : if(this->speed>2) + this->speed-=2; + std::cout << "Keystroke : " << this->keystroke << std::endl; + break; + default : break; + } + Playback::show(); +} + +void Playback::show(){ + // If not paused , output the frame + if(!this->pause){ + usleep(int(this->rate()*10000)); + cv::imshow("Detection",this->inferences[this->frameId]); + cv::imshow("GT on RGB",this->groundTruth[this->frameId]); + // Update the frameID + this->frameId++; + cv::setTrackbarPos("Frames","Detection",this->frameId); + cv::setTrackbarPos("Frames","GT on RGB",this->frameId); + } + // Else wait + else + Playback::WaitTillResume(); +} + +// Below function takes care once the video ends +void Playback::completeShow(){ + while(this->frameId!=this->inferences.size()) + Playback::GetInput(cv::waitKey(1)); +} + +// Current playback rate +double Playback::rate(){ + return this->speed; +} + +// Wait till the video is resumed +void Playback::WaitTillResume(){ + while(this->pause) + 
Playback::GetInput(cv::waitKey(0)); +} + +// Update the frame +void Playback::updateFrame(int frameId){ + this->frameId = frameId; +} + +// Return current frameID +int Playback::currentFrame(){ + return this->frameId; +} + +// Updates both the frameID and the image at that ID +void Playback::updateFrame(int FrameId ,cv::Mat *image){ + this->inferences.at(FrameId) = *image; +} diff --git a/DetectionMetrics/DetectionMetricsLib/Utils/Playback.hpp b/DetectionMetrics/DetectionMetricsLib/Utils/Playback.hpp new file mode 100644 index 00000000..a7d30d01 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Utils/Playback.hpp @@ -0,0 +1,35 @@ +#ifndef PLAYBACK_RATES +#define PLAYBACK_RATES +// Code for controlling playback functionalities +#include +#include +#include +#include + +class Playback { +public: + Playback(long long int framesCount); + Playback(); + void GetInput(char keypressed , cv::Mat inference, cv::Mat groundTruth); + void GetInput(char keypressed); + void AddTrackbar(long long int framesCount); + bool IsPaused(); + void WaitTillResume(); + void process(); + double rate(); + void show(); + void completeShow(); + void updateFrame(int frameId); + void updateFrame(int FrameId, cv::Mat *image); + int currentFrame(); + static void onSlide(int currentPos,void *frame); +private: + bool pause; + double speed; + char keystroke; + std::vector inferences; + std::vector groundTruth; + int frameId; + long long int framesCount; +}; +#endif diff --git a/DetectionMetrics/DetectionMetricsLib/Utils/SampleGenerationApp.cpp b/DetectionMetrics/DetectionMetricsLib/Utils/SampleGenerationApp.cpp new file mode 100644 index 00000000..0cc2b3d9 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Utils/SampleGenerationApp.cpp @@ -0,0 +1,144 @@ +// +// Created by frivas on 4/02/17. +// + +// This is the main parent class of all the child like delpoyer,evaluator. 
+#include "SampleGenerationApp.h" +#include + +#include +#include +#include + +namespace +{ + const size_t ERROR_IN_COMMAND_LINE = 1; + const size_t SUCCESS = 0; + const size_t ERROR_UNHANDLED_EXCEPTION = 2; + +} // namespace + +// Constructor +SampleGenerationApp::SampleGenerationApp(int argc, char **argv):argc(argc),argv(argv) { + // QApplication a(argc,argv); + // this->a = new QApplication(argc,argv); + // Check if command line arguments are passed or not , if not passed return error + if (parse_arguments(argc,argv,configFilePath) != SUCCESS){ + exit(1); + } + // This loads the config file present at configFilePath which is passed + config = jderobotconfig::loader::load(configFilePath); + this->path= new std::string(); +} + +// Constructor which is called if a node itself is directly passed instead of +// configFilePath +SampleGenerationApp::SampleGenerationApp(YAML::Node node){ + config = jderobotconfig::loader::load(node); +} + +// If a filepath is passed , it is loaded +SampleGenerationApp::SampleGenerationApp(std::string filepath, bool isPath){ + config = jderobotconfig::loader::load(filepath,true); +} + +int SampleGenerationApp::parse_arguments(const int argc, char* argv[], std::string& configFile){ + for (google::LogSeverity s = google::WARNING; s < google::NUM_SEVERITIES; s++) + google::SetLogDestination(s, ""); + google::SetLogDestination(google::INFO, "log.log"); + FLAGS_alsologtostderr = 1; + fLI::FLAGS_max_log_size=10; + + try + { + /** Define and parse the program options + */ + namespace po = boost::program_options; + po::options_description desc("Options"); + desc.add_options() + ("help", "Print help messages") + ("configFile,c", po::value(&configFile)->required()); + po::variables_map vm; + try + { + po::store(po::parse_command_line(argc, argv, desc), + vm); // can throw + + /** --help option + */ + if ( vm.count("help") ) + { + LOG(INFO) << "Basic Command Line Parameter App" << std::endl + << desc << std::endl; + return SUCCESS; + } + + 
po::notify(vm); // throws on error, so do after help in case + // there are any problems + } + catch(po::error& e) + { + LOG(ERROR) << "ERROR: " << e.what() << std::endl << std::endl; + LOG(ERROR) << desc << std::endl; + return ERROR_IN_COMMAND_LINE; + } + + // application code here // + + } + catch(std::exception& e) + { + LOG(ERROR) << "Unhandled Exception reached the top of main: " + << e.what() << ", application will now exit" << std::endl; + return ERROR_UNHANDLED_EXCEPTION; + + } + return SUCCESS; + +} + +// If all the requirements are satisfied this process further. +void SampleGenerationApp::process() { + if (verifyRequirements()) + (*this)(); +} + +// Check if all the required Parameters like evalpath,classnamesfile, weigths etc are present or not +bool SampleGenerationApp::verifyRequirements() { + bool success=true; + this->config.showConfig(); + std::string msg; + // We loop through the requiredArguments vector and check if every one of + // them is present in the loaded configFile. + for (auto it = this->requiredArguments.begin(), end =this->requiredArguments.end(); it != end; ++it){ + if (!this->config.keyExists(*it)){ + LOG(WARNING)<< "Key: " + *it + " is missing somewhere in the cofiguration file"; + // If certain Parameter is not present , a GUI is popped up to select + // that parameter + QApplication arm(this->argc,this->argv); + pop_up win; + win.SetPath(this->path); + win.SetName(*it); + win.show(); + arm.exec(); + // After selecting the property it is added to the config object + this->config.SetProperty(*it,*(this->path)); + success=false; + continue; + } + + } + // If not success , verify requirements again. 
+ if(!success) + success=SampleGenerationApp::verifyRequirements(); + return success; +} + +// Return the config parameter +Config::Properties SampleGenerationApp::getConfig() { + return config; +} + +// Destructor function +SampleGenerationApp::~SampleGenerationApp(){ +} diff --git a/DetectionMetrics/DetectionMetricsLib/Utils/SampleGenerationApp.h b/DetectionMetrics/DetectionMetricsLib/Utils/SampleGenerationApp.h new file mode 100644 index 00000000..3cc1a194 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Utils/SampleGenerationApp.h @@ -0,0 +1,36 @@ +// +// Created by frivas on 4/02/17. +// + +#ifndef SAMPLERGENERATOR_SAMPLEGENERATIOAPP_H +#define SAMPLERGENERATOR_SAMPLEGENERATIOAPP_H + +#include +#include "pop_up.h" + +class SampleGenerationApp { +public: + SampleGenerationApp(int argc, char* argv[]); + SampleGenerationApp(YAML::Node node); + SampleGenerationApp(std::string filepath,bool isPath); + ~SampleGenerationApp(); + virtual void operator()() =0; + void process(); + Config::Properties getConfig(); + + +protected: + Config::Properties config; + std::string configFilePath; + std::vector requiredArguments; + + bool verifyRequirements(); + int parse_arguments(const int argc, char* argv[], std::string& configFile); + int argc; + char** argv; +private: + std::string *path; +}; + + +#endif //SAMPLERGENERATOR_SAMPLEGENERATIOAPP_H diff --git a/DetectionMetrics/DetectionMetricsLib/Utils/StatsUtils.cpp b/DetectionMetrics/DetectionMetricsLib/Utils/StatsUtils.cpp new file mode 100644 index 00000000..023381b4 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Utils/StatsUtils.cpp @@ -0,0 +1,140 @@ +// +// Created by frivas on 7/02/17. 
+// + +#include "StatsUtils.h" +#include +double StatsUtils::getIOU(const cv::Rect_ >, const cv::Rect_ &detection, bool isCrowd) { + //compute iou + + double xA = std::max(gt.x, detection.x); + double yA = std::max(gt.y, detection.y); + double xB = std::min(gt.x + gt.width, detection.x + detection.width); + double yB = std::min(gt.y + gt.height, detection.y + detection.height); + + // computer area of intersection + double interArea = ((xB - xA) > 0 ? (xB - xA) : 0 ) * ((yB - yA) > 0 ? (yB - yA) : 0); + + // compute the area of both the prediction and ground-truth + // rectangles + double boxAArea = (gt.width) * (gt.height); + double boxBArea = (detection.width) * (detection.height); + + //compute the intersection over union by taking the intersection + //area and dividing it by the sum of prediction + ground-truth + //areas - the interesection area + double iou; + if (isCrowd) { + iou = interArea / (boxBArea); + } else { + iou = interArea / (boxAArea + boxBArea - interArea); + } + + + //std::cout << gt.x << " " << gt.y << " " << gt.width << " " << gt.height << '\n'; + //std::cout << detection.x << " " << detection.y << " " << detection.width << " " << detection.height << '\n'; + //std::cout << imageSize << '\n'; + + return iou; + + /*cv::Mat maskGT(imageSize, CV_8UC1, cv::Scalar(0)); + cv::Mat maskDetection(imageSize, CV_8UC1, cv::Scalar(0)); + + cv::rectangle(maskGT, gt, cv::Scalar(255), -1); + cv::rectangle(maskDetection, detection, cv::Scalar(255), -1); + + cv::Mat unionMask(imageSize, CV_8UC1, cv::Scalar(0)); + cv::rectangle(unionMask, gt, cv::Scalar(150), -1); + cv::rectangle(unionMask, detection, cv::Scalar(255), -1); + + cv::Mat interSection = maskGT & maskDetection; + + int interSectionArea = cv::countNonZero(interSection); + int unionArea = cv::countNonZero(unionMask); + double iouValue = double(interSectionArea) / double(unionArea); + return iouValue;*/ +} + +void StatsUtils::computeIOUMatrix(Sample gt, Sample detection, Eval::EvalMatrix& evalmatrix, 
bool isIouTypeBbox) { + + if (!evalmatrix.empty()) { + throw std::runtime_error("EvalMatrix with sample ID isn't empty, Data might be duplicated while Evaluation"); + } + + + if (isIouTypeBbox) { + + auto detectionRegions = detection.getRectRegions()->getRegions(); + auto gtRegions = gt.getRectRegions()->getRegions(); + + // Sorting RectRegions by confidence_score for same classID only + // So, first of all it is necessary to segregate out RectRegions with + // different classIds + + + for (auto itDetection = detectionRegions.begin(); itDetection != detectionRegions.end(); ++itDetection) { + + std::string classID = itDetection->classID; + std::vector detectionIOUclassRow; + + for(auto itgt = gtRegions.begin(); itgt != gtRegions.end(); itgt++) { + + if (itgt->classID != classID) { + continue; + } + + double iouValue; + //std::cout << itDetection->classID << " " << itDetection->confidence_score <<'\n'; + iouValue = StatsUtils::getIOU(itgt->region, itDetection->region, itgt->isCrowd); + //std::cout << "Bbox Gt: " << itgt->region.x << " " << itgt->region.y << " " << itgt->region.width << " " << itgt->region.height << '\n'; + //std::cout << "Bbox Dt: " << itDetection->region.x << " " << itDetection->region.y << " " << itDetection->region.width << " " << itDetection->region.height << '\n'; + //std::cout << iouValue << '\n'; + detectionIOUclassRow.push_back(iouValue); + + } + + evalmatrix[classID].push_back(detectionIOUclassRow); + + } + + } else { + + LOG(INFO) << "For Seg regions" << '\n'; + + auto detectionRegions = detection.getRleRegions()->getRegions(); + auto gtRegions = gt.getRleRegions()->getRegions(); + + int m = gtRegions.size(); + int n = detectionRegions.size(); + + for (auto itDetection = detectionRegions.begin(); itDetection != detectionRegions.end(); ++itDetection) { + + std::string classID = itDetection->classID; + std::vector detectionIOUclassRow; + + for(auto itgt = gtRegions.begin(); itgt != gtRegions.end(); itgt++) { + + if (itgt->classID != classID) 
{ + continue; + } + + double iouValue; + //std::cout << itDetection->classID << " " << itDetection->confidence_score <<'\n'; + //iouValue = StatsUtils::getIOU(itgt->region, itDetection->region, itgt->isCrowd); + unsigned char isCrowd = itgt->isCrowd ? 1 : 0; + rleIou(&(itDetection->region), &(itgt->region), 1, 1, &isCrowd, &iouValue); + //std::cout << "Bbox Gt: " << itgt->region.x << " " << itgt->region.y << " " << itgt->region.width << " " << itgt->region.height << '\n'; + //std::cout << "Bbox Dt: " << itDetection->region.x << " " << itDetection->region.y << " " << itDetection->region.width << " " << itDetection->region.height << '\n'; + //std::cout << iouValue << '\n'; + detectionIOUclassRow.push_back(iouValue); + + } + + evalmatrix[classID].push_back(detectionIOUclassRow); + + } + + } + + +} diff --git a/DetectionMetrics/DetectionMetricsLib/Utils/StatsUtils.h b/DetectionMetrics/DetectionMetricsLib/Utils/StatsUtils.h new file mode 100644 index 00000000..02735877 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Utils/StatsUtils.h @@ -0,0 +1,19 @@ +// +// Created by frivas on 7/02/17. +// + +#ifndef SAMPLERGENERATOR_STATSUTILS_H +#define SAMPLERGENERATOR_STATSUTILS_H + +#include +#include +#include + +class StatsUtils { +public: + static double getIOU(const cv::Rect_ >, const cv::Rect_ &detection, bool isCrowd); + static void computeIOUMatrix(Sample gt, Sample detection, Eval::EvalMatrix& evalmatrix, bool isIouTypeBbox); +}; + + +#endif //SAMPLERGENERATOR_STATSUTILS_H diff --git a/DetectionMetrics/DetectionMetricsLib/Utils/StringHandler.h b/DetectionMetrics/DetectionMetricsLib/Utils/StringHandler.h new file mode 100644 index 00000000..5b0927af --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Utils/StringHandler.h @@ -0,0 +1,35 @@ +// +// Created by frivas on 30/01/17. 
+// + +#ifndef SAMPLERGENERATOR_STRINGHANDLER_H +#define SAMPLERGENERATOR_STRINGHANDLER_H + + +#include +#include +#include + + +class StringHandler { +private: + template + static void split(const std::string &s, char delim, Out result) { + std::stringstream ss; + ss.str(s); + std::string item; + while (std::getline(ss, item, delim)) { + *(result++) = item; + } + } + +public: + static std::vector split(const std::string &s, char delim) { + std::vector elems; + split(s, delim, std::back_inserter(elems)); + return elems; + } +}; + + +#endif //SAMPLERGENERATOR_STRINGHANDLER_H diff --git a/DetectionMetrics/DetectionMetricsLib/Utils/addclass.cpp b/DetectionMetrics/DetectionMetricsLib/Utils/addclass.cpp new file mode 100644 index 00000000..ed1d0147 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Utils/addclass.cpp @@ -0,0 +1,52 @@ +#include "addclass.h" +#include "ui_addclass.h" + +// Classic constructor function , where all the buttons are given some action to +// check for , then the corresponding callback functions are called. +AddClass::AddClass(QWidget *parent) : QMainWindow(parent),ui(new Ui::AddClass){ + ui->setupUi(this); + connect(ui->pushButton_ok , SIGNAL (clicked()),this, SLOT (HandlePushButton_ok())); + connect(ui->pushButton_cancel, SIGNAL (clicked()),this, SLOT (HandlePushButton_cancel())); + connect(ui->checkBox, SIGNAL (clicked()),this, SLOT (HandleCheckbox())); + AddClass::HandleCheckbox(); +} + +// Destructor +AddClass::~AddClass(){ + delete ui; +} + +// Don't exit until all the required parameters are +void AddClass::HandlePushButton_ok(){ + *(this->name_f) = ui->checkBox->isChecked() ? 
ui->comboBox->currentText().toUtf8().constData() : + ui->lineEdit->text().toUtf8().constData() ; + *(this->probability) = ui->probability->text().toDouble(); + if(!this->name_f->length() || !ui->probability->text().length()) + return; + delete this; +} + +// Delete this Q_OBJECT +void AddClass::HandlePushButton_cancel(){ + delete this; +} + +// load the classes from the classNames files and other parameters like probability and final names +void AddClass::SetInit(std::vector*classNames,std::string *name_f,double *probability){ + for(unsigned int i=0;isize();i++) + ui->comboBox->addItem(QString::fromStdString(classNames->at(i))); + this->name_f=name_f; + this->probability = probability; +} + +// Wait untill the user finish interacting with user +void AddClass::wait(){ + QEventLoop loop; + connect(this, SIGNAL(destroyed()), &loop, SLOT(quit())); + loop.exec(); +} + +void AddClass::HandleCheckbox(){ + ui->comboBox->setDisabled(!ui->checkBox->isChecked()); + ui->lineEdit->setDisabled(ui->checkBox->isChecked()); +} diff --git a/DetectionMetrics/DetectionMetricsLib/Utils/addclass.h b/DetectionMetrics/DetectionMetricsLib/Utils/addclass.h new file mode 100644 index 00000000..ad7c8163 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Utils/addclass.h @@ -0,0 +1,35 @@ +#ifndef ADDCLASS_H +#define ADDCLASS_H + +#include +#include +#include + +namespace Ui { +class AddClass; +} + +class AddClass : public QMainWindow +{ + Q_OBJECT + +public: + explicit AddClass(QWidget *parent = 0); + ~AddClass(); + void SetInit(std::vector *classNames,std::string *name_f,double *probability); + void wait(); + +private slots: + void HandlePushButton_ok(); + void HandlePushButton_cancel(); + void HandleCheckbox(); + +private: + Ui::AddClass *ui; + // Final name + std::string *name_f; + // Final probability + double *probability; +}; + +#endif // ADDCLASS_H diff --git a/DetectionMetrics/DetectionMetricsLib/Utils/addclass.ui b/DetectionMetrics/DetectionMetricsLib/Utils/addclass.ui new file 
mode 100644 index 00000000..85ab5c8c --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Utils/addclass.ui @@ -0,0 +1,146 @@ + + + AddClass + + + + 0 + 0 + 400 + 300 + + + + AddClass + + + + + + 60 + 90 + 261 + 31 + + + + + + + Select from the list : + + + + + + + + + + + + 50 + 10 + 291 + 29 + + + + + + + Enter the class name : + + + + + + + + + + + + 190 + 210 + 206 + 29 + + + + + + + Ok + + + + + + + Cancel + + + + + + + + + 90 + 50 + 201 + 22 + + + + choose from class names + + + + + + 50 + 150 + 291 + 29 + + + + + + + Enter Class Probability : + + + + + + + + + + + + + 0 + 0 + 400 + 25 + + + + + + TopToolBarArea + + + false + + + + + + + + diff --git a/DetectionMetrics/DetectionMetricsLib/Utils/pop_up.cpp b/DetectionMetrics/DetectionMetricsLib/Utils/pop_up.cpp new file mode 100644 index 00000000..b8f355f6 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Utils/pop_up.cpp @@ -0,0 +1,47 @@ +#include "pop_up.h" +#include "ui_pop_up.h" +#include +#include + +// Classic constructor function , where all the buttons are given some action to +// check for , then the corresponding callback functions are called. 
+pop_up::pop_up(QWidget *parent) : QMainWindow(parent), ui(new Ui::pop_up){ + ui->setupUi(this); + connect(ui->toolButton_1, SIGNAL (clicked()),this, SLOT (HandleToolButton_1())); + connect(ui->pushButton_ok, SIGNAL (clicked()),this, SLOT (HandlePushButton_ok())); +} + +// Destructor +pop_up::~pop_up() +{ + delete ui; +} + +// Selecte the missing file +void pop_up::HandleToolButton_1(){ + QString dir_name = QFileDialog::getExistingDirectory(this,"Select config file","~/"); + ui->lineEdit->setText(dir_name); + *(this->path) = dir_name.toUtf8().constData(); +} + +// If selected proceed further else return warning +void pop_up::HandlePushButton_ok(){ + if(!ui->lineEdit->text().size()){ + QMessageBox::warning(this,"Warning","Please provide " + this->name + + " parameter to continue"); + return; + } + QApplication::quit(); + QCoreApplication::quit(); +} + +void pop_up::SetName(std::string Name){ + // Convert from string to QString + this->name = QString::fromStdString(Name); + ui->label->setText(this->name); +} + +// Set the path member variable to the selected path +void pop_up::SetPath(std::string *path){ + this->path = path; +} diff --git a/DetectionMetrics/DetectionMetricsLib/Utils/pop_up.h b/DetectionMetrics/DetectionMetricsLib/Utils/pop_up.h new file mode 100644 index 00000000..f8d8c679 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Utils/pop_up.h @@ -0,0 +1,36 @@ +#ifndef POP_UP_H +#define POP_UP_H + +// This is triggered if any of the required parameters are missing from the config file + +#include +#include "QMainWindow" + +#include +#include + +namespace Ui { +class pop_up; +} + +class pop_up : public QMainWindow +{ + Q_OBJECT + +public: + explicit pop_up(QWidget *parent = 0); + void SetName(std::string Name); + void SetPath(std::string *path); + ~pop_up(); +private slots: + void HandleToolButton_1(); + void HandlePushButton_ok(); + +private: + Ui::pop_up *ui; + YAML::Node node; + QString name; + std::string *path; +}; + +#endif // POP_UP_H diff 
--git a/DetectionMetrics/DetectionMetricsLib/Utils/pop_up.ui b/DetectionMetrics/DetectionMetricsLib/Utils/pop_up.ui new file mode 100644 index 00000000..d44b9cce --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Utils/pop_up.ui @@ -0,0 +1,93 @@ + + + pop_up + + + + 0 + 0 + 397 + 253 + + + + pop_up + + + + + + 50 + 70 + 253 + 31 + + + + + + + TextLabel + + + + + + + + + + + + ... + + + + + + + + + + + 230 + 150 + 121 + 29 + + + + + + + OK + + + + + + + + + + 0 + 0 + 397 + 25 + + + + + + TopToolBarArea + + + false + + + + + + + + diff --git a/DetectionMetrics/DetectionMetricsLib/Utils/setclass.cpp b/DetectionMetrics/DetectionMetricsLib/Utils/setclass.cpp new file mode 100644 index 00000000..c53ee3a5 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Utils/setclass.cpp @@ -0,0 +1,43 @@ +#include "setclass.h" +#include "ui_setclass.h" + +// Classic constructor function , where all the buttons are given some action to +// check for , then the corresponding callback functions are called. +SetClass::SetClass(QWidget *parent) : QMainWindow(parent),ui(new Ui::SetClass){ + ui->setupUi(this); + connect(ui->pushButton_ok , SIGNAL (clicked()),this, SLOT (HandlePushButton_ok())); + connect(ui->pushButton_cancel, SIGNAL (clicked()),this, SLOT (HandlePushButton_cancel())); +} + +// Destructor +SetClass::~SetClass(){ + delete ui; +} + +// Set the selected class name and delete "this" Q_OBJECT +void SetClass::HandlePushButton_ok(){ + *(this->name_f)= ui->comboBox->currentText().toUtf8().constData(); + delete this; +} + +// Delete this Q_OBJECT +void SetClass::HandlePushButton_cancel(){ + delete this; +} + +// Set the current class name of the file which will be changed later by the user. 
+void SetClass::SetInit(std::string *str , std::vector*classNames,std::string *name_f){ + ui->lineEdit->setText(QString::fromStdString(*str)); + // Loop through all the avialable classes present in the classnames file + for(unsigned int i=0;isize();i++) + ui->comboBox->addItem(QString::fromStdString(classNames->at(i))); + + this->name_f=name_f; +} + +// Wait until the user selects a class, i.e stop everything else , including DetectionsSuite +void SetClass::wait(){ + QEventLoop loop; + connect(this, SIGNAL(destroyed()), &loop, SLOT(quit())); + loop.exec(); +} diff --git a/DetectionMetrics/DetectionMetricsLib/Utils/setclass.h b/DetectionMetrics/DetectionMetricsLib/Utils/setclass.h new file mode 100644 index 00000000..439f1306 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Utils/setclass.h @@ -0,0 +1,30 @@ +#ifndef SETCLASS_H +#define SETCLASS_H +// This is used to change the class names of the wrongly detected classes. +#include +#include +#include + +namespace Ui { +class SetClass; +} + +class SetClass : public QMainWindow{ + Q_OBJECT + +public: + explicit SetClass(QWidget *parent = 0); + ~SetClass(); + void SetInit(std::string *name, std::vector *classNames,std::string *name_f); + void wait(); + +private slots: + void HandlePushButton_ok(); + void HandlePushButton_cancel(); + +private: + Ui::SetClass *ui; + std::string *name_f; +}; + +#endif // SETCLASS_H diff --git a/DetectionMetrics/DetectionMetricsLib/Utils/setclass.ui b/DetectionMetrics/DetectionMetricsLib/Utils/setclass.ui new file mode 100644 index 00000000..272b0c2d --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/Utils/setclass.ui @@ -0,0 +1,92 @@ + + + SetClass + + + + 0 + 0 + 400 + 300 + + + + SetClass + + + + + + 10 + 70 + 383 + 29 + + + + + + + + + + will be converted to + + + + + + + + + + + + 170 + 180 + 178 + 29 + + + + + + + OK + + + + + + + CANCEL + + + + + + + + + + 0 + 0 + 400 + 25 + + + + + + TopToolBarArea + + + false + + + + + + + + diff --git 
a/DetectionMetrics/DetectionMetricsLib/python_modules/keras_detect.py b/DetectionMetrics/DetectionMetricsLib/python_modules/keras_detect.py new file mode 100644 index 00000000..a52ec839 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/python_modules/keras_detect.py @@ -0,0 +1,61 @@ +import numpy as np +import sys +import time +import cv2 + +from PIL import Image + +from keras import backend as K +from keras.models import load_model +from keras.preprocessing import image + +from keras_utils.keras_ssd_loss import SSDLoss +from keras_utils.keras_layer_AnchorBoxes import AnchorBoxes +from keras_utils.keras_layer_DecodeDetections import DecodeDetections +from keras_utils.keras_layer_L2Normalization import L2Normalization + + +class KerasDetector: + + def __init__(self, path_to_hdf5): + ssd_loss = SSDLoss(neg_pos_ratio=3, n_neg_min=0, alpha=1.0) + + K.clear_session() # Clear previous models from memory. + + self.model = load_model(path_to_hdf5, custom_objects={'AnchorBoxes': AnchorBoxes, + 'L2Normalization': L2Normalization, + 'DecodeDetections': DecodeDetections, + 'compute_loss': ssd_loss.compute_loss}) + + input_size = self.model.input.shape.as_list() + self.img_height = input_size[1] + self.img_width = input_size[2] + print(self.img_width, self.img_height) + + def detect(self, img, threshold): + print("Starting inference") + input_images = [] + + as_image = Image.fromarray(img) + resized = as_image.resize((self.img_width, self.img_height), Image.NEAREST) + + img_r = image.img_to_array(resized) + input_images.append(img_r) + input_images = np.array(input_images) + + start_time = time.time() + + y_pred = self.model.predict(input_images) + + y_pred_thresh = [y_pred[k][y_pred[k, :, 1] >= threshold] for k in range(y_pred.shape[0])] + + y_thresh_array = np.array(y_pred_thresh[0]) + + y_thresh_array[:, 2] /= self.img_width + y_thresh_array[:, 3] /= self.img_height + y_thresh_array[:, 4] /= self.img_width + y_thresh_array[:, 5] /= self.img_height + + print("Inference 
Time: " + str(time.time() - start_time) + " seconds") + + return y_thresh_array diff --git a/detectionmetrics/__init__.py b/DetectionMetrics/DetectionMetricsLib/python_modules/keras_utils/__init__.py similarity index 100% rename from detectionmetrics/__init__.py rename to DetectionMetrics/DetectionMetricsLib/python_modules/keras_utils/__init__.py diff --git a/DetectionMetrics/DetectionMetricsLib/python_modules/keras_utils/bounding_box_utils.py b/DetectionMetrics/DetectionMetricsLib/python_modules/keras_utils/bounding_box_utils.py new file mode 100644 index 00000000..a4f23a66 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/python_modules/keras_utils/bounding_box_utils.py @@ -0,0 +1,356 @@ +''' +Includes: +* Function to compute the IoU similarity for axis-aligned, rectangular, 2D bounding boxes +* Function for coordinate conversion for axis-aligned, rectangular, 2D bounding boxes + +Copyright (C) 2017 Pierluigi Ferrari + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. If not, see . +''' + +from __future__ import division +import numpy as np + + +def convert_coordinates(tensor, start_index, conversion): + ''' + Convert coordinates for axis-aligned 2D boxes between two coordinate formats. + + Creates a copy of `tensor`, i.e. does not operate in place. 
Currently there are + three supported coordinate formats that can be converted from and to each other: + 1) (xmin, xmax, ymin, ymax) - the 'minmax' format + 2) (xmin, ymin, xmax, ymax) - the 'corners' format + 2) (cx, cy, w, h) - the 'centroids' format + + Arguments: + tensor (array): A Numpy nD array containing the four consecutive coordinates + to be converted somewhere in the last axis. + start_index (int): The index of the first coordinate in the last axis of `tensor`. + conversion (str, optional): The conversion direction. Can be 'minmax2centroids', + 'centroids2minmax', 'corners2centroids', 'centroids2corners', 'minmax2corners', + or 'corners2minmax'. + + Returns: + A Numpy nD array, a copy of the input tensor with the converted coordinates + in place of the original coordinates and the unaltered elements of the original + tensor elsewhere. + ''' + ind = start_index + tensor1 = np.copy(tensor).astype(np.float) + if conversion == 'minmax2centroids': + tensor1[..., ind] = (tensor[..., ind] + tensor[..., ind + 1]) / 2.0 # Set cx + tensor1[..., ind + 1] = (tensor[..., ind + 2] + tensor[..., ind + 3]) / 2.0 # Set cy + tensor1[..., ind + 2] = tensor[..., ind + 1] - tensor[..., ind] # Set w + tensor1[..., ind + 3] = tensor[..., ind + 3] - tensor[..., ind + 2] # Set h + elif conversion == 'centroids2minmax': + tensor1[..., ind] = tensor[..., ind] - tensor[..., ind + 2] / 2.0 # Set xmin + tensor1[..., ind + 1] = tensor[..., ind] + tensor[..., ind + 2] / 2.0 # Set xmax + tensor1[..., ind + 2] = tensor[..., ind + 1] - tensor[..., ind + 3] / 2.0 # Set ymin + tensor1[..., ind + 3] = tensor[..., ind + 1] + tensor[..., ind + 3] / 2.0 # Set ymax + elif conversion == 'corners2centroids': + tensor1[..., ind] = (tensor[..., ind] + tensor[..., ind + 2]) / 2.0 # Set cx + tensor1[..., ind + 1] = (tensor[..., ind + 1] + tensor[..., ind + 3]) / 2.0 # Set cy + tensor1[..., ind + 2] = tensor[..., ind + 2] - tensor[..., ind] # Set w + tensor1[..., ind + 3] = tensor[..., ind + 3] - 
tensor[..., ind + 1] # Set h + elif conversion == 'centroids2corners': + tensor1[..., ind] = tensor[..., ind] - tensor[..., ind + 2] / 2.0 # Set xmin + tensor1[..., ind + 1] = tensor[..., ind + 1] - tensor[..., ind + 3] / 2.0 # Set ymin + tensor1[..., ind + 2] = tensor[..., ind] + tensor[..., ind + 2] / 2.0 # Set xmax + tensor1[..., ind + 3] = tensor[..., ind + 1] + tensor[..., ind + 3] / 2.0 # Set ymax + elif (conversion == 'minmax2corners') or (conversion == 'corners2minmax'): + tensor1[..., ind + 1] = tensor[..., ind + 2] + tensor1[..., ind + 2] = tensor[..., ind + 1] + else: + raise ValueError( + "Unexpected conversion value. Supported values are 'minmax2centroids', 'centroids2minmax', 'corners2centroids', 'centroids2corners', 'minmax2corners', and 'corners2minmax'.") + + return tensor1 + + +def convert_coordinates2(tensor, start_index, conversion): + ''' + A matrix multiplication implementation of `convert_coordinates()`. + Supports only conversion between the 'centroids' and 'minmax' formats. + + This function is marginally slower on average than `convert_coordinates()`, + probably because it involves more (unnecessary) arithmetic operations (unnecessary + because the two matrices are sparse). + + For details please refer to the documentation of `convert_coordinates()`. + ''' + ind = start_index + tensor1 = np.copy(tensor).astype(np.float) + if conversion == 'minmax2centroids': + M = np.array([[0.5, 0., -1., 0.], + [0.5, 0., 1., 0.], + [0., 0.5, 0., -1.], + [0., 0.5, 0., 1.]]) + tensor1[..., ind:ind + 4] = np.dot(tensor1[..., ind:ind + 4], M) + elif conversion == 'centroids2minmax': + M = np.array([[1., 1., 0., 0.], + [0., 0., 1., 1.], + [-0.5, 0.5, 0., 0.], + [0., 0., -0.5, 0.5]]) # The multiplicative inverse of the matrix above + tensor1[..., ind:ind + 4] = np.dot(tensor1[..., ind:ind + 4], M) + else: + raise ValueError("Unexpected conversion value. 
Supported values are 'minmax2centroids' and 'centroids2minmax'.") + + return tensor1 + + +def intersection_area(boxes1, boxes2, coords='centroids', mode='outer_product'): + ''' + Computes the intersection areas of two sets of axis-aligned 2D rectangular boxes. + + Let `boxes1` and `boxes2` contain `m` and `n` boxes, respectively. + + In 'outer_product' mode, returns an `(m,n)` matrix with the intersection areas for all possible + combinations of the boxes in `boxes1` and `boxes2`. + + In 'element-wise' mode, `m` and `n` must be broadcast-compatible. Refer to the explanation + of the `mode` argument for details. + + Arguments: + boxes1 (array): Either a 1D Numpy array of shape `(4, )` containing the coordinates for one box in the + format specified by `coords` or a 2D Numpy array of shape `(m, 4)` containing the coordinates for `m` boxes. + If `mode` is set to 'element_wise', the shape must be broadcast-compatible with `boxes2`. + boxes2 (array): Either a 1D Numpy array of shape `(4, )` containing the coordinates for one box in the + format specified by `coords` or a 2D Numpy array of shape `(n, 4)` containing the coordinates for `n` boxes. + If `mode` is set to 'element_wise', the shape must be broadcast-compatible with `boxes1`. + coords (str, optional): The coordinate format in the input arrays. Can be either 'centroids' for the format + `(cx, cy, w, h)`, 'minmax' for the format `(xmin, xmax, ymin, ymax)`, or 'corners' for the format + `(xmin, ymin, xmax, ymax)`. + mode (str, optional): Can be one of 'outer_product' and 'element-wise'. In 'outer_product' mode, returns an + `(m,n)` matrix with the intersection areas for all possible combinations of the `m` boxes in `boxes1` with the + `n` boxes in `boxes2`. In 'element-wise' mode, returns a 1D array and the shapes of `boxes1` and `boxes2` + must be boadcast-compatible. 
If both `boxes1` and `boxes2` have `m` boxes, then this returns an array of + length `m` where the i-th position contains the intersection area of `boxes1[i]` with `boxes2[i]`. + + Returns: + A 1D or 2D Numpy array (refer to the `mode` argument for details) of dtype float containing values with + the intersection areas of the boxes in `boxes1` and `boxes2`. + ''' + + # Make sure the boxes have the right shapes. + if boxes1.ndim > 2: raise ValueError("boxes1 must have rank either 1 or 2, but has rank {}.".format(boxes1.ndim)) + if boxes2.ndim > 2: raise ValueError("boxes2 must have rank either 1 or 2, but has rank {}.".format(boxes2.ndim)) + + if boxes1.ndim == 1: boxes1 = np.expand_dims(boxes1, axis=0) + if boxes2.ndim == 1: boxes2 = np.expand_dims(boxes2, axis=0) + + if not (boxes1.shape[1] == boxes2.shape[1] == 4): raise ValueError( + "All boxes must consist of 4 coordinates, but the boxes in `boxes1` and `boxes2` have {} and {} coordinates, respectively.".format( + boxes1.shape[1], boxes2.shape[1])) + if not mode in {'outer_product', 'element-wise'}: raise ValueError( + "`mode` must be one of 'outer_product' and 'element-wise', but got '{}'.", format(mode)) + + # Convert the coordinates if necessary. + if coords == 'centroids': + boxes1 = convert_coordinates(boxes1, start_index=0, conversion='centroids2corners') + boxes2 = convert_coordinates(boxes2, start_index=0, conversion='centroids2corners') + coords = 'corners' + elif not (coords in {'minmax', 'corners'}): + raise ValueError("Unexpected value for `coords`. Supported values are 'minmax', 'corners' and 'centroids'.") + + m = boxes1.shape[0] # The number of boxes in `boxes1` + n = boxes2.shape[0] # The number of boxes in `boxes2` + + # Set the correct coordinate indices for the respective formats. + if coords == 'corners': + xmin = 0 + ymin = 1 + xmax = 2 + ymax = 3 + elif coords == 'minmax': + xmin = 0 + xmax = 1 + ymin = 2 + ymax = 3 + + # Compute the intersection areas. 
+ + if mode == 'outer_product': + + # For all possible box combinations, get the greater xmin and ymin values. + # This is a tensor of shape (m,n,2). + min_xy = np.maximum(np.tile(np.expand_dims(boxes1[:, [xmin, ymin]], axis=1), reps=(1, n, 1)), + np.tile(np.expand_dims(boxes2[:, [xmin, ymin]], axis=0), reps=(m, 1, 1))) + + # For all possible box combinations, get the smaller xmax and ymax values. + # This is a tensor of shape (m,n,2). + max_xy = np.minimum(np.tile(np.expand_dims(boxes1[:, [xmax, ymax]], axis=1), reps=(1, n, 1)), + np.tile(np.expand_dims(boxes2[:, [xmax, ymax]], axis=0), reps=(m, 1, 1))) + + # Compute the side lengths of the intersection rectangles. + side_lengths = np.maximum(0, max_xy - min_xy) + + return side_lengths[:, :, 0] * side_lengths[:, :, 1] + + elif mode == 'element-wise': + + min_xy = np.maximum(boxes1[:, [xmin, ymin]], boxes2[:, [xmin, ymin]]) + max_xy = np.minimum(boxes1[:, [xmax, ymax]], boxes2[:, [xmax, ymax]]) + + # Compute the side lengths of the intersection rectangles. + side_lengths = np.maximum(0, max_xy - min_xy) + + return side_lengths[:, 0] * side_lengths[:, 1] + + +def intersection_area_(boxes1, boxes2, coords='corners', mode='outer_product'): + ''' + The same as 'intersection_area()' but for internal use, i.e. without all the safety checks. + ''' + + m = boxes1.shape[0] # The number of boxes in `boxes1` + n = boxes2.shape[0] # The number of boxes in `boxes2` + + # Set the correct coordinate indices for the respective formats. + if coords == 'corners': + xmin = 0 + ymin = 1 + xmax = 2 + ymax = 3 + elif coords == 'minmax': + xmin = 0 + xmax = 1 + ymin = 2 + ymax = 3 + + # Compute the intersection areas. + + if mode == 'outer_product': + + # For all possible box combinations, get the greater xmin and ymin values. + # This is a tensor of shape (m,n,2). 
+ min_xy = np.maximum(np.tile(np.expand_dims(boxes1[:, [xmin, ymin]], axis=1), reps=(1, n, 1)), + np.tile(np.expand_dims(boxes2[:, [xmin, ymin]], axis=0), reps=(m, 1, 1))) + + # For all possible box combinations, get the smaller xmax and ymax values. + # This is a tensor of shape (m,n,2). + max_xy = np.minimum(np.tile(np.expand_dims(boxes1[:, [xmax, ymax]], axis=1), reps=(1, n, 1)), + np.tile(np.expand_dims(boxes2[:, [xmax, ymax]], axis=0), reps=(m, 1, 1))) + + # Compute the side lengths of the intersection rectangles. + side_lengths = np.maximum(0, max_xy - min_xy) + + return side_lengths[:, :, 0] * side_lengths[:, :, 1] + + elif mode == 'element-wise': + + min_xy = np.maximum(boxes1[:, [xmin, ymin]], boxes2[:, [xmin, ymin]]) + max_xy = np.minimum(boxes1[:, [xmax, ymax]], boxes2[:, [xmax, ymax]]) + + # Compute the side lengths of the intersection rectangles. + side_lengths = np.maximum(0, max_xy - min_xy) + + return side_lengths[:, 0] * side_lengths[:, 1] + + +def iou(boxes1, boxes2, coords='centroids', mode='outer_product'): + ''' + Computes the intersection-over-union similarity (also known as Jaccard similarity) + of two sets of axis-aligned 2D rectangular boxes. + + Let `boxes1` and `boxes2` contain `m` and `n` boxes, respectively. + + In 'outer_product' mode, returns an `(m,n)` matrix with the IoUs for all possible + combinations of the boxes in `boxes1` and `boxes2`. + + In 'element-wise' mode, `m` and `n` must be broadcast-compatible. Refer to the explanation + of the `mode` argument for details. + + Arguments: + boxes1 (array): Either a 1D Numpy array of shape `(4, )` containing the coordinates for one box in the + format specified by `coords` or a 2D Numpy array of shape `(m, 4)` containing the coordinates for `m` boxes. + If `mode` is set to 'element_wise', the shape must be broadcast-compatible with `boxes2`. 
+ boxes2 (array): Either a 1D Numpy array of shape `(4, )` containing the coordinates for one box in the + format specified by `coords` or a 2D Numpy array of shape `(n, 4)` containing the coordinates for `n` boxes. + If `mode` is set to 'element_wise', the shape must be broadcast-compatible with `boxes1`. + coords (str, optional): The coordinate format in the input arrays. Can be either 'centroids' for the format + `(cx, cy, w, h)`, 'minmax' for the format `(xmin, xmax, ymin, ymax)`, or 'corners' for the format + `(xmin, ymin, xmax, ymax)`. + mode (str, optional): Can be one of 'outer_product' and 'element-wise'. In 'outer_product' mode, returns an + `(m,n)` matrix with the IoU overlaps for all possible combinations of the `m` boxes in `boxes1` with the + `n` boxes in `boxes2`. In 'element-wise' mode, returns a 1D array and the shapes of `boxes1` and `boxes2` + must be boadcast-compatible. If both `boxes1` and `boxes2` have `m` boxes, then this returns an array of + length `m` where the i-th position contains the IoU overlap of `boxes1[i]` with `boxes2[i]`. + + Returns: + A 1D or 2D Numpy array (refer to the `mode` argument for details) of dtype float containing values in [0,1], + the Jaccard similarity of the boxes in `boxes1` and `boxes2`. 0 means there is no overlap between two given + boxes, 1 means their coordinates are identical. + ''' + + # Make sure the boxes have the right shapes. 
+ if boxes1.ndim > 2: raise ValueError("boxes1 must have rank either 1 or 2, but has rank {}.".format(boxes1.ndim)) + if boxes2.ndim > 2: raise ValueError("boxes2 must have rank either 1 or 2, but has rank {}.".format(boxes2.ndim)) + + if boxes1.ndim == 1: boxes1 = np.expand_dims(boxes1, axis=0) + if boxes2.ndim == 1: boxes2 = np.expand_dims(boxes2, axis=0) + + if not (boxes1.shape[1] == boxes2.shape[1] == 4): raise ValueError( + "All boxes must consist of 4 coordinates, but the boxes in `boxes1` and `boxes2` have {} and {} coordinates, respectively.".format( + boxes1.shape[1], boxes2.shape[1])) + if not mode in {'outer_product', 'element-wise'}: raise ValueError( + "`mode` must be one of 'outer_product' and 'element-wise', but got '{}'.".format(mode)) + + # Convert the coordinates if necessary. + if coords == 'centroids': + boxes1 = convert_coordinates(boxes1, start_index=0, conversion='centroids2corners') + boxes2 = convert_coordinates(boxes2, start_index=0, conversion='centroids2corners') + coords = 'corners' + elif not (coords in {'minmax', 'corners'}): + raise ValueError("Unexpected value for `coords`. Supported values are 'minmax', 'corners' and 'centroids'.") + + # Compute the IoU. + + # Compute the interesection areas. + + intersection_areas = intersection_area_(boxes1, boxes2, coords=coords, mode=mode) + + m = boxes1.shape[0] # The number of boxes in `boxes1` + n = boxes2.shape[0] # The number of boxes in `boxes2` + + # Compute the union areas. + + # Set the correct coordinate indices for the respective formats. 
+ if coords == 'corners': + xmin = 0 + ymin = 1 + xmax = 2 + ymax = 3 + elif coords == 'minmax': + xmin = 0 + xmax = 1 + ymin = 2 + ymax = 3 + + if mode == 'outer_product': + + boxes1_areas = np.tile( + np.expand_dims((boxes1[:, xmax] - boxes1[:, xmin]) * (boxes1[:, ymax] - boxes1[:, ymin]), axis=1), + reps=(1, n)) + boxes2_areas = np.tile( + np.expand_dims((boxes2[:, xmax] - boxes2[:, xmin]) * (boxes2[:, ymax] - boxes2[:, ymin]), axis=0), + reps=(m, 1)) + + elif mode == 'element-wise': + + boxes1_areas = (boxes1[:, xmax] - boxes1[:, xmin]) * (boxes1[:, ymax] - boxes1[:, ymin]) + boxes2_areas = (boxes2[:, xmax] - boxes2[:, xmin]) * (boxes2[:, ymax] - boxes2[:, ymin]) + + union_areas = boxes1_areas + boxes2_areas - intersection_areas + + return intersection_areas / union_areas diff --git a/DetectionMetrics/DetectionMetricsLib/python_modules/keras_utils/keras_layer_AnchorBoxes.py b/DetectionMetrics/DetectionMetricsLib/python_modules/keras_utils/keras_layer_AnchorBoxes.py new file mode 100644 index 00000000..2863d2be --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/python_modules/keras_utils/keras_layer_AnchorBoxes.py @@ -0,0 +1,287 @@ +''' +A custom Keras layer to generate anchor boxes. + +Copyright (C) 2017 Pierluigi Ferrari + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. If not, see . 
+''' + +from __future__ import division +import numpy as np +import keras.backend as K +from keras.engine.topology import InputSpec +from keras.engine.topology import Layer + +from bounding_box_utils import convert_coordinates + + +class AnchorBoxes(Layer): + ''' + A Keras layer to create an output tensor containing anchor box coordinates + and variances based on the input tensor and the passed arguments. + + A set of 2D anchor boxes of different aspect ratios is created for each spatial unit of + the input tensor. The number of anchor boxes created per unit depends on the arguments + `aspect_ratios` and `two_boxes_for_ar1`, in the default case it is 4. The boxes + are parameterized by the coordinate tuple `(xmin, xmax, ymin, ymax)`. + + The logic implemented by this layer is identical to the logic in the module + `ssd_box_encode_decode_utils.py`. + + The purpose of having this layer in the network is to make the model self-sufficient + at inference time. Since the model is predicting offsets to the anchor boxes + (rather than predicting absolute box coordinates directly), one needs to know the anchor + box coordinates in order to construct the final prediction boxes from the predicted offsets. + If the model's output tensor did not contain the anchor box coordinates, the necessary + information to convert the predicted offsets back to absolute coordinates would be missing + in the model output. The reason why it is necessary to predict offsets to the anchor boxes + rather than to predict absolute box coordinates directly is explained in `README.md`. + + Input shape: + 4D tensor of shape `(batch, channels, height, width)` if `dim_ordering = 'th'` + or `(batch, height, width, channels)` if `dim_ordering = 'tf'`. + + Output shape: + 5D tensor of shape `(batch, height, width, n_boxes, 8)`. The last axis contains + the four anchor box coordinates and the four variance values for each box. 
+ ''' + + def __init__(self, + img_height, + img_width, + this_scale, + next_scale, + aspect_ratios=[0.5, 1.0, 2.0], + two_boxes_for_ar1=True, + this_steps=None, + this_offsets=None, + clip_boxes=False, + variances=[0.1, 0.1, 0.2, 0.2], + coords='centroids', + normalize_coords=False, + **kwargs): + ''' + All arguments need to be set to the same values as in the box encoding process, otherwise the behavior is undefined. + Some of these arguments are explained in more detail in the documentation of the `SSDBoxEncoder` class. + + Arguments: + img_height (int): The height of the input images. + img_width (int): The width of the input images. + this_scale (float): A float in [0, 1], the scaling factor for the size of the generated anchor boxes + as a fraction of the shorter side of the input image. + next_scale (float): A float in [0, 1], the next larger scaling factor. Only relevant if + `self.two_boxes_for_ar1 == True`. + aspect_ratios (list, optional): The list of aspect ratios for which default boxes are to be + generated for this layer. + two_boxes_for_ar1 (bool, optional): Only relevant if `aspect_ratios` contains 1. + If `True`, two default boxes will be generated for aspect ratio 1. The first will be generated + using the scaling factor for the respective layer, the second one will be generated using + geometric mean of said scaling factor and next bigger scaling factor. + clip_boxes (bool, optional): If `True`, clips the anchor box coordinates to stay within image boundaries. + variances (list, optional): A list of 4 floats >0. The anchor box offset for each coordinate will be divided by + its respective variance value. + coords (str, optional): The box coordinate format to be used internally in the model (i.e. this is not the input format + of the ground truth labels). 
Can be either 'centroids' for the format `(cx, cy, w, h)` (box center coordinates, width, and height), + 'corners' for the format `(xmin, ymin, xmax, ymax)`, or 'minmax' for the format `(xmin, xmax, ymin, ymax)`. + normalize_coords (bool, optional): Set to `True` if the model uses relative instead of absolute coordinates, + i.e. if the model predicts box coordinates within [0,1] instead of absolute coordinates. + ''' + if K.backend() != 'tensorflow': + raise TypeError( + "This layer only supports TensorFlow at the moment, but you are using the {} backend.".format( + K.backend())) + + if (this_scale < 0) or (next_scale < 0) or (this_scale > 1): + raise ValueError( + "`this_scale` must be in [0, 1] and `next_scale` must be >0, but `this_scale` == {}, `next_scale` == {}".format( + this_scale, next_scale)) + + if len(variances) != 4: + raise ValueError("4 variance values must be pased, but {} values were received.".format(len(variances))) + variances = np.array(variances) + if np.any(variances <= 0): + raise ValueError("All variances must be >0, but the variances given are {}".format(variances)) + + self.img_height = img_height + self.img_width = img_width + self.this_scale = this_scale + self.next_scale = next_scale + self.aspect_ratios = aspect_ratios + self.two_boxes_for_ar1 = two_boxes_for_ar1 + self.this_steps = this_steps + self.this_offsets = this_offsets + self.clip_boxes = clip_boxes + self.variances = variances + self.coords = coords + self.normalize_coords = normalize_coords + # Compute the number of boxes per cell + if (1 in aspect_ratios) and two_boxes_for_ar1: + self.n_boxes = len(aspect_ratios) + 1 + else: + self.n_boxes = len(aspect_ratios) + super(AnchorBoxes, self).__init__(**kwargs) + + def build(self, input_shape): + self.input_spec = [InputSpec(shape=input_shape)] + super(AnchorBoxes, self).build(input_shape) + + def call(self, x, mask=None): + ''' + Return an anchor box tensor based on the shape of the input tensor. 
+ + The logic implemented here is identical to the logic in the module `ssd_box_encode_decode_utils.py`. + + Note that this tensor does not participate in any graph computations at runtime. It is being created + as a constant once during graph creation and is just being output along with the rest of the model output + during runtime. Because of this, all logic is implemented as Numpy array operations and it is sufficient + to convert the resulting Numpy array into a Keras tensor at the very end before outputting it. + + Arguments: + x (tensor): 4D tensor of shape `(batch, channels, height, width)` if `dim_ordering = 'th'` + or `(batch, height, width, channels)` if `dim_ordering = 'tf'`. The input for this + layer must be the output of the localization predictor layer. + ''' + + # Compute box width and height for each aspect ratio + # The shorter side of the image will be used to compute `w` and `h` using `scale` and `aspect_ratios`. + size = min(self.img_height, self.img_width) + # Compute the box widths and and heights for all aspect ratios + wh_list = [] + for ar in self.aspect_ratios: + if (ar == 1): + # Compute the regular anchor box for aspect ratio 1. + box_height = box_width = self.this_scale * size + wh_list.append((box_width, box_height)) + if self.two_boxes_for_ar1: + # Compute one slightly larger version using the geometric mean of this scale value and the next. 
+ box_height = box_width = np.sqrt(self.this_scale * self.next_scale) * size + wh_list.append((box_width, box_height)) + else: + box_height = self.this_scale * size / np.sqrt(ar) + box_width = self.this_scale * size * np.sqrt(ar) + wh_list.append((box_width, box_height)) + wh_list = np.array(wh_list) + + # We need the shape of the input tensor + if K.image_dim_ordering() == 'tf': + batch_size, feature_map_height, feature_map_width, feature_map_channels = x._keras_shape + else: # Not yet relevant since TensorFlow is the only supported backend right now, but it can't harm to have this in here for the future + batch_size, feature_map_channels, feature_map_height, feature_map_width = x._keras_shape + + # Compute the grid of box center points. They are identical for all aspect ratios. + + # Compute the step sizes, i.e. how far apart the anchor box center points will be vertically and horizontally. + if (self.this_steps is None): + step_height = self.img_height / feature_map_height + step_width = self.img_width / feature_map_width + else: + if isinstance(self.this_steps, (list, tuple)) and (len(self.this_steps) == 2): + step_height = self.this_steps[0] + step_width = self.this_steps[1] + elif isinstance(self.this_steps, (int, float)): + step_height = self.this_steps + step_width = self.this_steps + # Compute the offsets, i.e. at what pixel values the first anchor box center point will be from the top and from the left of the image. + if (self.this_offsets is None): + offset_height = 0.5 + offset_width = 0.5 + else: + if isinstance(self.this_offsets, (list, tuple)) and (len(self.this_offsets) == 2): + offset_height = self.this_offsets[0] + offset_width = self.this_offsets[1] + elif isinstance(self.this_offsets, (int, float)): + offset_height = self.this_offsets + offset_width = self.this_offsets + # Now that we have the offsets and step sizes, compute the grid of anchor box center points. 
+ cy = np.linspace(offset_height * step_height, (offset_height + feature_map_height - 1) * step_height, + feature_map_height) + cx = np.linspace(offset_width * step_width, (offset_width + feature_map_width - 1) * step_width, + feature_map_width) + cx_grid, cy_grid = np.meshgrid(cx, cy) + cx_grid = np.expand_dims(cx_grid, -1) # This is necessary for np.tile() to do what we want further down + cy_grid = np.expand_dims(cy_grid, -1) # This is necessary for np.tile() to do what we want further down + + # Create a 4D tensor template of shape `(feature_map_height, feature_map_width, n_boxes, 4)` + # where the last dimension will contain `(cx, cy, w, h)` + boxes_tensor = np.zeros((feature_map_height, feature_map_width, self.n_boxes, 4)) + + boxes_tensor[:, :, :, 0] = np.tile(cx_grid, (1, 1, self.n_boxes)) # Set cx + boxes_tensor[:, :, :, 1] = np.tile(cy_grid, (1, 1, self.n_boxes)) # Set cy + boxes_tensor[:, :, :, 2] = wh_list[:, 0] # Set w + boxes_tensor[:, :, :, 3] = wh_list[:, 1] # Set h + + # Convert `(cx, cy, w, h)` to `(xmin, xmax, ymin, ymax)` + boxes_tensor = convert_coordinates(boxes_tensor, start_index=0, conversion='centroids2corners') + + # If `clip_boxes` is enabled, clip the coordinates to lie within the image boundaries + if self.clip_boxes: + x_coords = boxes_tensor[:, :, :, [0, 2]] + x_coords[x_coords >= self.img_width] = self.img_width - 1 + x_coords[x_coords < 0] = 0 + boxes_tensor[:, :, :, [0, 2]] = x_coords + y_coords = boxes_tensor[:, :, :, [1, 3]] + y_coords[y_coords >= self.img_height] = self.img_height - 1 + y_coords[y_coords < 0] = 0 + boxes_tensor[:, :, :, [1, 3]] = y_coords + + # If `normalize_coords` is enabled, normalize the coordinates to be within [0,1] + if self.normalize_coords: + boxes_tensor[:, :, :, [0, 2]] /= self.img_width + boxes_tensor[:, :, :, [1, 3]] /= self.img_height + + # TODO: Implement box limiting directly for `(cx, cy, w, h)` so that we don't have to unnecessarily convert back and forth. 
+ if self.coords == 'centroids': + # Convert `(xmin, ymin, xmax, ymax)` back to `(cx, cy, w, h)`. + boxes_tensor = convert_coordinates(boxes_tensor, start_index=0, conversion='corners2centroids') + elif self.coords == 'minmax': + # Convert `(xmin, ymin, xmax, ymax)` to `(xmin, xmax, ymin, ymax). + boxes_tensor = convert_coordinates(boxes_tensor, start_index=0, conversion='corners2minmax') + + # Create a tensor to contain the variances and append it to `boxes_tensor`. This tensor has the same shape + # as `boxes_tensor` and simply contains the same 4 variance values for every position in the last axis. + variances_tensor = np.zeros_like( + boxes_tensor) # Has shape `(feature_map_height, feature_map_width, n_boxes, 4)` + variances_tensor += self.variances # Long live broadcasting + # Now `boxes_tensor` becomes a tensor of shape `(feature_map_height, feature_map_width, n_boxes, 8)` + boxes_tensor = np.concatenate((boxes_tensor, variances_tensor), axis=-1) + + # Now prepend one dimension to `boxes_tensor` to account for the batch size and tile it along + # The result will be a 5D tensor of shape `(batch_size, feature_map_height, feature_map_width, n_boxes, 8)` + boxes_tensor = np.expand_dims(boxes_tensor, axis=0) + boxes_tensor = K.tile(K.constant(boxes_tensor, dtype='float32'), (K.shape(x)[0], 1, 1, 1, 1)) + + return boxes_tensor + + def compute_output_shape(self, input_shape): + if K.image_dim_ordering() == 'tf': + batch_size, feature_map_height, feature_map_width, feature_map_channels = input_shape + else: # Not yet relevant since TensorFlow is the only supported backend right now, but it can't harm to have this in here for the future + batch_size, feature_map_channels, feature_map_height, feature_map_width = input_shape + return (batch_size, feature_map_height, feature_map_width, self.n_boxes, 8) + + def get_config(self): + config = { + 'img_height': self.img_height, + 'img_width': self.img_width, + 'this_scale': self.this_scale, + 'next_scale': self.next_scale, + 
'aspect_ratios': list(self.aspect_ratios), + 'two_boxes_for_ar1': self.two_boxes_for_ar1, + 'clip_boxes': self.clip_boxes, + 'variances': list(self.variances), + 'coords': self.coords, + 'normalize_coords': self.normalize_coords + } + base_config = super(AnchorBoxes, self).get_config() + return dict(list(base_config.items()) + list(config.items())) diff --git a/DetectionMetrics/DetectionMetricsLib/python_modules/keras_utils/keras_layer_DecodeDetections.py b/DetectionMetrics/DetectionMetricsLib/python_modules/keras_utils/keras_layer_DecodeDetections.py new file mode 100644 index 00000000..69d53e45 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/python_modules/keras_utils/keras_layer_DecodeDetections.py @@ -0,0 +1,295 @@ +''' +A custom Keras layer to decode the raw SSD prediction output. Corresponds to the +`DetectionOutput` layer type in the original Caffe implementation of SSD. + +Copyright (C) 2018 Pierluigi Ferrari + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. If not, see . +''' + +from __future__ import division +import numpy as np +import tensorflow as tf +import keras.backend as K +from keras.engine.topology import InputSpec +from keras.engine.topology import Layer + + +class DecodeDetections(Layer): + ''' + A Keras layer to decode the raw SSD prediction output. + + Input shape: + 3D tensor of shape `(batch_size, n_boxes, n_classes + 12)`. + + Output shape: + 3D tensor of shape `(batch_size, top_k, 6)`. 
+ ''' + + def __init__(self, + confidence_thresh=0.01, + iou_threshold=0.45, + top_k=200, + nms_max_output_size=400, + coords='centroids', + normalize_coords=True, + img_height=None, + img_width=None, + **kwargs): + ''' + All default argument values follow the Caffe implementation. + + Arguments: + confidence_thresh (float, optional): A float in [0,1), the minimum classification confidence in a specific + positive class in order to be considered for the non-maximum suppression stage for the respective class. + A lower value will result in a larger part of the selection process being done by the non-maximum suppression + stage, while a larger value will result in a larger part of the selection process happening in the confidence + thresholding stage. + iou_threshold (float, optional): A float in [0,1]. All boxes with a Jaccard similarity of greater than `iou_threshold` + with a locally maximal box will be removed from the set of predictions for a given class, where 'maximal' refers + to the box score. + top_k (int, optional): The number of highest scoring predictions to be kept for each batch item after the + non-maximum suppression stage. + nms_max_output_size (int, optional): The maximum number of predictions that will be left after performing non-maximum + suppression. + coords (str, optional): The box coordinate format that the model outputs. Must be 'centroids' + i.e. the format `(cx, cy, w, h)` (box center coordinates, width, and height). Other coordinate formats are + currently not supported. + normalize_coords (bool, optional): Set to `True` if the model outputs relative coordinates (i.e. coordinates in [0,1]) + and you wish to transform these relative coordinates back to absolute coordinates. If the model outputs + relative coordinates, but you do not want to convert them back to absolute coordinates, set this to `False`. + Do not set this to `True` if the model already outputs absolute coordinates, as that would result in incorrect + coordinates. 
Requires `img_height` and `img_width` if set to `True`. + img_height (int, optional): The height of the input images. Only needed if `normalize_coords` is `True`. + img_width (int, optional): The width of the input images. Only needed if `normalize_coords` is `True`. + ''' + if K.backend() != 'tensorflow': + raise TypeError( + "This layer only supports TensorFlow at the moment, but you are using the {} backend.".format( + K.backend())) + + if normalize_coords and ((img_height is None) or (img_width is None)): + raise ValueError( + "If relative box coordinates are supposed to be converted to absolute coordinates, the decoder needs the image size in order to decode the predictions, but `img_height == {}` and `img_width == {}`".format( + img_height, img_width)) + + if coords != 'centroids': + raise ValueError("The DetectionOutput layer currently only supports the 'centroids' coordinate format.") + + # We need these members for the config. + self.confidence_thresh = confidence_thresh + self.iou_threshold = iou_threshold + self.top_k = top_k + self.normalize_coords = normalize_coords + self.img_height = img_height + self.img_width = img_width + self.coords = coords + self.nms_max_output_size = nms_max_output_size + + # We need these members for TensorFlow. 
+ self.tf_confidence_thresh = tf.constant(self.confidence_thresh, name='confidence_thresh') + self.tf_iou_threshold = tf.constant(self.iou_threshold, name='iou_threshold') + self.tf_top_k = tf.constant(self.top_k, name='top_k') + self.tf_normalize_coords = tf.constant(self.normalize_coords, name='normalize_coords') + self.tf_img_height = tf.constant(self.img_height, dtype=tf.float32, name='img_height') + self.tf_img_width = tf.constant(self.img_width, dtype=tf.float32, name='img_width') + self.tf_nms_max_output_size = tf.constant(self.nms_max_output_size, name='nms_max_output_size') + + super(DecodeDetections, self).__init__(**kwargs) + + def build(self, input_shape): + self.input_spec = [InputSpec(shape=input_shape)] + super(DecodeDetections, self).build(input_shape) + + def call(self, y_pred, mask=None): + ''' + Returns: + 3D tensor of shape `(batch_size, top_k, 6)`. The second axis is zero-padded + to always yield `top_k` predictions per batch item. The last axis contains + the coordinates for each predicted box in the format + `[class_id, confidence, xmin, ymin, xmax, ymax]`. + ''' + + ##################################################################################### + # 1. Convert the box coordinates from predicted anchor box offsets to predicted + # absolute coordinates + ##################################################################################### + + # Convert anchor box offsets to image offsets. + cx = y_pred[..., -12] * y_pred[..., -4] * y_pred[..., -6] + y_pred[ + ..., -8] # cx = cx_pred * cx_variance * w_anchor + cx_anchor + cy = y_pred[..., -11] * y_pred[..., -3] * y_pred[..., -5] + y_pred[ + ..., -7] # cy = cy_pred * cy_variance * h_anchor + cy_anchor + w = tf.exp(y_pred[..., -10] * y_pred[..., -2]) * y_pred[..., -6] # w = exp(w_pred * variance_w) * w_anchor + h = tf.exp(y_pred[..., -9] * y_pred[..., -1]) * y_pred[..., -5] # h = exp(h_pred * variance_h) * h_anchor + + # Convert 'centroids' to 'corners'. 
+ xmin = cx - 0.5 * w + ymin = cy - 0.5 * h + xmax = cx + 0.5 * w + ymax = cy + 0.5 * h + + # If the model predicts box coordinates relative to the image dimensions and they are supposed + # to be converted back to absolute coordinates, do that. + def normalized_coords(): + xmin1 = tf.expand_dims(xmin * self.tf_img_width, axis=-1) + ymin1 = tf.expand_dims(ymin * self.tf_img_height, axis=-1) + xmax1 = tf.expand_dims(xmax * self.tf_img_width, axis=-1) + ymax1 = tf.expand_dims(ymax * self.tf_img_height, axis=-1) + return xmin1, ymin1, xmax1, ymax1 + + def non_normalized_coords(): + return tf.expand_dims(xmin, axis=-1), tf.expand_dims(ymin, axis=-1), tf.expand_dims(xmax, + axis=-1), tf.expand_dims( + ymax, axis=-1) + + xmin, ymin, xmax, ymax = tf.cond(self.tf_normalize_coords, normalized_coords, non_normalized_coords) + + # Concatenate the one-hot class confidences and the converted box coordinates to form the decoded predictions tensor. + y_pred = tf.concat(values=[y_pred[..., :-12], xmin, ymin, xmax, ymax], axis=-1) + + ##################################################################################### + # 2. Perform confidence thresholding, per-class non-maximum suppression, and + # top-k filtering. + ##################################################################################### + + batch_size = tf.shape(y_pred)[0] # Output dtype: tf.int32 + n_boxes = tf.shape(y_pred)[1] + n_classes = y_pred.shape[2] - 4 + class_indices = tf.range(1, n_classes) + + # Create a function that filters the predictions for the given batch item. Specifically, it performs: + # - confidence thresholding + # - non-maximum suppression (NMS) + # - top-k filtering + def filter_predictions(batch_item): + # Create a function that filters the predictions for one single class. 
+ def filter_single_class(index): + # From a tensor of shape (n_boxes, n_classes + 4 coordinates) extract + # a tensor of shape (n_boxes, 1 + 4 coordinates) that contains the + # confidnece values for just one class, determined by `index`. + confidences = tf.expand_dims(batch_item[..., index], axis=-1) + class_id = tf.fill(dims=tf.shape(confidences), value=tf.to_float(index)) + box_coordinates = batch_item[..., -4:] + + single_class = tf.concat([class_id, confidences, box_coordinates], axis=-1) + + # Apply confidence thresholding with respect to the class defined by `index`. + threshold_met = single_class[:, 1] > self.tf_confidence_thresh + single_class = tf.boolean_mask(tensor=single_class, + mask=threshold_met) + + # If any boxes made the threshold, perform NMS. + def perform_nms(): + scores = single_class[..., 1] + + # `tf.image.non_max_suppression()` needs the box coordinates in the format `(ymin, xmin, ymax, xmax)`. + xmin = tf.expand_dims(single_class[..., -4], axis=-1) + ymin = tf.expand_dims(single_class[..., -3], axis=-1) + xmax = tf.expand_dims(single_class[..., -2], axis=-1) + ymax = tf.expand_dims(single_class[..., -1], axis=-1) + boxes = tf.concat(values=[ymin, xmin, ymax, xmax], axis=-1) + + maxima_indices = tf.image.non_max_suppression(boxes=boxes, + scores=scores, + max_output_size=self.tf_nms_max_output_size, + iou_threshold=self.iou_threshold, + name='non_maximum_suppresion') + maxima = tf.gather(params=single_class, + indices=maxima_indices, + axis=0) + return maxima + + def no_confident_predictions(): + return tf.constant(value=0.0, shape=(1, 6)) + + single_class_nms = tf.cond(tf.equal(tf.size(single_class), 0), no_confident_predictions, perform_nms) + + # Make sure `single_class` is exactly `self.nms_max_output_size` elements long. 
+ padded_single_class = tf.pad(tensor=single_class_nms, + paddings=[[0, self.tf_nms_max_output_size - tf.shape(single_class_nms)[0]], + [0, 0]], + mode='CONSTANT', + constant_values=0.0) + + return padded_single_class + + # Iterate `filter_single_class()` over all class indices. + filtered_single_classes = tf.map_fn(fn=lambda i: filter_single_class(i), + elems=tf.range(1, n_classes), + dtype=tf.float32, + parallel_iterations=128, + back_prop=False, + swap_memory=False, + infer_shape=True, + name='loop_over_classes') + + # Concatenate the filtered results for all individual classes to one tensor. + filtered_predictions = tf.reshape(tensor=filtered_single_classes, shape=(-1, 6)) + + # Perform top-k filtering for this batch item or pad it in case there are + # fewer than `self.top_k` boxes left at this point. Either way, produce a + # tensor of length `self.top_k`. By the time we return the final results tensor + # for the whole batch, all batch items must have the same number of predicted + # boxes so that the tensor dimensions are homogenous. If fewer than `self.top_k` + # predictions are left after the filtering process above, we pad the missing + # predictions with zeros as dummy entries. + def top_k(): + return tf.gather(params=filtered_predictions, + indices=tf.nn.top_k(filtered_predictions[:, 1], k=self.tf_top_k, sorted=True).indices, + axis=0) + + def pad_and_top_k(): + padded_predictions = tf.pad(tensor=filtered_predictions, + paddings=[[0, self.tf_top_k - tf.shape(filtered_predictions)[0]], [0, 0]], + mode='CONSTANT', + constant_values=0.0) + return tf.gather(params=padded_predictions, + indices=tf.nn.top_k(padded_predictions[:, 1], k=self.tf_top_k, sorted=True).indices, + axis=0) + + top_k_boxes = tf.cond(tf.greater_equal(tf.shape(filtered_predictions)[0], self.tf_top_k), top_k, + pad_and_top_k) + + return top_k_boxes + + # Iterate `filter_predictions()` over all batch items. 
+ output_tensor = tf.map_fn(fn=lambda x: filter_predictions(x), + elems=y_pred, + dtype=None, + parallel_iterations=128, + back_prop=False, + swap_memory=False, + infer_shape=True, + name='loop_over_batch') + + return output_tensor + + def compute_output_shape(self, input_shape): + batch_size, n_boxes, last_axis = input_shape + return (batch_size, self.tf_top_k, 6) # Last axis: (class_ID, confidence, 4 box coordinates) + + def get_config(self): + config = { + 'confidence_thresh': self.confidence_thresh, + 'iou_threshold': self.iou_threshold, + 'top_k': self.top_k, + 'nms_max_output_size': self.nms_max_output_size, + 'coords': self.coords, + 'normalize_coords': self.normalize_coords, + 'img_height': self.img_height, + 'img_width': self.img_width, + } + base_config = super(DecodeDetections, self).get_config() + return dict(list(base_config.items()) + list(config.items())) diff --git a/DetectionMetrics/DetectionMetricsLib/python_modules/keras_utils/keras_layer_L2Normalization.py b/DetectionMetrics/DetectionMetricsLib/python_modules/keras_utils/keras_layer_L2Normalization.py new file mode 100644 index 00000000..465a6fc7 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/python_modules/keras_utils/keras_layer_L2Normalization.py @@ -0,0 +1,72 @@ +''' +A custom Keras layer to perform L2-normalization. + +Copyright (C) 2017 Pierluigi Ferrari + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. If not, see . 
+''' + +from __future__ import division +import numpy as np +import keras.backend as K +from keras.engine.topology import InputSpec +from keras.engine.topology import Layer + + +class L2Normalization(Layer): + ''' + Performs L2 normalization on the input tensor with a learnable scaling parameter + as described in the paper "Parsenet: Looking Wider to See Better" (see references) + and as used in the original SSD model. + + Arguments: + gamma_init (int): The initial scaling parameter. Defaults to 20 following the + SSD paper. + + Input shape: + 4D tensor of shape `(batch, channels, height, width)` if `dim_ordering = 'th'` + or `(batch, height, width, channels)` if `dim_ordering = 'tf'`. + + Returns: + The scaled tensor. Same shape as the input tensor. + + References: + http://cs.unc.edu/~wliu/papers/parsenet.pdf + ''' + + def __init__(self, gamma_init=20, **kwargs): + if K.image_dim_ordering() == 'tf': + self.axis = 3 + else: + self.axis = 1 + self.gamma_init = gamma_init + super(L2Normalization, self).__init__(**kwargs) + + def build(self, input_shape): + self.input_spec = [InputSpec(shape=input_shape)] + gamma = self.gamma_init * np.ones((input_shape[self.axis],)) + self.gamma = K.variable(gamma, name='{}_gamma'.format(self.name)) + self.trainable_weights = [self.gamma] + super(L2Normalization, self).build(input_shape) + + def call(self, x, mask=None): + output = K.l2_normalize(x, self.axis) + return output * self.gamma + + def get_config(self): + config = { + 'gamma_init': self.gamma_init + } + base_config = super(L2Normalization, self).get_config() + return dict(list(base_config.items()) + list(config.items())) diff --git a/DetectionMetrics/DetectionMetricsLib/python_modules/keras_utils/keras_ssd_loss.py b/DetectionMetrics/DetectionMetricsLib/python_modules/keras_utils/keras_ssd_loss.py new file mode 100644 index 00000000..5539ffb7 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/python_modules/keras_utils/keras_ssd_loss.py @@ -0,0 +1,222 @@ +''' +The 
Keras-compatible loss function for the SSD model. Currently supports TensorFlow only. + +Copyright (C) 2017 Pierluigi Ferrari + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. If not, see . +''' + +from __future__ import division +import tensorflow as tf + + +class SSDLoss: + ''' + The SSD loss, see https://arxiv.org/abs/1512.02325. + ''' + + def __init__(self, + neg_pos_ratio=3, + n_neg_min=0, + alpha=1.0): + ''' + Arguments: + neg_pos_ratio (int, optional): The maximum ratio of negative (i.e. background) + to positive ground truth boxes to include in the loss computation. + There are no actual background ground truth boxes of course, but `y_true` + contains anchor boxes labeled with the background class. Since + the number of background boxes in `y_true` will usually exceed + the number of positive boxes by far, it is necessary to balance + their influence on the loss. Defaults to 3 following the paper. + n_neg_min (int, optional): The minimum number of negative ground truth boxes to + enter the loss computation *per batch*. This argument can be used to make + sure that the model learns from a minimum number of negatives in batches + in which there are very few, or even none at all, positive ground truth + boxes. It defaults to 0 and if used, it should be set to a value that + stands in reasonable proportion to the batch size used for training. 
+ alpha (float, optional): A factor to weight the localization loss in the + computation of the total loss. Defaults to 1.0 following the paper. + ''' + self.neg_pos_ratio = neg_pos_ratio + self.n_neg_min = n_neg_min + self.alpha = alpha + + def smooth_L1_loss(self, y_true, y_pred): + ''' + Compute smooth L1 loss, see references. + + Arguments: + y_true (nD tensor): A TensorFlow tensor of any shape containing the ground truth data. + In this context, the expected tensor has shape `(batch_size, #boxes, 4)` and + contains the ground truth bounding box coordinates, where the last dimension + contains `(xmin, xmax, ymin, ymax)`. + y_pred (nD tensor): A TensorFlow tensor of identical structure to `y_true` containing + the predicted data, in this context the predicted bounding box coordinates. + + Returns: + The smooth L1 loss, a nD-1 Tensorflow tensor. In this context a 2D tensor + of shape (batch, n_boxes_total). + + References: + https://arxiv.org/abs/1504.08083 + ''' + absolute_loss = tf.abs(y_true - y_pred) + square_loss = 0.5 * (y_true - y_pred) ** 2 + l1_loss = tf.where(tf.less(absolute_loss, 1.0), square_loss, absolute_loss - 0.5) + return tf.reduce_sum(l1_loss, axis=-1) + + def log_loss(self, y_true, y_pred): + ''' + Compute the softmax log loss. + + Arguments: + y_true (nD tensor): A TensorFlow tensor of any shape containing the ground truth data. + In this context, the expected tensor has shape (batch_size, #boxes, #classes) + and contains the ground truth bounding box categories. + y_pred (nD tensor): A TensorFlow tensor of identical structure to `y_true` containing + the predicted data, in this context the predicted bounding box categories. + + Returns: + The softmax log loss, a nD-1 Tensorflow tensor. In this context a 2D tensor + of shape (batch, n_boxes_total). 
+ ''' + # Make sure that `y_pred` doesn't contain any zeros (which would break the log function) + y_pred = tf.maximum(y_pred, 1e-15) + # Compute the log loss + log_loss = -tf.reduce_sum(y_true * tf.log(y_pred), axis=-1) + return log_loss + + def compute_loss(self, y_true, y_pred): + ''' + Compute the loss of the SSD model prediction against the ground truth. + + Arguments: + y_true (array): A Numpy array of shape `(batch_size, #boxes, #classes + 12)`, + where `#boxes` is the total number of boxes that the model predicts + per image. Be careful to make sure that the index of each given + box in `y_true` is the same as the index for the corresponding + box in `y_pred`. The last axis must have length `#classes + 12` and contain + `[classes one-hot encoded, 4 ground truth box coordinate offsets, 8 arbitrary entries]` + in this order, including the background class. The last eight entries of the + last axis are not used by this function and therefore their contents are + irrelevant, they only exist so that `y_true` has the same shape as `y_pred`, + where the last four entries of the last axis contain the anchor box + coordinates, which are needed during inference. Important: Boxes that + you want the cost function to ignore need to have a one-hot + class vector of all zeros. + y_pred (Keras tensor): The model prediction. The shape is identical + to that of `y_true`, i.e. `(batch_size, #boxes, #classes + 12)`. + The last axis must contain entries in the format + `[classes one-hot encoded, 4 predicted box coordinate offsets, 8 arbitrary entries]`. + + Returns: + A scalar, the total multitask loss for classification and localization. 
+ ''' + self.neg_pos_ratio = tf.constant(self.neg_pos_ratio) + self.n_neg_min = tf.constant(self.n_neg_min) + self.alpha = tf.constant(self.alpha) + + batch_size = tf.shape(y_pred)[0] # Output dtype: tf.int32 + n_boxes = tf.shape(y_pred)[ + 1] # Output dtype: tf.int32, note that `n_boxes` in this context denotes the total number of boxes per image, not the number of boxes per cell. + + # 1: Compute the losses for class and box predictions for every box. + + classification_loss = tf.to_float( + self.log_loss(y_true[:, :, :-12], y_pred[:, :, :-12])) # Output shape: (batch_size, n_boxes) + localization_loss = tf.to_float( + self.smooth_L1_loss(y_true[:, :, -12:-8], y_pred[:, :, -12:-8])) # Output shape: (batch_size, n_boxes) + + # 2: Compute the classification losses for the positive and negative targets. + + # Create masks for the positive and negative ground truth classes. + negatives = y_true[:, :, 0] # Tensor of shape (batch_size, n_boxes) + positives = tf.to_float(tf.reduce_max(y_true[:, :, 1:-12], axis=-1)) # Tensor of shape (batch_size, n_boxes) + + # Count the number of positive boxes (classes 1 to n) in y_true across the whole batch. + n_positive = tf.reduce_sum(positives) + + # Now mask all negative boxes and sum up the losses for the positive boxes PER batch item + # (Keras loss functions must output one scalar loss value PER batch item, rather than just + # one scalar for the entire batch, that's why we're not summing across all axes). + pos_class_loss = tf.reduce_sum(classification_loss * positives, axis=-1) # Tensor of shape (batch_size,) + + # Compute the classification loss for the negative default boxes (if there are any). + + # First, compute the classification loss for all negative boxes. 
+ neg_class_loss_all = classification_loss * negatives # Tensor of shape (batch_size, n_boxes) + n_neg_losses = tf.count_nonzero(neg_class_loss_all, + dtype=tf.int32) # The number of non-zero loss entries in `neg_class_loss_all` + # What's the point of `n_neg_losses`? For the next step, which will be to compute which negative boxes enter the classification + # loss, we don't just want to know how many negative ground truth boxes there are, but for how many of those there actually is + # a positive (i.e. non-zero) loss. This is necessary because `tf.nn.top-k()` in the function below will pick the top k boxes with + # the highest losses no matter what, even if it receives a vector where all losses are zero. In the unlikely event that all negative + # classification losses ARE actually zero though, this behavior might lead to `tf.nn.top-k()` returning the indices of positive + # boxes, leading to an incorrect negative classification loss computation, and hence an incorrect overall loss computation. + # We therefore need to make sure that `n_negative_keep`, which assumes the role of the `k` argument in `tf.nn.top-k()`, + # is at most the number of negative boxes for which there is a positive classification loss. + + # Compute the number of negative examples we want to account for in the loss. + # We'll keep at most `self.neg_pos_ratio` times the number of positives in `y_true`, but at least `self.n_neg_min` (unless `n_neg_loses` is smaller). + n_negative_keep = tf.minimum(tf.maximum(self.neg_pos_ratio * tf.to_int32(n_positive), self.n_neg_min), + n_neg_losses) + + # In the unlikely case when either (1) there are no negative ground truth boxes at all + # or (2) the classification loss for all negative boxes is zero, return zero as the `neg_class_loss`. + def f1(): + return tf.zeros([batch_size]) + + # Otherwise compute the negative loss. 
+ def f2(): + # Now we'll identify the top-k (where k == `n_negative_keep`) boxes with the highest confidence loss that + # belong to the background class in the ground truth data. Note that this doesn't necessarily mean that the model + # predicted the wrong class for those boxes, it just means that the loss for those boxes is the highest. + + # To do this, we reshape `neg_class_loss_all` to 1D... + neg_class_loss_all_1D = tf.reshape(neg_class_loss_all, [-1]) # Tensor of shape (batch_size * n_boxes,) + # ...and then we get the indices for the `n_negative_keep` boxes with the highest loss out of those... + values, indices = tf.nn.top_k(neg_class_loss_all_1D, + k=n_negative_keep, + sorted=False) # We don't need them sorted. + # ...and with these indices we'll create a mask... + negatives_keep = tf.scatter_nd(indices=tf.expand_dims(indices, axis=1), + updates=tf.ones_like(indices, dtype=tf.int32), + shape=tf.shape( + neg_class_loss_all_1D)) # Tensor of shape (batch_size * n_boxes,) + negatives_keep = tf.to_float( + tf.reshape(negatives_keep, [batch_size, n_boxes])) # Tensor of shape (batch_size, n_boxes) + # ...and use it to keep only those boxes and mask all other classification losses + neg_class_loss = tf.reduce_sum(classification_loss * negatives_keep, + axis=-1) # Tensor of shape (batch_size,) + return neg_class_loss + + neg_class_loss = tf.cond(tf.equal(n_neg_losses, tf.constant(0)), f1, f2) + + class_loss = pos_class_loss + neg_class_loss # Tensor of shape (batch_size,) + + # 3: Compute the localization loss for the positive targets. + # We don't compute a localization loss for negative predicted boxes (obviously: there are no ground truth boxes they would correspond to). + + loc_loss = tf.reduce_sum(localization_loss * positives, axis=-1) # Tensor of shape (batch_size,) + + # 4: Compute the total loss. 
+ + total_loss = (class_loss + self.alpha * loc_loss) / tf.maximum(1.0, n_positive) # In case `n_positive == 0` + # Keras has the annoying habit of dividing the loss by the batch size, which sucks in our case + # because the relevant criterion to average our loss over is the number of positive boxes in the batch + # (by which we're dividing in the line above), not the batch size. So in order to revert Keras' averaging + # over the batch size, we'll have to multiply by it. + total_loss = total_loss * tf.to_float(batch_size) + + return total_loss diff --git a/DetectionMetrics/DetectionMetricsLib/python_modules/pytorch_detect.py b/DetectionMetrics/DetectionMetricsLib/python_modules/pytorch_detect.py new file mode 100644 index 00000000..bf2f218a --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/python_modules/pytorch_detect.py @@ -0,0 +1,95 @@ +import torch +from torchvision import transforms +from torch.autograd import Variable +import numpy as np +import time +import yaml +import importlib +import sys +import os + + +class PyTorchDetector: + def __init__(self, patch_to_ckpt, configuration_file): + with open(configuration_file, 'r') as stream: + data_loaded = yaml.safe_load(stream) + model_path = data_loaded['modelPath'] + model_name = data_loaded['modelName'] + import_name = data_loaded['importName'] + model_parameters = data_loaded['modelParameters'] + try: + sys.path.append(os.path.dirname(model_path)) + except: + print('Model path undefined') + sys.path.append(os.path.dirname(model_path)) + models = importlib.import_module(import_name) + # Model parameters are converted to actual Python variables + variables = model_parameters.split(',') + for i, var in enumerate(variables): + name = 'variable' + str(i) + try: + value = int(var) + except Exception as e: + if (str(var) != 'True' and str(var) != 'False'): + value = str(var) + else: + value = bool(var) + setattr(self, name, value) + # The number of parameters modifies the way the function gets called + 
self.model = eval(self.get_model_function(len(variables))) + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + self.model = self.model.to(device) + self.model.eval() + + def get_model_function(self, x): + return { + 1: 'getattr(models, model_name)(self.variable0)', + 2: 'getattr(models, model_name)(self.variable0, self.variable1)', + 3: 'getattr(models, model_name)(self.variable0, self.variable1, self.variable2)', + 4: 'getattr(models, model_name)(self.variable0, self.variable1, self.variable2, self.variable3)', + 5: 'getattr(models, model_name)(self.variable0, self.variable1, self.variable2, self.variable3, self.variable4)', + 6: 'getattr(models, model_name)(self.variable0, self.variable1, self.variable2, self.variable3, self.variable4, self.variable5)', + 7: 'getattr(models, model_name)(self.variable0, self.variable1, self.variable2, self.variable3, self.variable4, self.variable5, self.variable6)', + 8: 'getattr(models, model_name)(self.variable0, self.variable1, self.variable2, self.variable3, self.variable4, self.variable5, self.variable6, self.variable7)', + }[x] + + def run_inference_for_single_image(self, image): + self.model.eval() + print('Starting inference') + try: + # Try with image tensor as list + start_time = time.time() + detections = self.model([image]) + print("Inference Time: " + str(time.time() - start_time) + " seconds") + except Exception as e: + # Try with image tensor alone + start_time = time.time() + image = image.unsqueeze(1) + detections = self.model(image) + print("Inference Time: " + str(time.time() - start_time) + " seconds") + output_dict = {} + output_dict['num_detections'] = len(detections[0]['labels']) + output_dict['detection_classes'] = detections[0]['labels'].cpu().numpy() + output_dict['detection_boxes'] = detections[0]['boxes'].detach().cpu().numpy() + output_dict['detection_scores'] = detections[0]['scores'].detach().cpu().numpy() + return output_dict + + def detect(self, img, threshold): + Tensor = 
torch.cuda.FloatTensor + img_transforms = transforms.Compose([transforms.ToTensor(), ]) + image_tensor = img_transforms(img) + input_img = Variable(image_tensor.type(Tensor)) + output_dict = self.run_inference_for_single_image(input_img) + + new_dict = {} + new_dict['detection_scores'] = output_dict['detection_scores'][output_dict['detection_scores'] >= threshold] + new_dict['detection_boxes'] = output_dict['detection_boxes'][0:len(new_dict['detection_scores']), :] + new_dict['detection_boxes'] = [ + [i[0] / img.shape[1], i[1] / img.shape[0], i[2] / img.shape[1], i[3] / img.shape[0]] for i in + list(new_dict['detection_boxes'])] + new_dict['detection_boxes'] = np.float32(new_dict['detection_boxes']) + new_dict['detection_classes'] = output_dict['detection_classes'][0:len(new_dict['detection_scores'])] + new_dict['detection_classes'] = np.int8(new_dict['detection_classes']) + new_dict['num_detections'] = len(new_dict['detection_scores']) + + return new_dict diff --git a/DetectionMetrics/DetectionMetricsLib/python_modules/tensorflow_detect.py b/DetectionMetrics/DetectionMetricsLib/python_modules/tensorflow_detect.py new file mode 100644 index 00000000..7857508c --- /dev/null +++ b/DetectionMetrics/DetectionMetricsLib/python_modules/tensorflow_detect.py @@ -0,0 +1,102 @@ +import numpy as np +import sys +from distutils.version import StrictVersion + +if not hasattr(sys, 'argv'): + sys.argv = [''] + +import tensorflow as tf +import time + +np.set_printoptions(threshold=sys.maxsize) + +if StrictVersion(tf.__version__) < StrictVersion('1.4.0'): + raise ImportError('Please upgrade your TensorFlow installation to v1.4.* or later!') + + +class TensorFlowDetector: + + def __init__(self, path_to_ckpt): + detection_graph = tf.Graph() + with detection_graph.as_default(): + od_graph_def = tf.compat.v1.GraphDef() + with tf.compat.v2.io.gfile.GFile(path_to_ckpt, 'rb') as fid: + serialized_graph = fid.read() + od_graph_def.ParseFromString(serialized_graph) + 
tf.import_graph_def(od_graph_def, name='') + + ops = tf.compat.v1.get_default_graph().get_operations() + all_tensor_names = {output.name for op in ops for output in op.outputs} + self.tensor_dict = {} + for key in [ + 'num_detections', 'detection_boxes', 'detection_scores', + 'detection_classes', 'detection_masks' + ]: + tensor_name = key + ':0' + if tensor_name in all_tensor_names: + self.tensor_dict[key] = tf.compat.v1.get_default_graph().get_tensor_by_name( + tensor_name) + + self.image_tensor = tf.compat.v1.get_default_graph().get_tensor_by_name('image_tensor:0') + + self.sess = tf.compat.v1.Session(graph=detection_graph) + print("Initializing") + dummy_tensor = np.zeros((1, 1, 1, 3), dtype=np.int32) + self.sess.run(self.tensor_dict, feed_dict={self.image_tensor: dummy_tensor}) + + def run_inference_for_single_image(self, image): + + image.setflags(write=1) + image_expanded = np.expand_dims(image, axis=0) + + output_dict = self.sess.run( + self.tensor_dict, + feed_dict={self.image_tensor: image_expanded}) + + output_dict['num_detections'] = int(output_dict['num_detections'][0]) + output_dict['detection_classes'] = output_dict[ + 'detection_classes'][0].astype(np.uint8) + output_dict['detection_boxes'] = output_dict['detection_boxes'][0] + output_dict['detection_scores'] = output_dict['detection_scores'][0] + if 'detection_masks' in output_dict: + output_dict['detection_masks'] = output_dict['detection_masks'][0] + + return output_dict + + def detect(self, img, threshold): + + print("Starting inference") + start_time = time.time() + + # the array based representation of the image will be used later in order to prepare the + # result image with boxes and labels on it. 
+ # image_np = load_image_into_numpy_array(image) + image_passed = img + # print image_np.shape + # print image_passed.shape + + # detection_graph = load_graph(model_path) + + # Expand dimensions since the model expects images to have shape: [1, None, None, 3] + # image_np_expanded = np.expand_dims(image_np, axis=0) + # Actual detection. + + start_time = time.time() + output_dict = self.run_inference_for_single_image(image_passed) + print("Inference Time: " + str(time.time() - start_time) + " seconds") + + new_dict = {} + + new_dict['detection_scores'] = output_dict['detection_scores'][output_dict['detection_scores'] >= threshold] + new_dict['detection_boxes'] = output_dict['detection_boxes'][0:len(new_dict['detection_scores']), :] + new_dict['detection_classes'] = output_dict['detection_classes'][0:len(new_dict['detection_scores'])] + new_dict['num_detections'] = len(new_dict['detection_scores']) + if 'detection_masks' in output_dict: + new_dict['detection_masks'] = output_dict['detection_masks'][0:len(new_dict['detection_scores']), :] + + # mask = new_dict['detection_masks'][0] + # mask = mask > 0.5 + # cv2.imshow("my mask", mask) + # cv2.waitKey(0) + + return new_dict diff --git a/DetectionMetrics/DetectionMetricsROS/CMakeLists.txt b/DetectionMetrics/DetectionMetricsROS/CMakeLists.txt new file mode 100644 index 00000000..2ac06e0d --- /dev/null +++ b/DetectionMetrics/DetectionMetricsROS/CMakeLists.txt @@ -0,0 +1,111 @@ +set(CMAKE_AUTOMOC ON) +set(CMAKE_AUTOUIC ON) +set(CMAKE_INCLUDE_CURRENT_DIR ON) + + +cmake_minimum_required(VERSION 2.8.3) +project(DetectionMetricsROS) + +find_package(catkin REQUIRED COMPONENTS + roscpp + rospy + std_msgs + OpenCV + sensor_msgs + cv_bridge + image_transport + sensor_msgs + message_generation +) + +add_message_files( + FILES + object.msg + objects.msg +) + +generate_messages( + DEPENDENCIES + std_msgs +) + +catkin_package( +# INCLUDE_DIRS include +# LIBRARIES DetectionMetricsROS + CATKIN_DEPENDS message_runtime +# DEPENDS 
system_lib +) + + +# include_directories( +# # include +# ${catkin_INCLUDE_DIRS} +# # ${OpenCV_INCLUDE_DIRS} +# ) + + +include_directories( + ${OpenCV_INCLUDE_DIRS} + ${config_INCLUDE_DIRS} + ${comm_INCLUDE_DIRS} + ${utils_INCLUDE_DIRS} + ${ros_INCLUDE_DIRS} + ${PYTHON_INCLUDE_DIRS} + ${INTERFACES_CPP_DIR} + ${jderobottypes_INCLUDE_DIRS} + ${GLOG_INCLUDE_DIRS} + ${Boost_INCLUDE_DIR} + ${QT_INCLUDE_DIRS} + ${CMAKE_CURRENT_LIST_DIR} + ${CMAKE_CURRENT_LIST_DIR}/include/detectionmetrics + ${DetectionMetrics_INCLUDE_DIR} + ${catkin_INCLUDE_DIRS} +) + +add_executable(code src/code.cpp) +target_link_libraries(code + DetectionMetrics + ${OpenCV_LIBRARIES} + ${JderobotInterfaces_LIBRARIES} + ${EXTRA_LIBS} + ${Boost_LIBRARIES} + ${QT_LIBRARIES} + ${PYTHON_LIBRARIES} + ${GLOG_LIBRARIES} + ${PYTHON_LIBRARIES} + ssl + crypto +) + +add_executable(detection_node src/detection_node.cpp) +target_link_libraries(detection_node + DetectionMetrics + ${OpenCV_LIBRARIES} + ${JderobotInterfaces_LIBRARIES} + ${EXTRA_LIBS} + ${Boost_LIBRARIES} + ${QT_LIBRARIES} + ${PYTHON_LIBRARIES} + ${GLOG_LIBRARIES} + ${PYTHON_LIBRARIES} + ssl + crypto +) + +add_executable(test src/test.cpp include/detectionmetrics/DeployerNode.hpp src/DeployerNode.cpp) +add_dependencies(test ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) + +target_link_libraries(test + DetectionMetrics + ${OpenCV_LIBRARIES} + ${JderobotInterfaces_LIBRARIES} + ${EXTRA_LIBS} + ${Boost_LIBRARIES} + ${QT_LIBRARIES} + ${PYTHON_LIBRARIES} + ${GLOG_LIBRARIES} + ${PYTHON_LIBRARIES} + ${catkin_LIBRARIES} + ssl + crypto +) diff --git a/DetectionMetrics/DetectionMetricsROS/include/DetectionMetricsROS/DeployerNode.hpp b/DetectionMetrics/DetectionMetricsROS/include/DetectionMetricsROS/DeployerNode.hpp new file mode 100644 index 00000000..1298d28a --- /dev/null +++ b/DetectionMetrics/DetectionMetricsROS/include/DetectionMetricsROS/DeployerNode.hpp @@ -0,0 +1,40 @@ +#ifndef DEPLOYER_NODE +#define DEPLOYER_NODE +#include 
"std_msgs/String.h" +#include "DetectionMetricsROS/objects.h" +#include "DetectionMetricsROS/object.h" + +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +class DeployerNode{ +public: + DeployerNode(int argc, char *argv[]); + ~DeployerNode(); + static void ros_to_cv(const sensor_msgs::ImageConstPtr &ros_img , DeployerNode *node); + +private: + cv::Mat cv_frame; + ros::NodeHandle *node; + ros::Subscriber sub; + ros::Publisher pub; + GenericInferencer *inferencer; + MassInferencer *massInferencer; + std::string topic; + DetectionMetricsROS::object detection; + DetectionMetricsROS::objects detections; +}; + +#endif diff --git a/DetectionMetrics/DetectionMetricsROS/msg/object.msg b/DetectionMetrics/DetectionMetricsROS/msg/object.msg new file mode 100644 index 00000000..c8055b3a --- /dev/null +++ b/DetectionMetrics/DetectionMetricsROS/msg/object.msg @@ -0,0 +1,6 @@ +string className +float64 confidence +int64 x +int64 y +int64 height +int64 width diff --git a/DetectionMetrics/DetectionMetricsROS/msg/objects.msg b/DetectionMetrics/DetectionMetricsROS/msg/objects.msg new file mode 100644 index 00000000..db34fa4a --- /dev/null +++ b/DetectionMetrics/DetectionMetricsROS/msg/objects.msg @@ -0,0 +1 @@ +object[] objects diff --git a/DetectionMetrics/DetectionMetricsROS/package.xml b/DetectionMetrics/DetectionMetricsROS/package.xml new file mode 100644 index 00000000..51b8647d --- /dev/null +++ b/DetectionMetrics/DetectionMetricsROS/package.xml @@ -0,0 +1,69 @@ + + + DetectionMetricsROS + 0.0.0 + The DetectionMetricsROS package + + + + + issiki + + + + + + TODO + + + + + + + + + + + + + + + + + + + + + + + + message_generation + + + + + + + message_runtime + + + + + catkin + roscpp + rospy + std_msgs + roscpp + rospy + std_msgs + roscpp + rospy + std_msgs + + + + + + + + diff --git a/DetectionMetrics/DetectionMetricsROS/src/DeployerNode.cpp b/DetectionMetrics/DetectionMetricsROS/src/DeployerNode.cpp 
new file mode 100644 index 00000000..38664cd4 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsROS/src/DeployerNode.cpp @@ -0,0 +1,79 @@ +#include "DeployerNode.hpp" +#include + +DeployerNode::DeployerNode(int argc, char *argv[]){ + std::string path; + ros::init(argc, argv, "deployer_node"); + ros::NodeHandle nh("~"); + nh.getParam("topic", this->topic); + nh.getParam("configfile", path); + + ROS_INFO("Will subscribe to ROS TOPIC : %s", this->topic.c_str()); + + YAML::Node config = YAML::LoadFile(path); + const std::string &netConfigList = (const std::string) config["netConfigList"].as(), + &weights = (const std::string) config["weights"].as(), + &inferencerImp = (const std::string) config["inferencerImp"].as(), + &inferencerNames = (const std::string) config["inferencerNames"].as(), + &outputFolder = ""; + // &outputFolder = (const std::string) config["outputFolder"].as(); + // std::map *inferencerParamsMap = config["outputFolder"].as(); + std::map* inferencerParamsMap = new std::map(); + double* confidence_threshold = new double(0.2); + this->inferencer = new GenericInferencer(netConfigList,weights,inferencerNames,inferencerImp, inferencerParamsMap); + this->massInferencer = new MassInferencer(inferencer->getInferencer(),outputFolder, confidence_threshold, true); + this->node = new ros::NodeHandle(); + this->sub = this->node->subscribe(this->topic, 10,boost::bind(&DeployerNode::ros_to_cv, _1, this)); + ros::NodeHandle *pub = new ros::NodeHandle(); + this->pub = pub->advertise ("my_topic", 10); + ros::spin(); +} + +void DeployerNode::ros_to_cv(const sensor_msgs::ImageConstPtr& img ,DeployerNode *node){ + cv_bridge::CvImagePtr cv_ptr; + try{ + cv_ptr = cv_bridge::toCvCopy(img, sensor_msgs::image_encodings::BGR8); + } + catch (cv_bridge::Exception& e){ + ROS_ERROR("cv_bridge exception: %s", e.what()); + return; + } + cv::waitKey(3); + node->massInferencer->process(false,cv_ptr->image); + // RectRegionsPtr data = node->massInferencer->detections(); + // for (auto 
it = data->getRegions().begin(); it != data->getRegions().end(); it++){ + // node->detection.className = it->classID ; + // node->detection.confidence = it->confidence_score ; + // node->detection.x = it->region.x; + // node->detection.y = it->region.y; + // node->detection.height = it->region.height; + // node->detection.width = it->region.width; + // node->detections.objects.push_back(node->detection); + // } + node->detections.objects.clear(); + Sample CurrFrame = node->massInferencer->getSample(); + CurrFrame.print(); + std::vector regionsToPrint = CurrFrame.getRectRegions()->getRegions(); + for (auto it = regionsToPrint.begin(); it != regionsToPrint.end(); it++) { + node->detection.className = it->classID ;//<< '\n';node->detection.confidence = it->confidence_score ; + node->detection.confidence = it->confidence_score ; + node->detection.x = it->region.x; + node->detection.y = it->region.y; + node->detection.height = it->region.height; + node->detection.width = it->region.width; + node->detections.objects.push_back(node->detection); + } + node->pub.publish(node->detections); + // ros::spinOnce(); + // Sample frame = node->massInferencer->getSample(); + // frame.print(); +} + + + + +DeployerNode::~DeployerNode(){ + delete inferencer; + delete massInferencer; + delete node; +} diff --git a/DetectionMetrics/DetectionMetricsROS/src/code.cpp b/DetectionMetrics/DetectionMetricsROS/src/code.cpp new file mode 100644 index 00000000..f0a6738f --- /dev/null +++ b/DetectionMetrics/DetectionMetricsROS/src/code.cpp @@ -0,0 +1,8 @@ +#include +#include +#include "opencv2/highgui/highgui.hpp" +#include +int main(){ + + return 0; +} diff --git a/DetectionMetrics/DetectionMetricsROS/src/detection_node.cpp b/DetectionMetrics/DetectionMetricsROS/src/detection_node.cpp new file mode 100644 index 00000000..68a69290 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsROS/src/detection_node.cpp @@ -0,0 +1,77 @@ +#include "std_msgs/String.h" + +#include +#include +#include +#include 
+#include +#include + +#include +#include +#include +#include + + +GenericInferencer *inferencer; +MassInferencer *massInferencer; + + +void chatterCallback(const sensor_msgs::ImageConstPtr& img){ + + cv_bridge::CvImagePtr cv_ptr; + try{ + cv_ptr = cv_bridge::toCvCopy(img, sensor_msgs::image_encodings::BGR8); + } + catch (cv_bridge::Exception& e){ + ROS_ERROR("cv_bridge exception: %s", e.what()); + return; + } + + cv::waitKey(3); + + massInferencer->process(false,cv_ptr->image); + +} + +void chatterCallback(const cv::Mat& img){ + massInferencer->process(false,img); +} + + +int main(int argc, char *argv[]){ + std::string path,topic; + ros::init(argc, argv, "deployer_node"); + ros::NodeHandle nh("~"); + nh.getParam("topic", topic); + nh.getParam("configfile", path); + + ROS_INFO("Will subscribe to ROS TOPIC : %s", topic.c_str()); + ROS_INFO("Will subscribe to ROS TOPIC : %s", path.c_str()); + + YAML::Node config = YAML::LoadFile(path); + std::cout << config["netConfigList"].as() << std::endl; + const std::string &netConfigList = (const std::string) config["netConfigList"].as(), + &inferencerNames = (const std::string) config["inferencerNames"].as(), + &weights = (const std::string) config["weights"].as(), + &inferencerImp = (const std::string) config["inferencerImp"].as(), + &outputFolder = ""; + + // std::string temp = config["netConfigList"].as(); + // if(config["outputFolder"]) + // temp = config["outputFolder"].as() ; + // else + // temp = ""; + // const std::string &outputFolder = (const std::string )temp; + // std::map *inferencerParamsMap = config["outputFolder"].as(); + std::map* inferencerParamsMap = new std::map(); + double* confidence_threshold = new double(0.2); + inferencer = new GenericInferencer(netConfigList,weights,inferencerNames,inferencerImp, inferencerParamsMap); + massInferencer = new MassInferencer(inferencer->getInferencer(),outputFolder, confidence_threshold, true); + ros::NodeHandle n; + ros::Subscriber sub = n.subscribe(topic, 10, 
chatterCallback); + + ros::spin(); + + return 0; +} diff --git a/DetectionMetrics/DetectionMetricsROS/src/image_converter.hpp b/DetectionMetrics/DetectionMetricsROS/src/image_converter.hpp new file mode 100644 index 00000000..3377be06 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsROS/src/image_converter.hpp @@ -0,0 +1,49 @@ +#include +#include +#include +#include +#include +#include + +static const std::string OPENCV_WINDOW = "Image window"; + +class ImageConverter{ + ros::NodeHandle nh_; + image_transport::ImageTransport it_; + image_transport::Subscriber image_sub_; + image_transport::Publisher image_pub_; + +public: + ImageConverter():it_(nh_){ + // Subscrive to input video feed and publish output video feed + image_sub_ = it_.subscribe("/camera/image_raw", 1, &ImageConverter::imageCb, this); + image_pub_ = it_.advertise("/image_converter/output_video", 1); + cv::namedWindow(OPENCV_WINDOW); + } + + ~ImageConverter(){ + cv::destroyWindow(OPENCV_WINDOW); + } + + void imageCb(const sensor_msgs::ImageConstPtr& msg){ + cv_bridge::CvImagePtr cv_ptr; + try{ + cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8); + } + catch (cv_bridge::Exception& e){ + ROS_ERROR("cv_bridge exception: %s", e.what()); + return; + } + + // Draw an example circle on the video stream + if (cv_ptr->image.rows > 60 && cv_ptr->image.cols > 60) + cv::circle(cv_ptr->image, cv::Point(50, 50), 10, CV_RGB(255,0,0)); + + // Update GUI Window + cv::imshow(OPENCV_WINDOW, cv_ptr->image); + cv::waitKey(3); + + // Output modified video stream + image_pub_.publish(cv_ptr->toImageMsg()); + } +}; diff --git a/DetectionMetrics/DetectionMetricsROS/src/test.cpp b/DetectionMetrics/DetectionMetricsROS/src/test.cpp new file mode 100644 index 00000000..08cbf5a8 --- /dev/null +++ b/DetectionMetrics/DetectionMetricsROS/src/test.cpp @@ -0,0 +1,5 @@ +#include "DeployerNode.hpp" +int main(int argc, char *argv[]){ + DeployerNode node(argc,argv); + return 0; +} diff --git 
a/DetectionMetrics/Dockerfile/Dockerfile b/DetectionMetrics/Dockerfile/Dockerfile new file mode 100644 index 00000000..32b9543f --- /dev/null +++ b/DetectionMetrics/Dockerfile/Dockerfile @@ -0,0 +1,48 @@ +FROM ubuntu:18.04 + +RUN apt-get update && apt-get upgrade -y + +RUN apt install -y build-essential git cmake rapidjson-dev libssl-dev python-dev python-numpy \ + libboost-dev libboost-filesystem-dev libboost-system-dev libboost-program-options-dev + +RUN apt install -y libgoogle-glog-dev libyaml-cpp-dev qt5-default libqt5svg5-dev wget unzip vim python-pip + +RUN pip install --upgrade pip && \ + rm -rf /var/lib/apt/lists/* + +RUN pip install tensorflow \ + && pip install keras \ + && pip install torch + +RUN cd ~ \ + && wget -O opencv.zip https://github.com/opencv/opencv/archive/4.2.0.zip \ + && wget -O opencv_contrib.zip https://github.com/opencv/opencv_contrib/archive/4.2.0.zip \ + && unzip opencv.zip \ + && unzip opencv_contrib.zip \ + && mv opencv-4.2.0 opencv \ + && mv opencv_contrib-4.2.0 opencv_contrib \ + && cd ~/opencv \ + && mkdir build \ + && cd build \ + && cmake -D WITH_QT=ON -D WITH_GTK=OFF -D ENABLE_FAST_MATH=1 -D WITH_CUBLAS=1 .. \ + && make -j4 \ + && make install + +RUN cd ~ \ + && git clone https://github.com/JdeRobot/DetectionMetrics \ + && cd DetectionMetrics/DetectionMetrics \ + && mkdir build && cd build \ + && cmake .. 
\ + && make -j4 + +RUN mkdir /root/volume \ + && mkdir /root/volume/datasets \ + && mkdir /root/volume/datasets/cfg \ + && mkdir /root/volume/datasets/eval \ + && mkdir /root/volume/datasets/names \ + && mkdir /root/volume/datasets/weights + +COPY ./appConfig.yml /root/DetectionMetrics/DetectionMetrics/build/DatasetEvaluationApp/appConfig.yml +WORKDIR /root/DetectionMetrics/DetectionMetrics/build/DatasetEvaluationApp + +CMD ["./DatasetEvaluationApp"] diff --git a/DetectionMetrics/Dockerfile/appConfig.yml b/DetectionMetrics/Dockerfile/appConfig.yml new file mode 100644 index 00000000..90adc791 --- /dev/null +++ b/DetectionMetrics/Dockerfile/appConfig.yml @@ -0,0 +1,11 @@ +datasetPath: /root/volume/datasets/ + +evaluationsPath: /root/volume/datasets/eval + +weightsPath: /root/volume/datasets/weights + +netCfgPath: /root/volume/datasets/cfg + +namesPath: /root/volume/datasets/names + +inferencesPath: /root/volume/datasets/ diff --git a/DetectionMetrics/SampleGenerationApp/CMakeLists.txt b/DetectionMetrics/SampleGenerationApp/CMakeLists.txt new file mode 100644 index 00000000..915a52ef --- /dev/null +++ b/DetectionMetrics/SampleGenerationApp/CMakeLists.txt @@ -0,0 +1,28 @@ +include_directories( + ${JdeRobot_INCLUDE_DIRS} + ${config_INCLUDE_DIRS} + ${OpenCV_INCLUDE_DIRS} + ${config_INCLUDE_DIRS} + ${Boost_INCLUDE_DIR} + ${CMAKE_CURRENT_LIST_DIR} + ${QT_INCLUDE_DIRS} + ${DetectionMetrics_INCLUDE_DIR} + ${depthLib_INCLUDE_DIRS} +) + +add_executable (SampleGenerationApp generator.cpp) + +TARGET_LINK_LIBRARIES(SampleGenerationApp + DetectionMetrics + ${EXTRA_LIBS} + ${config_LIBRARIES} + ${JdeRobot_LIBRARIES} + ${OpenCV_LIBRARIES} + ${Boost_LIBRARIES} + ${GLOG_LIBRARIES} + ${QT_LIBRARIES} + ${Ice_LIBRARIES} + ${PYTHON_LIBRARIES} + ssl + crypto + ) diff --git a/DetectionMetrics/SampleGenerationApp/generator.cpp b/DetectionMetrics/SampleGenerationApp/generator.cpp new file mode 100644 index 00000000..c9849c79 --- /dev/null +++ 
b/DetectionMetrics/SampleGenerationApp/generator.cpp @@ -0,0 +1,211 @@ + + +#include +#include +#include +#include + +#include +#include "DatasetConverters/liveReaders/RecorderReader.h" +#include "GenerationUtils/DepthForegroundSegmentator.h" +#include "GenerationUtils/DetectionsValidator.h" +#include +#include +#include +#include +#include + +class MyApp:public SampleGenerationApp{ +public: + MyApp(int argc, char* argv[]):SampleGenerationApp(argc,argv){ + this->requiredArguments.emplace_back("outputPath"); + this->requiredArguments.emplace_back("reader"); + this->requiredArguments.emplace_back("detector"); + + + }; + virtual void operator()(){ + YAML::Node outputPath=this->config.getNode("outputPath"); + YAML::Node reader=this->config.getNode("reader"); + YAML::Node detectorKey = this->config.getNode("detector"); + YAML::Node colorImagesPathKey; + YAML::Node depthImagesPathKey; + YAML::Node dataPath; + + + + if (reader.as() == "recorder"){ + colorImagesPathKey = this->config.getNode("colorImagesPath"); + depthImagesPathKey = this->config.getNode("depthImagesPath"); + } + else{ + dataPath = this->config.getNode("dataPath"); + } + + + + + //todo include in upper class + std::vector detectorOptions; + detectorOptions.push_back("pentalo-bg"); + detectorOptions.push_back("deepLearning"); + detectorOptions.push_back("datasetReader"); + + + + if (std::find(detectorOptions.begin(),detectorOptions.end(),detectorKey.as())== detectorOptions.end()){ + LOG(ERROR) << detectorKey.as() << " is nor supported"; + exit(1); + } + + + if (detectorKey.as()=="pentalo-bg") { + + RecorderReader converter(colorImagesPathKey.as(), depthImagesPathKey.as()); + DepthForegroundSegmentator segmentator; + + + DetectionsValidator validator(outputPath.as()); + cv::Mat previousImage; + int counter = 0; + int maxElements = converter.getNumSamples(); + Sample sample; + while (converter.getNextSample(sample)) { + counter++; + std::stringstream ss; + ss << counter << "/" << maxElements; + LOG(INFO) << 
"Processing [" + ss.str() + "]"; + cv::Mat colorImage = sample.getColorImage().clone(); + cv::cvtColor(colorImage, colorImage, cv::COLOR_RGB2BGR); + if (!previousImage.empty()) { + cv::Mat diff; + cv::absdiff(colorImage, previousImage, diff); + auto val = cv::sum(cv::sum(diff)); + if (val[0] < 1000) { + continue; + } + } + colorImage.copyTo(previousImage); + cv::Mat depthImage = sample.getDepthImage().clone(); + std::vector> detections = segmentator.process(depthImage); + + validator.validate(colorImage, depthImage, detections); + } + } + else if (detectorKey.as()=="deepLearning") { + YAML::Node inferencerImplementationKey=this->config.getNode("inferencerImplementation"); + YAML::Node inferencerNamesKey=this->config.getNode("inferencerNames"); + YAML::Node inferencerConfigKey=this->config.getNode("inferencerConfig"); + YAML::Node inferencerWeightsKey=this->config.getNode("inferencerWeights"); + + + RecorderReaderPtr converter; + if (reader.as() == "recorder-rgbd") { + converter=RecorderReaderPtr( new RecorderReader(dataPath.as())); + } + else{ + converter=RecorderReaderPtr( new RecorderReader(colorImagesPathKey.as(), depthImagesPathKey.as())); + } + + FrameworkInferencerPtr inferencer; + + if (inferencerImplementationKey.as()=="yolo") { + inferencer = DarknetInferencerPtr( new DarknetInferencer(inferencerConfigKey.as(), inferencerWeightsKey.as(), inferencerNamesKey.as())); + } + else{ + LOG(WARNING) << inferencerImplementationKey.as() + " is not a valid inferencer implementation"; + } + + DetectionsValidator validator(outputPath.as()); + int maxElements = converter->getNumSamples(); + Sample sample; + int counter=0; + int skipSamples=10; + std::random_device rd; // obtain a random number from hardware + std::mt19937 eng(rd()); // seed the generator + std::uniform_int_distribution<> distr(5, skipSamples); + + if (maxElements==0){ + LOG(ERROR) << "Empty sample data"; + exit(1); + } + + while (converter->getNextSample(sample)) { + int samples_to_skip=distr(eng); + 
LOG(WARNING) << "Skipping. " << samples_to_skip << std::endl; + bool validSample=false; + for (size_t i = 0; i < samples_to_skip; i++){ + validSample=converter->getNextSample(sample); + } + if (!validSample) + break; + + + counter++; + std::stringstream ss; + ss << counter << "/" << maxElements; + LOG(INFO) << "Processing [" + ss.str() + "]"; + + double thresh = 0.2; + Sample detectedSample = inferencer->detect(sample.getColorImage(), thresh); + detectedSample.setColorImage(sample.getColorImage()); + detectedSample.setDepthImage(sample.getDepthImage()); + + + validator.validate(detectedSample); + + + } + } + else if(detectorKey.as()=="datasetReader"){ + YAML::Node readerNamesKey=this->config.getNode("readerNames"); + //readerImplementationGT + GenericDatasetReaderPtr readerImp(new GenericDatasetReader(dataPath.as(),readerNamesKey.as(), reader.as(), true)); + + + DetectionsValidator validator(outputPath.as(),1.5); + + int maxElements = -1; + Sample sample; + int counter=0; + int skipSamples=10; + std::random_device rd; // obtain a random number from hardware + std::mt19937 eng(rd()); // seed the generator + std::uniform_int_distribution<> distr(5, skipSamples); + + while (readerImp->getReader()->getNextSample(sample)) { + int samples_to_skip=distr(eng); + LOG(WARNING) << "Skipping. 
" << samples_to_skip << std::endl; + bool validSample=false; + for (size_t i = 0; i < samples_to_skip; i++){ + validSample=readerImp->getReader()->getNextSample(sample); + } + if (!validSample) + break; + + + counter++; + std::stringstream ss; + ss << counter << "/" << maxElements; + LOG(INFO) << "Processing [" + ss.str() + "]"; + + + + + validator.validate(sample); + + + } + + } + }; +}; + + + +int main (int argc, char* argv[]) +{ + + MyApp myApp(argc,argv); + myApp.process(); +} diff --git a/DetectionMetrics/Tools/AutoEvaluator/CMakeLists.txt b/DetectionMetrics/Tools/AutoEvaluator/CMakeLists.txt new file mode 100644 index 00000000..6389b7ac --- /dev/null +++ b/DetectionMetrics/Tools/AutoEvaluator/CMakeLists.txt @@ -0,0 +1,38 @@ +IF (DARKNET_ACTIVE) + SET(EXTRA_LIBS ${EXTRA_LIBS} + ${DARKNET_LIBRARIES} + ${CUDA_LIBRARIES} + ${CUDA_LIBRARIES} + ${CUDA_CUBLAS_LIBRARIES} + ${CUDA_curand_LIBRARY}) + + include_directories(${DARKNET_INCLUDE_DIR}) +ENDIF() + + +include_directories( + ${OpenCV_INCLUDE_DIRS} + ${config_INCLUDE_DIRS} + ${Boost_INCLUDE_DIR} + ${PYTHON_INCLUDE_DIRS} + ${CMAKE_CURRENT_LIST_DIR} + ${QT_INCLUDE_DIRS} + ${DetectionMetrics_INCLUDE_DIR} + +) + +add_executable (autoEvaluator autoEvaluator.cpp) + +TARGET_LINK_LIBRARIES(autoEvaluator + DetectionMetrics + ${EXTRA_LIBS} + ${QT_LIBRARIES} + ${OpenCV_LIBRARIES} + ${Boost_LIBRARIES} + ${PYTHON_LIBRARIES} + ${GLOG_LIBRARIES} + ${Ice_LIBRARIES} + ${PYTHON_LIBRARIES} + ssl + crypto + ) diff --git a/DetectionMetrics/Tools/AutoEvaluator/autoEvaluator.cpp b/DetectionMetrics/Tools/AutoEvaluator/autoEvaluator.cpp new file mode 100644 index 00000000..75f5fca4 --- /dev/null +++ b/DetectionMetrics/Tools/AutoEvaluator/autoEvaluator.cpp @@ -0,0 +1,182 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +class MyApp:public SampleGenerationApp{ +public: + MyApp(int argc, char* argv[]):SampleGenerationApp(argc,argv){ + 
this->requiredArguments.push_back("Datasets.inputPath"); + this->requiredArguments.push_back("Datasets.readerImplementation"); + this->requiredArguments.push_back("Datasets.readerNames"); + this->requiredArguments.push_back("Inferencers.inferencerImplementation"); + this->requiredArguments.push_back("Inferencers.inferencerConfig"); + this->requiredArguments.push_back("Inferencers.inferencerWeights"); + this->requiredArguments.push_back("Inferencers.inferencerNames"); + this->requiredArguments.push_back("Inferencers.iouType"); + this->requiredArguments.push_back("outputCSVPath"); + + + /*this->requiredArguments.push_back("readerImplementationGT"); + this->requiredArguments.push_back("readerImplementationDetection"); + this->requiredArguments.push_back("readerNames");*/ + }; + + void operator()(){ + YAML::Node datasetsNode=this->config.getNode("Datasets"); + YAML::Node inferencersNode=this->config.getNode("Inferencers"); + /*YAML::Node readerImplementationNode = this->config.getNode("readerImplementation"); + YAML::Node infererImplementationNode = this->config.getNode("inferencerImplementation"); + YAML::Node inferencerConfigNode = this->config.getNode("inferencerConfig"); + YAML::Node inferencerWeightsNode = this->config.getNode("inferencerWeights"); + YAML::Node inferencerNamesNode = this->config.getNode("inferencerNames"); + YAML::Node readerNamesNode = this->config.getNode("readerNames"); + */ + YAML::Node outputCSVNode = this->config.getNode("outputCSVPath"); + + if (outputCSVNode.IsSequence()) + throw std::invalid_argument("Provided 'outputCSVPath' must be a single Directory, not multiple"); + + auto boostPath= boost::filesystem::path(outputCSVNode.as()); + if (boost::filesystem::exists(boostPath)) { + if (!boost::filesystem::is_directory(boostPath)) { + throw std::invalid_argument("Provided 'outputCSVPath' must be a Directory, not a file"); + } + } else { + boost::filesystem::create_directories(boostPath); + } + + + /*std::vector inputPaths = 
inputPathsNode.IsSequence() + ? inputPathsNode.as>() + : std::vector(1, inputPathsNode.as()); + + std::vector inferencerWeights = inferencerWeightsNode.IsSequence() + ? inferencerWeightsNode.as>() + : std::vector(1, inferencerWeightsNode.as()); + + */ + GenericDatasetReaderPtr reader; + + int count = 0; + + for (auto it = datasetsNode.begin(); it != datasetsNode.end(); it++) { + + + if(!((*it)["inputPath"] && (*it)["readerNames"] && (*it)["readerImplementation"])) + throw std::invalid_argument("Invalid Config file, Error Detected in Datasets Configuration"); + + + std::string inputPath = (*it)["inputPath"].as(); + + std::string readerNames = (*it)["readerNames"].as(); + + std::string readerImplementation = (*it)["readerImplementation"].as(); + + + reader = GenericDatasetReaderPtr( + new GenericDatasetReader(inputPath, readerNames, readerImplementation, true)); + + + int count2 = 0; + + std::string mywriterFile(outputCSVNode.as() + "/Dataset" + std::to_string(++count) + ".csv" ); + + StatsWriter writer(reader->getReader(), mywriterFile); + + + for (auto iter = inferencersNode.begin(); iter != inferencersNode.end(); iter++) { + + + + DatasetReaderPtr readerDetection ( new DatasetReader(true) ); + + if(!((*iter)["inferencerConfig"] && (*iter)["inferencerNames"] && (*iter)["inferencerImplementation"])) + throw std::invalid_argument("Invalid Config file, Error Detected in Datasets Configuration"); + + std::string inferencerConfig = (*iter)["inferencerConfig"].as(); + + std::string inferencerNames = (*iter)["inferencerNames"].as(); + + std::string inferencerWeights = (*iter)["inferencerWeights"].as(); + + std::string inferencerImplementation = (*iter)["inferencerImplementation"].as(); + + std::string inferencerIouType = (*iter)["iouType"].as(); + + bool isIouTypeBbox; + + if (inferencerIouType == "segm" || inferencerIouType == "bbox") { + isIouTypeBbox = inferencerIouType == "bbox"; + } else { + throw std::invalid_argument("Evaluation iouType can either be 'segm' or 
'bbox'\n"); + } + + bool useDepth = (*iter)["useDepth"] ? (*iter)["useDepth"].as() : false; + + reader->getReader()->resetReaderCounter(); + + GenericInferencerPtr inferencer(new GenericInferencer(inferencerConfig, inferencerWeights, inferencerNames, inferencerImplementation)); + MassInferencer massInferencer(reader->getReader(),inferencer->getInferencer(), false); + massInferencer.process(useDepth, readerDetection); + + /*std::vector::iterator iter; + std::cout << samples.size() << '\n'; + + for(iter = samples.begin(); iter != samples.end(); iter++) { + RectRegionsPtr myrectregions = iter->getRectRegions(); + std::vector vec_regions = myrectregions->getRegions(); + + for (auto it = vec_regions.begin(), end= vec_regions.end(); it != end; ++it){ + std::cout << "ClassID: " << it->classID.c_str() << '\n'; + + } + + }*/ + + reader->getReader()->resetReaderCounter(); + + //GenericDatasetReaderPtr readerGT(new GenericDatasetReader(inputPathGT.as(),readerNamesNode.as(), readerImplementationGTNode.as())); + + DetectionsEvaluatorPtr evaluator(new DetectionsEvaluator(reader->getReader(),readerDetection,true)); + + evaluator->evaluate(isIouTypeBbox); + evaluator->accumulateResults(); + /*Extract weights name with folder*/ + std::string path = inferencerWeights; + std::size_t a = path.find_last_of("/"); + std::size_t b = path.substr(0, a).find_last_of("/"); + a = path.find_last_of("."); + + writer.writeInferencerResults(path.substr(b + 1, a - (b+1)), evaluator,massInferencer.getInferencer()->getMeanDurationTime()); + + + count2++; + + } + + writer.saveFile(); + + count++; + + + } + + }; +}; + + + +int main (int argc, char* argv[]) { + + MyApp myApp(argc,argv); + myApp.process(); + + LOG(INFO) << "Auto Evaluation Successfull \n" ; +} diff --git a/DetectionMetrics/Tools/CMakeLists.txt b/DetectionMetrics/Tools/CMakeLists.txt new file mode 100644 index 00000000..de398a22 --- /dev/null +++ b/DetectionMetrics/Tools/CMakeLists.txt @@ -0,0 +1,6 @@ +add_subdirectory(AutoEvaluator) 
+add_subdirectory(Converter) +add_subdirectory(Detector) +add_subdirectory(Evaluator) +add_subdirectory(Splitter) +add_subdirectory(Viewer) diff --git a/DetectionMetrics/Tools/Converter/CMakeLists.txt b/DetectionMetrics/Tools/Converter/CMakeLists.txt new file mode 100644 index 00000000..80a54c36 --- /dev/null +++ b/DetectionMetrics/Tools/Converter/CMakeLists.txt @@ -0,0 +1,26 @@ + +include_directories( + ${OpenCV_INCLUDE_DIRS} + ${config_INCLUDE_DIRS} + ${Boost_INCLUDE_DIR} + ${CMAKE_CURRENT_LIST_DIR} + ${QT_INCLUDE_DIRS} + ${DetectionMetrics_INCLUDE_DIR} + +) + +add_executable (converter converter.cpp) + + +TARGET_LINK_LIBRARIES(converter + DetectionMetrics + ${EXTRA_LIBS} + ${QT_LIBRARIES} + ${OpenCV_LIBRARIES} + ${Boost_LIBRARIES} + ${GLOG_LIBRARIES} + ${Ice_LIBRARIES} + ${PYTHON_LIBRARIES} + ssl + crypto + ) diff --git a/DetectionMetrics/Tools/Converter/converter.cpp b/DetectionMetrics/Tools/Converter/converter.cpp new file mode 100644 index 00000000..098272a3 --- /dev/null +++ b/DetectionMetrics/Tools/Converter/converter.cpp @@ -0,0 +1,110 @@ +// +// Created by frivas on 21/01/17. 
+// + + +#include +#include + +#include +#include +#include +#include +#include +#include +#include + + +#include +#include +#include +#include +#include + + +class MyApp:public SampleGenerationApp{ +public: + MyApp(int argc, char* argv[]):SampleGenerationApp(argc,argv){ + this->requiredArguments.push_back("inputPath"); + this->requiredArguments.push_back("outputPath"); + this->requiredArguments.push_back("readerImplementation"); + this->requiredArguments.push_back("writerImplementation"); + this->requiredArguments.push_back("readerNames"); + this->requiredArguments.push_back("writeImages"); + + + }; + void operator()(){ + YAML::Node inputPathNode=this->config.getNode("inputPath"); + YAML::Node readerImplementationNode = this->config.getNode("readerImplementation"); + YAML::Node writerImplementationNode = this->config.getNode("writerImplementation"); + YAML::Node outputPathNode = this->config.getNode("outputPath"); + YAML::Node readerNamesNode = this->config.getNode("readerNames"); + YAML::Node writeImages = this->config.getNode("writeImages"); + + GenericDatasetReaderPtr reader; + if (inputPathNode.IsSequence()) { + reader = GenericDatasetReaderPtr( + new GenericDatasetReader(inputPathNode.as>(), readerNamesNode.as(), readerImplementationNode.as(), writeImages.as())); + } + else { + reader = GenericDatasetReaderPtr( + new GenericDatasetReader(inputPathNode.as(),readerNamesNode.as(), readerImplementationNode.as(), writeImages.as())); + } + + auto readerPtr = reader->getReader(); + +// std::vector idsToFilter; +// idsToFilter.push_back("person"); +// idsToFilter.push_back("person-falling"); +// idsToFilter.push_back("person-fall"); +// readerPtr->filterSamplesByID(idsToFilter); +// readerPtr->printDatasetStats(); + + + GenericDatasetWriterPtr writer( new GenericDatasetWriter(outputPathNode.as(),readerPtr,writerImplementationNode.as())); + writer->getWriter()->process(writeImages.as()); + }; +}; + +int main (int argc, char* argv[]) +{ + + MyApp myApp(argc,argv); + 
myApp.process(); +} + +/*void extractPersonsFromYolo(const std::string& dataSetPath){ + YoloDatasetReader reader(dataSetPath); + + std::vector idsToFilter; + idsToFilter.push_back("person"); + + + std::cout << "Samples before: " << reader.getNumberOfElements() << std::endl; + reader.filterSamplesByID(idsToFilter); + std::cout << "Samples after: " << reader.getNumberOfElements() << std::endl; + YoloDatasetWriter converter("converter_output", reader); + converter.process(true); +} + + + +int main (int argc, char* argv[]) { + + ViewerAguments args; + parse_arguments(argc,argv,args); + + + Logger::getInstance()->setLevel(Logger::INFO); + Logger::getInstance()->info("Reviewing " + args.path); + + + extractPersonsFromYolo(args.path); + + OwnDatasetReader reader(args.path); + + YoloDatasetWriter converter("converter_output", reader); + converter.process(true); +} +*/ diff --git a/DetectionMetrics/Tools/Detector/CMakeLists.txt b/DetectionMetrics/Tools/Detector/CMakeLists.txt new file mode 100644 index 00000000..cfa7ceba --- /dev/null +++ b/DetectionMetrics/Tools/Detector/CMakeLists.txt @@ -0,0 +1,27 @@ + +include_directories( + ${OpenCV_INCLUDE_DIRS} + ${PYTHON_INCLUDE_DIRS} + ${Boost_INCLUDE_DIR} + ${config_INCLUDE_DIRS} + ${CMAKE_CURRENT_LIST_DIR} + ${QT_INCLUDE_DIRS} + ${DetectionMetrics_INCLUDE_DIR} + +) + +add_executable (detector detector.cpp) + +TARGET_LINK_LIBRARIES(detector + DetectionMetrics + ${EXTRA_LIBS} + ${QT_LIBRARIES} + ${OpenCV_LIBRARIES} + ${Boost_LIBRARIES} + ${PYTHON_LIBRARIES} + ${GLOG_LIBRARIES} + ${Ice_LIBRARIES} + ${PYTHON_LIBRARIES} + ssl + crypto + ) diff --git a/DetectionMetrics/Tools/Detector/detector.cpp b/DetectionMetrics/Tools/Detector/detector.cpp new file mode 100644 index 00000000..757dbaaf --- /dev/null +++ b/DetectionMetrics/Tools/Detector/detector.cpp @@ -0,0 +1,61 @@ +// +// Created by frivas on 1/02/17. 
+// + + +#include +#include +#include +#include +#include +#include + + +class MyApp:public SampleGenerationApp{ +public: + MyApp(int argc, char* argv[]):SampleGenerationApp(argc,argv){ + this->requiredArguments.push_back("inputPath"); + this->requiredArguments.push_back("outputPath"); + this->requiredArguments.push_back("readerImplementation"); + this->requiredArguments.push_back("inferencerImplementation"); + this->requiredArguments.push_back("inferencerConfig"); + this->requiredArguments.push_back("inferencerWeights"); + this->requiredArguments.push_back("inferencerNames"); + this->requiredArguments.push_back("readerNames"); + }; + void operator()(){ + YAML::Node inputPath=this->config.getNode("inputPath"); + YAML::Node outputPath=this->config.getNode("outputPath"); + + YAML::Node readerImplementationKey = this->config.getNode("readerImplementation"); + YAML::Node infererImplementationKey = this->config.getNode("inferencerImplementation"); + YAML::Node inferencerConfigKey = this->config.getNode("inferencerConfig"); + YAML::Node inferencerWeightsKey = this->config.getNode("inferencerWeights"); + YAML::Node inferencerNamesKey = this->config.getNode("inferencerNames"); + YAML::Node readerNamesKey = this->config.getNode("readerNames"); + + GenericDatasetReaderPtr reader; + if (inputPath.IsSequence()) { + reader = GenericDatasetReaderPtr( + new GenericDatasetReader(inputPath.as>(),readerNamesKey.as(), readerImplementationKey.as(), true)); + } + else { + reader = GenericDatasetReaderPtr( + new GenericDatasetReader(inputPath.as(),readerNamesKey.as(), readerImplementationKey.as(), true)); + } + + + GenericInferencerPtr inferencer(new GenericInferencer(inferencerConfigKey.as(),inferencerWeightsKey.as(),inferencerNamesKey.as(),infererImplementationKey.as())); + MassInferencer massInferencer(reader->getReader(),inferencer->getInferencer(),outputPath.as(), true); + massInferencer.process(false); + + }; +}; + + + +int main (int argc, char* argv[]) { + + MyApp 
myApp(argc,argv); + myApp.process(); +} diff --git a/DetectionMetrics/Tools/Evaluator/CMakeLists.txt b/DetectionMetrics/Tools/Evaluator/CMakeLists.txt new file mode 100644 index 00000000..04d3c951 --- /dev/null +++ b/DetectionMetrics/Tools/Evaluator/CMakeLists.txt @@ -0,0 +1,25 @@ + +include_directories( + ${OpenCV_INCLUDE_DIRS} + ${Boost_INCLUDE_DIR} + ${config_INCLUDE_DIRS} + ${CMAKE_CURRENT_LIST_DIR} + ${QT_INCLUDE_DIRS} + ${DetectionMetrics_INCLUDE_DIR} + +) + +add_executable (evaluator evaluator.cpp) + +TARGET_LINK_LIBRARIES(evaluator + DetectionMetrics + ${EXTRA_LIBS} + ${OpenCV_LIBRARIES} + ${QT_LIBRARIES} + ${Boost_LIBRARIES} + ${GLOG_LIBRARIES} + ${Ice_LIBRARIES} + ${PYTHON_LIBRARIES} + ssl + crypto + ) diff --git a/DetectionMetrics/Tools/Evaluator/evaluator.cpp b/DetectionMetrics/Tools/Evaluator/evaluator.cpp new file mode 100644 index 00000000..2a5f51ca --- /dev/null +++ b/DetectionMetrics/Tools/Evaluator/evaluator.cpp @@ -0,0 +1,75 @@ +// +// Created by frivas on 1/02/17. +// + + +#include +#include +#include +#include +#include +#include + + +class MyApp:public SampleGenerationApp{ +public: + MyApp(int argc, char* argv[]):SampleGenerationApp(argc,argv){ + this->requiredArguments.push_back("outputPath"); + this->requiredArguments.push_back("inputPathGT"); + this->requiredArguments.push_back("inputPathDetection"); + this->requiredArguments.push_back("readerImplementationGT"); + this->requiredArguments.push_back("readerImplementationDetection"); + this->requiredArguments.push_back("readerNames"); + this->requiredArguments.push_back("iouType"); + + + }; + void operator()(){ + YAML::Node outputPath=this->config.getNode("outputPath"); + YAML::Node inputPathGT=this->config.getNode("inputPathGT"); + YAML::Node inputPathDetection=this->config.getNode("inputPathDetection"); + YAML::Node readerImplementationGTKey=this->config.getNode("readerImplementationGT"); + YAML::Node readerImplementationDetectionKey=this->config.getNode("readerImplementationDetection"); + 
YAML::Node readerNamesKey=this->config.getNode("readerNames"); + std::string iouType = this->config.asString("iouType"); + + + GenericDatasetReaderPtr readerGT(new GenericDatasetReader(inputPathGT.as(),readerNamesKey.as(), readerImplementationGTKey.as(), false)); + GenericDatasetReaderPtr readerDetection(new GenericDatasetReader(inputPathDetection.as(),readerNamesKey.as(), readerImplementationDetectionKey.as(), false)); + + + DetectionsEvaluatorPtr evaluator(new DetectionsEvaluator(readerGT->getReader(),readerDetection->getReader(),true)); + //todo ñapa + + bool isIouTypeBbox; + + if (iouType == "segm" || iouType == "bbox") { + isIouTypeBbox = iouType == "bbox"; + } else { + throw std::invalid_argument("Evaluation iouType can either be 'segm' or 'bbox'\n"); + } + + evaluator->evaluate(isIouTypeBbox); + evaluator->accumulateResults(); + + + std::string mywriterFile("evaluation_results.csv"); + + StatsWriter writer(readerGT->getReader(), mywriterFile); + + writer.writeInferencerResults("Detection Dataset", evaluator); + + writer.saveFile(); + + + }; +}; + + + + + +int main (int argc, char* argv[]) { + MyApp myApp(argc,argv); + myApp.process(); +} diff --git a/DetectionMetrics/Tools/Splitter/CMakeLists.txt b/DetectionMetrics/Tools/Splitter/CMakeLists.txt new file mode 100644 index 00000000..8bed6767 --- /dev/null +++ b/DetectionMetrics/Tools/Splitter/CMakeLists.txt @@ -0,0 +1,25 @@ + +include_directories( + ${OpenCV_INCLUDE_DIRS} + ${Boost_INCLUDE_DIR} + ${config_INCLUDE_DIRS} + ${CMAKE_CURRENT_LIST_DIR} + ${QT_INCLUDE_DIRS} + ${DetectionMetrics_INCLUDE_DIR} + +) + +add_executable (splitter splitter.cpp) + +TARGET_LINK_LIBRARIES(splitter + DetectionMetrics + ${EXTRA_LIBS} + ${OpenCV_LIBRARIES} + ${Boost_LIBRARIES} + ${QT_LIBRARIES} + ${GLOG_LIBRARIES} + ${Ice_LIBRARIES} + ${PYTHON_LIBRARIES} + ssl + crypto + ) diff --git a/DetectionMetrics/Tools/Splitter/splitter.cpp b/DetectionMetrics/Tools/Splitter/splitter.cpp new file mode 100644 index 00000000..de67d0cb --- 
/dev/null +++ b/DetectionMetrics/Tools/Splitter/splitter.cpp @@ -0,0 +1,105 @@ +// +// Created by frivas on 5/02/17. +// + +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +class MyApp:public SampleGenerationApp{ +public: + MyApp(int argc, char* argv[]):SampleGenerationApp(argc,argv){ + this->requiredArguments.push_back("inputPath"); + this->requiredArguments.push_back("readerImplementation"); + this->requiredArguments.push_back("writerImplementation"); + this->requiredArguments.push_back("outputPath"); + this->requiredArguments.push_back("trainRatio"); + this->requiredArguments.push_back("readerNames"); + + + + }; + void operator()(){ + YAML::Node inputPathNode=this->config.getNode("inputPath"); + YAML::Node outputPathNode=this->config.getNode("outputPath"); + YAML::Node readerImplementationNode = this->config.getNode("readerImplementation"); + YAML::Node writerImplementationNode = this->config.getNode("writerImplementation"); + YAML::Node trainRatioNode = this->config.getNode("trainRatio"); + YAML::Node readerNamesNode = this->config.getNode("readerNames"); + + + + std::string trainPath=outputPathNode.as() + "/train"; + std::string testPath=outputPathNode.as() + "/test"; + + + GenericDatasetReaderPtr reader; + if (inputPathNode.IsSequence()) { + reader = GenericDatasetReaderPtr( + new GenericDatasetReader(inputPathNode.as>(),readerNamesNode.as(), readerImplementationNode.as(), true)); + } + else { + reader = GenericDatasetReaderPtr( + new GenericDatasetReader(inputPathNode.as(),readerNamesNode.as(), + readerImplementationNode.as(), true)); + } + + + auto readerPtr = reader->getReader(); + + std::vector idsToFilter; + idsToFilter.push_back("person"); + idsToFilter.push_back("person-falling"); + idsToFilter.push_back("person-fall"); + readerPtr->filterSamplesByID(idsToFilter); + readerPtr->printDatasetStats(); + + + + + DatasetReaderPtr readerTest(new DatasetReader(true)); + DatasetReaderPtr readerTrain(new 
DatasetReader(true)); + + + double ratio=trainRatioNode.as(); + + Sample sample; + int counter=0; + while (readerPtr->getNextSample(sample)){ + if (counter < ratio){ + readerTrain->addSample(sample); + } + else{ + readerTest->addSample(sample); + } + counter++; + counter= counter % 10; + } + + LOG(INFO) << "Train: " << '\n'; + readerTrain->printDatasetStats(); + LOG(INFO) << "Test: " << '\n'; + readerTest->printDatasetStats(); + + + GenericDatasetWriterPtr writerTest(new GenericDatasetWriter(testPath,readerTest,writerImplementationNode.as())); + writerTest->getWriter()->process(); + + GenericDatasetWriterPtr writerTrain(new GenericDatasetWriter(trainPath,readerTrain,writerImplementationNode.as())); + writerTrain->getWriter()->process(); + + }; +}; + +int main (int argc, char* argv[]) { + + MyApp myApp(argc, argv); + myApp.process(); +} diff --git a/DetectionMetrics/Tools/Viewer/CMakeLists.txt b/DetectionMetrics/Tools/Viewer/CMakeLists.txt new file mode 100644 index 00000000..083dec6c --- /dev/null +++ b/DetectionMetrics/Tools/Viewer/CMakeLists.txt @@ -0,0 +1,25 @@ + +include_directories( + ${OpenCV_INCLUDE_DIRS} + ${Boost_INCLUDE_DIR} + ${config_INCLUDE_DIRS} + ${CMAKE_CURRENT_LIST_DIR} + ${QT_INCLUDE_DIRS} + ${DetectionMetrics_INCLUDE_DIR} + +) + +add_executable (viewer viewer.cpp) + +TARGET_LINK_LIBRARIES(viewer + DetectionMetrics + ${EXTRA_LIBS} + ${OpenCV_LIBRARIES} + ${Boost_LIBRARIES} + ${GLOG_LIBRARIES} + ${Ice_LIBRARIES} + ${QT_LIBRARIES} + ${PYTHON_LIBRARIES} + ssl + crypto + ) diff --git a/DetectionMetrics/Tools/Viewer/viewer.cpp b/DetectionMetrics/Tools/Viewer/viewer.cpp new file mode 100644 index 00000000..c52c81bc --- /dev/null +++ b/DetectionMetrics/Tools/Viewer/viewer.cpp @@ -0,0 +1,55 @@ +// +// Created by frivas on 21/01/17. 
+// + + +#include +#include +#include +#include +#include + +class MyApp:public SampleGenerationApp{ +public: + MyApp(int argc, char* argv[]):SampleGenerationApp(argc,argv){ + this->requiredArguments.push_back("inputPath"); + this->requiredArguments.push_back("readerImplementation"); + this->requiredArguments.push_back("readerNames"); + + + }; + void operator()(){ + YAML::Node inputPathNode=this->config.getNode("inputPath"); + YAML::Node readerImplementationNode = this->config.getNode("readerImplementation"); + YAML::Node readerNamesNode = this->config.getNode("readerNames"); + + + GenericDatasetReaderPtr reader; + if (inputPathNode.IsSequence()) { + reader = GenericDatasetReaderPtr( + new GenericDatasetReader(inputPathNode.as>(),readerNamesNode.as(), readerImplementationNode.as(), true)); + } + else { + reader = GenericDatasetReaderPtr( + new GenericDatasetReader(inputPathNode.as(),readerNamesNode.as(), readerImplementationNode.as(), true)); + } + + + + Sample sample; + while (reader->getReader()->getNextSample(sample)){ + LOG(INFO) << "number of elements: " << sample.getRectRegions()->getRegions().size() << std::endl; + cv::Mat image =sample.getSampledColorImage(); + cv::imshow("Viewer", image); + cv::waitKey(0); + } + + }; +}; + +int main (int argc, char* argv[]) +{ + + MyApp myApp(argc,argv); + myApp.process(); +} diff --git a/DetectionMetrics/libs/CMakeLists.txt b/DetectionMetrics/libs/CMakeLists.txt new file mode 100644 index 00000000..3673c32d --- /dev/null +++ b/DetectionMetrics/libs/CMakeLists.txt @@ -0,0 +1,6 @@ +add_subdirectory(interfaces) +add_subdirectory(types) +add_subdirectory(config) +add_subdirectory(utils) +add_subdirectory(depthLib) +add_subdirectory(comm) diff --git a/DetectionMetrics/libs/comm/CMakeLists.txt b/DetectionMetrics/libs/comm/CMakeLists.txt new file mode 100644 index 00000000..345e8d80 --- /dev/null +++ b/DetectionMetrics/libs/comm/CMakeLists.txt @@ -0,0 +1,95 @@ +cmake_minimum_required(VERSION 2.8) +if(roscpp_FOUND) + 
MESSAGE("ROSCPP found") + SET(JDEROBOTCOM_ROS ON) +ELSE() + SET(JDEROBOTCOM_ROS OFF) +endif() + + +project(comm) + + +IF (JDEROBOTCOM_ROS) + catkin_package( + CATKIN_DEPENDS roscpp std_msgs message_runtime + ) +ENDIF() + +set(binname ${PROJECT_NAME}) + +### Project config +include_directories( + include + ${jderobottypes_INCLUDE_DIRS} + ${Ice_INCLUDE_DIR} + ${OpenCV_INCLUDE_DIRS} + ${DetectionMetrics_LIBS_DIR} + ${ros_INCLUDE_DIRS} + ${INTERFACES_CPP_DIR} + ${config_INCLUDE_DIRS} + ${utils_INCLUDE_DIRS} +) + + +set(HEADERS + include/comm/communicator.hpp + include/comm/tools.hpp + include/comm/cameraClient.hpp + include/comm/interfaces/cameraClient.hpp +) + +set(SOURCES + src/communicator.cpp + src/tools.cpp + src/cameraClient.cpp +) + + +IF (ZeroCIce_FOUND) + set(HEADERS + ${HEADERS} + include/comm/ice/cameraIceClient.hpp + ) + set(SOURCES + ${SOURCES} + src/ice/cameraIceClient.cpp + ) +ENDIF() + +IF(JDEROBOTCOM_ROS) + set(HEADERS + ${HEADERS} + include/comm/ros/translators.hpp + include/comm/ros/listenerCamera.hpp + ) + + set(SOURCES + ${SOURCES} + src/ros/translators.cpp + src/ros/listenerCamera.cpp + ) + +ENDIF() + +## Adding shared library for common usage +add_library(${PROJECT_NAME} SHARED ${SOURCES} ${HEADERS}) + +#add_dependencies(${PROJECT_NAME} ${config_LIBRARIES}) + +target_link_libraries(${PROJECT_NAME} + ${ZeroCIce_LIBRARIES} + ${Boost_LIBRARIES} + ${ros_LIBRARIES} + ${GLOG_LIBRARIES} + colorspacesmm + config + DetectionMetricsUtils + ) + + +## Export library variables (like find_package) +set(${PROJECT_NAME}_FOUND 1 CACHE BOOL "Find(${PROJECT_NAME})") +set(${PROJECT_NAME}_INCLUDE_DIRS "${CMAKE_CURRENT_SOURCE_DIR}/include" CACHE PATH "Find(${PROJECT_NAME})") +set(${PROJECT_NAME}_LIBRARY_DIRS "${CMAKE_CURRENT_BINARY_DIR}" CACHE PATH "Find(${PROJECT_NAME})") +set(${PROJECT_NAME}_LIBRARIES "${PROJECT_NAME}" CACHE STRINGS "Find(${PROJECT_NAME})") diff --git a/DetectionMetrics/libs/comm/include/comm/cameraClient.hpp 
b/DetectionMetrics/libs/comm/include/comm/cameraClient.hpp new file mode 100644 index 00000000..b3fd8675 --- /dev/null +++ b/DetectionMetrics/libs/comm/include/comm/cameraClient.hpp @@ -0,0 +1,58 @@ +/* + * Copyright (C) 1997-2016 JDE Developers Team + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + * Authors : + * Aitor Martinez Fernandez + */ + +#ifndef JDEROBOTCOMM_CAMERACLIENT_H +#define JDEROBOTCOMM_CAMERACLIENT_H + +#include +#include +#include +#include +#include +#ifdef ICE +#include +#include +#include +#endif +#ifdef JDERROS +#include +#endif + + + + + +namespace Comm { + + /** + * @brief make a CameraClient using propierties + * + * + * @param communicator that contains properties + * @param prefix of client Propierties (example: "carViz.Camera") + * + * + * @return null if propierties are wrong + */ + CameraClient* getCameraClient(Comm::Communicator* jdrc, std::string prefix); + + +} //NS + +#endif // JDEROBOTCOMM_CAMERACLIENT_H diff --git a/DetectionMetrics/libs/comm/include/comm/communicator.hpp b/DetectionMetrics/libs/comm/include/comm/communicator.hpp new file mode 100644 index 00000000..4b9fb79e --- /dev/null +++ b/DetectionMetrics/libs/comm/include/comm/communicator.hpp @@ -0,0 +1,52 @@ +/* + * Copyright (C) 1997-2017 JDE Developers Team + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU 
General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + * Authors : + * Aitor Martinez Fernandez + */ + +#ifndef JDEROBOTCOMM_COMMUNICATOR_H +#define JDEROBOTCOMM_COMMUNICATOR_H + +#ifdef ICE +#include +#include +#endif +#include +#include + + +namespace Comm { + + class Communicator { + public: + Communicator(Config::Properties config); + ~Communicator(); + + Config::Properties getConfig(); +#ifdef ICE + Ice::CommunicatorPtr getIceComm(); +#endif + + private: + Config::Properties config; +#ifdef ICE + Ice::CommunicatorPtr ic; +#endif + }; + + +} //NS +#endif // JDEROBOTCOMM_COMMUNICATOR_H diff --git a/DetectionMetrics/libs/comm/include/comm/ice/cameraIceClient.hpp b/DetectionMetrics/libs/comm/include/comm/ice/cameraIceClient.hpp new file mode 100644 index 00000000..a185ff2a --- /dev/null +++ b/DetectionMetrics/libs/comm/include/comm/ice/cameraIceClient.hpp @@ -0,0 +1,75 @@ +/* + * Copyright (C) 1997-2013 JDE Developers TeamkinectViewer.camRGB + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + * + * Author : Jose María Cañas + Francisco Miguel Rivas Montero + + */ + +#ifndef JDEROBOTCOMM_CAMERAICECLIENT_H_ +#define JDEROBOTCOMM_CAMERAICECLIENT_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace Comm { + +class CameraIceClient: public IceUtil::Thread, public Comm::CameraClient { +public: + CameraIceClient(Comm::Communicator* jdrc, std::string prefix); + virtual ~CameraIceClient(); + virtual void run(); + + //callbacks + virtual JdeRobotTypes::Image getImage(); + virtual int getRefreshRate(); + + + void pause(); + void resume(); + void reset(); + void stop_thread(); + bool getPause(){return pauseStatus;}; + + jderobot::ImageFormat getImageFormat(); + void setImageFormat (std::string format); + + +private: + jderobot::CameraPrx prx; + long long int cycle; + + IceUtil::Mutex controlMutex; + std::string prefix; + + bool pauseStatus; + + IceUtil::Cond semWait; + std::string mImageFormat; + +}; + +} /* namespace Comm */ +#endif /* JDEROBOTCOMM_CAMERAICECLIENT_H_ */ diff --git a/DetectionMetrics/libs/comm/include/comm/interfaces/cameraClient.hpp b/DetectionMetrics/libs/comm/include/comm/interfaces/cameraClient.hpp new file mode 100644 index 00000000..8ea683ad --- /dev/null +++ b/DetectionMetrics/libs/comm/include/comm/interfaces/cameraClient.hpp @@ -0,0 +1,40 @@ +/* + * Copyright (C) 1997-2016 JDE Developers Team + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + * Authors : + * Aitor Martinez Fernandez + */ + +#ifndef JDEROBOTCOMM_CAMERACLIENT_INTERFACE_H +#define JDEROBOTCOMM_CAMERACLIENT_INTERFACE_H + +#include + + +namespace Comm { + + class CameraClient { + public: + virtual JdeRobotTypes::Image getImage() = 0; + virtual int getRefreshRate() = 0; + bool on = false; + protected: + JdeRobotTypes::Image image; + int refreshRate; + }; + +} //NS + +#endif // JDEROBOTCOMM_CAMERACLIENT_INTERFACE_H diff --git a/DetectionMetrics/libs/comm/include/comm/ros/listenerCamera.hpp b/DetectionMetrics/libs/comm/include/comm/ros/listenerCamera.hpp new file mode 100644 index 00000000..866902e1 --- /dev/null +++ b/DetectionMetrics/libs/comm/include/comm/ros/listenerCamera.hpp @@ -0,0 +1,67 @@ +/* + * Copyright (C) 1997-2016 JDE Developers Team + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ * Authors : + * Aitor Martinez Fernandez + */ + +#ifndef JDEROBOTCOMM_LISTENERCAMERA_H_ +#define JDEROBOTCOMM_LISTENERCAMERA_H_ + + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace Comm { + class ListenerCamera: public Comm::CameraClient { + + public: + ListenerCamera(int argc, char** argv, std::string nodeName, std::string topic); + ~ListenerCamera(); + + void start(); + void stop(); + virtual JdeRobotTypes::Image getImage(); + virtual int getRefreshRate(); + + + private: + pthread_mutex_t mutex; + ros::Subscriber sub; + std::string topic; + std::string nodeName; + + int cont = 0; //used to count Frames per seconds + time_t timer; // used to save time for FPS + + ros::AsyncSpinner* spinner; + + + void imagecallback (const sensor_msgs::ImageConstPtr& image_msg); + + + + + + };//class + +} //NS +#endif /* JDEROBOTCOMM_LISTENERCAMERA_H_ */ diff --git a/DetectionMetrics/libs/comm/include/comm/ros/translators.hpp b/DetectionMetrics/libs/comm/include/comm/ros/translators.hpp new file mode 100644 index 00000000..261479ef --- /dev/null +++ b/DetectionMetrics/libs/comm/include/comm/ros/translators.hpp @@ -0,0 +1,73 @@ +/* + * Copyright (C) 1997-2016 JDE Developers Team + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ * Authors : + * Aitor Martinez Fernandez + */ + +#ifndef JDEROBOTCOMM_TRANSLATORSROS_H_ +#define JDEROBOTCOMM_TRANSLATORSROS_H_ + +#include +#include + +#include + +#include +#include +#include "image_transport/image_transport.h" +#include "cv_bridge/cv_bridge.h" +#include "sensor_msgs/image_encodings.h" + + +namespace Comm { + + /** + * @brief translate ROS Image messages to JdeRobot Image + * + * + * @param ROS Image Message + * + * + * @return Image translated from ROS Message + */ + JdeRobotTypes::Image translate_image_messages(const sensor_msgs::ImageConstPtr& image_msg); + + + /** + * @brief translate ROS images messages to JdeRobot Rgbd + * + * + * @param ROS Image Message + * @param ROS Image Message + * + * + * @return Rgbd translated from ROS Messages + */ + JdeRobotTypes::Rgbd translate_rgbd(const sensor_msgs::ImageConstPtr& rgb,const sensor_msgs::ImageConstPtr& d); + + /** + * @brief Translates from 32FC1 Image format to RGB. Inf values are represented by NaN, when converting to RGB, NaN passed to 0 + * + * + * @param ROS Image Message + * + * + * @return Image translated from ROS Message + */ + void depthToRGB(const cv::Mat& float_img, cv::Mat& rgb_img); + +} /* NS */ +#endif //JDEROBOTCOMM_TRANSLATORSROS_H_ diff --git a/DetectionMetrics/libs/comm/include/comm/tools.hpp b/DetectionMetrics/libs/comm/include/comm/tools.hpp new file mode 100644 index 00000000..f266857b --- /dev/null +++ b/DetectionMetrics/libs/comm/include/comm/tools.hpp @@ -0,0 +1,33 @@ +/* + * Copyright (C) 1997-2018 JDE Developers Team + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + * Authors : + * Aitor Martinez Fernandez + */ + +#ifndef JDEROBOTCOMM_TOOLS_H +#define JDEROBOTCOMM_TOOLS_H + +#include + + +namespace Comm { + + int server2int (std::string server); + + +} //NS + +#endif // JDEROBOTCOMM_TOOLS_H diff --git a/DetectionMetrics/libs/comm/package.xml b/DetectionMetrics/libs/comm/package.xml new file mode 100644 index 00000000..8c098e26 --- /dev/null +++ b/DetectionMetrics/libs/comm/package.xml @@ -0,0 +1,66 @@ + + + comm + 0.0.1 + The jderobotcomm library + + + + + shady + + + + + + TODO + + + + + + + + + + + + + + + + + + + + + + + + + + catkin + roscpp + std_msgs + cv_bridge + image_transport + nav_msgs + geometry_msgs + kobuki_msgs + + message_runtime + roscpp + std_msgs + cv_bridge + image_transport + nav_msgs + geometry_msgs + kobuki_msgs + + + + + + + + \ No newline at end of file diff --git a/DetectionMetrics/libs/comm/src/cameraClient.cpp b/DetectionMetrics/libs/comm/src/cameraClient.cpp new file mode 100644 index 00000000..878f655d --- /dev/null +++ b/DetectionMetrics/libs/comm/src/cameraClient.cpp @@ -0,0 +1,82 @@ +/* + * Copyright (C) 1997-2016 JDE Developers Team + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ * Authors : + * Aitor Martinez Fernandez + */ +#include +#include + +namespace Comm { + +CameraClient* +getCameraClient(Comm::Communicator* jdrc, std::string prefix){ + CameraClient* client = 0; + + int server; + std::string server_name = jdrc->getConfig().asString(prefix+".Server"); + server = server2int(server_name); + + switch (server){ + case 0: + { + LOG(ERROR) << "Camera disabled" << std::endl; + break; + } + case 1: + { + #ifdef ICE + LOG(INFO) << "Receiving Image from ICE interfaces" << std::endl; + CameraIceClient* cl; + cl = new CameraIceClient(jdrc, prefix); + cl->start(); + client = (Comm::CameraClient*) cl; + #else + throw "ERROR: ICE is not available"; + #endif + break; + } + case 2: + { + #ifdef JDERROS + LOG(INFO) << "Receiving Image from ROS messages" << std::endl; + std::string nodeName; + nodeName = jdrc->getConfig().asStringWithDefault(prefix+".Name", "LaserNode"); + std::string topic; + topic = jdrc->getConfig().asStringWithDefault(prefix+".Topic", ""); + ListenerCamera* lc; + lc = new ListenerCamera(0, nullptr, nodeName, topic); + lc->start(); + client = (Comm::CameraClient*) lc; + #else + throw "ERROR: ROS is not available"; + #endif + + break; + } + default: + { + std::cerr << "Wrong " + prefix+".Server property" << std::endl; + break; + } + + } + + return client; + + +} + +}//NS diff --git a/DetectionMetrics/libs/comm/src/communicator.cpp b/DetectionMetrics/libs/comm/src/communicator.cpp new file mode 100644 index 00000000..585a2315 --- /dev/null +++ b/DetectionMetrics/libs/comm/src/communicator.cpp @@ -0,0 +1,51 @@ +/* + * Copyright (C) 1997-2017 JDE Developers Team + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + * Authors : + * Aitor Martinez Fernandez + */ +#include + +namespace Comm { + +Communicator::Communicator(Config::Properties config){ + this->config = config; + #ifdef ICE + this->ic = Ice::initialize(); + #endif +} + + +Communicator::~Communicator(){ + #ifdef ICE + this->ic->destroy(); + #endif +} + + + +Config::Properties +Communicator::getConfig(){ + return this->config; +} + +#ifdef ICE +Ice::CommunicatorPtr +Communicator::getIceComm(){ + return this->ic; +} +#endif + +}//NS diff --git a/DetectionMetrics/libs/comm/src/ice/cameraIceClient.cpp b/DetectionMetrics/libs/comm/src/ice/cameraIceClient.cpp new file mode 100644 index 00000000..9f1f4d82 --- /dev/null +++ b/DetectionMetrics/libs/comm/src/ice/cameraIceClient.cpp @@ -0,0 +1,218 @@ +/* + * Copyright (C) 1997-2016 JDE Developers TeamkinectViewer.camRGB + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ * + * Author : Jose María Cañas + Francisco Miguel Rivas Montero + + */ + +#include +#include +#include + + +namespace Comm { + + +CameraIceClient::CameraIceClient(Comm::Communicator* jdrc, std::string prefix) { + + this->prefix=prefix; + Ice::ObjectPrx baseCamera; + this->refreshRate=0; + this->mImageFormat.empty(); + + + + float fps=jdrc->getConfig().asFloatWithDefault(prefix+".Fps", 30); + this->cycle=(1/fps)*1000000; + try{ + std::string proxy = jdrc->getConfig().asString(prefix+".Proxy"); + baseCamera = jdrc->getIceComm()->stringToProxy(proxy); + if (0==baseCamera){ + this->on = false; + throw prefix + "Could not create proxy with Camera"; + } + else { + this->prx= jderobot::CameraPrx::checkedCast(baseCamera); + this->on = true; + if (0==this->prx){ + this->on = false; + throw "Invalid " + prefix + ".Proxy"; + } + } + }catch (const Ice::Exception& ex) { + LOG(ERROR) << ex << std::endl; + } + catch (const char* msg) { + LOG(ERROR) << msg << std::endl; + LOG(ERROR) <getConfig().asStringWithDefault(prefix+".Format", "RGB8"); + + this->mImageFormat = CameraUtils::negotiateDefaultFormat(this->prx,definedFormat); + + jderobot::ImageDataPtr data = this->prx->getImageData(this->mImageFormat); + + this->pauseStatus=false; +} + + +CameraIceClient::~CameraIceClient() { + this->on=false; +} + + +jderobot::ImageFormat CameraIceClient::getImageFormat() +{ + return (this->prx->getImageFormat()); +} + +void CameraIceClient::setImageFormat (std::string format) +{ + mImageFormat = format; + + LOG(INFO) <<"Changed format " + this->mImageFormat + " for camera " + this->prx->getCameraDescription()->name; +}; + + +void CameraIceClient::reset(){ + this->prx->reset(); +} + +void CameraIceClient::pause(){ + this->pauseStatus=true; +} + +void CameraIceClient::resume(){ + this->controlMutex.lock(); + this->pauseStatus=false; + this->semWait.broadcast(); + this->controlMutex.unlock(); +} + + +void +CameraIceClient::run(){ + jderobot::ImageDataPtr dataPtr; + 
colorspaces::Image::FormatPtr fmt; + IceUtil::Time last; + + int iterIndex = 0; + int totalRefreshRate = 0; + int refrRate = 0; + + JdeRobotTypes::Image img; + + last=IceUtil::Time::now(); + while (this->on){ + + iterIndex ++; + if (pauseStatus){ + IceUtil::Mutex::Lock sync(this->controlMutex); + this->semWait.wait(sync); + } + + try{ + + + dataPtr = this->prx->getImageData(this->mImageFormat); + + + + + + // Putting image data + img.data = CameraUtils::getImageFromCameraProxy(dataPtr); + + img.format = dataPtr->description->format; + img.width = dataPtr->description->width; + img.height = dataPtr->description->height; + img.timeStamp = dataPtr->timeStamp.seconds + dataPtr->timeStamp.useconds * 1e-6; + + + + + } + catch(std::exception& e){ + LOG(WARNING) << prefix +"error during request (connection error): " << e.what() << std::endl; + usleep(50000); + + } + + int process = (IceUtil::Time::now().toMicroSeconds() - last.toMicroSeconds()); + + + + if (process > (int)cycle ){ + DLOG(WARNING) << "--------" + prefix + " adquisition timeout-"; + } + else{ + int delay = (int)cycle - process; + if (delay <1 || delay > (int)cycle) + delay = 1; + + usleep(delay); + } + + + int rate =(int)(1000000/(IceUtil::Time::now().toMicroSeconds() - last.toMicroSeconds())); + totalRefreshRate = totalRefreshRate + rate; + refrRate = totalRefreshRate / iterIndex; + last=IceUtil::Time::now(); + + if (iterIndex == INT_MAX) + { + iterIndex = 0; + DLOG(INFO) << "*** Counter reset"; + } + + this->controlMutex.lock(); + this->image = img; + this->refreshRate = refrRate; + this->controlMutex.unlock(); + + } + + this->image.data.release(); +} + +void CameraIceClient::stop_thread() +{ + this->on=false; +} + +JdeRobotTypes::Image CameraIceClient::getImage(){ + JdeRobotTypes::Image img; + + this->controlMutex.lock(); + img = this->image; + this->controlMutex.unlock(); + + return img; +} + +int CameraIceClient::getRefreshRate(){ + int rr; + this->controlMutex.lock(); + rr = this->refreshRate; + 
this->controlMutex.unlock(); + + return rr; +}; + +} /* namespace jderobot */ diff --git a/DetectionMetrics/libs/comm/src/ros/listenerCamera.cpp b/DetectionMetrics/libs/comm/src/ros/listenerCamera.cpp new file mode 100644 index 00000000..758a0646 --- /dev/null +++ b/DetectionMetrics/libs/comm/src/ros/listenerCamera.cpp @@ -0,0 +1,81 @@ +#include +#include +namespace Comm { + + ListenerCamera::ListenerCamera(int argc, char** argv, std::string nodeName, std::string topic){ + pthread_mutex_init(&mutex, NULL); + if ("" == topic){ + this->on = false; + std::cerr <<"Invalid camera topic" <on = true; + this->topic = topic; + this->nodeName = nodeName; + + const std::string name = std::string(this->nodeName); + + time(&timer); + int a = 0; + ros::init(a, nullptr, name); + ros::NodeHandle nh; + this->sub = nh.subscribe(this->topic, 1001, &ListenerCamera::imagecallback, this); + LOG(INFO) << "listen from "+ this->topic << std::endl; + + this->spinner = new ros::AsyncSpinner(1); + } + } + + + + ListenerCamera::~ListenerCamera(){ + this->stop(); + } + + void + ListenerCamera::start(){ + this->spinner->start(); + } + + void + ListenerCamera::stop(){ + this->spinner->stop(); + ros::shutdown(); + } + + void + ListenerCamera::imagecallback(const sensor_msgs::ImageConstPtr& image_msg){ + this->cont++; + time_t now; + time(&now); + pthread_mutex_lock(&mutex); + this->image = Comm::translate_image_messages(image_msg); + if (difftime(this->timer, now)>=1){ + this->refreshRate = this->cont; + this->cont = 0; + this->timer = now; + } + pthread_mutex_unlock(&mutex); + + } + + JdeRobotTypes::Image ListenerCamera::getImage(){ + JdeRobotTypes::Image img; + pthread_mutex_lock(&mutex); + img = this->image; + pthread_mutex_unlock(&mutex); + return img; + } + + int ListenerCamera::getRefreshRate(){ + + int rr; + pthread_mutex_lock(&mutex); + rr = this->refreshRate; + pthread_mutex_unlock(&mutex); + + return rr; + } + + + +}//NS diff --git a/DetectionMetrics/libs/comm/src/ros/translators.cpp 
b/DetectionMetrics/libs/comm/src/ros/translators.cpp new file mode 100644 index 00000000..6e3d487f --- /dev/null +++ b/DetectionMetrics/libs/comm/src/ros/translators.cpp @@ -0,0 +1,65 @@ +#include +namespace Comm { + + float PI = 3.1415; + + int MAXRANGEIMGD = 8; //max length received from imageD + + + + void + depthToRGB(const cv::Mat& float_img, cv::Mat& rgb_img, std::string type ){ + //Process images + cv::Mat mono8_img; + if (type.substr(type.length() - 3, 1) == "U"){ + mono8_img = float_img; + rgb_img = cv::Mat(float_img.size(), CV_8UC3); + }else{ + cv::Mat mono8_img = cv::Mat(float_img.size(), CV_8UC1); + if(rgb_img.rows != float_img.rows || rgb_img.cols != float_img.cols){ + rgb_img = cv::Mat(float_img.size(), CV_8UC3); + } + cv::convertScaleAbs(float_img, mono8_img, 255/MAXRANGEIMGD, 0.0); + } + + cv::cvtColor(mono8_img, rgb_img, CV_GRAY2RGB); + + } + + + JdeRobotTypes::Image + translate_image_messages(const sensor_msgs::ImageConstPtr& image_msg){ + JdeRobotTypes::Image img; + cv_bridge::CvImagePtr cv_ptr; + + img.timeStamp = image_msg->header.stamp.sec + (image_msg->header.stamp.nsec *1e-9); + img.format = "RGB8"; // we convert img_msg to RGB8 format + img.width = image_msg->width; + img.height = image_msg->height; + cv::Mat img_data; + + try { + + //std::cout << image_msg->encoding << std::endl; + //if (image_msg->encoding.compare(sensor_msgs::image_encodings::TYPE_32FC1)==0 || image_msg->encoding.compare(sensor_msgs::image_encodings::TYPE_16UC1)==0){ + + if (image_msg->encoding.substr(image_msg->encoding.length() - 2 ) == "C1"){ + cv_ptr = cv_bridge::toCvCopy(image_msg); + depthToRGB(cv_ptr->image, img_data, image_msg->encoding); + + + }else{ + cv_ptr = cv_bridge::toCvCopy(image_msg, sensor_msgs::image_encodings::RGB8); + img_data = cv_ptr->image; + } + } catch (cv_bridge::Exception& e) { + + ROS_ERROR("cv_bridge exception: %s", e.what()); + } + + img.data = img_data; + + return img; + } + +} /* NS */ diff --git a/DetectionMetrics/libs/comm/src/tools.cpp 
b/DetectionMetrics/libs/comm/src/tools.cpp new file mode 100644 index 00000000..5298f035 --- /dev/null +++ b/DetectionMetrics/libs/comm/src/tools.cpp @@ -0,0 +1,33 @@ +/* + * Copyright (C) 1997-2017 JDE Developers Team + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + * Authors : + * Aitor Martinez Fernandez + */ +#include + +namespace Comm { + int server2int(std::string server){ + std::transform(server.begin(), server.end(), server.begin(), ::tolower); + if(server == "ice" || server == "1"){ + return 1; + } + else if(server == "ros"|| server == "2"){ + return 2; + } + else return 0; + } + +}//NS diff --git a/DetectionMetrics/libs/config/CMakeLists.txt b/DetectionMetrics/libs/config/CMakeLists.txt new file mode 100644 index 00000000..ed3aa18e --- /dev/null +++ b/DetectionMetrics/libs/config/CMakeLists.txt @@ -0,0 +1,30 @@ +cmake_minimum_required(VERSION 2.8) +project(config) + +### Project config +include_directories( + include + ${CMAKE_CURRENT_BINARY_DIR}/include +) + +set(HEADERS + include/config/config.h + include/config/loader.hpp + include/config/properties.hpp + include/config/stdutils.hpp +) + +set(SOURCES + src/loader.cpp + src/properties.cpp +) + + +add_library(${PROJECT_NAME} SHARED ${SOURCES} ${HEADERS}) +target_link_libraries(${PROJECT_NAME} ${YAML_CPP_LIBRARIES} ${GLOG_LIBRARIES}) + +## Export library variables (like find_package) 
+set(${PROJECT_NAME}_FOUND 1 CACHE BOOL "Find(${PROJECT_NAME})") +set(${PROJECT_NAME}_INCLUDE_DIRS "${CMAKE_CURRENT_SOURCE_DIR}/include" CACHE PATH "Find(${PROJECT_NAME})") +set(${PROJECT_NAME}_LIBRARY_DIRS "${CMAKE_CURRENT_BINARY_DIR}" CACHE PATH "Find(${PROJECT_NAME})") +set(${PROJECT_NAME}_LIBRARIES "${PROJECT_NAME}" CACHE STRINGS "Find(${PROJECT_NAME})") diff --git a/DetectionMetrics/libs/config/include/config/config.h b/DetectionMetrics/libs/config/include/config/config.h new file mode 100644 index 00000000..ba4ea025 --- /dev/null +++ b/DetectionMetrics/libs/config/include/config/config.h @@ -0,0 +1,57 @@ +/* + * Copyright (C) 1997-2017 JDE Developers Team + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ * Authors : + * Aitor Martinez Fernandez + */ + +#ifndef JDEROBOT_CONFIG_CONFIG_H +#define JDEROBOT_CONFIG_CONFIG_H + +/** + * @mainpage Config + * JdeRobot COnfig library + * + * @author Aitor Martinez Fernandez , .aitor.martinez.fernandez@gmail.com + * @date September 2017 + * @version 0.9.0 (alpha) + */ + +#include +#include +#include +#include + +namespace Config{ + + +/** + * @brief loads propierties from a file + * + * + * @param filename + * + * + * @return config class with all properties + */ +inline Config::Properties load(int argc, char* argv[]){ + std::string filename (argv[1]); + return jderobotconfig::loader::load(filename); + } +inline Config::Properties load(YAML::Node node){ + return jderobotconfig::loader::load(node); + } +} //NS +#endif // JDEROBOT_CONFIG_CONFIG_H diff --git a/DetectionMetrics/libs/config/include/config/loader.hpp b/DetectionMetrics/libs/config/include/config/loader.hpp new file mode 100644 index 00000000..6c1b82bf --- /dev/null +++ b/DetectionMetrics/libs/config/include/config/loader.hpp @@ -0,0 +1,59 @@ +/* + * Copyright (C) 1997-2017 JDE Developers Team + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ * Authors : + * Aitor Martinez Fernandez + */ + +#ifndef JDEROBOT_CONFIG_LOADER_H +#define JDEROBOT_CONFIG_LOADER_H + +#include +#include +#include +#include +#include +//#include +#include + + +namespace jderobotconfig { +namespace loader { + +const std::string CONFIG_PATH_NAME = "JDEROBOT_CONFIG_PATHS"; + + +/** + * @brief Find filename into all defined search paths. + * Order is: + * 1. current dir + * 2. jderobot paths (*) + * + * @return empty if file was not found. + */ +std::string findConfigFile(const std::string& filename); + +/** + * @brief Loads File configuration from passed file. + * + * @return new Config::Config or passed one. + */ +Config::Properties load(std::string filename); +Config::Properties load(YAML::Node node); +Config::Properties load(std::string filepath,bool isPath); +}}//NS + + +#endif // JDEROBOT_CONFIG_LOADER_H diff --git a/DetectionMetrics/libs/config/include/config/properties.hpp b/DetectionMetrics/libs/config/include/config/properties.hpp new file mode 100644 index 00000000..e9a96f3c --- /dev/null +++ b/DetectionMetrics/libs/config/include/config/properties.hpp @@ -0,0 +1,161 @@ +/* + * Copyright (C) 1997-2017 JDE Developers Team + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ * Authors : + * Aitor Martinez Fernandez + */ + +#ifndef JDEROBOT_CONFIG_CLASS_H +#define JDEROBOT_CONFIG_CLASS_H + +/** + * @mainpage Config + * JdeRobot Config library + * + * @author Aitor Martinez Fernandez , .aitor.martinez.fernandez@gmail.com + * @date September 2017 + * @version 0.9.0 (alpha) + */ + +#include +#include +#include +#include +#include +#include +#include + +namespace Config{ + +class Properties { +public: + Properties(); + Properties(YAML::Node node); + //~Properties(); + + void showConfig(); + + void printNode(YAML::Node node, int nesting_level); + + bool keyExists(std::string element); + + bool NodeExists(YAML::Node n, std::vector names); + + YAML::Node getNode(std::string element); + /** + * @brief returns as string the propery given + * + * @param route to element separated by dots (example: "carViz.Camera.proxy") + * + */ + std::string asString(std::string element); + + + /** + * @brief returns as string the propery given + * + * @param route to element separated by dots (example: "carViz.Camera.proxy") + * @param default value + * + */ + std::string asStringWithDefault(std::string element, std::string dataDefault); + + /** + * @brief returns as float the propery given + * + * @param route to element separated by dots (example: "carViz.Camera.proxy") + * + */ + float asFloat(std::string element); + + /** + * @brief returns as float the propery given + * + * @param route to element separated by dots (example: "carViz.Camera.proxy") + * @param default value + * + */ + float asFloatWithDefault(std::string element, float dataDefault); + + /** + * @brief returns as integer the propery given + * + * @param route to element separated by dots (example: "carViz.Camera.proxy") + * + */ + int asInt(std::string element); + + /** + * @brief returns as integer the propery given + * + * @param route to element separated by dots (example: "carViz.Camera.proxy") + * @param default value + * + */ + int asIntWithDefault(std::string element, int 
dataDefault); + + /** + * @brief returns as double the propery given + * + * @param route to element separated by dots (example: "carViz.Camera.proxy") + * + */ + double asDouble(std::string element); + + /** + * @brief returns as double the propery given + * + * @param route to element separated by dots (example: "carViz.Camera.proxy") + * @param default value + * + */ + double asDoubleWithDefault(std::string element, double dataDefault); + + + + YAML::Node getNode(); + + void SetProperty(std::string key,std::string value); + +private: + YAML::Node node; + + /** + * @brief makes recursively sear for element given in names + * + * + * @param yaml node in which search + * @param vector of elements names (route to element of last position of vector) + * + * + * @return yaml node of element + */ + YAML::Node searchNode(YAML::Node n, std::vector names); + +}; + + +/** + * @brief function to make printable config class + */ +inline +std::ostream& operator<< (std::ostream & out, Properties & data) { + out << data.getNode(); + return out ; +} + +}//NS + +#endif // JDEROBOT_CONFIG_CLASS_H diff --git a/DetectionMetrics/libs/config/include/config/stdutils.hpp b/DetectionMetrics/libs/config/include/config/stdutils.hpp new file mode 100644 index 00000000..860a31e7 --- /dev/null +++ b/DetectionMetrics/libs/config/include/config/stdutils.hpp @@ -0,0 +1,66 @@ +/* + * Copyright (C) 1997-2017 JDE Developers Team + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + * Authors : + * Victor Arribas Raigadas <.varribas.urjc@gmail.com> + */ + +#ifndef STDUTILS_HPP +#define STDUTILS_HPP + +//// string wrapper for stdlib.h::getenv + +#include +#include + +inline +std::string getEnvironmentVariable(std::string var){ + char* _env = getenv(var.c_str()); + return std::string((_env)?_env:""); +} + + +//// Fallback std::split +/// source: http://stackoverflow.com/questions/5607589/right-way-to-split-an-stdstring-into-a-vectorstring + +#include +#include +#include + +namespace std { +inline +vector split(string str, string del){ + vector vstrings; + boost::split(vstrings, str, boost::is_any_of(del)); + return vstrings; +} +}//NS + + + +//// Check if file exists +/// For Linux works for files and directories +/// source: http://www.cplusplus.com/forum/general/1796/ +#include + +namespace std { +inline +bool fileexists(std::string filepath){ + ifstream ifile(filepath.c_str(), ios_base::in); + return ifile.is_open(); +} +}//NS + +#endif // STDUTILS_HPP diff --git a/DetectionMetrics/libs/config/src/loader.cpp b/DetectionMetrics/libs/config/src/loader.cpp new file mode 100644 index 00000000..cc629569 --- /dev/null +++ b/DetectionMetrics/libs/config/src/loader.cpp @@ -0,0 +1,79 @@ +/* + * Copyright (C) 1997-2015 JDE Developers Team + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + * Authors : + * Victor Arribas Raigadas <.varribas.urjc@gmail.com> + */ + + +#include +#include +namespace jderobotconfig{ +namespace loader{ + +std::string +findConfigFile(const std::string& filename){ + if (std::fileexists(filename)) + return filename; + + std::string path_holders[] = {getEnvironmentVariable(CONFIG_PATH_NAME)}; + + for (int i=0; i<2; i++){ + if (path_holders[i].empty()) continue; + for (std::string path : std::split(path_holders[i], ":")){ + if (path.empty()) continue; + std::string filepath(path+"/"+filename); + if (std::fileexists(filepath)) + return filepath; + } + } + + return ""; +} + +Config::Properties +load(std::string filename){ + std::string filepath = findConfigFile(filename); + // load_path(filepath); + if (filepath.empty()){ + YAML::Exception e(YAML::Mark(),"jderobot/config/loader.cpp: file " + filepath + " Not Found"); + throw e; + } + YAML::Node nodeConfig = YAML::LoadFile(filepath); + + Config::Properties config(nodeConfig); + LOG(INFO)<<"[Info] loaded YAML Config file: "<setProperty("Ice.Config", filepath); + return config; +} + +Config::Properties +load(std::string filepath,bool isPath){ + YAML::Node nodeConfig = YAML::LoadFile(filepath); + Config::Properties config(nodeConfig); + LOG(INFO)<<"[Info] loaded YAML Config file: "<setProperty("Ice.Config", filepath); + return config; +} + + +}}//NS diff --git a/DetectionMetrics/libs/config/src/properties.cpp b/DetectionMetrics/libs/config/src/properties.cpp new file mode 100644 index 00000000..910ea220 --- /dev/null +++ b/DetectionMetrics/libs/config/src/properties.cpp @@ -0,0 +1,230 @@ +/* + * Copyright (C) 1997-2017 JDE Developers Team + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of 
the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + * Authors : + * Aitor Martinez Fernandez + */ + +#include +#include +namespace Config{ + + +Properties::Properties(){ +} + +Properties::Properties(YAML::Node node){ + this->node = node; +} + +void +Properties::showConfig() { + LOG(INFO) << "------------------------------------------------------------------" << std::endl; + LOG(INFO) << "------------------------------------------------------------------" << std::endl; + + for (YAML::const_iterator it = this->node.begin(); it != this->node.end(); ++it){ + LOG(INFO) << it->first.as() << ": "; + printNode(it->second, 0); + LOG(INFO) << '\n'; + // it->second.as(); // can't do this until it's type is checked!! + } + + LOG(INFO) << "------------------------------------------------------------------" << std::endl; + LOG(INFO) << "------------------------------------------------------------------" << std::endl; + +} + +void +Properties::printNode(YAML::Node node_passed, int nesting_level) { + switch (node_passed.Type()) { + case YAML::NodeType::Null: + return; + case YAML::NodeType::Scalar: + LOG(INFO) << node_passed.as() << '\n'; + break; + case YAML::NodeType::Sequence: + LOG(INFO) << '\n'; + for (YAML::const_iterator it = node_passed.begin(); it != node_passed.end(); ++it){ + LOG(INFO) << std::string(nesting_level, ' ') << "-" << '\n'; + printNode(*it, nesting_level + 2); + + // it->second.as(); // can't do this until it's type is checked!! 
+ } + break; + case YAML::NodeType::Map: + for (YAML::const_iterator it = node_passed.begin(); it != node_passed.end(); ++it){ + LOG(INFO) << std::string(nesting_level, ' ') << it->first.as() << ": "; + printNode(it->second, nesting_level + 2); + // it->second.as(); // can't do this until it's type is checked!! + } + break; + case YAML::NodeType::Undefined: // ... + return; + } +} + +bool +Properties::keyExists(std::string element) { + std::vector v = std::split(element, "."); + + return this->NodeExists(this->node, v); + +} + +bool +Properties::NodeExists(YAML::Node n, std::vector names) { + YAML::Node nod = n[names[0]]; + names.erase(names.begin()); + + if (names.size() > 0) { + if (nod.IsSequence()) { + for (YAML::const_iterator it=nod.begin();it!=nod.end();++it) { + if (!this->NodeExists(*it, names)) + return false; + } + } else { + + return this->searchNode(nod, names); + } + + } else { + return nod ? true : false; + + } + + +} + +YAML::Node +Properties::getNode(std::string element) { + std::vector v = std::split(element, "."); + + YAML::Node nod = this->searchNode(this->node, v); + return nod; +} + +std::string +Properties::asString(std::string element){ + std::vector v = std::split(element, "."); + + YAML::Node nod = this->searchNode(this->node, v); + return nod.as(); +} + +std::string +Properties::asStringWithDefault(std::string element, std::string dataDefault){ + std::vector v = std::split(element, "."); + + YAML::Node nod = this->searchNode(this->node, v); + std::string data; + try{ + data = nod.as(); + }catch(YAML::BadConversion e){ + data = dataDefault; + } + return data; +} + +float +Properties::asFloat(std::string element){ + std::vector v = std::split(element, "."); + + YAML::Node nod = this->searchNode(this->node, v); + return nod.as(); +} + +float +Properties::asFloatWithDefault(std::string element, float dataDefault){ + std::vector v = std::split(element, "."); + + YAML::Node nod = this->searchNode(this->node, v); + float data; + try{ + data = 
nod.as(); + }catch(YAML::BadConversion e){ + data = dataDefault; + } + return data; +} + +int +Properties::asInt(std::string element){ + std::vector v = std::split(element, "."); + + YAML::Node nod = this->searchNode(this->node, v); + return nod.as(); +} + +int +Properties::asIntWithDefault(std::string element, int dataDefault){ + std::vector v = std::split(element, "."); + + YAML::Node nod = this->searchNode(this->node, v); + int data; + try{ + data = nod.as(); + }catch(YAML::BadConversion e){ + data = dataDefault; + } + return data; +} + +double +Properties::asDouble(std::string element){ + std::vector v = std::split(element, "."); + + YAML::Node nod = this->searchNode(this->node, v); + return nod.as(); +} + +double +Properties::asDoubleWithDefault(std::string element, double dataDefault){ + std::vector v = std::split(element, "."); + + YAML::Node nod = this->searchNode(this->node, v); + double data; + try{ + data = nod.as(); + }catch(YAML::BadConversion e){ + data = dataDefault; + } + return data; +} + +YAML::Node +Properties::getNode(){ + + return node; +} + + + +YAML::Node +Properties::searchNode(YAML::Node n, std::vector names){ + YAML::Node nod = n[names[0]]; + names.erase(names.begin()); + + if (names.size()>0){ + return this->searchNode(nod, names); + }else{ + return nod; + } +} + +void Properties::SetProperty(std::string key, std::string value){ + this->node[key] = value; +} + +}//NS diff --git a/DetectionMetrics/libs/depthLib/CMakeLists.txt b/DetectionMetrics/libs/depthLib/CMakeLists.txt new file mode 100644 index 00000000..869705bb --- /dev/null +++ b/DetectionMetrics/libs/depthLib/CMakeLists.txt @@ -0,0 +1,20 @@ +include_directories(${SLICE_DIR}) # Aquí se alojan las cabeceras de las interfaces ICE en C++ +include_directories(${INTERFACES_CPP_DIR}) # Aquí se alojan las cabeceras de las interfaces ICE en C++ +include_directories(${LIBS_DIR}) # Aquí se alojan las cabeceras de las interfaces ICE en C++ + +project(depthLib) + +include_directories( 
${OpenCV_INCLUDE_DIRS}) + +ADD_LIBRARY (depthLib STATIC DepthFilter.cpp DepthFilter.h DepthSampler.cpp DepthSampler.h) +TARGET_LINK_LIBRARIES(depthLib ${Boost_LIBRARIES}) + +ADD_LIBRARY (depthLibshare SHARED DepthFilter.cpp DepthFilter.h DepthSampler.cpp DepthSampler.h) +TARGET_LINK_LIBRARIES(depthLibshare ${Boost_LIBRARIES} ${OpenCV_LIBRARIES} ${GLOG_LIBRARIES}) + + +## Export library variables (like find_package) +set(${PROJECT_NAME}_FOUND 1 CACHE BOOL "Find(${PROJECT_NAME})") +set(${PROJECT_NAME}_INCLUDE_DIRS "${CMAKE_CURRENT_SOURCE_DIR}" CACHE PATH "Find(${PROJECT_NAME})") +set(${PROJECT_NAME}_LIBRARY_DIRS "${CMAKE_CURRENT_BINARY_DIR}" CACHE PATH "Find(${PROJECT_NAME})") +set(${PROJECT_NAME}_LIBRARIES "${PROJECT_NAME}" CACHE STRINGS "Find(${PROJECT_NAME})") diff --git a/DetectionMetrics/libs/depthLib/DepthFilter.cpp b/DetectionMetrics/libs/depthLib/DepthFilter.cpp new file mode 100644 index 00000000..650a54e8 --- /dev/null +++ b/DetectionMetrics/libs/depthLib/DepthFilter.cpp @@ -0,0 +1,343 @@ +/* + * DepthFilter.cpp + * + * Created on: 04/01/2014 + * Author: frivas + */ + +#include +#include "DepthFilter.h" +#include + +namespace jderobot { + +DepthFilter::DepthFilter(){ + //default parameters + this->type=3; + this->buffer.resize(0); + this->buffSize=7; + this->erodeSize=0; + this->threshold=10; +} + +DepthFilter::DepthFilter(int type, int buffSize, int erodeSize, int threshold) { + this->type=type; + this->buffSize=buffSize; + this->buffer.resize(0); + this->erodeSize=erodeSize; + this->threshold=threshold; +} + +DepthFilter::~DepthFilter() { + // TODO Auto-generated destructor stub +} +//filtrado bilateral, elimina ruido y mantiene los bordes +void +DepthFilter::filterDakkak(cv::Mat imageIn, cv::Mat& imageOut){ + + int d=20; + double sigmaColor=21; + double sigmaSpace=3; + + std::vector layers; + cv::split(imageIn, layers); + cv::bilateralFilter(layers[0], imageOut, d,sigmaColor,sigmaSpace); + cv::cvtColor(imageOut,imageOut,cv::COLOR_GRAY2RGB); +} + +void 
+DepthFilter::filterMeanNonMotion3Channels(cv::Mat imageIn, cv::Mat& imageOut){ + cv::Mat localSource,meanNonZeroImage; + + cv::Mat zeros = cv::Mat(imageIn.rows,imageIn.cols, CV_8UC3, cv::Scalar(0,0,0)); + cv::Mat ones= cv::Mat(imageIn.rows,imageIn.cols, CV_8UC3, cv::Scalar(1,1,1)); + + imageIn.copyTo(localSource); + this->m.lock(); + /*for (int i=0; i< this->erodeSize;i++){ + cv::erode(localSource,localSource,cv::Mat()); + } + for (int i=0; i< this->erodeSize;i++){ + cv::dilate(localSource,localSource,cv::Mat()); + }*/ + + this->buffer.push_back(localSource); + while ((int)this->buffer.size()>this->buffSize) + this->buffer.pop_front(); + cv::Mat onesAcc; //acumulado de unos en al buffer + cv::Mat resultAcc; //resultado acumulado + + zeros.copyTo(imageOut); + zeros.copyTo(onesAcc); + zeros.copyTo(resultAcc); + + resultAcc.convertTo(resultAcc,CV_32FC3); + + for ( std::list::iterator it=this->buffer.begin(); it!= this->buffer.end();it++){ + cv::Mat localOnes; + + cv::Mat localItFloat; + it->convertTo(localItFloat,CV_32FC3); + resultAcc=resultAcc+localItFloat; + ones.copyTo(localOnes,*it); + onesAcc=onesAcc+localOnes; + } + onesAcc.convertTo(onesAcc,CV_32FC3); + resultAcc=resultAcc/onesAcc; + resultAcc.copyTo(meanNonZeroImage); + + + + zeros.copyTo(onesAcc); + zeros.copyTo(resultAcc); + resultAcc.convertTo(resultAcc,CV_32FC3); + cv::Mat accAndMask; + cv::Mat localDiff, localMask; + for ( std::list::iterator it=this->buffer.begin(); it!= this->buffer.end();it++){ + if (it==this->buffer.begin()) + it->copyTo(accAndMask); + + cv::Mat localOnes; + cv::Mat localItFloat; + it->convertTo(localItFloat,CV_32FC3); + cv::absdiff(localItFloat,meanNonZeroImage,localDiff); + localDiff.convertTo(localDiff,CV_8UC3); + cv::threshold(localDiff,localMask,this->threshold, 255, cv::THRESH_BINARY_INV); + resultAcc=resultAcc+localItFloat; + localMask.convertTo(localMask,CV_8UC3); + ones.copyTo(localOnes,localMask); + onesAcc=onesAcc+localOnes; + accAndMask=accAndMask & localMask; + } + 
onesAcc.convertTo(onesAcc,CV_32FC3); + resultAcc=resultAcc/onesAcc; + resultAcc.convertTo(resultAcc,CV_8UC3); + + resultAcc.copyTo(imageOut, accAndMask); + zeros.copyTo(ones, accAndMask); + + imageIn.copyTo(imageOut,ones); + + //copio la parte sin movimiento del promedio de imágenes y la parte de movimiento de la última captura + + + meanNonZeroImage.convertTo(this->globalMeanImage,CV_8UC3); + localDiff.convertTo(this->globalDiffImage,CV_8UC3); + + accAndMask.copyTo(this->globalFirstMask); + ones.copyTo(this->globalSecondMask); + this->m.unlock(); +} + +//filtra únicamente las zonas de la imagen donde no hay movimiento +void DepthFilter::filterMeanNonMotion1Channels(cv::Mat imageIn, cv::Mat& imageOut){ + cv::Mat localSource,meanNonZeroImage; + + cv::Mat zeros3c = cv::Mat(imageIn.rows,imageIn.cols, CV_8UC3, cv::Scalar(0,0,0)); + cv::Mat ones3c= cv::Mat(imageIn.rows,imageIn.cols, CV_8UC3, cv::Scalar(1,1,1)); + cv::Mat zeros1c = cv::Mat(imageIn.rows,imageIn.cols, CV_8UC1, cv::Scalar(0)); + cv::Mat ones1c= cv::Mat(imageIn.rows,imageIn.cols, CV_8UC1, cv::Scalar(1)); + + imageIn.copyTo(localSource); + + std::vector layers; + cv::split(localSource, layers); + + /*for (int i=0; i< this->erodeSize;i++){ + cv::erode(localSource,localSource,cv::Mat()); + } + for (int i=0; i< this->erodeSize;i++){ + cv::dilate(localSource,localSource,cv::Mat()); + }*/ + this->m.lock(); + this->buffer.push_back(localSource); + this->bufferGray.push_back(layers[0]); + while ((int)this->buffer.size()>this->buffSize) + this->buffer.pop_front(); + while ((int)this->bufferGray.size()>this->buffSize) + this->bufferGray.pop_front(); + cv::Mat onesAcc; //acumulado de unos en al buffer + cv::Mat resultAcc; //resultado acumulado + + zeros3c.copyTo(imageOut); + zeros3c.copyTo(onesAcc); + zeros3c.copyTo(resultAcc); + + resultAcc.convertTo(resultAcc,CV_32FC1); + + //calculo la imagen promedio para luego hacer la comparación contra todas las imágenes individuales + //y obtener las zonas comunes (sin 
movimiento y que podemos suavizar). + for ( std::list::iterator it=this->buffer.begin(); it!= this->buffer.end();it++){ + cv::Mat localOnes; + + cv::Mat localItFloat; + it->convertTo(localItFloat,CV_32FC3); + resultAcc=resultAcc+localItFloat; + ones3c.copyTo(localOnes,*it); + onesAcc=onesAcc+localOnes; + } + onesAcc.convertTo(onesAcc,CV_32FC3); + resultAcc=resultAcc/onesAcc; + resultAcc.copyTo(meanNonZeroImage); //imagen promedio sin promediar zonas que son iguales a cero + + meanNonZeroImage.convertTo(meanNonZeroImage,CV_8UC3); + + //imagen promedio en escala de grises para el cálculo de las máscaras: + std::vector layersMean; + cv::split(meanNonZeroImage, layersMean); + + + + zeros1c.copyTo(onesAcc); + zeros1c.copyTo(resultAcc); + resultAcc.convertTo(resultAcc,CV_32FC1); + cv::Mat accAndMask; + + ones1c.copyTo(accAndMask); + cv::Mat localDiff, localMask; + //comparamos todas las imagenes con el promedio y obtenemos zonas comunes + for ( std::list::iterator it=this->bufferGray.begin(); it!= this->bufferGray.end();it++){ + + cv::Mat localOnes; + cv::absdiff(*it,layersMean[0],localDiff); + localDiff.convertTo(localDiff,CV_8UC1); + cv::threshold(localDiff,localMask,this->threshold, 255, cv::THRESH_BINARY_INV); + localMask.convertTo(localMask,CV_8UC1); + ones1c.copyTo(localOnes,localMask); + accAndMask=accAndMask & localMask; + } + + onesAcc.convertTo(onesAcc,CV_32FC3); + zeros3c.copyTo(ones3c, accAndMask); + for (int i=0; i< this->erodeSize;i++){ + cv::erode(accAndMask,accAndMask,cv::Mat()); + //cv::erode(ones3c,ones3c,cv::Mat()); + + } + for (int i=0; i< this->erodeSize;i++){ + cv::dilate(accAndMask,accAndMask,cv::Mat()); + //cv::dilate(ones3c,ones3c,cv::Mat()); + + } + + meanNonZeroImage.copyTo(imageOut, accAndMask); + imageIn.copyTo(imageOut,ones3c); + + //copio la parte sin movimiento del promedio de imágenes y la parte de movimiento de la última captura + + + meanNonZeroImage.copyTo(this->globalMeanImage); + 
cv::cvtColor(localDiff,globalDiffImage,cv::COLOR_GRAY2RGB); + accAndMask=accAndMask*255; + cv::cvtColor(accAndMask,this->globalFirstMask,cv::COLOR_GRAY2RGB); + ones3c=ones3c*255; + ones3c.copyTo(this->globalSecondMask); + this->m.unlock(); + + + + +} + +//funcion que implementa el filtrado por media con valors no nulos +void +DepthFilter::filterMeanNonZero(cv::Mat imageIn, cv::Mat& imageOut){ + cv::Mat localSource,result; + + cv::Mat zeros = cv::Mat(imageIn.rows,imageIn.cols, CV_8UC3, cv::Scalar(0,0,0)); + cv::Mat ones= cv::Mat(imageIn.rows,imageIn.cols, CV_8UC3, cv::Scalar(1,1,1)); + + imageIn.copyTo(localSource); + + for (int i=0; i< this->erodeSize;i++){ + cv::erode(localSource,localSource,cv::Mat()); + } + for (int i=0; i< this->erodeSize;i++){ + cv::dilate(localSource,localSource,cv::Mat()); + } + //localSource.convertTo(localSource,CV_32FC3); + this->buffer.push_back(localSource); + while ((int)this->buffer.size()>this->buffSize) + this->buffer.pop_front(); + cv::Mat onesAcc; //acumulado de unos en al buffer + cv::Mat resultAcc; //resultado acumulado + + zeros.copyTo(onesAcc); + zeros.copyTo(resultAcc); + + resultAcc.convertTo(resultAcc,CV_32FC3); + + + for ( std::list::iterator it=this->buffer.begin(); it!= this->buffer.end();it++){ + cv::Mat localOnes; + + cv::Mat localItFloat; + it->convertTo(localItFloat,CV_32FC3); + + resultAcc=resultAcc+localItFloat; + std::vector layers; + //cv::split(*it, layers); + ones.copyTo(localOnes,*it); + onesAcc=onesAcc+localOnes; + } + onesAcc.convertTo(onesAcc,CV_32FC3); + resultAcc=resultAcc/onesAcc; + resultAcc.convertTo(resultAcc,CV_8UC3); + resultAcc.copyTo(imageOut); + +} + + +void DepthFilter::getMeanImage(cv::Mat& out){ + this->m.lock(); + this->globalMeanImage.copyTo(out); + this->m.unlock(); +} +void DepthFilter::getDiffImage(cv::Mat& out){ + this->m.lock(); + this->globalDiffImage.copyTo(out); + this->m.unlock(); +} +void DepthFilter::getFistMask(cv::Mat& out){ + this->m.lock(); + this->globalFirstMask.copyTo(out); 
+ this->m.unlock(); +} +void DepthFilter::getSecondMask(cv::Mat& out){ + this->m.lock(); + this->globalSecondMask.copyTo(out); + this->m.unlock(); +} + +void DepthFilter::clear(){ + this->m.lock(); + this->buffer.resize(0); + this->bufferGray.resize(0); + this->m.unlock(); + +}; + + +//función iterativa que realimenta el filtrado +void +DepthFilter::update(cv::Mat imageIn, cv::Mat& imageOut){ + switch(type){ + case 0: + filterMeanNonZero(imageIn,imageOut); + break; + case 1: + filterDakkak(imageIn,imageOut); + break; + case 2: + filterMeanNonMotion3Channels(imageIn, imageOut); + break; + case 3: + filterMeanNonMotion1Channels(imageIn, imageOut); + break; + default: + LOG(ERROR) << "Filter method: " << this->type << "not implemented" << std::endl; + break; + } +} + +} /* namespace jderobot */ diff --git a/DetectionMetrics/libs/depthLib/DepthFilter.h b/DetectionMetrics/libs/depthLib/DepthFilter.h new file mode 100644 index 00000000..705830e2 --- /dev/null +++ b/DetectionMetrics/libs/depthLib/DepthFilter.h @@ -0,0 +1,63 @@ +/* + * DepthFilter.h + * + * Created on: 04/01/2014 + * Author: frivas + */ + +#ifndef DEPTHFILTER_H_ +#define DEPTHFILTER_H_ + +#include +#include +#include +#include + +namespace jderobot { + +class DepthFilter { +public: + DepthFilter(int type, int buffSize, int erodeSize, int threshold); + DepthFilter(); + virtual ~DepthFilter(); + void update(cv::Mat imageIn, cv::Mat& imageOut); + int getBufferSize(){return this->buffSize;}; + void setBufferSize(int size){ this->buffSize=size;}; + int getErodeSize(){return this->erodeSize;}; + void setErodeSize(int size){ this->erodeSize=size;}; + int getThreshold(){return this->threshold;}; + void setThreshold(int value){this->threshold=value;}; + void setFilterType(int value){this->type=value;}; + int getFilterType(){return this->type;}; + void clear(); + void getMeanImage(cv::Mat& out); + void getDiffImage(cv::Mat& out); + void getFistMask(cv::Mat& out); + void getSecondMask(cv::Mat& out); + +private: + int 
type; //tipo de filtro a aplicar + int buffSize; //tamaño del buffer de imágenes + std::list buffer; //buffer de imágenes + std::list bufferGray; //buffer de imágenes en escala de grises + int erodeSize; //numero de erosiones a realizar en el filtrado + int threshold; //umbral para el filtrado + std::mutex m; //control de datos compartidos + + //callbacks + void filterMeanNonZero(cv::Mat imageIn, cv::Mat& imageOut); + void filterDakkak(cv::Mat imageIn, cv::Mat& imageOut); + void filterMeanNonMotion3Channels(cv::Mat imageIn, cv::Mat& imageOut); + void filterMeanNonMotion1Channels(cv::Mat imageIn, cv::Mat& imageOut); + + //tempImages + cv::Mat globalMeanImage; + cv::Mat globalDiffImage; + cv::Mat globalFirstMask; + cv::Mat globalSecondMask; + +}; + +} /* namespace jderobot */ + +#endif /* DEPTHFILTER_H_ */ diff --git a/DetectionMetrics/libs/depthLib/DepthSampler.cpp b/DetectionMetrics/libs/depthLib/DepthSampler.cpp new file mode 100644 index 00000000..5b0f858d --- /dev/null +++ b/DetectionMetrics/libs/depthLib/DepthSampler.cpp @@ -0,0 +1,175 @@ +/* + * DepthSampler.cpp + * + * Created on: 08/01/2014 + * Author: frivas + */ + +#include +#include "DepthSampler.h" +#include + +namespace jderobot { +DepthSampler::DepthSampler(int nBins, int maxDistance, int minInd, float step){ + this->nBins=nBins; + this->maxDistance=maxDistance; + this->minInd=minInd; + this->step=step; +} + +DepthSampler::DepthSampler() { + this->nBins=1; + this->maxDistance=10000; + this->minInd=3; + this->step=1; +} + +DepthSampler::~DepthSampler() { + // TODO Auto-generated destructor stub +} + +//devuelve en out, los nBins layers con la profundidad discretizada hasta maxDistance +void DepthSampler::calculateLayers(cv::Mat source, std::vector& layers){ + std::vector imgLayers; + cv::Mat localSource; + + layers.resize(0); + + source.convertTo(localSource,CV_32FC3); + cv::split(localSource, imgLayers); + cv::Mat dM(localSource.rows,localSource.cols, CV_32FC1); + dM=(imgLayers[1]*256) + imgLayers[2]; 
+ + cv::Mat unosc1(localSource.rows, localSource.cols, CV_8UC1, cv::Scalar(255)); + cv::Mat zerosc1(localSource.rows, localSource.cols, CV_8UC1, cv::Scalar(0)); + + for (int i=0; i layers, int samplingRate, cv::Mat &outSNormal, cv::Mat &outSLayers){ + cv::Mat imgNormalSample=cv::Mat(source.rows,source.cols, CV_8UC1, cv::Scalar(0)); + cv::Mat imgNlayerSample=cv::Mat(source.rows,source.cols, CV_8UC1, cv::Scalar(0));; + + std::vector normalSample; + int outlierNormal=0; + std::vector layerSample; + + int nLayers=layers.size(); + normalSample.resize(nLayers); + layerSample.resize(nLayers); + + cv::Mat localSource; + std::vector tempLayers; + source.convertTo(localSource,CV_32FC3); + cv::split(localSource, tempLayers); + cv::Mat dM(localSource.rows,localSource.cols, CV_32FC1); + dM=(tempLayers[1]*256) + tempLayers[2]; + + + for (int i=0; i< nLayers;i++){ + normalSample[i]=0; + layerSample[i]=0; + } + + + int start_s=clock(); + for (int xIm=0; xIm< source.cols; xIm+=samplingRate) { + for (int yIm=0; yIm(yIm,xIm); + if (d != 0){ + imgNormalSample.at(yIm,xIm)=(char)255; + double pos= d/maxDistance; + if (pos>1){ + outlierNormal++; + } + else{ + normalSample[floor(pos*nLayers)]=normalSample[floor(pos*nLayers)]++; + } + } + } + } + + int stop_s=clock(); + LOG(INFO) << "Time for normal sampling: " << (stop_s-start_s)/double(CLOCKS_PER_SEC)*1000 << std::endl; + LOG(INFO) << "Layer sampling Size : " << std::accumulate(normalSample.begin(), normalSample.end(), 0) << ", result:" << std::endl; + for (std::vector::iterator it= normalSample.begin(); it!= normalSample.end(); it++){ + LOG(INFO) << *it << std::endl; + } + start_s=clock(); + + float localStep=minInd+(step*nLayers); + for ( std::vector::iterator it= layers.begin(); it != layers.end(); it++){ + LOG(INFO) << "step: " << (int)localStep << std::endl; + for (int xIm=0; xIm< source.cols; xIm+=(int)localStep) { + for (int yIm=0; yImat(yIm,xIm) != 0){ + float d=dM.at(yIm,xIm); + + if (d != 0){ + 
imgNlayerSample.at(yIm,xIm)=(char)255; + double pos= d/maxDistance; + if (pos>1){ + outlierNormal++; + } + else{ + layerSample[floor(pos*nLayers)]=layerSample[floor(pos*nLayers)]++; + } + } + } + } + } + + localStep=localStep-step; + } + + stop_s=clock(); + LOG(INFO) << "Time for layers sampling: " << (stop_s-start_s)/double(CLOCKS_PER_SEC)*1000 << std::endl; + LOG(INFO) << "Layer sampling Size : " << std::accumulate(layerSample.begin(), layerSample.end(), 0) << ", result:" << std::endl; + for (std::vector::iterator it= layerSample.begin(); it!= layerSample.end(); it++){ + LOG(INFO) << *it << std::endl; + } + + /*cv::imshow("normal", imgNormalSample); + cv::imshow("layers", imgNlayerSample); + cv::waitKey(0);*/ + imgNormalSample.copyTo(outSNormal); + imgNlayerSample.copyTo(outSLayers); +} + +void DepthSampler::sample(cv::Mat source, std::vector layers, std::vector& out){ + + cv::Mat localSource; + std::vector tempLayers; + source.convertTo(localSource,CV_32FC3); + cv::split(localSource, tempLayers); + cv::Mat dM(localSource.rows,localSource.cols, CV_32FC1); + dM=(tempLayers[1]*256) + tempLayers[2]; + out.resize(0); + + int nLayers=layers.size(); + float localStep=minInd+(step*(double)nLayers); + for ( std::vector::iterator it= layers.begin(); it != layers.end(); it++){ + for (int xIm=0; xIm< source.cols; xIm+=(int)localStep) { + for (int yIm=0; yImat(yIm,xIm) != 0){ + float d=dM.at(yIm,xIm); + if ((d != 0)&&(d +#include +#include +#include +#include +#include + +namespace jderobot { + +class DepthSampler { +public: + DepthSampler(int nBins, int maxDistance, int minInd, float step); + DepthSampler(); + virtual ~DepthSampler(); + void calculateLayers(cv::Mat source, std::vector& layers); + void evalSample(cv::Mat source, std::vector layers, int samplingRate, cv::Mat &outSNormal, cv::Mat &outSLayers); + void sample(cv::Mat source, std::vector layers, std::vector& out); + + void setnBins(int value){this->nBins=value;}; + int getnBins(){return this->nBins;}; + void 
setMaxDistance(int value){this->maxDistance=value;}; + int getMaxDistance(){return this->maxDistance;}; + void setMinInd(int value){this->minInd=value;}; + int getMinInd(){return this->minInd;}; + void setStep(double value){this->step=value; LOG(INFO) << "SETTING STEP TO: " << value << std::endl;}; + double getStep(){return this->step;}; + + + +private: + int nBins, maxDistance, minInd; + double step; + +}; + +} /* namespace jderobot */ + +#endif /* DEPTHSAMPLER_H_ */ diff --git a/DetectionMetrics/libs/interfaces/CMakeLists.txt b/DetectionMetrics/libs/interfaces/CMakeLists.txt new file mode 100644 index 00000000..aa1e794b --- /dev/null +++ b/DetectionMetrics/libs/interfaces/CMakeLists.txt @@ -0,0 +1,86 @@ +if (ZeroCIce_FOUND) + project(JderobotInterfaces) + + set(SLICE_NEW_STYLE "ON") + if (NOT DEFINED SLICE_NEW_STYLE) + + include (${CMAKE_CURRENT_LIST_DIR}/slice/CMakeLists.txt) + + include_directories( + ${CMAKE_CURRENT_LIST_DIR}/cpp/jderobot + ${CMAKE_CURRENT_LIST_DIR}/cpp/ + ) + add_library (JderobotInterfaces SHARED ${SOURCE_FILES} ${ICE_FILES}) + TARGET_LINK_LIBRARIES(JderobotInterfaces ${ice2_LIBRARIES}) + + else() + + ### CMakeCache build control + + set(build_interfaces_cpp "ON" CACHE BOOL "Build Ice interfaces for CPP") + + + message(STATUS "Building ICE Interfaces for:") + if (build_interfaces_cpp) + message(STATUS "\tCPP") + endif() + + set(SLICE_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/slice) + set(SLICE_CPP_DIR ${CMAKE_CURRENT_BINARY_DIR}/cpp) + + FILE(GLOB_RECURSE SLICE_FILES + RELATIVE ${SLICE_DIRECTORY} + "*.ice" + ) + + ### Trick to publich .ice files into CMake's sources + file(GLOB_RECURSE SLICE_DEFINITIONS "slice/**.ice") + file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/dummy.c "") + add_library(JderobotInterfaces_Definitions ${CMAKE_CURRENT_BINARY_DIR}/dummy.c ${SLICE_DEFINITIONS}) + + + foreach(slice_rel_path ${SLICE_FILES}) + get_filename_component(slice_name ${slice_rel_path} NAME_WE) + get_filename_component(slice_file_name ${slice_rel_path} NAME) + 
get_filename_component(slice_rel_dir ${slice_rel_path} PATH) # PATH for cmake <= 2.8.11 + set(slice_abs_path ${SLICE_DIRECTORY}/${slice_rel_path}) # get_filename_component() relative to absolute relies in cmake_current_source_dir. Not valid fot his setup + + + ### CPP + if (build_interfaces_cpp) + set(target_directory ${SLICE_CPP_DIR}/${slice_rel_dir}) + file(MAKE_DIRECTORY ${target_directory}) + execute_process( + WORKING_DIRECTORY ${target_directory} + COMMAND slice2cpp ${slice_abs_path} -I${SLICE_DIRECTORY} --output-dir ${target_directory} --include-dir ${slice_rel_dir} + INPUT_FILE ${slice_abs_path} + ) + #LIST(APPEND SLICE_CPP_GENERATED ${target_directory}/${slice_name}.h) # fetching .h files relies on include_directories() + LIST(APPEND SLICE_CPP_GENERATED ${target_directory}/${slice_name}.cpp) + endif() + + endforeach() + + + unset(slice_abs_path) + unset(slice_rel_dir) + unset(slice_file_name) + unset(slice_name) + unset(target_directory) + unset(SLICE_DIRECTORY) + + + ### CPP + if (build_interfaces_cpp) + include_directories(${SLICE_CPP_DIR}) + add_library (JderobotInterfaces SHARED ${SLICE_CPP_GENERATED}) + target_link_libraries(JderobotInterfaces ${ice2_LIBRARIES}) + endif() + + set(ice_interfaces_INCLUDE_DIRS "${SLICE_CPP_DIR}" CACHE PATH "Find(ice_interfaces)") + + endif(NOT DEFINED SLICE_NEW_STYLE) + + set(${PROJECT_NAME}_LIBRARIES "${PROJECT_NAME}" CACHE STRINGS "Find(${PROJECT_NAME})") + +endif() diff --git a/DetectionMetrics/libs/interfaces/slice/CMakeLists.txt b/DetectionMetrics/libs/interfaces/slice/CMakeLists.txt new file mode 100644 index 00000000..0a6d52d8 --- /dev/null +++ b/DetectionMetrics/libs/interfaces/slice/CMakeLists.txt @@ -0,0 +1,4 @@ +include(${CMAKE_CURRENT_LIST_DIR}/jderobot/CMakeLists.txt) + + + diff --git a/DetectionMetrics/libs/interfaces/slice/jderobot/CMakeLists.txt b/DetectionMetrics/libs/interfaces/slice/jderobot/CMakeLists.txt new file mode 100644 index 00000000..74010b95 --- /dev/null +++ 
b/DetectionMetrics/libs/interfaces/slice/jderobot/CMakeLists.txt @@ -0,0 +1,36 @@ + +# Check the ice binaries + +unset(ICE_FILES CACHE) +FILE(GLOB ICE_FILES "slice/jderobot/*.ice") + +INCLUDE_DIRECTORIES( + ${CMAKE_CURRENT_SOURCE_DIR}/.. + ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/ + ${CMAKE_CURRENT_SOURCE_DIR}/../../cpp/jderobot/ + ${ice_path_storm} + ) + + + +FOREACH(currentSourceFile ${ICE_FILES}) + + + string(REGEX REPLACE ".*/(.*)" "\\1" new_source ${currentSourceFile}) + string(REGEX REPLACE ".*/(.*).ice" "\\1.cpp" new_source1 ${currentSourceFile}) + string(REGEX REPLACE ".*/(.*).ice" "\\1.h" new_source2 ${currentSourceFile}) + string(REGEX REPLACE ".*/(.*).ice" "\\1" new_source_name ${currentSourceFile}) + LIST(APPEND SOURCE_FILES cpp/jderobot/${new_source1}) + LIST(APPEND SOURCE_FILES cpp/jderobot/${new_source2}) + + #MESSAGE(STATUS, "source1= ${new_source1} source2= ${new_source2} source=${new_source}") + # C++ + execute_process(COMMAND slice2cpp -I${CMAKE_CURRENT_LIST_DIR}/.. -I${CMAKE_CURRENT_LIST_DIR} ${CMAKE_CURRENT_LIST_DIR}/${new_source} + WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/../../cpp/jderobot/ + ) + + message(COMMAND slice2cpp -I${CMAKE_CURRENT_LIST_DIR}/.. -I${CMAKE_CURRENT_LIST_DIR} ${CMAKE_CURRENT_LIST_DIR}/${new_source} + WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/../../cpp/jderobot/ + ) + +ENDFOREACH(currentSourceFile) diff --git a/DetectionMetrics/libs/interfaces/slice/jderobot/camera.ice b/DetectionMetrics/libs/interfaces/slice/jderobot/camera.ice new file mode 100644 index 00000000..dc0b52fe --- /dev/null +++ b/DetectionMetrics/libs/interfaces/slice/jderobot/camera.ice @@ -0,0 +1,70 @@ +/* + * + * Copyright (C) 1997-2010 JDE Developers Team + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + * + * Author : David Lobato Bravo + * + */ + + +#ifndef CAMERA_ICE +#define CAMERA_ICE + + +#include + +module jderobot{ + /** + * Static description of a camera + */ + class CameraDescription + { + string name; + string shortDescription; + string streamingUri; + float fdistx; + float fdisty; + float u0; + float v0; + float skew; + float posx; + float posy; + float posz; + float foax; + float foay; + float foaz; + float roll; + }; + + /** + * Camera interface + */ + interface Camera extends ImageProvider + { + idempotent CameraDescription getCameraDescription(); + int setCameraDescription(CameraDescription description); + + string startCameraStreaming(); + + void stopCameraStreaming(); + + void reset(); + + }; + +}; /*module*/ + +#endif /*CAMERA_ICE*/ diff --git a/DetectionMetrics/libs/interfaces/slice/jderobot/common.ice b/DetectionMetrics/libs/interfaces/slice/jderobot/common.ice new file mode 100644 index 00000000..30da108f --- /dev/null +++ b/DetectionMetrics/libs/interfaces/slice/jderobot/common.ice @@ -0,0 +1,11 @@ +#ifndef COMMON_ICE +#define COMMON_ICE + +#include +#include +#include + +module jderobot{ +}; /*module*/ + +#endif /*COMMON_ICE*/ diff --git a/DetectionMetrics/libs/interfaces/slice/jderobot/containers.ice b/DetectionMetrics/libs/interfaces/slice/jderobot/containers.ice new file mode 100644 index 00000000..9a4d30a3 --- /dev/null +++ b/DetectionMetrics/libs/interfaces/slice/jderobot/containers.ice @@ -0,0 +1,17 @@ + +#ifndef CONTAINERS_ICE +#define CONTAINERS_ICE + +module jderobot{ + + //! A sequence of bytes. + sequence ByteSeq; + + //! 
A sequence of ints. + sequence IntSeq; + + //! A sequence of floats + sequence seqFloat; +}; /*module*/ + +#endif /*CONTAINERS_ICE*/ diff --git a/DetectionMetrics/libs/interfaces/slice/jderobot/datetime.ice b/DetectionMetrics/libs/interfaces/slice/jderobot/datetime.ice new file mode 100644 index 00000000..830b5a8d --- /dev/null +++ b/DetectionMetrics/libs/interfaces/slice/jderobot/datetime.ice @@ -0,0 +1,15 @@ +#ifndef DATETIME_ICE +#define DATETIME_ICE + +module jderobot{ + + struct Time + { + //! Number of seconds + long seconds; + //! Number of microseconds + long useconds; + }; +}; /*module*/ + +#endif /*DATETIME_ICE*/ diff --git a/DetectionMetrics/libs/interfaces/slice/jderobot/exceptions.ice b/DetectionMetrics/libs/interfaces/slice/jderobot/exceptions.ice new file mode 100644 index 00000000..3ed988a1 --- /dev/null +++ b/DetectionMetrics/libs/interfaces/slice/jderobot/exceptions.ice @@ -0,0 +1,36 @@ +#ifndef EXCEPTIONS_ICE +#define EXCEPTIONS_ICE + +module jderobot{ + + exception JderobotException + { + //! Error description. + string what; + }; + + //! Server failed to configure itself as requrested by client. + exception ConfigurationNotExistException extends JderobotException {}; + + /*! + Raised when the server does not have the requested data. + + Typically, this is because the server has not fully initialized yet. + */ + exception DataNotExistException extends JderobotException {}; + + //! Indicates a problem with robot hardware, e.g. sensors and actuators. + exception HardwareFailedException extends JderobotException {}; + + //! Raised when the server is unable to return a topic for subscription. + exception NoTopicException extends JderobotException {}; + + //! Raised when the server fails to subscribe client for periodic updates. + exception SubscriptionFailedException extends JderobotException {}; + + //! Raised when the server fails to push initial data to a new subscriber. 
+ exception SubscriptionPushFailedException extends JderobotException {}; + +}; /*module*/ + +#endif /*EXCEPTIONS_ICE*/ diff --git a/DetectionMetrics/libs/interfaces/slice/jderobot/image.ice b/DetectionMetrics/libs/interfaces/slice/jderobot/image.ice new file mode 100644 index 00000000..4fbea7fc --- /dev/null +++ b/DetectionMetrics/libs/interfaces/slice/jderobot/image.ice @@ -0,0 +1,87 @@ +/* + * + * Copyright (C) 1997-2015 JDE Developers Team + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + * + * Author: Roberto Calvo + * David Lobato Bravo + * Sara Marugán Alonso + * + */ + +#ifndef IMAGE_ICE +#define IMAGE_ICE + +#include + + +module jderobot{ + + + /** + * Static description of the image source. + */ + class ImageDescription + { + int width; /**< %Image width [pixels] */ + int height; /**< %Image height [pixels] */ + int size; /**< %Image size [bytes] */ + string format; /**< %Image format string */ + string md5sum; + }; + + + /** + * A single image served as a sequence of bytes + */ + class ImageData + { + Time timeStamp; /**< TimeStamp of Data */ + ImageDescription description; /**< ImageDescription of Data, for convienence purposes */ + ByteSeq pixelData; /**< The image data itself. The structure of this byte sequence depends on the image format and compression. */ + }; + + + //! Interface to the image consumer. + interface ImageConsumer + { + //! 
Transmits the data to the consumer. + void report( ImageData obj ); + }; + + sequence ImageFormat; + + /** + * Interface to the image provider. + */ + interface ImageProvider + { + /** + * Returns the image source description. + */ + idempotent ImageDescription getImageDescription(); + + idempotent ImageFormat getImageFormat(); + + /** + * Returns the latest data. + */ + ["amd"] idempotent ImageData getImageData(string format) + throws DataNotExistException, HardwareFailedException; + }; + +}; //module + +#endif //IMAGE_ICE diff --git a/DetectionMetrics/libs/types/CMakeLists.txt b/DetectionMetrics/libs/types/CMakeLists.txt new file mode 100644 index 00000000..618888d6 --- /dev/null +++ b/DetectionMetrics/libs/types/CMakeLists.txt @@ -0,0 +1,20 @@ +cmake_minimum_required(VERSION 2.8) +project(jderobottypes) + + +### Project config +include_directories( + include +) + +set(HEADERS + include/jderobottypes/image.h + include/jderobottypes/rgbd.h +) + + +## Export library variables (like find_package) +set(${PROJECT_NAME}_FOUND 1 CACHE BOOL "Find(${PROJECT_NAME})") +set(${PROJECT_NAME}_INCLUDE_DIRS "${CMAKE_CURRENT_SOURCE_DIR}/include" CACHE PATH "Find(${PROJECT_NAME})") +set(${PROJECT_NAME}_LIBRARY_DIRS "${CMAKE_CURRENT_BINARY_DIR}" CACHE PATH "Find(${PROJECT_NAME})") +set(${PROJECT_NAME}_LIBRARIES "${PROJECT_NAME}" CACHE STRINGS "Find(${PROJECT_NAME})") diff --git a/DetectionMetrics/libs/types/include/jderobottypes/image.h b/DetectionMetrics/libs/types/include/jderobottypes/image.h new file mode 100644 index 00000000..4138ae6b --- /dev/null +++ b/DetectionMetrics/libs/types/include/jderobottypes/image.h @@ -0,0 +1,40 @@ +/* + * Copyright (C) 1997-2016 JDE JdeRobot Developers Team + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + * Authors : + * Aitor Martinez Fernandez + */ + +#ifndef JDEROBOTTYPES_IMAGE_H +#define JDEROBOTTYPES_IMAGE_H + +#include + +namespace JdeRobotTypes { + + class Image { + public: + + int height = 0; /**< %Image height [pixels] */ + int width = 0; /**< %Image width [pixels] */ + double timeStamp = 0; /**< %Time stamp [s] */ + std::string format = ""; /**< %Image format string (RGB8, BGR,...) */ + cv::Mat data = cv::Mat::zeros(3,3, CV_8UC3); /**< The image data itself */ + }; + + +} //NS JdeRobotTypes + +#endif // JDEROBOTTYPES_IMAGE_H diff --git a/DetectionMetrics/libs/types/include/jderobottypes/rgbd.h b/DetectionMetrics/libs/types/include/jderobottypes/rgbd.h new file mode 100644 index 00000000..1ac8dae6 --- /dev/null +++ b/DetectionMetrics/libs/types/include/jderobottypes/rgbd.h @@ -0,0 +1,38 @@ +/* + * Copyright (C) 1997-2017 JDE JdeRobot Developers Team + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ * Authors : + * Aitor Martinez Fernandez + */ + +#ifndef JDEROBOTTYPES_RGBD_H +#define JDEROBOTTYPES_RGBD_H + +#include + +namespace JdeRobotTypes { + + class Rgbd { + public: + + Image color; /**< %color image */ + Image depth; /**< %depth image */ + double timeStamp = 0; /**< %Time stamp [s] */ + }; + + +} //NS JdeRobotTypes + +#endif // JDEROBOTTYPES_RGBD_H diff --git a/DetectionMetrics/libs/utils/CMakeLists.txt b/DetectionMetrics/libs/utils/CMakeLists.txt new file mode 100644 index 00000000..5de149f0 --- /dev/null +++ b/DetectionMetrics/libs/utils/CMakeLists.txt @@ -0,0 +1,36 @@ +include_directories(${SLICE_DIR}) +include_directories(${INTERFACES_CPP_DIR}) +include_directories(${CMAKE_CURRENT_LIST_DIR}) + +project(utils) + +if (ZeroCIce_FOUND) +SET(UTILS_SOURCES + CameraUtils + ) +endif() + +set(${PROJECT_NAME}_INCLUDE_DIRS "${CMAKE_CURRENT_SOURCE_DIR}" "${CMAKE_CURRENT_SOURCE_DIR}/colorspaces" CACHE PATH "Find(${PROJECT_NAME})") + +ADD_LIBRARY (colorspaces STATIC colorspaces/rgb2hsv.c colorspaces/rgb2yuv.c colorspaces/colorspaces.h colorspaces/colorspacesmm.h) + +TARGET_LINK_LIBRARIES(colorspaces ${OpenCV_LIBRARIES}) + +ADD_LIBRARY (colorspacesshare SHARED colorspaces/rgb2hsv.c colorspaces/rgb2yuv.c colorspaces/colorspaces.h colorspaces/colorspacesmm.h) + +TARGET_LINK_LIBRARIES(colorspacesshare ${OpenCV_LIBRARIES}) + +ADD_LIBRARY (colorspacesmm SHARED colorspaces/imagecv.cpp colorspaces/imagecv.h colorspaces/uncopyable.h) + +TARGET_LINK_LIBRARIES(colorspacesmm ${OpenCV_LIBRARIES} ${OpenCVGUI_LIBRARIES}) +if (ZeroCIce_FOUND) + ADD_LIBRARY (DetectionMetricsUtils ${UTILS_SOURCES}) + + TARGET_LINK_LIBRARIES(DetectionMetricsUtils colorspaces colorspacesmm colorspacesshare) + +else() + ADD_LIBRARY (DetectionMetricsUtils INTERFACE) + + TARGET_LINK_LIBRARIES(DetectionMetricsUtils INTERFACE colorspaces colorspacesmm colorspacesshare) + +endif() diff --git a/DetectionMetrics/libs/utils/CameraUtils.cpp b/DetectionMetrics/libs/utils/CameraUtils.cpp new file mode 100644 
index 00000000..4139f4e5 --- /dev/null +++ b/DetectionMetrics/libs/utils/CameraUtils.cpp @@ -0,0 +1,195 @@ +// +// Created by frivas on 4/04/17. +// + +#include +#include +#include +#include "CameraUtils.h" +#include + +cv::Mat CameraUtils::getImageFromCameraProxy(jderobot::ImageDataPtr dataPtr) { + cv::Mat outImage; + colorspaces::Image::FormatPtr fmt; + + fmt = colorspaces::Image::Format::searchFormat(dataPtr->description->format); + if (!fmt) + throw "Format not supported"; + + if (dataPtr->description->format == colorspaces::ImageRGB8::FORMAT_RGB8_Z.get()->name || + dataPtr->description->format == colorspaces::ImageRGB8::FORMAT_DEPTH8_16_Z.get()->name ) + { + + size_t dest_len = dataPtr->description->width*dataPtr->description->height*3; + size_t source_len = dataPtr->pixelData.size(); + + unsigned char* origin_buf = (uchar*) malloc(dest_len); + + int r = uncompress((Bytef *) origin_buf, (uLongf *) &dest_len, (const Bytef *) &(dataPtr->pixelData[0]), (uLong)source_len); + + if(r != Z_OK) { + fprintf(stderr, "[CMPR] Error:\n"); + switch(r) { + case Z_MEM_ERROR: + fprintf(stderr, "[CMPR] Error: Not enough memory to compress.\n"); + break; + case Z_BUF_ERROR: + fprintf(stderr, "[CMPR] Error: Target buffer too small.\n"); + break; + case Z_STREAM_ERROR: // Invalid compression level + fprintf(stderr, "[CMPR] Error: Invalid compression level.\n"); + break; + } + } + else + { + colorspaces::Image imageRGB(dataPtr->description->width,dataPtr->description->height,colorspaces::ImageRGB8::FORMAT_RGB8,&(origin_buf[0])); + colorspaces::ImageRGB8 img_rgb888(imageRGB);//conversion will happen if needed + cv::Mat(cv::Size(img_rgb888.width,img_rgb888.height), CV_8UC3, img_rgb888.data).copyTo(outImage); + img_rgb888.release(); + } + + + if (origin_buf) + free(origin_buf); + + } + else if (dataPtr->description->format == colorspaces::ImageRGB8::FORMAT_RGB8.get()->name || + dataPtr->description->format == colorspaces::ImageRGB8::FORMAT_DEPTH8_16.get()->name ) + { + 
colorspaces::Image imageRGB(dataPtr->description->width,dataPtr->description->height,colorspaces::ImageRGB8::FORMAT_RGB8,&(dataPtr->pixelData[0])); + colorspaces::ImageRGB8 img_rgb888(imageRGB);//conversion will happen if needed + cv::Mat(cv::Size(img_rgb888.width,img_rgb888.height), CV_8UC3, img_rgb888.data).copyTo(outImage); + img_rgb888.release(); + } + else if (dataPtr->description->format == colorspaces::ImageGRAY8::FORMAT_GRAY8_Z.get()->name) { + //gay compressed + size_t dest_len = dataPtr->description->width*dataPtr->description->height; + size_t source_len = dataPtr->pixelData.size(); + + unsigned char* origin_buf = (uchar*) malloc(dest_len); + + int r = uncompress((Bytef *) origin_buf, (uLongf *) &dest_len, (const Bytef *) &(dataPtr->pixelData[0]), (uLong)source_len); + + if(r != Z_OK) { + fprintf(stderr, "[CMPR] Error:\n"); + switch(r) { + case Z_MEM_ERROR: + fprintf(stderr, "[CMPR] Error: Not enough memory to compress.\n"); + break; + case Z_BUF_ERROR: + fprintf(stderr, "[CMPR] Error: Target buffer too small.\n"); + break; + case Z_STREAM_ERROR: // Invalid compression level + fprintf(stderr, "[CMPR] Error: Invalid compression level.\n"); + break; + } + } + else + { + colorspaces::Image imageGray(dataPtr->description->width,dataPtr->description->height,colorspaces::ImageGRAY8::FORMAT_GRAY8,&(origin_buf[0])); + colorspaces::ImageGRAY8 img_gray8(imageGray);//conversion will happen if needed + + cv::Mat(cv::Size(img_gray8.width,img_gray8.height), CV_8UC1, img_gray8.data).copyTo(outImage); + img_gray8.release(); + } + + + if (origin_buf) + free(origin_buf); + } + else if (dataPtr->description->format == colorspaces::ImageGRAY8::FORMAT_GRAY8.get()->name){ + colorspaces::Image imageGray(dataPtr->description->width,dataPtr->description->height,colorspaces::ImageGRAY8::FORMAT_GRAY8,&(dataPtr->pixelData[0])); + colorspaces::ImageGRAY8 img_gray8(imageGray);//conversion will happen if needed + cv::Mat(cv::Size(img_gray8.width,img_gray8.height), CV_8UC1, 
img_gray8.data).copyTo(outImage); + img_gray8.release(); + } + else{ + LOG(ERROR) << "Unkown image format"; + } + + return outImage; +} + +std::string CameraUtils::negotiateDefaultFormat(jderobot::CameraPrx prx, const std::string& definedFormat) { + + std::string format; + // Discover what format are supported. + jderobot::ImageFormat formats = prx->getImageFormat(); + + std::vector::iterator it; + it = std::find(formats.begin(), formats.end(), definedFormat); + if (it==formats.end()){ + it = std::find(formats.begin(), formats.end(), colorspaces::ImageRGB8::FORMAT_RGB8.get()->name); + + if (it != formats.end()) + { + format = colorspaces::ImageRGB8::FORMAT_RGB8.get()->name; + it = std::find(formats.begin(), formats.end(), colorspaces::ImageRGB8::FORMAT_RGB8_Z.get()->name); + if (it != formats.end()) + format = colorspaces::ImageRGB8::FORMAT_RGB8_Z.get()->name; + } + else + { + it = std::find(formats.begin(), formats.end(), colorspaces::ImageRGB8::FORMAT_DEPTH8_16.get()->name); + if (it != formats.end()) + { + format = colorspaces::ImageRGB8::FORMAT_DEPTH8_16.get()->name; + it = std::find(formats.begin(), formats.end(), colorspaces::ImageRGB8::FORMAT_DEPTH8_16_Z.get()->name); + if (it != formats.end()) + format = colorspaces::ImageRGB8::FORMAT_DEPTH8_16_Z.get()->name; + } + else{ + format = colorspaces::ImageGRAY8::FORMAT_GRAY8.get()->name; + it = std::find(formats.begin(), formats.end(), colorspaces::ImageGRAY8::FORMAT_GRAY8_Z.get()->name); + if (it != formats.end()) + format = colorspaces::ImageGRAY8::FORMAT_GRAY8_Z.get()->name; + } + } + } + else{ + format = definedFormat; + } + LOG(INFO) << "Negotiated format " + format + " for camera " + prx->getCameraDescription()->name; + return format; +} + +bool CameraUtils::compressImage(const cv::Mat &image, unsigned char **compressed_data,unsigned long& compress_len) { + unsigned long source_len = image.rows*image.cols*3; + compress_len = compressBound(source_len); + *compressed_data = (unsigned char *) 
malloc(compress_len); + + int r = compress((Bytef *) (*compressed_data), (uLongf *) &compress_len, (const Bytef *) &(image.data[0]), (uLong)source_len ); + + if(r != Z_OK) { + LOG(WARNING) << "Compression Error"; + switch(r) { + case Z_MEM_ERROR: + LOG(ERROR) << "Compression Error: Not enough memory to compress"; + break; + case Z_BUF_ERROR: + LOG(ERROR) << "Compression Error: Target buffer too small."; + break; + case Z_STREAM_ERROR: + LOG(ERROR) << "Compression Error: Invalid compression level."; + break; + } + return false; + } + return true; +} + +jderobot::ImageDataPtr CameraUtils::convert(const cv::Mat &image) { + + jderobot::ImageDataPtr reply=jderobot::ImageDataPtr(new jderobot::ImageData()); + reply->description = jderobot::ImageDescriptionPtr(new jderobot::ImageDescription()); + IceUtil::Time t = IceUtil::Time::now(); + reply->timeStamp.seconds = (long)t.toSeconds(); + reply->timeStamp.useconds = (long)t.toMicroSeconds() - reply->timeStamp.seconds*1000000; + reply->description->format = colorspaces::ImageRGB8::FORMAT_RGB8.get()->name; + reply->description->width=image.size().width; + reply->description->height=image.size().height; + reply->pixelData.resize(image.rows*image.cols * image.channels()); + memcpy(&(reply->pixelData[0]),(unsigned char *) image.data, image.rows*image.cols * image.channels()); + return reply; +} diff --git a/DetectionMetrics/libs/utils/CameraUtils.h b/DetectionMetrics/libs/utils/CameraUtils.h new file mode 100644 index 00000000..8644b14c --- /dev/null +++ b/DetectionMetrics/libs/utils/CameraUtils.h @@ -0,0 +1,22 @@ +// +// Created by frivas on 4/04/17. 
+// + +#ifndef JDEROBOT_CAMERAUTILS_H +#define JDEROBOT_CAMERAUTILS_H + + +#include +#include +#include + +class CameraUtils { +public: + static cv::Mat getImageFromCameraProxy(jderobot::ImageDataPtr dataPtr); + static std::string negotiateDefaultFormat(jderobot::CameraPrx prx,const std::string& definedFormat ); + static bool compressImage(const cv::Mat& image, unsigned char** compressed_data,unsigned long& compress_len); + static jderobot::ImageDataPtr convert(const cv::Mat& image); +}; + + +#endif //JDEROBOT_CAMERAUTILS_H diff --git a/DetectionMetrics/libs/utils/colorspaces/colorspaces.h b/DetectionMetrics/libs/utils/colorspaces/colorspaces.h new file mode 100644 index 00000000..282aad8a --- /dev/null +++ b/DetectionMetrics/libs/utils/colorspaces/colorspaces.h @@ -0,0 +1,110 @@ +/* + * + * Copyright (C) 1997-2009 JDERobot Developers Team + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ * + * Authors : Roberto Calvo Palomino + * José María Cañas + * Pablo Miangolarra Tejada + * David Lobato Bravo + * + */ + +#ifndef _COLOR_SPACES_H +#define _COLOR_SPACES_H + +#define NAME "colorspaces" +#define COLORSPACES_VERSION "1.4.0" + + +#ifdef __cplusplus +extern "C" { +#endif + + /// *** RGB to HSI *** /// + + struct HSV + { + double H; + double S; + double V; + }; + + extern struct HSV * LUT_RGB2HSV [64][64][64]; + + extern int isInitTableHSV; + + /// \brief Init the RGB2HSV + void RGB2HSV_init(); + + /// \brief Create a translate RGB2HSV table with resolution of 6bits (64x64x64) + void RGB2HSV_createTable(); + + /// \brief Free de memory of RGB2HSV + void RGB2HSV_destroyTable(); + + /// \brief Print the struct HSV + void RGB2HSV_printHSI (struct HSV*); + + /// \brief Test + void RGB2HSV_test(); + + /// \brief Returns the translation from RGB to HSV + static inline const struct HSV* RGB2HSV_getHSV (int R, int G, int B) { return LUT_RGB2HSV[R>>2][G>>2][B>>2]; } + + /// \brief Returns the translation from HSV to RGB + void hsv2rgb(double H, double S, double V, double *r, double *g, double *b); + + + /// *** RGB to YUV *** /// + + struct YUV + { + double Y; + double U; + double V; + }; + + extern struct YUV * LUT_RGB2YUV [64][64][64]; + + extern int isInitTableYUV; + + /// \brief Init the RGB2YUV + void RGB2YUV_init(); + + /// \brief Create a generic translate RGB2YUV table with resolution of 6bits (64x64x64) + void RGB2YUV_createTable(); + + /// \brief Free de memory of RGB2YUV + void RGB2YUV_destroyTable(); + + /// \brief Print the struct YUV + void RGB2YUV_printYUV (struct YUV*); + + /// \brief Test + void RGB2YUV_test(); + + /// \brief Returns the translation from RGB to YUV + static inline const struct YUV* RGB2YUV_getYUV (int R, int G, int B) { return LUT_RGB2YUV[R>>2][G>>2][B>>2]; } + + /// \brief Returns the translation from YUV to RGB + void yuv2rgb(double Y, double U, double V, double *r, double *g, double *b); + + +#ifdef __cplusplus +} /*extern 
"C"*/ +#endif + +#endif /*_COLOR_SPACES_H*/ diff --git a/DetectionMetrics/libs/utils/colorspaces/colorspacesmm.h b/DetectionMetrics/libs/utils/colorspaces/colorspacesmm.h new file mode 100644 index 00000000..a1b7d79e --- /dev/null +++ b/DetectionMetrics/libs/utils/colorspaces/colorspacesmm.h @@ -0,0 +1,27 @@ +/* + * + * Copyright (C) 1997-2009 JDERobot Developers Team + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ * + * Authors : David Lobato Bravo + * + */ + +#ifndef COLORSPACESPP_COLORSPACES_H +#define COLORSPACESPP_COLORSPACES_H + +#include "imagecv.h" + +#endif //COLORSPACESPP_COLORSPACES_H diff --git a/DetectionMetrics/libs/utils/colorspaces/imagecv.cpp b/DetectionMetrics/libs/utils/colorspaces/imagecv.cpp new file mode 100644 index 00000000..25754b40 --- /dev/null +++ b/DetectionMetrics/libs/utils/colorspaces/imagecv.cpp @@ -0,0 +1,486 @@ +#include "imagecv.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace colorspaces { + Image::Format::Format(const std::string name, const int id, const int cvType, imageCtor ctor, imageCvt cvt) + : name(name), id(id), cvType(cvType),ctor(ctor),cvt(cvt) {} + + std::vector& Image::Format::formatTable(){ + static std::vector formatTable; + return formatTable; + } + + const Image::FormatPtr Image::Format::searchFormat(const std::string name){ + std::vector::iterator it; + + for (it = formatTable().begin(); it != formatTable().end(); it++){ + if (name.compare((*it)->name) == 0) + return *it; + } + return FormatPtr(); + } + + const Image::FormatPtr Image::Format::createFormat(const std::string name, const int cvType, imageCtor ctor, imageCvt cvt){ + int id = formatTable().size(); + FormatPtr nFmt(new Format(name,id,cvType,ctor,cvt)); + formatTable().push_back(nFmt); + return nFmt; + } + + Image* Image::Format::createInstance(const int width, const int height, void *const data){ + if (ctor) + return ctor(width,height,data); + return 0; + } + + Image& Image::convert(Image& dst) const throw(NoConversion){ + //std::cerr << "colorspaces: imagecv: convert: " << *_format << "->" << *dst._format << std::endl; + return _format->cvt(*this,dst); + } + + Image Image::clone() const{ + Image copy(cv::Mat::clone(),_format); + return copy; + } + + + //static definitions + const Image::FormatPtr Image::FORMAT_NONE = Image::Format::createFormat("NONE",0,0,0); + const Image::FormatPtr 
ImageRGB8::FORMAT_RGB8 = Image::Format::createFormat("RGB8",CV_8UC3,&ImageRGB8::createInstance,&ImageRGB8::imageCvt); + const Image::FormatPtr ImageRGB8::FORMAT_RGB8_Z = Image::Format::createFormat("RGB8_Z",CV_8UC3,&ImageRGB8::createInstance,&ImageRGB8::imageCvt); + const Image::FormatPtr ImageRGB8::FORMAT_DEPTH8_16 = Image::Format::createFormat("DEPTH8_16",CV_8UC3,&ImageRGB8::createInstance,&ImageRGB8::imageCvt); + const Image::FormatPtr ImageRGB8::FORMAT_DEPTH8_16_Z = Image::Format::createFormat("DEPTH8_16_Z",CV_8UC3,&ImageRGB8::createInstance,&ImageRGB8::imageCvt); + + const Image::FormatPtr ImageYUY2::FORMAT_YUY2 = Image::Format::createFormat("YUY2",CV_8UC2,&ImageYUY2::createInstance,&ImageYUY2::imageCvt); + const Image::FormatPtr ImageGRAY8::FORMAT_GRAY8 = Image::Format::createFormat("GRAY8",CV_8UC1,&ImageGRAY8::createInstance,&ImageGRAY8::imageCvt); + const Image::FormatPtr ImageGRAY8::FORMAT_GRAY8_Z = Image::Format::createFormat("GRAY8_Z",CV_8UC1,&ImageGRAY8::createInstance,&ImageGRAY8::imageCvt); + const Image::FormatPtr ImageHSV8::FORMAT_HSV8 = Image::Format::createFormat("HSV8",CV_8UC3,&ImageHSV8::createInstance,&ImageHSV8::imageCvt); + const Image::FormatPtr ImageYCRCB::FORMAT_YCRCB = Image::Format::createFormat("YCRCB",CV_8UC3,&ImageYCRCB::createInstance,&ImageYCRCB::imageCvt); + const Image::FormatPtr ImageNV21::FORMAT_NV21 = Image::Format::createFormat("NV21",CV_8UC2,&ImageNV21::createInstance,&ImageNV21::imageCvt); + + + Image::Image() + : cv::Mat(),width(0),height(0),_format() {} + + Image::Image(const int width, const int height, const FormatPtr fmt) + : cv::Mat(height,width,fmt->cvType),width(width),height(height),_format(fmt) {} + + Image::Image(const int width, const int height, const FormatPtr fmt, void *const data) + : cv::Mat(height,width,fmt->cvType,data),width(width),height(height),_format(fmt) {} + + Image::Image(const Image& i) + : cv::Mat(i),width(i.width),height(i.height),_format(i.format()) {} + + Image::Image(const cv::Mat& m, const 
FormatPtr fmt) + : cv::Mat(m),width(m.cols),height(m.rows),_format(fmt) {} + + ImageRGB8::ImageRGB8(const int width, const int height) + : Image(width,height,FORMAT_RGB8) {} + + ImageRGB8::ImageRGB8(const int width, const int height, void *const data) + : Image(width,height,FORMAT_RGB8,data) {} + + ImageRGB8::ImageRGB8(const Image& i) + : Image(i.width,i.height,FORMAT_RGB8) { + i.convert(*this); + } + + ImageRGB8 ImageRGB8::read(const std::string& filename){ + struct stat s; + if (stat(filename.c_str(),&s) == -1) + throw std::runtime_error(filename+" not found"); + cv::Mat readImage(cv::imread(filename));//BGR + cv::cvtColor(readImage,readImage,cv::COLOR_BGR2RGB); + return ImageRGB8(Image(readImage,FORMAT_RGB8)); + } + + bool ImageRGB8::write(const std::string& filename,const std::vector& params){ + cv::Mat bgrImage(this->size(),this->type()); + cv::cvtColor(*this,bgrImage,cv::COLOR_RGB2BGR); + return cv::imwrite(filename, bgrImage, params); + } + + Image& ImageRGB8::imageCvt(const Image& src, Image& dst) throw(NoConversion){ + assert((src.format() == FORMAT_RGB8 || src.format() == FORMAT_RGB8_Z + || src.format() == FORMAT_DEPTH8_16 || src.format() == FORMAT_DEPTH8_16_Z) && "src is not a RGB8 image"); + + + if (dst.format() == FORMAT_RGB8 || src.format() == FORMAT_DEPTH8_16 || src.format() == FORMAT_RGB8_Z || src.format() == FORMAT_DEPTH8_16_Z) + dst = src; + else { + const ImageRGB8 srcRgb8(src);//cast src to rgb image + if (dst.format() == ImageYUY2::FORMAT_YUY2) + srcRgb8.toYUY2(dst); + else if (dst.format() == ImageGRAY8::FORMAT_GRAY8) + srcRgb8.toGRAY8(dst); + else if (dst.format() == ImageYCRCB::FORMAT_YCRCB) + srcRgb8.toYCRCB(dst); + else if (dst.format() == ImageHSV8::FORMAT_HSV8) + srcRgb8.toHSV8(dst); + else + throw Image::NoConversion(); + } + return dst; + } + + void ImageRGB8::toGRAY8(Image& dst) const throw(Image::FormatMismatch){ + if (dst.format() != ImageGRAY8::FORMAT_GRAY8) + throw Image::FormatMismatch("FORMAT_GRAY8 required for dst"); + 
cv::cvtColor(*this,dst,cv::COLOR_RGB2GRAY); + } + + void ImageRGB8::toYUY2(Image& dst) const throw(Image::FormatMismatch){ + if (dst.format() != ImageYUY2::FORMAT_YUY2) + throw Image::FormatMismatch("FORMAT_YUY2 required for dst"); + if ((dst.width % 2 != 0) || (this->width % 2 != 0)) + throw Image::FormatMismatch("src and dst images have to have even number of columns"); + + cv::Mat_ ycrcb(dst.height,dst.width,dst.type());//YUV444 previous conversion + cv::Mat_ yuy2(dst); + cv::cvtColor(*this,ycrcb,cv::COLOR_RGB2YCrCb); + + for (int i=0; i < height; i++){ + for (int j=0; j < width; j+=2){//two pixels each loop + yuy2(i,j)[0] = ycrcb(i,j)[0];//Y0 + yuy2(i,j)[1] = ycrcb(i,j)[2];//U0 + yuy2(i,j+1)[0] = ycrcb(i,j+1)[0];//Y1 + yuy2(i,j+1)[1] = ycrcb(i,j)[1];//V0 + } + } + } + + void ImageRGB8::toHSV8(Image& dst) const throw(FormatMismatch){ + if (dst.format() != ImageHSV8::FORMAT_HSV8) + throw Image::FormatMismatch("FORMAT_HSV8 required for dst"); + cv::cvtColor(*this,dst,cv::COLOR_RGB2HSV); + } + + void ImageRGB8::toYCRCB(Image& dst) const throw(FormatMismatch){ + if (dst.format() != ImageYCRCB::FORMAT_YCRCB) + throw Image::FormatMismatch("FORMAT_YCRCB required for dst"); + cv::cvtColor(*this,dst,cv::COLOR_RGB2YCrCb); + } + + Image* ImageRGB8::createInstance(const int width, const int height, void *const data){ + if (data) + return new ImageRGB8(width,height,data); + else + return new ImageRGB8(width,height); + } + + ImageYUY2::ImageYUY2(const int width, const int height) + : Image(width,height,FORMAT_YUY2) {} + + ImageYUY2::ImageYUY2(const int width, const int height, void *const data) + : Image(width,height,FORMAT_YUY2,data) {} + + ImageYUY2::ImageYUY2(const Image& i) + : Image(i.width,i.height,FORMAT_YUY2) { + i.convert(*this); + } + + Image& ImageYUY2::imageCvt(const Image& src, Image& dst) throw(NoConversion){ + assert(src.format() == FORMAT_YUY2 && "src is not a YUY2 image"); + if (dst.format() == FORMAT_YUY2) + dst = src; + else { + const ImageYUY2 
srcYuy2(src); + if (dst.format() == ImageRGB8::FORMAT_RGB8) + srcYuy2.toRGB8(dst); + else if (dst.format() == ImageGRAY8::FORMAT_GRAY8) + srcYuy2.toGRAY8(dst); + else if (dst.format() == ImageYCRCB::FORMAT_YCRCB) + srcYuy2.toYCRCB(dst); + else + throw Image::NoConversion(); + } + return dst; + } + + void ImageYUY2::toGRAY8(Image& dst) const throw(Image::FormatMismatch){ + if (dst.format() != ImageGRAY8::FORMAT_GRAY8) + throw Image::FormatMismatch("FORMAT_GRAY8 required for dst"); + + int fromTo[] = {0,0};//first channel of YUY2 have the luminance information + cv::mixChannels(this,1,&dst,1,fromTo,1); + } + + void ImageYUY2::toRGB8(Image& dst) const throw(Image::FormatMismatch){ + if (dst.format() != ImageRGB8::FORMAT_RGB8) + throw Image::FormatMismatch("FORMAT_RGB8 required for dst"); + if ((dst.width % 2 != 0) || (this->width % 2 != 0)) + throw Image::FormatMismatch("src and dst images have to have even number of columns"); + + ImageYCRCB ycrcbImg(dst.height,dst.width);//YCRCB previous conversion + toYCRCB(ycrcbImg); + + cv::cvtColor(ycrcbImg,dst,cv::COLOR_YCrCb2RGB); + } + + void ImageYUY2::toYCRCB(Image& dst) const throw(FormatMismatch){ + if (dst.format() != ImageYCRCB::FORMAT_YCRCB) + throw Image::FormatMismatch("FORMAT_YCRCB required for dst"); + cv::Mat_ ycrcb(dst); + cv::Mat_ yuy2(*this); + + for (int i=0; i < height; i++){ + for (int j=0; j < width; j+=2){//two pixels each loop + ycrcb(i,j)[0] = yuy2(i,j)[0];//Y0<-Y0 + ycrcb(i,j)[1] = yuy2(i,j+1)[1];//V0<-V0 + ycrcb(i,j)[2] = yuy2(i,j)[1];//U0<-U0 + ycrcb(i,j+1)[0] = yuy2(i,j+1)[0];//Y1<-Y1 + ycrcb(i,j+1)[1] = yuy2(i,j+1)[1];//V1<-V0 + ycrcb(i,j+1)[2] = yuy2(i,j)[1];//U1<-U0 + } + } + } + + Image* ImageYUY2::createInstance(const int width, const int height, void *const data){ + if (data) + return new ImageYUY2(width,height,data); + else + return new ImageYUY2(width,height); + } + + ImageGRAY8::ImageGRAY8(const int width, const int height) + : Image(width,height,FORMAT_GRAY8) {} + + 
ImageGRAY8::ImageGRAY8(const int width, const int height, void *const data) + : Image(width,height,FORMAT_GRAY8,data) {} + + ImageGRAY8::ImageGRAY8(const Image& i) + : Image(i.width,i.height,FORMAT_GRAY8) { + i.convert(*this); + } + + ImageGRAY8 ImageGRAY8::read(const std::string& filename){ + struct stat s; + if (stat(filename.c_str(),&s) == -1) + throw std::runtime_error(filename+" not found"); + return ImageGRAY8(Image(cv::imread(filename,0),FORMAT_GRAY8)); + } + + bool ImageGRAY8::write(const std::string& filename,const std::vector& params){ + return cv::imwrite(filename, *this, params); + } + + Image& ImageGRAY8::imageCvt(const Image& src, Image& dst) throw(NoConversion){ + assert(src.format() == FORMAT_GRAY8 && "src is not a GRAY8 image"); + if (dst.format() == FORMAT_GRAY8) + dst = src; + else { + const ImageGRAY8 srcGray8(src); + if (dst.format() == ImageYUY2::FORMAT_YUY2) + srcGray8.toYUY2(dst); + else if (dst.format() == ImageRGB8::FORMAT_RGB8) + srcGray8.toRGB8(dst); + else + throw Image::NoConversion(); + } + return dst; + } + + void ImageGRAY8::toRGB8(Image& dst) const throw(Image::FormatMismatch){ + if (dst.format() != ImageRGB8::FORMAT_RGB8) + throw Image::FormatMismatch("FORMAT_RGB8 required for dst"); + + cv::cvtColor(*this,dst,cv::COLOR_GRAY2RGB); + } + + void ImageGRAY8::toYUY2(Image& dst) const throw(Image::FormatMismatch){ + if (dst.format() != ImageYUY2::FORMAT_YUY2) + throw Image::FormatMismatch("FORMAT_YUY2 required for dst"); + //U and V will be 0 + cv::Mat uv(cv::Mat::zeros(width,height,FORMAT_GRAY8->cvType)); + + int fromTo[] = {0,0 , 1,1};//GRAY to Y channel, 0->U/V + cv::Mat src[] = {*this,uv}; + cv::mixChannels(src,2,&dst,1,fromTo,1); + } + + Image* ImageGRAY8::createInstance(const int width, const int height, void *const data){ + if (data) + return new ImageGRAY8(width,height,data); + else + return new ImageGRAY8(width,height); + } + + + ImageHSV8::ImageHSV8(const int width, const int height) + : Image(width,height,FORMAT_HSV8) {} + + 
ImageHSV8::ImageHSV8(const int width, const int height, void *const data) + : Image(width,height,FORMAT_HSV8,data) {} + + ImageHSV8::ImageHSV8(const Image& i) + : Image(i.width,i.height,FORMAT_HSV8) { + i.convert(*this); + } + + Image& ImageHSV8::imageCvt(const Image& src, Image& dst) throw(NoConversion){ + assert(src.format() == FORMAT_HSV8 && "src is not a HSV8 image"); + if (dst.format() == FORMAT_HSV8) + dst = src; + else { + const ImageHSV8 srcHsv8(src); + if (dst.format() == ImageRGB8::FORMAT_RGB8) + srcHsv8.toRGB8(dst); + else + throw Image::NoConversion(); + } + return dst; + } + + void ImageHSV8::toRGB8(Image& dst) const throw(Image::FormatMismatch){ + if (dst.format() != ImageRGB8::FORMAT_RGB8) + throw Image::FormatMismatch("FORMAT_RGB8 required for dst"); + + cv::cvtColor(*this,dst,cv::COLOR_HSV2RGB); + } + + Image* ImageHSV8::createInstance(const int width, const int height, void *const data){ + if (data) + return new ImageHSV8(width,height,data); + else + return new ImageHSV8(width,height); + } + + ImageYCRCB::ImageYCRCB(const int width, const int height) + : Image(width,height,FORMAT_YCRCB) {} + + ImageYCRCB::ImageYCRCB(const int width, const int height, void *const data) + : Image(width,height,FORMAT_YCRCB,data) {} + + ImageYCRCB::ImageYCRCB(const Image& i) + : Image(i.width,i.height,FORMAT_YCRCB) { + i.convert(*this); + } + + Image& ImageYCRCB::imageCvt(const Image& src, Image& dst) throw(NoConversion){ + assert(src.format() == FORMAT_YCRCB && "src is not a YCRCB image"); + if (dst.format() == FORMAT_YCRCB) + dst = src; + else { + const ImageYCRCB srcYcrcb(src); + if (dst.format() == ImageRGB8::FORMAT_RGB8) + srcYcrcb.toRGB8(dst); + else + throw Image::NoConversion(); + } + return dst; + } + + void ImageYCRCB::toRGB8(Image& dst) const throw(FormatMismatch){ + if (dst.format() != ImageRGB8::FORMAT_RGB8) + throw Image::FormatMismatch("FORMAT_RGB8 required for dst"); + + cv::cvtColor(*this,dst,cv::COLOR_YCrCb2RGB); + } + + Image* 
ImageYCRCB::createInstance(const int width, const int height, void *const data){ + if (data) + return new ImageYCRCB(width,height,data); + else + return new ImageYCRCB(width,height); + } + + ImageNV21::ImageNV21(const int width, const int height) + : Image(width,height,FORMAT_NV21) {} + + ImageNV21::ImageNV21(const int width, const int height, void *const data) + : Image(width,height,FORMAT_NV21,data) {} + + ImageNV21::ImageNV21(const Image& i) + : Image(i.width,i.height,FORMAT_NV21) { + i.convert(*this); + } + + Image& ImageNV21::imageCvt(const Image& src, Image& dst) throw(NoConversion){ + assert(src.format() == FORMAT_NV21 && "src is not a NV21 image"); + if (dst.format() == FORMAT_NV21) + dst = src; + else { + const ImageNV21 srcNv21(src); + if (dst.format() == ImageRGB8::FORMAT_RGB8) + srcNv21.toRGB8(dst); + else + throw Image::NoConversion(); + } + return dst; + } + + void ImageNV21::toGRAY8(Image& dst) const throw(Image::FormatMismatch){ + if (dst.format() != ImageGRAY8::FORMAT_GRAY8) + throw Image::FormatMismatch("FORMAT_GRAY8 required for dst"); + + int fromTo[] = {0,0};//first channel of YUY2 have the luminance information (need to test!!) + cv::mixChannels(this,1,&dst,1,fromTo,1); + } + + void ImageNV21::toRGB8(Image& dst) const throw(Image::FormatMismatch){ + if (dst.format() != ImageRGB8::FORMAT_RGB8) + throw Image::FormatMismatch("FORMAT_RGB8 required for dst"); + if ((dst.width % 2 != 0) || (this->width % 2 != 0)) + throw Image::FormatMismatch("src and dst images have to have even number of columns"); + + //cv::cvtColor(*this,dst,cv::COLOR_YUV420sp2RGB); + + unsigned char *rgb = (unsigned char *)dst.data; + unsigned char *yuv = (unsigned char *)this->data, + *yuv_y = yuv, *yuv_uv = yuv + dst.width * dst.height; + for (int i = 0; i < height; i++, yuv_uv -= (i&1)?width:0) { + for (int j = 0; j < width; j++, yuv_uv += (j&1)?0:2) { + int y = *yuv_y++; + y = (y < 16) ? 
16 : y; + int v = yuv_uv[0] - 128; + int u = yuv_uv[1] - 128; + + int multi = 1.164f * (y - 16); + int r = (int) (multi + 1.596f * v); + int g = (int) (multi - 0.813f * v - 0.391f * u); + int b = (int) (multi + 2.018f * u); + + *rgb++ = r < 0 ? 0 : (r > 255 ? 255 : r); + *rgb++ = g < 0 ? 0 : (g > 255 ? 255 : g); + *rgb++ = b < 0 ? 0 : (b > 255 ? 255 : b); + } + } + } + + void ImageNV21::toYCRCB(Image& dst) const throw(FormatMismatch){ + if (dst.format() != ImageYCRCB::FORMAT_YCRCB) + throw Image::FormatMismatch("FORMAT_YCRCB required for dst"); + ImageYCRCB rgbImg(dst.height,dst.width); + toRGB8(rgbImg); + cv::cvtColor(rgbImg,dst,cv::COLOR_RGB2YCrCb); + } + + Image* ImageNV21::createInstance(const int width, const int height, void *const data){ + if (data) + return new ImageNV21(width,height,data); + else + return new ImageNV21(width,height); + } +} + +/** + * Insert a format in an output stream. Only debugging, output could be truncated + */ +std::ostream &operator<<(std::ostream &stream, const colorspaces::Image::Format& fmt){ + stream << "FMT("<< fmt.name << ";channels:" << CV_MAT_CN(fmt.cvType) << ";depth:" << CV_MAT_DEPTH(fmt.cvType) << ")"; + return stream; +} + +std::ostream &operator<<(std::ostream &stream, const colorspaces::Image& img){ + stream << "IMG(" << *(img.format()) << ";" << img.width << "x" << img.height << ")"; + return stream; +} diff --git a/DetectionMetrics/libs/utils/colorspaces/imagecv.h b/DetectionMetrics/libs/utils/colorspaces/imagecv.h new file mode 100644 index 00000000..26b69df7 --- /dev/null +++ b/DetectionMetrics/libs/utils/colorspaces/imagecv.h @@ -0,0 +1,406 @@ +/* + * + * Copyright (C) 1997-2009 JDERobot Developers Team + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + * + * Authors : David Lobato Bravo + * + */ + +#ifndef IMAGECV_COLORSPACES_H +#define IMAGECV_COLORSPACES_H + +#include +#include +#include +#include +#include +#include "uncopyable.h" + +namespace colorspaces { + /** + * An image + */ + class Image: public cv::Mat { + public: + class FormatMismatch: public std::exception{ + public: + FormatMismatch(const std::string msg) + : message(msg) {} + ~FormatMismatch() throw() {} + virtual const char* what() const throw() + { + return message.c_str(); + } + private: + const std::string message; + }; + + class NoConversion: public std::exception{ + virtual const char* what() const throw() + { + return "Can't convert image to requested format"; + } + }; + + typedef Image* (*imageCtor)(const int height, const int width, void *const data); + typedef Image& (*imageCvt)(const Image& src, Image& dst); + + class Format; + typedef std::shared_ptr FormatPtr; + + /** + * Define the pixel format of an image + */ + class Format: public Uncopyable{ + public: + static const FormatPtr createFormat(const std::string name, const int cvType, imageCtor ctor, imageCvt cvt); + static const FormatPtr getFormat(const int fmtId); + static const FormatPtr searchFormat(const std::string name); + Image* createInstance(const int width, const int height, void *const data); + int bytesPerPixel() { return CV_ELEM_SIZE(cvType); } + std::string name;/**< String that represents this format*/ + int id;/**< Format identification*/ + int cvType;/**< Opencv data type used for pixel data*/ + imageCtor ctor;/**< Contructor*/ + imageCvt cvt;/**< conversion function*/ + 
private: + Format(const std::string name, const int id, const int cvType, imageCtor ctor, imageCvt cvt); + static std::vector& formatTable(); + }; + + + /** + * Constructor + */ + Image(); + + /** + * Constructor + */ + Image(const int width, const int height, const FormatPtr fmt); + + /** + * Constructor from user data + */ + Image(const int width, const int height, const FormatPtr fmt, void *const data); + + /** + * Copy onstructor + */ + Image(const Image& i); + + /** + * Copy onstructor from cv::Mat + */ + Image(const cv::Mat& m, const FormatPtr fmt); + + /** + * Image destructor + */ + virtual ~Image() {} + + /** + * Get image's format + */ + const FormatPtr format() const { return _format; } + + /** + * Convert image to dst fmt + */ + Image& convert(Image& dst) const throw(NoConversion); + + /** + * Clone image returning a new copy + */ + Image clone() const; + + int width; + int height; + + static const FormatPtr FORMAT_NONE; + private: + FormatPtr _format; + }; + + + /** + * A RGB 24 bit image + */ + class ImageRGB8: public Image { + public: + /** + * Constructor + */ + ImageRGB8(const int width, const int height); + + /** + * Constructor from user data + */ + ImageRGB8(const int width, const int height, void *const data); + + /** + * Copy constructor from Image, conversion will happen if needed + */ + ImageRGB8(const Image& i); + + /** + * Conversion methods. 
+ */ + void toGRAY8(Image& dst) const throw(FormatMismatch); + void toYUY2(Image& dst) const throw(FormatMismatch); + void toHSV8(Image& dst) const throw(FormatMismatch); + void toYCRCB(Image& dst) const throw(FormatMismatch); + + /** + * Read from a file + * See cv::imread for flags and params + */ + static ImageRGB8 read(const std::string& filename); + + /** + * Write to a file + * See cv::imwrite for flags and params + */ + bool write(const std::string& filename, const std::vector& params=std::vector()); + + + /** + * Factory method + */ + static Image* createInstance(const int width, const int height, void *const data); + static Image& imageCvt(const Image& src, Image& dst) throw(NoConversion); + static const FormatPtr FORMAT_RGB8, FORMAT_RGB8_Z, FORMAT_DEPTH8_16, FORMAT_DEPTH8_16_Z; + }; + + /** + * A YUY2 image + */ + class ImageYUY2: public Image { + public: + /** + * Constructor + * Width have to be an even number. + */ + ImageYUY2(const int width, const int height); + + /** + * Constructor from user data + * Width have to be an even number. + */ + ImageYUY2(const int width, const int height, void *const data); + + /** + * Copy constructor. + * if \param i doesn't match format a conversion will happen. + */ + ImageYUY2(const Image& i); + + + /** + * Conversion methods. 
+ * Returns a copy + */ + void toGRAY8(Image& dst) const throw(FormatMismatch); + void toRGB8(Image& dst) const throw(FormatMismatch); + void toYCRCB(Image& dst) const throw(FormatMismatch); + + /** + * Factory method + */ + static Image* createInstance(const int width, const int height, void *const data); + static Image& imageCvt(const Image& src, Image& dst) throw(NoConversion); + static const FormatPtr FORMAT_YUY2; + }; + + + /** + * A Gray 8 bit image + */ + class ImageGRAY8: public Image { + public: + /** + * Constructor + */ + ImageGRAY8(const int width, const int height); + + /** + * Constructor from user data + */ + ImageGRAY8(const int width, const int height, void *const data); + + /** + * Copy constructor. + * if \param i doesn't match format a conversion will happen. + */ + ImageGRAY8(const Image& i); + + + /** + * Conversion methods. + * Returns a copy + */ + void toRGB8(Image& dst) const throw(FormatMismatch); + void toYUY2(Image& dst) const throw(FormatMismatch); + + /** + * Read from a file + * See cv::imread for flags and params + */ + static ImageGRAY8 read(const std::string& filename); + + /** + * Write to a file + * See cv::imwrite for flags and params + */ + bool write(const std::string& filename, const std::vector& params=std::vector()); + + /** + * Factory method + */ + static Image* createInstance(const int width, const int height, void *const data); + static Image& imageCvt(const Image& src, Image& dst) throw(NoConversion); + static const FormatPtr FORMAT_GRAY8, FORMAT_GRAY8_Z; + }; + + /** + * A HSV8 image + */ + class ImageHSV8: public Image { + public: + /** + * Constructor + */ + ImageHSV8(const int width, const int height); + + /** + * Constructor from user data + */ + ImageHSV8(const int width, const int height, void *const data); + + /** + * Copy constructor. + * if \param i doesn't match format a conversion will happen. + */ + ImageHSV8(const Image& i); + + + /** + * Conversion methods. 
+ * Returns a copy + */ + void toRGB8(Image& dst) const throw(FormatMismatch); + + /** + * Factory method + */ + static Image* createInstance(const int width, const int height, void *const data); + static Image& imageCvt(const Image& src, Image& dst) throw(NoConversion); + static const FormatPtr FORMAT_HSV8; + }; + + /** + * A YCRCB image + */ + class ImageYCRCB: public Image { + public: + /** + * Constructor + */ + ImageYCRCB(const int width, const int height); + + /** + * Constructor from user data + */ + ImageYCRCB(const int width, const int height, void *const data); + + /** + * Copy constructor. + * if \param i doesn't match format a conversion will happen. + */ + ImageYCRCB(const Image& i); + + + /** + * Conversion methods. + * Returns a copy + */ + void toRGB8(Image& dst) const throw(FormatMismatch); + //void toYUY2(Image& dst) const throw(FormatMismatch); + + /** + * Factory method + */ + static Image* createInstance(const int width, const int height, void *const data); + static Image& imageCvt(const Image& src, Image& dst) throw(NoConversion); + static const FormatPtr FORMAT_YCRCB; + }; + + /** + * A NV21 image + */ + class ImageNV21: public Image { + public: + /** + * Constructor + * Width have to be an even number. + */ + ImageNV21(const int width, const int height); + + /** + * Constructor from user data + * Width have to be an even number. + */ + ImageNV21(const int width, const int height, void *const data); + + /** + * Copy constructor. + * if \param i doesn't match format a conversion will happen. + */ + ImageNV21(const Image& i); + + + /** + * Conversion methods. 
+ * Returns a copy + */ + void toGRAY8(Image& dst) const throw(FormatMismatch); + void toRGB8(Image& dst) const throw(FormatMismatch); + void toYCRCB(Image& dst) const throw(FormatMismatch); + + /** + * Factory method + */ + static Image* createInstance(const int width, const int height, void *const data); + static Image& imageCvt(const Image& src, Image& dst) throw(NoConversion); + static const FormatPtr FORMAT_NV21; + }; + + +} //namespace + +//declarations outside the namespace + +/** + * Insert a format in an output stream. Only debugging, output could be truncated + */ +std::ostream &operator<<(std::ostream &stream, const colorspaces::Image::Format& fmt); + +/** + * Insert an image in an output stream. Only debugging, output could be truncated + */ +std::ostream &operator<<(std::ostream &stream, const colorspaces::Image& img); + +#endif //IMAGECV_COLORSPACES_H diff --git a/DetectionMetrics/libs/utils/colorspaces/rgb2hsv.c b/DetectionMetrics/libs/utils/colorspaces/rgb2hsv.c new file mode 100644 index 00000000..3b6d8667 --- /dev/null +++ b/DetectionMetrics/libs/utils/colorspaces/rgb2hsv.c @@ -0,0 +1,294 @@ +/* + * + * Copyright (C) 1997-2008 JDE Developers Team + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ * + * Authors : Roberto Calvo Palomino + * + */ + +#include +#include +#include +#include + +#include "colorspaces.h" + +const int MAX_BITS = 8; +const int SIGNIFICATIVE_BITS = 6; +const int MAX_RGB = 255; + +struct HSV * LUT_RGB2HSV [64][64][64]; + +/* Condicional variable: + * 0: The table RGB2HSV don't exists. + * 1: The table RGB2HSV exists. + */ +int isInitTableHSV; + +/* mutex */ +pthread_mutex_t mutex; + +void rgb2hsv_wiki (double r, double g, double b, double *H, double *S, double *V) +{ + double min, max; + + // Calculamos el minimo + if ((r <= g) && (r <= b)) + min = r; + else if ((g <= r) && (g <= b)) + min = g; + else + min = b; + + // Calculamos el mximo + if ((r >= g) && (r >= b)) + max = r; + else if ((g >= r) && (g >= b)) + max = g; + else + max = b; + + //printf("min=%.1f - max=%.1f - r=%.1f - g=%.1f - b=%.1f\n",min,max,r,g,b); + // Calculamos valor de H + if (max==min) + { + *H=.0; // En estos casos, H no tiene sentido + } + else if (max==r && g>=b) + { + *H=60*((g-b)/(max-min)); + } + else if (max==r && g 0; --i) + { + printf("%d", (status & j) != 0); + j>>= 1; + + //if ((i - 1) % BITS_PACK == 0) + if (i==3) + printf(" "); + } + + printf(" (%lu)\n",status); +} + + +void RGB2HSV_destroyTable () +{ + + int r,g,b; + int pos_r, pos_g, pos_b; + int count = 4; + + printf("Destroy Table LUT_RGB2HSV .... 
OK\n"); + + for (b=0;b<=MAX_RGB;b=b+count) + for (g=0;g<=MAX_RGB;g=g+count) + for (r=0;r<=MAX_RGB;r=r+count) + { + if (r==0) pos_r=0; else pos_r = r/4; + if (g==0) pos_g=0; else pos_g = g/4; + if (b==0) pos_b=0; else pos_b = b/4; + + if (LUT_RGB2HSV[pos_r][pos_g][pos_b]) + { + free(LUT_RGB2HSV[pos_r][pos_g][pos_b]); + //RGB2HSI[pos_r][pos_g][pos_b]=NULL; + } + } + pthread_mutex_lock(&mutex); + isInitTableHSV = 0; + pthread_mutex_unlock(&mutex); +} + + +void RGB2HSV_init() +{ + /* Checking exist one instance */ + pthread_mutex_lock(&mutex); + if (isInitTableHSV==1) + { + pthread_mutex_unlock(&mutex); + return; + } + pthread_mutex_unlock(&mutex); + + printf("Init %s v%s ... \n",NAME,COLORSPACES_VERSION); + pthread_mutex_lock(&mutex); + isInitTableHSV = 0; + pthread_mutex_unlock(&mutex); +} + + +/// @TODO: Calculate values for create a generic table +void RGB2HSV_createTable() +{ + + int r,g,b; + int count, index; + int pos_r, pos_g, pos_b; + + struct HSV* newHSV; + + /* Checking exist one instance */ + pthread_mutex_lock(&mutex); + if (isInitTableHSV==1) + { + pthread_mutex_unlock(&mutex); + return; + } + pthread_mutex_unlock(&mutex); + + + count = 4; + index = 0; + + for (b=0;b<=MAX_RGB;b=b+count) + for (g=0;g<=MAX_RGB;g=g+count) + for (r=0;r<=MAX_RGB;r=r+count) + { + newHSV = (struct HSV*) malloc(sizeof(struct HSV)); + if (!newHSV) + { + printf("Allocated memory error\n"); + exit(-1); + } + + rgb2hsv_wiki(r,g,b,&(newHSV->H),&(newHSV->S),&(newHSV->V)); + + if (r==0) pos_r=0; else pos_r = r/4; + if (g==0) pos_g=0; else pos_g = g/4; + if (b==0) pos_b=0; else pos_b = b/4; + + //printf("[%d,%d,%d] RGB=%d,%d,%d - %.1f,%.1f,%.1f \n",pos_r,pos_g,pos_b,r,g,b,newHSI->H,newHSI->S,newHSI->I); + LUT_RGB2HSV[pos_r][pos_g][pos_b] = newHSV; + + index++; + } + + printf("Table 'LUT_RGB2HSV' create with 6 bits (%d values)\n",index); + + pthread_mutex_lock(&mutex); + isInitTableHSV=1; + pthread_mutex_unlock(&mutex); +} + + +void RGB2HSV_printHSV (struct HSV* hsv) +{ + printf("HSV: 
%.1f,%.1f,%.1f\n",hsv->H,hsv->S,hsv->V); +} + + +void RGB2HSV_test (void) +{ + int r,g,b; + const struct HSV* myHSV=NULL; + struct HSV myHSV2; + char line[16]; + + while (1) + { + + printf("\nIntroduce R,G,B: "); + fgets(line,16,stdin); + if ( sscanf(line,"%d,%d,%d",&r,&g,&b)!= 3) + break; + + myHSV = RGB2HSV_getHSV(r,g,b); + + if (myHSV==NULL) + { + printf ("Error in myHSV=NULL\n"); + continue; + } + + printf("[Table] RGB: %d,%d,%d -- HSV: %.1f,%.1f,%.1f\n",r,g,b,myHSV->H,myHSV->S,myHSV->V); + + rgb2hsv_wiki(r,g,b,&myHSV2.H,&myHSV2.S,&myHSV2.V); + + printf("[Algor] RGB: %d,%d,%d -- HSI: %.1f,%.1f,%.1f\n",r,g,b,myHSV2.H,myHSV2.S,myHSV2.V); + + } +} + diff --git a/DetectionMetrics/libs/utils/colorspaces/rgb2yuv.c b/DetectionMetrics/libs/utils/colorspaces/rgb2yuv.c new file mode 100644 index 00000000..d066d217 --- /dev/null +++ b/DetectionMetrics/libs/utils/colorspaces/rgb2yuv.c @@ -0,0 +1,246 @@ +/* + * + * Copyright (C) 1997-2009 JDE Developers Team + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + * + * Authors : Pablo Miangolarra Tejada + * + */ + +#include +#include +#include +#include + +#include "colorspaces.h" + +const int MAXIMUN_RGB = 255; + +struct YUV * LUT_RGB2YUV [64][64][64]; + +/* Condicional variable: + * 0: The table RGB2YUV don't exists. + * 1: The table RGB2YUV exists. 
+ */ +int isInitTableYUV; + +/* mutex */ +pthread_mutex_t mutex; + +void rgb2yuv_wiki (double r, double g, double b, double *Y, double *U, double *V) +{ + /* From the wikipedia: yuv color space */ + + /* Transform R,G,B to [0,1] range*/ + + r = r / MAXIMUN_RGB; + g = g / MAXIMUN_RGB; + b = b / MAXIMUN_RGB; + + /*Apply formulas*/ + + *Y = r * 0.299 + g * 0.587 + b * 0.114; + *U = -0.14713*r - 0.289*g + 0.436*b; + *V = 0.615*r - 0.515*g - 0.100*b; + + //printf(" Valores Y: %.2f U: %.2f V: %.2f ---- R: %.2f G: %.2f B: %.2f\n",*Y,*U,*V,r,g,b); + +} + + + +void yuv2rgb(double Y, double U, double V, double *r, double *g, double *b) +{ + /* From the wikipedia: yuv color space*/ + + *r = Y + 1.13983 * V; + *g = Y - 0.39465 * U - 0.58060 * V; + *b = Y + 2.03211 * U; + + if (*r>1.0){*r = 1.0;} + if (*g>1.0){*g = 1.0;} + if (*b>1.0){*b = 1.0;} + + if (*r<0.0){*r = 0.0;} + if (*g<0.0){*g = 0.0;} + if (*b<0.0){*b = 0.0;} + + /* Return r,g,b in [0,1] range*/ + +} + +/// \brief Function to print unsiged int in binary +void print_status_HSV(unsigned long status) +{ + + //const int BITS_PACK = 4; + + unsigned int t = 8; + unsigned int i; + unsigned long int j= 1 << (t - 1); + + for (i= t; i > 0; --i) + { + printf("%d", (status & j) != 0); + j>>= 1; + + //if ((i - 1) % BITS_PACK == 0) + if (i==3) + printf(" "); + } + + printf(" (%lu)\n",status); +} + + +void RGB2YUV_destroyTable () +{ + + int r,g,b; + int pos_r, pos_g, pos_b; + int count = 4; + + printf("Destroy Table LUT_RGB2YUV .... 
OK\n"); + + for (b=0;b<=MAXIMUN_RGB;b=b+count) + for (g=0;g<=MAXIMUN_RGB;g=g+count) + for (r=0;r<=MAXIMUN_RGB;r=r+count) + { + if (r==0) pos_r=0; else pos_r = r/4; + if (g==0) pos_g=0; else pos_g = g/4; + if (b==0) pos_b=0; else pos_b = b/4; + + if (LUT_RGB2YUV[pos_r][pos_g][pos_b]) + { + free(LUT_RGB2YUV[pos_r][pos_g][pos_b]); + } + } + pthread_mutex_lock(&mutex); + isInitTableYUV = 0; + pthread_mutex_unlock(&mutex); +} + + +void RGB2YUV_init() +{ + /* Checking exist one instance */ + pthread_mutex_lock(&mutex); + if (isInitTableYUV==1) + { + pthread_mutex_unlock(&mutex); + return; + } + pthread_mutex_unlock(&mutex); + + printf("Init %s v%s ... \n",NAME,COLORSPACES_VERSION); + pthread_mutex_lock(&mutex); + isInitTableYUV = 0; + pthread_mutex_unlock(&mutex); +} + + +/// @TODO: Calculate values for create a generic table +void RGB2YUV_createTable() +{ + + int r,g,b; + int count, index; + int pos_r, pos_g, pos_b; + + struct YUV* newYUV; + + /* Checking exist one instance */ + pthread_mutex_lock(&mutex); + if (isInitTableYUV==1) + { + pthread_mutex_unlock(&mutex); + printf("YUV table already exists\n"); + return; + } + pthread_mutex_unlock(&mutex); + + + count = 4; + index = 0; + + for (b=0;b<=MAXIMUN_RGB;b=b+count) + for (g=0;g<=MAXIMUN_RGB;g=g+count) + for (r=0;r<=MAXIMUN_RGB;r=r+count) + { + newYUV = (struct YUV*) malloc(sizeof(struct YUV)); + if (!newYUV) + { + printf("Allocated memory error\n"); + exit(-1); + } + + rgb2yuv_wiki(r,g,b,&(newYUV->Y),&(newYUV->U),&(newYUV->V)); + + if (r==0) pos_r=0; else pos_r = r/4; + if (g==0) pos_g=0; else pos_g = g/4; + if (b==0) pos_b=0; else pos_b = b/4; + + //printf("[%d,%d,%d] RGB=%d,%d,%d - %.1f,%.1f,%.1f \n",pos_r,pos_g,pos_b,r,g,b,newYUV->H,newYUV->S,newYUV->I); + LUT_RGB2YUV[pos_r][pos_g][pos_b] = newYUV; + + index++; + } + + printf("Table 'LUT_RGB2YUV' create with 6 bits (%d values)\n",index); + + pthread_mutex_lock(&mutex); + isInitTableYUV=1; + pthread_mutex_unlock(&mutex); +} + + +void RGB2YUV_printYUV (struct YUV* 
yuv) +{ + printf("YUV: %.1f,%.1f,%.1f\n",yuv->Y,yuv->U,yuv->V); +} + + +void RGB2YUV_test (void) +{ + int r,g,b; + const struct YUV* myYUV=NULL; + struct YUV myYUV2; + char line[16]; + + while (1) + { + + printf("\nIntroduce R,G,B: "); + fgets(line,16,stdin); + if ( sscanf(line,"%d,%d,%d",&r,&g,&b)!= 3) + break; + + myYUV = RGB2YUV_getYUV(r,g,b); + + if (myYUV==NULL) + { + printf ("Error in myYUV=NULL\n"); + continue; + } + + printf("[Table] RGB: %d,%d,%d -- YUV: %.1f,%.1f,%.1f\n",r,g,b,myYUV->Y,myYUV->U,myYUV->V); + + rgb2yuv_wiki(r,g,b,&myYUV2.Y,&myYUV2.U,&myYUV2.V); + + printf("[Algor] RGB: %d,%d,%d -- YUV: %.1f,%.1f,%.1f\n",r,g,b,myYUV2.Y,myYUV2.U,myYUV2.V); + + } +} + diff --git a/DetectionMetrics/libs/utils/colorspaces/uncopyable.h b/DetectionMetrics/libs/utils/colorspaces/uncopyable.h new file mode 100644 index 00000000..43e5ce84 --- /dev/null +++ b/DetectionMetrics/libs/utils/colorspaces/uncopyable.h @@ -0,0 +1,37 @@ +/* + * + * Copyright (C) 1997-2009 JDERobot Developers Team + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ * + * Authors : David Lobato Bravo + * + */ + +#ifndef UNCOPYABLE_COLORSPACES_H +#define UNCOPYABLE_COLORSPACES_H + + +namespace colorspaces{ + class Uncopyable{ + public: + Uncopyable() {} + private: + Uncopyable(const Uncopyable&); + Uncopyable& operator=(const Uncopyable&); + }; +}//namespace + +#endif //UNCOPYABLE_COLORSPACES_H + diff --git a/DetectionMetrics/package.sh b/DetectionMetrics/package.sh new file mode 100644 index 00000000..ce9a7e4d --- /dev/null +++ b/DetectionMetrics/package.sh @@ -0,0 +1,87 @@ +#!/usr/bin/env bash + +######################################################################## +# Package the binaries built on Travis-CI as an AppImage +# By Simon Peter 2016 +# For more information, see http://appimage.org/ +######################################################################## + +export ARCH=$(arch) + +if [[ "$TO_TEST" == "WITH_ROS_AND_ICE" ]]; +then +APP=DetectionMetrics_with_ROS_and_ICE +else +APP=DetectionMetrics +fi + +LOWERAPP=${APP,,} + + +mkdir -p $APP.AppDir/usr/ + +cd $APP.AppDir + +echo `pwd` + +mkdir -p usr/bin +cp ../DatasetEvaluationApp/DatasetEvaluationApp usr/bin/ + +mkdir -p usr/lib +ldd ../DatasetEvaluationApp/DatasetEvaluationApp | grep "=> /" | awk '{print $3}' | xargs -I '{}' cp -v '{}' usr/lib/ + +echo "Now copying Qt plugin libraries" +mkdir usr/bin/platforms/ + +# For Qt Dependency +cp -v `find /usr -iname 'libqxcb.so'` usr/bin/platforms + +find /usr -iname 'libqxcb.so' | xargs ldd | grep "=> /" | awk '{print $3}' | xargs -I '{}' cp -v '{}' usr/bin/platforms + +# Copying necessary python modules +cp -v -r ../../DetectionMetricsLib/python_modules usr/lib/ + +cd usr/ ; find . 
-type f -exec sed -i -e 's|/usr|././|g' {} \; ; cd - + +cat > AppRun << 'EOF' +#!/usr/bin/env bash +# some magic to find out the real location of this script dealing with symlinks +DIR=`readlink "$0"` || DIR="$0"; +DIR=`dirname "$DIR"`; +cd "$DIR" +DIR=`pwd` +cd - > /dev/null +# disable parameter expansion to forward all arguments unprocessed to the VM +set -f +# run the VM and pass along all arguments as is +export PYTHONPATH="$DIR/usr/lib/python_modules" +LD_LIBRARY_PATH="$DIR/usr/lib" "${DIR}/usr/bin/DatasetEvaluationApp" "$@" +EOF + +chmod +x AppRun + +wget http://files.pharo.org/media/logo/icon-lighthouse-512x512.png -O $APP.png + +cat > $APP.desktop < +#include + + +void myFunction() { + LOG(ERROR) << "fatal message"; + +} + + +int main(int argc, char **argv) { + std::string logPath = "/home/frivas/devel/machine-learning/DetectionMetrics/cmake-build-debug/test/GLOG/"; + google::InitGoogleLogging(argv[0]); +// google::SetLogDestination(0, std::string(logPath + "info.log").c_str()); +// google::SetLogDestination(1, std::string(logPath + "warning.log").c_str()); + + for (google::LogSeverity s = google::WARNING; s < google::NUM_SEVERITIES; s++) + google::SetLogDestination(s, ""); + google::SetLogDestination(google::INFO, "log.log"); + FLAGS_alsologtostderr = 1; + + fLI::FLAGS_max_log_size = 1; //MB + + + fLI::FLAGS_minloglevel=google::ERROR; + + LOG(INFO) << "Info message"; + LOG(WARNING) << "Warning message"; + LOG(ERROR) << "Error message"; + myFunction(); + LOG(INFO) << "no" << std::endl; + int num_cookies = 11; + LOG_IF(INFO, num_cookies > 10) << "Got lots of cookies"; + + DLOG(INFO) << "Debug message"; + + + + PCHECK(num_cookies == 4) << "Write failed!"; + + +} diff --git a/Doxyfile b/Doxyfile new file mode 100644 index 00000000..51d9c95c --- /dev/null +++ b/Doxyfile @@ -0,0 +1,2494 @@ +# Doxyfile 1.8.13 + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project. 
+# +# All text after a double hash (##) is considered a comment and is placed in +# front of the TAG it is preceding. +# +# All text after a single hash (#) is considered a comment and will be ignored. +# The format is: +# TAG = value [value, ...] +# For lists, items can also be appended using: +# TAG += value [value, ...] +# Values that contain spaces should be placed between quotes (\" \"). + +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +# This tag specifies the encoding used for all characters in the config file +# that follow. The default is UTF-8 which is also the encoding used for all text +# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv +# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv +# for the list of possible encodings. +# The default value is: UTF-8. + +DOXYFILE_ENCODING = UTF-8 + +# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by +# double-quotes, unless you are using Doxywizard) that should identify the +# project for which the documentation is generated. This name is used in the +# title of most generated pages and in a few other places. +# The default value is: My Project. + +PROJECT_NAME = DetectionMetrics + +# The PROJECT_NUMBER tag can be used to enter a project or revision number. This +# could be handy for archiving the generated documentation or if some version +# control system is used. + +PROJECT_NUMBER = 1.00 + +# Using the PROJECT_BRIEF tag one can provide an optional one line description +# for a project that appears at the top of each page and should give viewer a +# quick idea about the purpose of the project. Keep the description short. 
+ +PROJECT_BRIEF = "Tool to evaluate deep-learning detection and segmentation models, and to create datasets" + +# With the PROJECT_LOGO tag one can specify a logo or an icon that is included +# in the documentation. The maximum height of the logo should not exceed 55 +# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy +# the logo to the output directory. + +PROJECT_LOGO = jderobot.png + +# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path +# into which the generated documentation will be written. If a relative path is +# entered, it will be relative to the location where doxygen was started. If +# left blank the current directory will be used. + +OUTPUT_DIRECTORY = + +# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub- +# directories (in 2 levels) under the output directory of each output format and +# will distribute the generated files over these directories. Enabling this +# option can be useful when feeding doxygen a huge amount of source files, where +# putting all generated files in the same directory would otherwise causes +# performance problems for the file system. +# The default value is: NO. + +CREATE_SUBDIRS = NO + +# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII +# characters to appear in the names of generated files. If set to NO, non-ASCII +# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode +# U+3044. +# The default value is: NO. + +ALLOW_UNICODE_NAMES = NO + +# The OUTPUT_LANGUAGE tag is used to specify the language in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all constant output in the proper language. 
+# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese, +# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States), +# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian, +# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages), +# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian, +# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian, +# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish, +# Ukrainian and Vietnamese. +# The default value is: English. + +OUTPUT_LANGUAGE = English + +# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member +# descriptions after the members that are listed in the file and class +# documentation (similar to Javadoc). Set to NO to disable this. +# The default value is: YES. + +BRIEF_MEMBER_DESC = YES + +# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief +# description of a member or function before the detailed description +# +# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. +# The default value is: YES. + +REPEAT_BRIEF = YES + +# This tag implements a quasi-intelligent brief description abbreviator that is +# used to form the text in various listings. Each string in this list, if found +# as the leading text of the brief description, will be stripped from the text +# and the result, after processing the whole list, is used as the annotated +# text. Otherwise, the brief description is used as-is. If left blank, the +# following values are used ($name is automatically replaced with the name of +# the entity):The $name class, The $name widget, The $name file, is, provides, +# specifies, contains, represents, a, an and the. 
+ +ABBREVIATE_BRIEF = "The $name class" \ + "The $name widget" \ + "The $name file" \ + is \ + provides \ + specifies \ + contains \ + represents \ + a \ + an \ + the + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# doxygen will generate a detailed section even if there is only a brief +# description. +# The default value is: NO. + +ALWAYS_DETAILED_SEC = NO + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all +# inherited members of a class in the documentation of that class as if those +# members were ordinary class members. Constructors, destructors and assignment +# operators of the base classes will not be shown. +# The default value is: NO. + +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path +# before files name in the file list and in the header files. If set to NO the +# shortest path that makes the file name unique will be used +# The default value is: YES. + +FULL_PATH_NAMES = YES + +# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. +# Stripping is only done if one of the specified strings matches the left-hand +# part of the path. The tag can be used to show relative paths in the file list. +# If left blank the directory from which doxygen is run is used as the path to +# strip. +# +# Note that you can specify absolute paths here, but also relative paths, which +# will be relative from the directory where doxygen is started. +# This tag requires that the tag FULL_PATH_NAMES is set to YES. + +STRIP_FROM_PATH = + +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the +# path mentioned in the documentation of a class, which tells the reader which +# header file to include in order to use a class. If left blank only the name of +# the header file containing the class definition is used. 
Otherwise one should +# specify the list of include paths that are normally passed to the compiler +# using the -I flag. + +STRIP_FROM_INC_PATH = + +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but +# less readable) file names. This can be useful is your file systems doesn't +# support long names like on DOS, Mac, or CD-ROM. +# The default value is: NO. + +SHORT_NAMES = NO + +# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the +# first line (until the first dot) of a Javadoc-style comment as the brief +# description. If set to NO, the Javadoc-style will behave just like regular Qt- +# style comments (thus requiring an explicit @brief command for a brief +# description.) +# The default value is: NO. + +JAVADOC_AUTOBRIEF = NO + +# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first +# line (until the first dot) of a Qt-style comment as the brief description. If +# set to NO, the Qt-style will behave just like regular Qt-style comments (thus +# requiring an explicit \brief command for a brief description.) +# The default value is: NO. + +QT_AUTOBRIEF = NO + +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a +# multi-line C++ special comment block (i.e. a block of //! or /// comments) as +# a brief description. This used to be the default behavior. The new default is +# to treat a multi-line C++ comment block as a detailed description. Set this +# tag to YES if you prefer the old behavior instead. +# +# Note that setting this tag to YES also means that rational rose comments are +# not recognized any more. +# The default value is: NO. + +MULTILINE_CPP_IS_BRIEF = NO + +# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the +# documentation from any documented member that it re-implements. +# The default value is: YES. 
+ +INHERIT_DOCS = YES + +# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new +# page for each member. If set to NO, the documentation of a member will be part +# of the file/class/namespace that contains it. +# The default value is: NO. + +SEPARATE_MEMBER_PAGES = NO + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen +# uses this value to replace tabs by spaces in code fragments. +# Minimum value: 1, maximum value: 16, default value: 4. + +TAB_SIZE = 4 + +# This tag can be used to specify a number of aliases that act as commands in +# the documentation. An alias has the form: +# name=value +# For example adding +# "sideeffect=@par Side Effects:\n" +# will allow you to put the command \sideeffect (or @sideeffect) in the +# documentation, which will result in a user-defined paragraph with heading +# "Side Effects:". You can put \n's in the value part of an alias to insert +# newlines. + +ALIASES = + +# This tag can be used to specify a number of word-keyword mappings (TCL only). +# A mapping has the form "name=value". For example adding "class=itcl::class" +# will allow you to use the command class in the itcl::class meaning. + +TCL_SUBST = + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources +# only. Doxygen will then generate output that is more tailored for C. For +# instance, some of the names that are used will be different. The list of all +# members will be omitted, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_FOR_C = NO + +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or +# Python sources only. Doxygen will then generate output that is more tailored +# for that language. For instance, namespaces will be presented as packages, +# qualified scopes will look different, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_JAVA = NO + +# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran +# sources. 
Doxygen will then generate output that is tailored for Fortran. +# The default value is: NO. + +OPTIMIZE_FOR_FORTRAN = NO + +# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL +# sources. Doxygen will then generate output that is tailored for VHDL. +# The default value is: NO. + +OPTIMIZE_OUTPUT_VHDL = NO + +# Doxygen selects the parser to use depending on the extension of the files it +# parses. With this tag you can assign which parser to use for a given +# extension. Doxygen has a built-in mapping, but you can override or extend it +# using this tag. The format is ext=language, where ext is a file extension, and +# language is one of the parsers supported by doxygen: IDL, Java, Javascript, +# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran: +# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran: +# Fortran. In the later case the parser tries to guess whether the code is fixed +# or free formatted code, this is the default for Fortran type files), VHDL. For +# instance to make doxygen treat .inc files as Fortran files (default is PHP), +# and .f files as C (default is Fortran), use: inc=Fortran f=C. +# +# Note: For files without extension you can use no_extension as a placeholder. +# +# Note that for custom extensions you also need to set FILE_PATTERNS otherwise +# the files are not read by doxygen. + +EXTENSION_MAPPING = + +# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments +# according to the Markdown format, which allows for more readable +# documentation. See http://daringfireball.net/projects/markdown/ for details. +# The output of markdown processing is further processed by doxygen, so you can +# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in +# case of backward compatibilities issues. +# The default value is: YES. 
+ +MARKDOWN_SUPPORT = YES + +# When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up +# to that level are automatically included in the table of contents, even if +# they do not have an id attribute. +# Note: This feature currently applies only to Markdown headings. +# Minimum value: 0, maximum value: 99, default value: 0. +# This tag requires that the tag MARKDOWN_SUPPORT is set to YES. + +TOC_INCLUDE_HEADINGS = 0 + +# When enabled doxygen tries to link words that correspond to documented +# classes, or namespaces to their corresponding documentation. Such a link can +# be prevented in individual cases by putting a % sign in front of the word or +# globally by setting AUTOLINK_SUPPORT to NO. +# The default value is: YES. + +AUTOLINK_SUPPORT = YES + +# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want +# to include (a tag file for) the STL sources as input, then you should set this +# tag to YES in order to let doxygen match functions declarations and +# definitions whose arguments contain STL classes (e.g. func(std::string); +# versus func(std::string) {}). This also make the inheritance and collaboration +# diagrams that involve STL classes more complete and accurate. +# The default value is: NO. + +BUILTIN_STL_SUPPORT = NO + +# If you use Microsoft's C++/CLI language, you should set this option to YES to +# enable parsing support. +# The default value is: NO. + +CPP_CLI_SUPPORT = NO + +# Set the SIP_SUPPORT tag to YES if your project consists of sip (see: +# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen +# will parse them like normal C++ but will assume all classes use public instead +# of private inheritance when no explicit protection keyword is present. +# The default value is: NO. + +SIP_SUPPORT = NO + +# For Microsoft's IDL there are propget and propput attributes to indicate +# getter and setter methods for a property. 
Setting this option to YES will make +# doxygen to replace the get and set methods by a property in the documentation. +# This will only work if the methods are indeed getting or setting a simple +# type. If this is not the case, or you want to show the methods anyway, you +# should set this option to NO. +# The default value is: YES. + +IDL_PROPERTY_SUPPORT = YES + +# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC +# tag is set to YES then doxygen will reuse the documentation of the first +# member in the group (if any) for the other members of the group. By default +# all members of a group must be documented explicitly. +# The default value is: NO. + +DISTRIBUTE_GROUP_DOC = NO + +# If one adds a struct or class to a group and this option is enabled, then also +# any nested class or struct is added to the same group. By default this option +# is disabled and one has to add nested compounds explicitly via \ingroup. +# The default value is: NO. + +GROUP_NESTED_COMPOUNDS = NO + +# Set the SUBGROUPING tag to YES to allow class member groups of the same type +# (for instance a group of public functions) to be put as a subgroup of that +# type (e.g. under the Public Functions section). Set it to NO to prevent +# subgrouping. Alternatively, this can be done per class using the +# \nosubgrouping command. +# The default value is: YES. + +SUBGROUPING = YES + +# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions +# are shown inside the group in which they are included (e.g. using \ingroup) +# instead of on a separate page (for HTML and Man pages) or section (for LaTeX +# and RTF). +# +# Note that this feature does not work in combination with +# SEPARATE_MEMBER_PAGES. +# The default value is: NO. 
+ +INLINE_GROUPED_CLASSES = NO + +# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions +# with only public data fields or simple typedef fields will be shown inline in +# the documentation of the scope in which they are defined (i.e. file, +# namespace, or group documentation), provided this scope is documented. If set +# to NO, structs, classes, and unions are shown on a separate page (for HTML and +# Man pages) or section (for LaTeX and RTF). +# The default value is: NO. + +INLINE_SIMPLE_STRUCTS = NO + +# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or +# enum is documented as struct, union, or enum with the name of the typedef. So +# typedef struct TypeS {} TypeT, will appear in the documentation as a struct +# with name TypeT. When disabled the typedef will appear as a member of a file, +# namespace, or class. And the struct will be named TypeS. This can typically be +# useful for C code in case the coding convention dictates that all compound +# types are typedef'ed and only the typedef is referenced, never the tag name. +# The default value is: NO. + +TYPEDEF_HIDES_STRUCT = NO + +# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This +# cache is used to resolve symbols given their name and scope. Since this can be +# an expensive process and often the same symbol appears multiple times in the +# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small +# doxygen will become slower. If the cache is too large, memory is wasted. The +# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range +# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 +# symbols. At the end of a run doxygen will report the cache usage and suggest +# the optimal cache size from a speed point of view. +# Minimum value: 0, maximum value: 9, default value: 0. 
+ +LOOKUP_CACHE_SIZE = 0 + +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- + +# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in +# documentation are documented, even if no documentation was available. Private +# class members and static file members will be hidden unless the +# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. +# Note: This will also disable the warnings about undocumented members that are +# normally produced when WARNINGS is set to YES. +# The default value is: NO. + +EXTRACT_ALL = YES + +# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will +# be included in the documentation. +# The default value is: NO. + +EXTRACT_PRIVATE = NO + +# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal +# scope will be included in the documentation. +# The default value is: NO. + +EXTRACT_PACKAGE = NO + +# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be +# included in the documentation. +# The default value is: NO. + +EXTRACT_STATIC = NO + +# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined +# locally in source files will be included in the documentation. If set to NO, +# only classes defined in header files are included. Does not have any effect +# for Java sources. +# The default value is: YES. + +EXTRACT_LOCAL_CLASSES = YES + +# This flag is only useful for Objective-C code. If set to YES, local methods, +# which are defined in the implementation section but not in the interface are +# included in the documentation. If set to NO, only methods in the interface are +# included. +# The default value is: NO. 
+ +EXTRACT_LOCAL_METHODS = NO + +# If this flag is set to YES, the members of anonymous namespaces will be +# extracted and appear in the documentation as a namespace called +# 'anonymous_namespace{file}', where file will be replaced with the base name of +# the file that contains the anonymous namespace. By default anonymous namespace +# are hidden. +# The default value is: NO. + +EXTRACT_ANON_NSPACES = NO + +# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all +# undocumented members inside documented classes or files. If set to NO these +# members will be included in the various overviews, but no documentation +# section is generated. This option has no effect if EXTRACT_ALL is enabled. +# The default value is: NO. + +HIDE_UNDOC_MEMBERS = NO + +# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. If set +# to NO, these classes will be included in the various overviews. This option +# has no effect if EXTRACT_ALL is enabled. +# The default value is: NO. + +HIDE_UNDOC_CLASSES = NO + +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend +# (class|struct|union) declarations. If set to NO, these declarations will be +# included in the documentation. +# The default value is: NO. + +HIDE_FRIEND_COMPOUNDS = NO + +# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any +# documentation blocks found inside the body of a function. If set to NO, these +# blocks will be appended to the function's detailed documentation block. +# The default value is: NO. + +HIDE_IN_BODY_DOCS = NO + +# The INTERNAL_DOCS tag determines if documentation that is typed after a +# \internal command is included. If the tag is set to NO then the documentation +# will be excluded. Set it to YES to include the internal documentation. +# The default value is: NO. 
+ +INTERNAL_DOCS = NO + +# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file +# names in lower-case letters. If set to YES, upper-case letters are also +# allowed. This is useful if you have classes or files whose names only differ +# in case and if your file system supports case sensitive file names. Windows +# and Mac users are advised to set this option to NO. +# The default value is: system dependent. + +CASE_SENSE_NAMES = NO + +# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with +# their full class and namespace scopes in the documentation. If set to YES, the +# scope will be hidden. +# The default value is: NO. + +HIDE_SCOPE_NAMES = NO + +# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will +# append additional text to a page's title, such as Class Reference. If set to +# YES the compound reference will be hidden. +# The default value is: NO. + +HIDE_COMPOUND_REFERENCE= NO + +# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of +# the files that are included by a file in the documentation of that file. +# The default value is: YES. + +SHOW_INCLUDE_FILES = YES + +# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each +# grouped member an include statement to the documentation, telling the reader +# which file to include in order to use the member. +# The default value is: NO. + +SHOW_GROUPED_MEMB_INC = NO + +# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include +# files with double quotes in the documentation rather than with sharp brackets. +# The default value is: NO. + +FORCE_LOCAL_INCLUDES = NO + +# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the +# documentation for inline members. +# The default value is: YES. 
+ +INLINE_INFO = YES + +# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the +# (detailed) documentation of file and class members alphabetically by member +# name. If set to NO, the members will appear in declaration order. +# The default value is: YES. + +SORT_MEMBER_DOCS = YES + +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief +# descriptions of file, namespace and class members alphabetically by member +# name. If set to NO, the members will appear in declaration order. Note that +# this will also influence the order of the classes in the class list. +# The default value is: NO. + +SORT_BRIEF_DOCS = NO + +# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the +# (brief and detailed) documentation of class members so that constructors and +# destructors are listed first. If set to NO the constructors will appear in the +# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. +# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief +# member documentation. +# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting +# detailed member documentation. +# The default value is: NO. + +SORT_MEMBERS_CTORS_1ST = NO + +# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy +# of group names into alphabetical order. If set to NO the group names will +# appear in their defined order. +# The default value is: NO. + +SORT_GROUP_NAMES = NO + +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by +# fully-qualified names, including namespaces. If set to NO, the class list will +# be sorted only by class name, not including the namespace part. +# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. +# Note: This option applies only to the class list, not to the alphabetical +# list. +# The default value is: NO. 
+ +SORT_BY_SCOPE_NAME = NO + +# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper +# type resolution of all parameters of a function it will reject a match between +# the prototype and the implementation of a member function even if there is +# only one candidate or it is obvious which candidate to choose by doing a +# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still +# accept a match between prototype and implementation in such cases. +# The default value is: NO. + +STRICT_PROTO_MATCHING = NO + +# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo +# list. This list is created by putting \todo commands in the documentation. +# The default value is: YES. + +GENERATE_TODOLIST = YES + +# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test +# list. This list is created by putting \test commands in the documentation. +# The default value is: YES. + +GENERATE_TESTLIST = YES + +# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug +# list. This list is created by putting \bug commands in the documentation. +# The default value is: YES. + +GENERATE_BUGLIST = YES + +# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO) +# the deprecated list. This list is created by putting \deprecated commands in +# the documentation. +# The default value is: YES. + +GENERATE_DEPRECATEDLIST= YES + +# The ENABLED_SECTIONS tag can be used to enable conditional documentation +# sections, marked by \if ... \endif and \cond +# ... \endcond blocks. + +ENABLED_SECTIONS = + +# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the +# initial value of a variable or macro / define can have for it to appear in the +# documentation. If the initializer consists of more lines than specified here +# it will be hidden. Use a value of 0 to hide initializers completely. 
The
+# appearance of the value of individual variables and macros / defines can be
+# controlled using \showinitializer or \hideinitializer command in the
+# documentation regardless of this setting.
+# Minimum value: 0, maximum value: 10000, default value: 30.
+
+MAX_INITIALIZER_LINES = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
+# the bottom of the documentation of classes and structs. If set to YES, the
+# list will mention the files that were used to generate the documentation.
+# The default value is: YES.
+
+SHOW_USED_FILES = YES
+
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
+# will remove the Files entry from the Quick Index and from the Folder Tree View
+# (if specified).
+# The default value is: YES.
+
+SHOW_FILES = YES
+
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
+# page. This will remove the Namespaces entry from the Quick Index and from the
+# Folder Tree View (if specified).
+# The default value is: YES.
+
+SHOW_NAMESPACES = YES
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from
+# the version control system). Doxygen will invoke the program by executing (via
+# popen()) the command <command> <input-file>, where <command> is the value of
+# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
+# provided by doxygen. Whatever the program writes to standard output is used as
+# the file version. For an example see the documentation.
+
+FILE_VERSION_FILTER =
+
+# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
+# by doxygen. The layout file controls the global structure of the generated
+# output files in an output format independent way. To create the layout file
+# that represents doxygen's defaults, run doxygen with the -l option.
You can +# optionally specify a file name after the option, if omitted DoxygenLayout.xml +# will be used as the name of the layout file. +# +# Note that if you run doxygen from a directory containing a file called +# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE +# tag is left empty. + +LAYOUT_FILE = + +# The CITE_BIB_FILES tag can be used to specify one or more bib files containing +# the reference definitions. This must be a list of .bib files. The .bib +# extension is automatically appended if omitted. This requires the bibtex tool +# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info. +# For LaTeX the style of the bibliography can be controlled using +# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the +# search path. See also \cite for info how to create references. + +CITE_BIB_FILES = + +#--------------------------------------------------------------------------- +# Configuration options related to warning and progress messages +#--------------------------------------------------------------------------- + +# The QUIET tag can be used to turn on/off the messages that are generated to +# standard output by doxygen. If QUIET is set to YES this implies that the +# messages are off. +# The default value is: NO. + +QUIET = NO + +# The WARNINGS tag can be used to turn on/off the warning messages that are +# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES +# this implies that the warnings are on. +# +# Tip: Turn warnings on while writing the documentation. +# The default value is: YES. + +WARNINGS = YES + +# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate +# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag +# will automatically be disabled. +# The default value is: YES. 
+ +WARN_IF_UNDOCUMENTED = YES + +# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for +# potential errors in the documentation, such as not documenting some parameters +# in a documented function, or documenting parameters that don't exist or using +# markup commands wrongly. +# The default value is: YES. + +WARN_IF_DOC_ERROR = YES + +# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that +# are documented, but have no documentation for their parameters or return +# value. If set to NO, doxygen will only warn about wrong or incomplete +# parameter documentation, but not about the absence of documentation. +# The default value is: NO. + +WARN_NO_PARAMDOC = NO + +# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when +# a warning is encountered. +# The default value is: NO. + +WARN_AS_ERROR = NO + +# The WARN_FORMAT tag determines the format of the warning messages that doxygen +# can produce. The string should contain the $file, $line, and $text tags, which +# will be replaced by the file and line number from which the warning originated +# and the warning text. Optionally the format may contain $version, which will +# be replaced by the version of the file (if it could be obtained via +# FILE_VERSION_FILTER) +# The default value is: $file:$line: $text. + +WARN_FORMAT = "$file:$line: $text" + +# The WARN_LOGFILE tag can be used to specify a file to which warning and error +# messages should be written. If left blank the output is written to standard +# error (stderr). + +WARN_LOGFILE = + +#--------------------------------------------------------------------------- +# Configuration options related to the input files +#--------------------------------------------------------------------------- + +# The INPUT tag is used to specify the files and/or directories that contain +# documented source files. You may enter file names like myfile.cpp or +# directories like /usr/src/myproject. 
Separate the files or directories with +# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING +# Note: If this tag is empty the current directory is searched. + +INPUT = . + +# This tag can be used to specify the character encoding of the source files +# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses +# libiconv (or the iconv built into libc) for the transcoding. See the libiconv +# documentation (see: http://www.gnu.org/software/libiconv) for the list of +# possible encodings. +# The default value is: UTF-8. + +INPUT_ENCODING = UTF-8 + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and +# *.h) to filter out the source-files in the directories. +# +# Note that for custom extensions or not directly supported extensions you also +# need to set EXTENSION_MAPPING for the extension otherwise the files are not +# read by doxygen. +# +# If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp, +# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, +# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, +# *.m, *.markdown, *.md, *.mm, *.dox, *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, +# *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf and *.qsf. + +FILE_PATTERNS = *.c \ + *.cc \ + *.cxx \ + *.cpp \ + *.c++ \ + *.java \ + *.ii \ + *.ixx \ + *.ipp \ + *.i++ \ + *.inl \ + *.idl \ + *.ddl \ + *.odl \ + *.h \ + *.hh \ + *.hxx \ + *.hpp \ + *.h++ \ + *.cs \ + *.d \ + *.php \ + *.php4 \ + *.php5 \ + *.phtml \ + *.inc \ + *.m \ + *.markdown \ + *.md \ + *.mm \ + *.dox \ + *.py \ + *.pyw \ + *.f90 \ + *.f95 \ + *.f03 \ + *.f08 \ + *.f \ + *.for \ + *.tcl \ + *.vhd \ + *.vhdl \ + *.ucf \ + *.qsf + +# The RECURSIVE tag can be used to specify whether or not subdirectories should +# be searched for input files as well. +# The default value is: NO. 
+ +RECURSIVE = YES + +# The EXCLUDE tag can be used to specify files and/or directories that should be +# excluded from the INPUT source files. This way you can easily exclude a +# subdirectory from a directory tree whose root is specified with the INPUT tag. +# +# Note that relative paths are relative to the directory from which doxygen is +# run. + +EXCLUDE = + +# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or +# directories that are symbolic links (a Unix file system feature) are excluded +# from the input. +# The default value is: NO. + +EXCLUDE_SYMLINKS = NO + +# If the value of the INPUT tag contains directories, you can use the +# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude +# certain files from those directories. +# +# Note that the wildcards are matched against the file with absolute path, so to +# exclude all test directories for example use the pattern */test/* + +# EXCLUDE_PATTERNS = */build/* + +# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names +# (namespaces, classes, functions, etc.) that should be excluded from the +# output. The symbol name can be a fully qualified name, a word, or if the +# wildcard * is used, a substring. Examples: ANamespace, AClass, +# AClass::ANamespace, ANamespace::*Test +# +# Note that the wildcards are matched against the file with absolute path, so to +# exclude all test directories use the pattern */test/* + +EXCLUDE_SYMBOLS = + +# The EXAMPLE_PATH tag can be used to specify one or more files or directories +# that contain example code fragments that are included (see the \include +# command). + +EXAMPLE_PATH = + +# If the value of the EXAMPLE_PATH tag contains directories, you can use the +# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and +# *.h) to filter out the source-files in the directories. If left blank all +# files are included. 
+
+EXAMPLE_PATTERNS = *
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude commands
+# irrespective of the value of the RECURSIVE tag.
+# The default value is: NO.
+
+EXAMPLE_RECURSIVE = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or directories
+# that contain images that are to be included in the documentation (see the
+# \image command).
+
+IMAGE_PATH =
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command:
+#
+# <filter> <input-file>
+#
+# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
+# name of an input file. Doxygen will then use the output that the filter
+# program writes to standard output. If FILTER_PATTERNS is specified, this tag
+# will be ignored.
+#
+# Note that the filter must not add or remove lines; it is applied before the
+# code is scanned, but not when the output code is generated. If lines are added
+# or removed, the anchors will not be placed correctly.
+#
+# Note that for custom extensions or not directly supported extensions you also
+# need to set EXTENSION_MAPPING for the extension otherwise the files are not
+# properly processed by doxygen.
+
+INPUT_FILTER =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form: pattern=filter
+# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
+# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
+# patterns match the file name, INPUT_FILTER is applied.
+# +# Note that for custom extensions or not directly supported extensions you also +# need to set EXTENSION_MAPPING for the extension otherwise the files are not +# properly processed by doxygen. + +FILTER_PATTERNS = + +# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using +# INPUT_FILTER) will also be used to filter the input files that are used for +# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES). +# The default value is: NO. + +FILTER_SOURCE_FILES = NO + +# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file +# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and +# it is also possible to disable source filtering for a specific pattern using +# *.ext= (so without naming a filter). +# This tag requires that the tag FILTER_SOURCE_FILES is set to YES. + +FILTER_SOURCE_PATTERNS = + +# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that +# is part of the input, its contents will be placed on the main page +# (index.html). This can be useful if you have a project on for instance GitHub +# and want to reuse the introduction page also for the doxygen output. + +USE_MDFILE_AS_MAINPAGE = + +#--------------------------------------------------------------------------- +# Configuration options related to source browsing +#--------------------------------------------------------------------------- + +# If the SOURCE_BROWSER tag is set to YES then a list of source files will be +# generated. Documented entities will be cross-referenced with these sources. +# +# Note: To get rid of all source code in the generated output, make sure that +# also VERBATIM_HEADERS is set to NO. +# The default value is: NO. + +SOURCE_BROWSER = NO + +# Setting the INLINE_SOURCES tag to YES will include the body of functions, +# classes and enums directly into the documentation. +# The default value is: NO. 
+ +INLINE_SOURCES = NO + +# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any +# special comment blocks from generated source code fragments. Normal C, C++ and +# Fortran comments will always remain visible. +# The default value is: YES. + +STRIP_CODE_COMMENTS = YES + +# If the REFERENCED_BY_RELATION tag is set to YES then for each documented +# function all documented functions referencing it will be listed. +# The default value is: NO. + +REFERENCED_BY_RELATION = NO + +# If the REFERENCES_RELATION tag is set to YES then for each documented function +# all documented entities called/used by that function will be listed. +# The default value is: NO. + +REFERENCES_RELATION = NO + +# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set +# to YES then the hyperlinks from functions in REFERENCES_RELATION and +# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will +# link to the documentation. +# The default value is: YES. + +REFERENCES_LINK_SOURCE = YES + +# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the +# source code will show a tooltip with additional information such as prototype, +# brief description and links to the definition and documentation. Since this +# will make the HTML file larger and loading of large files a bit slower, you +# can opt to disable this feature. +# The default value is: YES. +# This tag requires that the tag SOURCE_BROWSER is set to YES. + +SOURCE_TOOLTIPS = YES + +# If the USE_HTAGS tag is set to YES then the references to source code will +# point to the HTML generated by the htags(1) tool instead of doxygen built-in +# source browser. The htags tool is part of GNU's global source tagging system +# (see http://www.gnu.org/software/global/global.html). You will need version +# 4.8.6 or higher. 
+#
+# To use it do the following:
+# - Install the latest version of global
+# - Enable SOURCE_BROWSER and USE_HTAGS in the config file
+# - Make sure the INPUT points to the root of the source tree
+# - Run doxygen as normal
+#
+# Doxygen will invoke htags (and that will in turn invoke gtags), so these
+# tools must be available from the command line (i.e. in the search path).
+#
+# The result: instead of the source browser generated by doxygen, the links to
+# source code will now point to the output of htags.
+# The default value is: NO.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+USE_HTAGS = NO
+
+# If the VERBATIM_HEADERS tag is set to YES then doxygen will generate a
+# verbatim copy of the header file for each class for which an include is
+# specified. Set to NO to disable this.
+# See also: Section \class.
+# The default value is: YES.
+
+VERBATIM_HEADERS = YES
+
+# If the CLANG_ASSISTED_PARSING tag is set to YES then doxygen will use the
+# clang parser (see: http://clang.llvm.org/) for more accurate parsing at the
+# cost of reduced performance. This can be particularly helpful with template
+# rich C++ code for which doxygen's built-in parser lacks the necessary type
+# information.
+# Note: The availability of this option depends on whether or not doxygen was
+# generated with the -Duse-libclang=ON option for CMake.
+# The default value is: NO.
+
+CLANG_ASSISTED_PARSING = NO
+
+# If clang assisted parsing is enabled you can provide the compiler with command
+# line options that you would normally use when invoking the compiler. Note that
+# the include paths will already be set by doxygen for the files and directories
+# specified with INPUT and INCLUDE_PATH.
+# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES.
+ +CLANG_OPTIONS = + +#--------------------------------------------------------------------------- +# Configuration options related to the alphabetical class index +#--------------------------------------------------------------------------- + +# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all +# compounds will be generated. Enable this if the project contains a lot of +# classes, structs, unions or interfaces. +# The default value is: YES. + +ALPHABETICAL_INDEX = YES + +# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in +# which the alphabetical index list will be split. +# Minimum value: 1, maximum value: 20, default value: 5. +# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. + +COLS_IN_ALPHA_INDEX = 5 + +# In case all classes in a project start with a common prefix, all classes will +# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag +# can be used to specify a prefix (or a list of prefixes) that should be ignored +# while generating the index headers. +# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. + +IGNORE_PREFIX = + +#--------------------------------------------------------------------------- +# Configuration options related to the HTML output +#--------------------------------------------------------------------------- + +# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output +# The default value is: YES. + +GENERATE_HTML = YES + +# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a +# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of +# it. +# The default directory is: html. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_OUTPUT = html + +# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each +# generated HTML page (for example: .htm, .php, .asp). +# The default value is: .html. 
+# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FILE_EXTENSION = .html + +# The HTML_HEADER tag can be used to specify a user-defined HTML header file for +# each generated HTML page. If the tag is left blank doxygen will generate a +# standard header. +# +# To get valid HTML the header file that includes any scripts and style sheets +# that doxygen needs, which is dependent on the configuration options used (e.g. +# the setting GENERATE_TREEVIEW). It is highly recommended to start with a +# default header using +# doxygen -w html new_header.html new_footer.html new_stylesheet.css +# YourConfigFile +# and then modify the file new_header.html. See also section "Doxygen usage" +# for information on how to generate the default header that doxygen normally +# uses. +# Note: The header is subject to change so you typically have to regenerate the +# default header when upgrading to a newer version of doxygen. For a description +# of the possible markers and block names see the documentation. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_HEADER = + +# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each +# generated HTML page. If the tag is left blank doxygen will generate a standard +# footer. See HTML_HEADER for more information on how to generate a default +# footer and what special commands can be used inside the footer. See also +# section "Doxygen usage" for information on how to generate the default footer +# that doxygen normally uses. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FOOTER = + +# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style +# sheet that is used by each HTML page. It can be used to fine-tune the look of +# the HTML output. If left blank doxygen will generate a default style sheet. +# See also section "Doxygen usage" for information on how to generate the style +# sheet that doxygen normally uses. 
+# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as +# it is more robust and this tag (HTML_STYLESHEET) will in the future become +# obsolete. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_STYLESHEET = + +# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined +# cascading style sheets that are included after the standard style sheets +# created by doxygen. Using this option one can overrule certain style aspects. +# This is preferred over using HTML_STYLESHEET since it does not replace the +# standard style sheet and is therefore more robust against future updates. +# Doxygen will copy the style sheet files to the output directory. +# Note: The order of the extra style sheet files is of importance (e.g. the last +# style sheet in the list overrules the setting of the previous ones in the +# list). For an example see the documentation. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_EXTRA_STYLESHEET = + +# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or +# other source files which should be copied to the HTML output directory. Note +# that these files will be copied to the base HTML output directory. Use the +# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these +# files. In the HTML_STYLESHEET file, use the file name only. Also note that the +# files will be copied as-is; there are no commands or markers available. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_EXTRA_FILES = + +# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen +# will adjust the colors in the style sheet and background images according to +# this color. Hue is specified as an angle on a colorwheel, see +# http://en.wikipedia.org/wiki/Hue for more information. For instance the value +# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 +# purple, and 360 is red again. 
+# Minimum value: 0, maximum value: 359, default value: 220. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_HUE = 220 + +# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors +# in the HTML output. For a value of 0 the output will use grayscales only. A +# value of 255 will produce the most vivid colors. +# Minimum value: 0, maximum value: 255, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_SAT = 100 + +# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the +# luminance component of the colors in the HTML output. Values below 100 +# gradually make the output lighter, whereas values above 100 make the output +# darker. The value divided by 100 is the actual gamma applied, so 80 represents +# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not +# change the gamma. +# Minimum value: 40, maximum value: 240, default value: 80. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_GAMMA = 80 + +# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML +# page will contain the date and time when the page was generated. Setting this +# to YES can help to show when doxygen was last run and thus if the +# documentation is up to date. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_TIMESTAMP = NO + +# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML +# documentation will contain sections that can be hidden and shown after the +# page has loaded. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_DYNAMIC_SECTIONS = NO + +# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries +# shown in the various tree structured indices initially; the user can expand +# and collapse entries dynamically later on. 
Doxygen will expand the tree to +# such a level that at most the specified number of entries are visible (unless +# a fully collapsed tree already exceeds this amount). So setting the number of +# entries 1 will produce a full collapsed tree by default. 0 is a special value +# representing an infinite number of entries and will result in a full expanded +# tree by default. +# Minimum value: 0, maximum value: 9999, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_INDEX_NUM_ENTRIES = 100 + +# If the GENERATE_DOCSET tag is set to YES, additional index files will be +# generated that can be used as input for Apple's Xcode 3 integrated development +# environment (see: http://developer.apple.com/tools/xcode/), introduced with +# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a +# Makefile in the HTML output directory. Running make will produce the docset in +# that directory and running make install will install the docset in +# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at +# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html +# for more information. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_DOCSET = NO + +# This tag determines the name of the docset feed. A documentation feed provides +# an umbrella under which multiple documentation sets from a single provider +# (such as a company or product suite) can be grouped. +# The default value is: Doxygen generated docs. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_FEEDNAME = "Doxygen generated docs" + +# This tag specifies a string that should uniquely identify the documentation +# set bundle. This should be a reverse domain-name style string, e.g. +# com.mycompany.MyDocSet. Doxygen will append .docset to the name. +# The default value is: org.doxygen.Project. 
+# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_BUNDLE_ID = org.doxygen.Project + +# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify +# the documentation publisher. This should be a reverse domain-name style +# string, e.g. com.mycompany.MyDocSet.documentation. +# The default value is: org.doxygen.Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_ID = org.doxygen.Publisher + +# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. +# The default value is: Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_NAME = Publisher + +# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three +# additional HTML index files: index.hhp, index.hhc, and index.hhk. The +# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop +# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on +# Windows. +# +# The HTML Help Workshop contains a compiler that can convert all HTML output +# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML +# files are now used as the Windows 98 help format, and will replace the old +# Windows help format (.hlp) on all Windows platforms in the future. Compressed +# HTML files also contain an index, a table of contents, and you can search for +# words in the documentation. The HTML workshop also contains a viewer for +# compressed HTML files. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_HTMLHELP = NO + +# The CHM_FILE tag can be used to specify the file name of the resulting .chm +# file. You can add a path in front of the file if the result should not be +# written to the html output directory. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. 
+ +CHM_FILE = + +# The HHC_LOCATION tag can be used to specify the location (absolute path +# including file name) of the HTML help compiler (hhc.exe). If non-empty, +# doxygen will try to run the HTML help compiler on the generated index.hhp. +# The file has to be specified with full path. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +HHC_LOCATION = + +# The GENERATE_CHI flag controls if a separate .chi index file is generated +# (YES) or that it should be included in the master .chm file (NO). +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +GENERATE_CHI = NO + +# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc) +# and project file content. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +CHM_INDEX_ENCODING = + +# The BINARY_TOC flag controls whether a binary table of contents is generated +# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it +# enables the Previous and Next buttons. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +BINARY_TOC = NO + +# The TOC_EXPAND flag can be set to YES to add extra items for group members to +# the table of contents of the HTML help documentation and to the tree view. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +TOC_EXPAND = NO + +# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and +# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that +# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help +# (.qch) of the generated HTML documentation. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_QHP = NO + +# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify +# the file name of the resulting .qch file. 
The path specified is relative to +# the HTML output folder. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QCH_FILE = + +# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help +# Project output. For more information please see Qt Help Project / Namespace +# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace). +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_NAMESPACE = org.doxygen.Project + +# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt +# Help Project output. For more information please see Qt Help Project / Virtual +# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual- +# folders). +# The default value is: doc. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_VIRTUAL_FOLDER = doc + +# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom +# filter to add. For more information please see Qt Help Project / Custom +# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- +# filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_NAME = + +# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the +# custom filter to add. For more information please see Qt Help Project / Custom +# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- +# filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_ATTRS = + +# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this +# project's filter section matches. Qt Help Project / Filter Attributes (see: +# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_SECT_FILTER_ATTRS = + +# The QHG_LOCATION tag can be used to specify the location of Qt's +# qhelpgenerator. 
If non-empty doxygen will try to run qhelpgenerator on the +# generated .qhp file. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHG_LOCATION = + +# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be +# generated, together with the HTML files, they form an Eclipse help plugin. To +# install this plugin and make it available under the help contents menu in +# Eclipse, the contents of the directory containing the HTML and XML files needs +# to be copied into the plugins directory of eclipse. The name of the directory +# within the plugins directory should be the same as the ECLIPSE_DOC_ID value. +# After copying Eclipse needs to be restarted before the help appears. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_ECLIPSEHELP = NO + +# A unique identifier for the Eclipse help plugin. When installing the plugin +# the directory name containing the HTML and XML files should also have this +# name. Each documentation set should have its own identifier. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. + +ECLIPSE_DOC_ID = org.doxygen.Project + +# If you want full control over the layout of the generated HTML pages it might +# be necessary to disable the index and replace it with your own. The +# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top +# of each HTML page. A value of NO enables the index and the value YES disables +# it. Since the tabs in the index contain the same information as the navigation +# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +DISABLE_INDEX = NO + +# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index +# structure should be generated to display hierarchical information. 
If the tag +# value is set to YES, a side panel will be generated containing a tree-like +# index structure (just like the one that is generated for HTML Help). For this +# to work a browser that supports JavaScript, DHTML, CSS and frames is required +# (i.e. any modern browser). Windows users are probably better off using the +# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can +# further fine-tune the look of the index. As an example, the default style +# sheet generated by doxygen has an example that shows how to put an image at +# the root of the tree instead of the PROJECT_NAME. Since the tree basically has +# the same information as the tab index, you could consider setting +# DISABLE_INDEX to YES when enabling this option. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_TREEVIEW = NO + +# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that +# doxygen will group on one line in the generated HTML documentation. +# +# Note that a value of 0 will completely suppress the enum values from appearing +# in the overview section. +# Minimum value: 0, maximum value: 20, default value: 4. +# This tag requires that the tag GENERATE_HTML is set to YES. + +ENUM_VALUES_PER_LINE = 4 + +# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used +# to set the initial width (in pixels) of the frame in which the tree is shown. +# Minimum value: 0, maximum value: 1500, default value: 250. +# This tag requires that the tag GENERATE_HTML is set to YES. + +TREEVIEW_WIDTH = 250 + +# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to +# external symbols imported via tag files in a separate window. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +EXT_LINKS_IN_WINDOW = NO + +# Use this tag to change the font size of LaTeX formulas included as images in +# the HTML documentation. 
When you change the font size after a successful +# doxygen run you need to manually remove any form_*.png images from the HTML +# output directory to force them to be regenerated. +# Minimum value: 8, maximum value: 50, default value: 10. +# This tag requires that the tag GENERATE_HTML is set to YES. + +FORMULA_FONTSIZE = 10 + +# Use the FORMULA_TRANSPARENT tag to determine whether or not the images +# generated for formulas are transparent PNGs. Transparent PNGs are not +# supported properly for IE 6.0, but are supported on all modern browsers. +# +# Note that when changing this option you need to delete any form_*.png files in +# the HTML output directory before the changes have effect. +# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES. + +FORMULA_TRANSPARENT = YES + +# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see +# http://www.mathjax.org) which uses client side Javascript for the rendering +# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX +# installed or if you want formulas to look prettier in the HTML output. When +# enabled you may also need to install MathJax separately and configure the path +# to it using the MATHJAX_RELPATH option. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +USE_MATHJAX = NO + +# When MathJax is enabled you can set the default output format to be used for +# the MathJax output. See the MathJax site (see: +# http://docs.mathjax.org/en/latest/output.html) for more details. +# Possible values are: HTML-CSS (which is slower, but has the best +# compatibility), NativeMML (i.e. MathML) and SVG. +# The default value is: HTML-CSS. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_FORMAT = HTML-CSS + +# When MathJax is enabled you need to specify the location relative to the HTML +# output directory using the MATHJAX_RELPATH option. 
The destination directory +# should contain the MathJax.js script. For instance, if the mathjax directory +# is located at the same level as the HTML output directory, then +# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax +# Content Delivery Network so you can quickly see the result without installing +# MathJax. However, it is strongly recommended to install a local copy of +# MathJax from http://www.mathjax.org before deployment. +# The default value is: http://cdn.mathjax.org/mathjax/latest. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest + +# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax +# extension names that should be enabled during MathJax rendering. For example +# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_EXTENSIONS = + +# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces +# of code that will be used on startup of the MathJax code. See the MathJax site +# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an +# example see the documentation. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_CODEFILE = + +# When the SEARCHENGINE tag is enabled doxygen will generate a search box for +# the HTML output. The underlying search engine uses javascript and DHTML and +# should work on any modern browser. Note that when using HTML help +# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET) +# there is already a search function so this one should typically be disabled. +# For large projects the javascript based search engine can be slow, then +# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to +# search using the keyboard; to jump to the search box use + S +# (what the is depends on the OS and browser, but it is typically +# , /