diff --git a/README.md b/README.md
index 2219a68..f9ae57b 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,5 @@
- Logo
+ Logo
Getting Started Documentation diff --git a/conf/facerec/common_capturer_pb_uld_fda.xml b/conf/facerec/common_capturer_pb_uld_fda.xml new file mode 100644 index 0000000..27b0303 --- /dev/null +++ b/conf/facerec/common_capturer_pb_uld_fda.xml @@ -0,0 +1,23 @@ + + + +fld_capturer + +pb_fd +uld + +150 +1 + +0.2 +0.3 +0.7 + +frontal +fda +share/fda/fda0017.3500.enc + +0 +conf/facerec/iris_fitter.xml + + diff --git a/conf/facerec/common_capturer_ssyv_fda_v2.xml b/conf/facerec/common_capturer_ssyv_fda_v2.xml index 98a276c..1c62e98 100644 --- a/conf/facerec/common_capturer_ssyv_fda_v2.xml +++ b/conf/facerec/common_capturer_ssyv_fda_v2.xml @@ -3,8 +3,8 @@ fld_capturer -ssyv - +pb_fd +ssyv 2 0.4 @@ -14,4 +14,7 @@ fda share/fda/fda0017.3500.enc +0 +conf/facerec/iris_fitter.xml + diff --git a/conf/facerec/common_capturer_ssyv_fda_v3.xml b/conf/facerec/common_capturer_ssyv_fda_v3.xml new file mode 100644 index 0000000..e29fa06 --- /dev/null +++ b/conf/facerec/common_capturer_ssyv_fda_v3.xml @@ -0,0 +1,20 @@ + + + +fld_capturer + +pb_fd +ssyv +3 + +0.4 +0.45 + +frontal +fda +share/fda/fda0017.3500.enc + +0 +conf/facerec/iris_fitter.xml + + diff --git a/conf/facerec/common_capturer_ssyv_fda_v5.xml b/conf/facerec/common_capturer_ssyv_fda_v5.xml index f502861..7ccb106 100644 --- a/conf/facerec/common_capturer_ssyv_fda_v5.xml +++ b/conf/facerec/common_capturer_ssyv_fda_v5.xml @@ -3,9 +3,9 @@ fld_capturer -ssyv - -3 +pb_fd +ssyv +4 0.4 0.45 @@ -14,4 +14,7 @@ fda share/fda/fda0017.3500.enc +0 +conf/facerec/iris_fitter.xml + diff --git a/conf/facerec/safety_city_q1.xml b/conf/facerec/safety_city_q1.xml index f502861..a9e4003 100644 --- a/conf/facerec/safety_city_q1.xml +++ b/conf/facerec/safety_city_q1.xml @@ -3,9 +3,9 @@ fld_capturer -ssyv - -3 +pb_fd +ssyv +4 0.4 0.45 diff --git a/conf/facerec/safety_city_q2.xml b/conf/facerec/safety_city_q2.xml index 12d462c..374ae91 100644 --- a/conf/facerec/safety_city_q2.xml +++ b/conf/facerec/safety_city_q2.xml @@ -2,36 +2,16 @@ fld_capturer -frontal - -uld -50 -10000 -1 -uld-part1 -share/facedetectors/uld/uld_part1.enc -uld-part2 -share/facedetectors/uld/uld_part2.enc -uld-part3 -share/facedetectors/uld/uld_part3.enc +pb_fd +ssyv +3 -0.3 -0.2 -0.6 -1.0 - -0 -0 -0 +0.4 +0.45 +frontal fda share/fda/fda0017.3500.enc -0 -conf/facerec/iris_fitter.xml - - -1 - diff --git a/conf/facerec/safety_city_q3.xml b/conf/facerec/safety_city_q3.xml new file mode 100644 index 0000000..12d462c --- /dev/null +++ b/conf/facerec/safety_city_q3.xml @@ -0,0 +1,37 @@ + + + +fld_capturer +frontal + +uld +50 +10000 +1 + +uld-part1 +share/facedetectors/uld/uld_part1.enc +uld-part2 +share/facedetectors/uld/uld_part2.enc +uld-part3 +share/facedetectors/uld/uld_part3.enc + +0.3 +0.2 +0.6 +1.0 + +0 +0 +0 + +fda +share/fda/fda0017.3500.enc + +0 +conf/facerec/iris_fitter.xml + + +1 + + diff --git a/conf/facerec/video_worker_fdatracker_pb_uld_fda.xml b/conf/facerec/video_worker_fdatracker_pb_uld_fda.xml new file mode 100644 index 0000000..7ac9311 --- /dev/null +++ b/conf/facerec/video_worker_fdatracker_pb_uld_fda.xml @@ -0,0 +1,106 @@ + + + +libface_video_worker + +fdatracker + + + +share/fda/fda0017.3500.enc + +5 +30 +0.7 +0.2 + + +0 +conf/facerec/iris_fitter.xml + +0.1 +0 +8 +640 +640 + + +pb_fd +uld +150 +1 + +0.2 +0.3 +0.7 + +0 +0 +0 + +0 +conf/facerec/depth_liveness_estimator_cnn.xml +5000 +150 +3 + +share/face_quality/fda_fqm.bin + +2 + + -40 + 40 + + -40 + 40 + +5 + +3 + +10 + +32 +32 +0.20 + +0 + +0 + +0 + +1 + + +10000 +1000 + +0 + +0 + +0 + + + +0 + +0 + +1 + +-1 +-1 +-1 + +0 + + 
+conf/facerec/age_gender_estimator.xml +conf/facerec/emotions_estimator.xml + +0 +conf/facerec/active_liveness_estimator.xml + + diff --git a/conf/facerec/video_worker_fdatracker_ssyv_fda_v2.xml b/conf/facerec/video_worker_fdatracker_ssyv_fda_v2.xml new file mode 100644 index 0000000..b4d59eb --- /dev/null +++ b/conf/facerec/video_worker_fdatracker_ssyv_fda_v2.xml @@ -0,0 +1,105 @@ + + + +libface_video_worker + +fdatracker + + + +share/fda/fda0017.3500.enc + +5 +30 +0.7 +0.2 + + +0 +conf/facerec/iris_fitter.xml + +0.1 +0 +8 +640 +640 + + +pb_fd +ssyv +2 +1 + +0.45 +0.4 + +0 +0 +0 + +0 +conf/facerec/depth_liveness_estimator_cnn.xml +5000 +150 +3 + +share/face_quality/fda_fqm.bin + +2 + + -40 + 40 + + -40 + 40 + +5 + +3 + +10 + +32 +32 +0.20 + +0 + +0 + +0 + +1 + + +10000 +1000 + +0 + +0 + +0 + + + +0 + +0 + +1 + +-1 +-1 +-1 + +0 + + +conf/facerec/age_gender_estimator.xml +conf/facerec/emotions_estimator.xml + +0 +conf/facerec/active_liveness_estimator.xml + + diff --git a/conf/facerec/video_worker_fdatracker_ssyv_fda_v3.xml b/conf/facerec/video_worker_fdatracker_ssyv_fda_v3.xml new file mode 100644 index 0000000..5a09938 --- /dev/null +++ b/conf/facerec/video_worker_fdatracker_ssyv_fda_v3.xml @@ -0,0 +1,105 @@ + + + +libface_video_worker + +fdatracker + + + +share/fda/fda0017.3500.enc + +5 +30 +0.7 +0.2 + + +0 +conf/facerec/iris_fitter.xml + +0.1 +0 +8 +640 +640 + + +pb_fd +ssyv +3 +1 + +0.45 +0.4 + +0 +0 +0 + +0 +conf/facerec/depth_liveness_estimator_cnn.xml +5000 +150 +3 + +share/face_quality/fda_fqm.bin + +2 + + -40 + 40 + + -40 + 40 + +5 + +3 + +10 + +32 +32 +0.20 + +0 + +0 + +0 + +1 + + +10000 +1000 + +0 + +0 + +0 + + + +0 + +0 + +1 + +-1 +-1 +-1 + +0 + + +conf/facerec/age_gender_estimator.xml +conf/facerec/emotions_estimator.xml + +0 +conf/facerec/active_liveness_estimator.xml + + diff --git a/conf/facerec/video_worker_fdatracker_ssyv_fda_v5.xml b/conf/facerec/video_worker_fdatracker_ssyv_fda_v5.xml new file mode 100644 index 0000000..5fbf4a7 --- /dev/null +++ b/conf/facerec/video_worker_fdatracker_ssyv_fda_v5.xml @@ -0,0 +1,105 @@ + + + +libface_video_worker + +fdatracker + + + +share/fda/fda0017.3500.enc + +5 +30 +0.7 +0.2 + + +0 +conf/facerec/iris_fitter.xml + +0.1 +0 +8 +640 +640 + + +pb_fd +ssyv +4 +1 + +0.45 +0.4 + +0 +0 +0 + +0 +conf/facerec/depth_liveness_estimator_cnn.xml +5000 +150 +3 + +share/face_quality/fda_fqm.bin + +2 + + -40 + 40 + + -40 + 40 + +5 + +3 + +10 + +32 +32 +0.20 + +0 + +0 + +0 + +1 + + +10000 +1000 + +0 + +0 + +0 + + + +0 + +0 + +1 + +-1 +-1 +-1 + +0 + + +conf/facerec/age_gender_estimator.xml +conf/facerec/emotions_estimator.xml + +0 +conf/facerec/active_liveness_estimator.xml + + diff --git a/examples/android/demo/build.gradle b/examples/android/demo/build.gradle index ebdf9f2..28771bc 100644 --- a/examples/android/demo/build.gradle +++ b/examples/android/demo/build.gradle @@ -50,7 +50,8 @@ android { "share/quality_iso/**", "share/face_quality/**", "share/faceanalysis/**", - "share/processing_block/**", + "share/processing_block/age_estimator/**", + "share/processing_block/gender_estimator/**", "share/age_second/**", "share/liveness_2d_v2/**", "share/faceattributes/**", diff --git a/examples/android/telpo_liveness_video_recognition/src/main/res/layout/about.xml b/examples/android/telpo_liveness_video_recognition/src/main/res/layout/about.xml index 29105ee..89f67ec 100644 --- a/examples/android/telpo_liveness_video_recognition/src/main/res/layout/about.xml +++ b/examples/android/telpo_liveness_video_recognition/src/main/res/layout/about.xml @@ -16,7 +16,7 
@@ android:id="@+id/aboutText1" android:layout_width="match_parent" android:layout_height="wrap_content" - android:text="Video recognition with depth and IR liveness demo for Telpo (v 0.2)\n\nFace SDK version 3.19.1\n" + android:text="Video recognition with depth and IR liveness demo for Telpo (v 0.2)\n\nFace SDK version 3.20.0\n" android:layout_below="@+id/aboutLogo" /> diff --git a/examples/cpp/CMakeLists.txt b/examples/cpp/CMakeLists.txt index 52a9c0b..7f85d77 100644 --- a/examples/cpp/CMakeLists.txt +++ b/examples/cpp/CMakeLists.txt @@ -17,4 +17,7 @@ add_subdirectory(test_calibration) add_subdirectory(demo) add_subdirectory(video_recognition_demo) add_subdirectory(test_videocap) -add_subdirectory(processing_block) + +if(NOT WITHOUT_PROCESSING_BLOCK) + add_subdirectory(processing_block) +endif() diff --git a/examples/cpp/processing_block/processing_block.cpp b/examples/cpp/processing_block/processing_block.cpp index b7844b7..3fb68c4 100644 --- a/examples/cpp/processing_block/processing_block.cpp +++ b/examples/cpp/processing_block/processing_block.cpp @@ -101,7 +101,7 @@ void drawFaceKeypoint(const pbio::Context& data, cv::Mat& image) { for(const auto& point : obj.at("keypoints").at("points")) { - cv::circle(image, cv::Point2f(point["x"].getDouble() * image.size[1], point["y"].getDouble() * image.size[0]), 2, {0, 255, 0}, 5); + cv::circle(image, cv::Point2f(point["proj"][0].getDouble() * image.size[1], point["proj"][1].getDouble() * image.size[0]), 2, {0, 255, 0}, 5); } } } @@ -322,10 +322,9 @@ int main(int argc, char **argv) configCtx["facerec_conf_dir"] = sdk_dir + "/conf/facerec/"; } - pbio::ProcessingBlock processingBlock = service->createProcessingBlock(configCtx); - if(unit_type == "quality" || (unit_type == "liveness" && modification == "v4")) + if(unit_type == "quality" || (unit_type == "liveness" && modification == "2d")) { // create capturer const pbio::Capturer::Ptr capturer = service->createCapturer("common_capturer_refa_fda_a.xml"); @@ -347,14 +346,21 @@ int main(int argc, char **argv) { auto faceCtx = service->createContext(); faceCtx["unit_type"] = unitTypes.at("face"); - faceCtx["ONNXRuntime"]["library_path"] = lib_dir; - faceCtx["use_cuda"] = use_cuda; - faceCtx["confidence_threshold"] = 0.4; + faceCtx["version"] = static_cast(2); pbio::ProcessingBlock faceBlock = service->createProcessingBlock(faceCtx); auto faceImageCtx = ioData["image"]; pbio::context_utils::putImage(faceImageCtx, input_rawimg); faceBlock(ioData); + + if (unit_type.compare("face_keypoint")) + { + auto fitterCtx = service->createContext(); + fitterCtx["unit_type"] = unitTypes.at("face_keypoint"); + pbio::ProcessingBlock fitterBlock = service->createProcessingBlock(fitterCtx); + fitterBlock(ioData); + } + processingBlock(ioData); } else // just put the whole image to the Context diff --git a/examples/cpp/processing_block_recognition_demo/main.cpp b/examples/cpp/processing_block_recognition_demo/main.cpp index 15f1eb9..ef3c2ed 100644 --- a/examples/cpp/processing_block_recognition_demo/main.cpp +++ b/examples/cpp/processing_block_recognition_demo/main.cpp @@ -15,7 +15,7 @@ using Context = api::Context; #include "../console_arguments_parser/ConsoleArgumentsParser.h" -void recognitionSample(std::string sdk_path, std::string input_image_path1, std::string input_image_path2, std::string window, std::string output, std::string mode); +void recognitionSample(std::string sdk_path, std::string input_image_path1, std::string input_image_path2, std::string window, std::string output, std::string mode, std::string 
modification); int main(int argc, char **argv) { @@ -26,6 +26,7 @@ int main(int argc, char **argv) " [--sdk_path ..]" " [--window ]" " [--output ]" + " [--modification <30, 50, 100, 1000>]" << std::endl; ConsoleArgumentsParser parser(argc, argv); @@ -35,12 +36,13 @@ int main(int argc, char **argv) const std::string sdk_dir = parser.get("--sdk_path", ".."); const std::string window = parser.get("--window", "yes"); const std::string output = parser.get("--output", "no"); + const std::string modification = parser.get("--modification", "1000"); try{ if (mode != "verify" && mode != "identify") throw std::runtime_error("there is no modifier " + mode); - recognitionSample(sdk_dir, input_image_path, input_image_path2, window, output, mode); + recognitionSample(sdk_dir, input_image_path, input_image_path2, window, output, mode, modification); }catch(const std::exception &e){ std::cout << "! exception catched: '" << e.what() << "' ... exiting" << std::endl; return 1; @@ -117,7 +119,7 @@ void checkFileExist(std::string path) } -api::Context getFaces(api::Service &service, api::ProcessingBlock &faceDetector, api::ProcessingBlock &faceFitter, cv::Mat &image){ +api::Context getFaces(pbio::FacerecService &service, api::ProcessingBlock &faceDetector, api::ProcessingBlock &faceFitter, cv::Mat &image){ cv::Mat input_image; cv::cvtColor(image, input_image, cv::COLOR_BGR2RGB); @@ -132,30 +134,28 @@ api::Context getFaces(api::Service &service, api::ProcessingBlock &faceDetector, return ioData; } -void recognitionSample(std::string sdk_path, std::string input_image_path1, std::string input_image_path2, std::string window, std::string output, std::string mode) +void recognitionSample(std::string sdk_path, std::string input_image_path1, std::string input_image_path2, std::string window, std::string output, std::string mode, std::string modification) { api::Service service = api::Service::createService(sdk_path); Context detectorCtx = service.createContext(); Context fitterCtx = service.createContext(); - Context recognizerCtx = service.createContext(); - Context matcherCtx = service.createContext(); - + Context faceTemplateExtractorCtx = service.createContext(); + detectorCtx["unit_type"] = "FACE_DETECTOR"; detectorCtx["modification"] = "uld"; - detectorCtx["min_size"] = 50l; + detectorCtx["precision_level"] = 3; detectorCtx["confidence_threshold"] = 0.6; fitterCtx["unit_type"] = "FACE_FITTER"; fitterCtx["modification"] = "tddfa_faster"; - recognizerCtx["unit_type"] = "FACE_RECOGNIZER"; - matcherCtx["unit_type"] = "MATCHER_MODULE"; + faceTemplateExtractorCtx["unit_type"] = "FACE_TEMPLATE_EXTRACTOR"; + faceTemplateExtractorCtx["modification"] = modification; api::ProcessingBlock faceDetector = service.createProcessingBlock(detectorCtx); api::ProcessingBlock faceFitter = service.createProcessingBlock(fitterCtx); - api::ProcessingBlock recognizerModule = service.createProcessingBlock(recognizerCtx); - api::ProcessingBlock matcherModule = service.createProcessingBlock(matcherCtx); + api::ProcessingBlock faceTemplateExtractor = service.createProcessingBlock(faceTemplateExtractorCtx); checkFileExist(input_image_path1); checkFileExist(input_image_path2); @@ -179,25 +179,31 @@ void recognitionSample(std::string sdk_path, std::string input_image_path1, std: throw std::runtime_error("many faces on" + input_image_path2 + " image"); ///////////Recognizer//////////////// - recognizerModule(ioData); - recognizerModule(ioData2); + faceTemplateExtractor(ioData); + faceTemplateExtractor(ioData2); 
///////////////////////////////////// - Context matcherData = service.createContext(); - if (mode == "verify") { - matcherData["verification"]["objects"].push_back(ioData["objects"][0]); - matcherData["verification"]["objects"].push_back(ioData2["objects"][0]); + Context verificationConfig = service.createContext(); + Context verificationData = service.createContext(); - ///////////Matcher//////////////// - matcherModule(matcherData); + verificationConfig["unit_type"] = "VERIFICATION_MODULE"; + verificationConfig["modification"] = modification; + + api::ProcessingBlock verificationModule = service.createProcessingBlock(verificationConfig); + + verificationData["template1"] = ioData["objects"][0]["template"]; + verificationData["template2"] = ioData2["objects"][0]["template"]; + + ///////////Verification//////////////// + verificationModule(verificationData); ////////////////////////////////// - double distance = matcherData["verification"]["result"]["distance"].getDouble(); - bool verdict = matcherData["verification"]["result"]["verdict"].getBool(); + double distance = verificationData["result"]["distance"].getDouble(); + double score = verificationData["result"]["score"].getDouble(); - cv::Scalar color = verdict ? cv::Scalar(0, 255, 0) : cv::Scalar(0, 0, 255); + cv::Scalar color = score >= 0.85 ? cv::Scalar(0, 255, 0) : cv::Scalar(0, 0, 255); drawBBox(ioData["objects"][0], image, output, color); drawBBox(ioData2["objects"][0], image2, output, color); @@ -212,7 +218,8 @@ void recognitionSample(std::string sdk_path, std::string input_image_path1, std: crop2.copyTo(result(cv::Rect(crop1.cols, 0, crop2.cols, crop2.rows))); std::cout << "distance = " << distance << "\n"; - std::cout << "verdict = " << (verdict ? "True" : "False") << "\n"; + std::cout << "verdict = " << (score >= 0.85 ? 
"True" : "False") << "\n"; + std::cout << "score = " << score << "\n"; if (window == "yes"){ cv::imshow("result", result); @@ -222,27 +229,50 @@ void recognitionSample(std::string sdk_path, std::string input_image_path1, std: } else { - matcherData["search"]["knn"] = 1l; - matcherData["search"]["type_index"] = "array"; + Context matcherConfig = service.createContext(); + Context templateIndexConfig = service.createContext(); + Context templates = service.createContext(); + Context matcherData = service.createContext(); + + matcherConfig["unit_type"] = "MATCHER_MODULE"; + matcherConfig["modification"] = modification; - matcherData["search"]["template_index"] = ioData["objects"]; - matcherData["search"]["queries"].push_back(ioData2["objects"][0]); + templateIndexConfig["unit_type"] = "TEMPLATE_INDEX"; + templateIndexConfig["modification"] = modification; + + api::ProcessingBlock matcherModule = service.createProcessingBlock(matcherConfig); + api::ProcessingBlock templateIndex = service.createProcessingBlock(templateIndexConfig); + + for (const Context& object : ioData["objects"]) + { + templates.push_back(object["template"]); + } + + ioData["templates"] = std::move(templates); + + templateIndex(ioData); + + matcherData["knn"] = 1l; + matcherData["template_index"] = ioData["template_index"]; + matcherData["queries"].push_back(ioData2["objects"][0]); ///////////Matcher//////////////// matcherModule(matcherData); ////////////////////////////////// - int find_index = static_cast(matcherData["search"]["results"][0]["index"].getLong()); - double distance = matcherData["search"]["results"][0]["distance"].getDouble(); - bool verdict = matcherData["search"]["results"][0]["verdict"].getBool(); + int find_index = static_cast(matcherData["results"][0]["index"].getLong()); + double distance = matcherData["results"][0]["distance"].getDouble(); + double score = matcherData["results"][0]["score"].getDouble(); + std::cout << "distance = " << distance <<" \n"; - std::cout << "verdict = " << verdict <<" \n"; + std::cout << "verdict = " << (score >= 0.85 ? "True" : "False") <<" \n"; std::cout << "index = " << find_index <<" \n"; + std::cout << "score = " << score << " \n"; for(int i = 0; i < ioData["objects"].size(); i++) { const api::Context &obj = ioData["objects"][i]; - cv::Scalar color = (i == find_index && verdict) ? cv::Scalar(0, 255, 0) : cv::Scalar(0, 0, 255); + cv::Scalar color = (i == find_index && score >= 0.85) ? 
cv::Scalar(0, 255, 0) : cv::Scalar(0, 0, 255); drawBBox(obj, image, output, color); } diff --git a/examples/csharp/demo/csharp_demo.csproj b/examples/csharp/demo/csharp_demo.csproj index f80f509..91fa534 100644 --- a/examples/csharp/demo/csharp_demo.csproj +++ b/examples/csharp/demo/csharp_demo.csproj @@ -1,21 +1,20 @@ - + Exe - netcoreapp2.0 + net6.0 + ..\..\..\bin\FacerecCSharpWrapper.dll ../../../lib/FacerecCSharpWrapper.dll - - - - + - - + + + diff --git a/examples/csharp/demo/vs/Properties/launchSettings.json b/examples/csharp/demo/vs/Properties/launchSettings.json new file mode 100644 index 0000000..20f9ca4 --- /dev/null +++ b/examples/csharp/demo/vs/Properties/launchSettings.json @@ -0,0 +1,8 @@ +{ + "profiles": { + "csharp_demo": { + "commandName": "Project", + "commandLineArgs": "--config_dir ../../../../../../../conf/facerec" + } + } +} \ No newline at end of file diff --git a/examples/csharp/demo/vs/csharp_demo.csproj b/examples/csharp/demo/vs/csharp_demo.csproj index 998faef..7269e33 100644 --- a/examples/csharp/demo/vs/csharp_demo.csproj +++ b/examples/csharp/demo/vs/csharp_demo.csproj @@ -1,68 +1,21 @@ - - - + + - Debug - x64 - csharp_demo Exe - v4.6 - {9C504E04-5D81-4B01-8482-3B94D27BED43} - --config_dir ../../../../../../conf/facerec - - - x64 - true - full - false - bin\Debug\ - DEBUG;TRACE - prompt - 4 - - - x64 - pdbonly - true - bin\Release\ - TRACE - prompt - 4 + net6.0 + - ..\..\..\..\bin\FacerecCSharpWrapper.dll - True - - - ..\..\..\..\bin\csharp_demo\deps\CommandLine.dll - True - - - ..\..\..\..\bin\csharp_demo\deps\OpenCvSharp.Blob.dll - True + ..\..\..\..\bin\FacerecCSharpWrapper.dll - - ..\..\..\..\bin\csharp_demo\deps\OpenCvSharp.dll - True - - - - - - - - - + PreserveNewest - - PreserveNewest - - + PreserveNewest @@ -77,13 +30,17 @@ PreserveNewest + + PreserveNewest + + + PreserveNewest + + + + + + - - - \ No newline at end of file + + diff --git a/examples/csharp/demo/vs/csharp_demo.sln b/examples/csharp/demo/vs/csharp_demo.sln index da047af..7f6d9c3 100644 --- a/examples/csharp/demo/vs/csharp_demo.sln +++ b/examples/csharp/demo/vs/csharp_demo.sln @@ -1,22 +1,22 @@ - -Microsoft Visual Studio Solution File, Format Version 12.00 -# Visual Studio 14 -VisualStudioVersion = 14.0.25420.1 -MinimumVisualStudioVersion = 10.0.40219.1 -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "csharp_demo", "csharp_demo.csproj", "{9C504E04-5D81-4B01-8482-3B94D27BED43}" -EndProject -Global - GlobalSection(SolutionConfigurationPlatforms) = preSolution - Debug|x64 = Debug|x64 - Release|x64 = Release|x64 - EndGlobalSection - GlobalSection(ProjectConfigurationPlatforms) = postSolution - {9C504E04-5D81-4B01-8482-3B94D27BED43}.Debug|x64.ActiveCfg = Debug|x64 - {9C504E04-5D81-4B01-8482-3B94D27BED43}.Debug|x64.Build.0 = Debug|x64 - {9C504E04-5D81-4B01-8482-3B94D27BED43}.Release|x64.ActiveCfg = Release|x64 - {9C504E04-5D81-4B01-8482-3B94D27BED43}.Release|x64.Build.0 = Release|x64 - EndGlobalSection - GlobalSection(SolutionProperties) = preSolution - HideSolutionNode = FALSE - EndGlobalSection -EndGlobal + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio 14 +VisualStudioVersion = 14.0.25420.1 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "csharp_demo", "csharp_demo.csproj", "{9C504E04-5D81-4B01-8482-3B94D27BED43}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|Any CPU = Debug|Any CPU + Release|Any CPU = Release|Any CPU + EndGlobalSection + 
GlobalSection(ProjectConfigurationPlatforms) = postSolution + {9C504E04-5D81-4B01-8482-3B94D27BED43}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {9C504E04-5D81-4B01-8482-3B94D27BED43}.Debug|Any CPU.Build.0 = Debug|Any CPU + {9C504E04-5D81-4B01-8482-3B94D27BED43}.Release|Any CPU.ActiveCfg = Release|Any CPU + {9C504E04-5D81-4B01-8482-3B94D27BED43}.Release|Any CPU.Build.0 = Release|Any CPU + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection +EndGlobal diff --git a/examples/csharp/demo/demo.cs b/examples/csharp/demo/vs/demo.cs similarity index 78% rename from examples/csharp/demo/demo.cs rename to examples/csharp/demo/vs/demo.cs index 61b461f..8202107 100644 --- a/examples/csharp/demo/demo.cs +++ b/examples/csharp/demo/vs/demo.cs @@ -72,7 +72,7 @@ public Worker(string facerec_conf_dir, string capturer_conf, string license_dir) //_age_geder_estimator = _service.createAgeGenderEstimator("age_gender_estimator_v2.xml"); _emotions_estimator = _service.createEmotionsEstimator("emotions_estimator.xml"); _face_quality_estimator = _service.createFaceQualityEstimator("face_quality_estimator.xml"); - _liveness_2d_estimator = _service.createLiveness2DEstimator("liveness_2d_estimator_v3.xml"); + _liveness_2d_estimator = _service.createLiveness2DEstimator("liveness_2d_estimator_v3.xml"); _face_mask_estimator = _service.createFaceAttributesEstimator("face_mask_estimator.xml"); _eyes_openness_estimator = _service.createFaceAttributesEstimator("eyes_openness_estimator_v2.xml"); @@ -96,18 +96,18 @@ public Worker(string facerec_conf_dir, string capturer_conf, string license_dir) bool get_flag(int i) { - switch(i) + switch (i) { - case 0: return _flag_positions; - case 1: return _flag_angles; - case 2: return _flag_quality; - case 3: return _flag_liveness; - case 4: return _flag_age_gender; - case 5: return _flag_cutting_base; - case 6: return _flag_cutting_full; - case 7: return _flag_cutting_token; - case 8: return _flag_points; - case 9: return _flag_face_quality; + case 0: return _flag_positions; + case 1: return _flag_angles; + case 2: return _flag_quality; + case 3: return _flag_liveness; + case 4: return _flag_age_gender; + case 5: return _flag_cutting_base; + case 6: return _flag_cutting_full; + case 7: return _flag_cutting_token; + case 8: return _flag_points; + case 9: return _flag_face_quality; case 10: return _flag_angles_vectors; case 11: return _flag_emotions; case 12: return _flag_masked_face; @@ -119,28 +119,28 @@ bool get_flag(int i) void set_flag(int i, bool value) { - switch(i) + switch (i) { - case 0: _flag_positions = value; return; - case 1: _flag_angles = value; return; - case 2: _flag_quality = value; return; - case 3: _flag_liveness = value; return; - case 4: _flag_age_gender = value; return; - case 5: _flag_cutting_base = value; return; - case 6: _flag_cutting_full = value; return; - case 7: _flag_cutting_token = value; return; - case 8: _flag_points = value; return; - case 9: _flag_face_quality = value; return; + case 0: _flag_positions = value; return; + case 1: _flag_angles = value; return; + case 2: _flag_quality = value; return; + case 3: _flag_liveness = value; return; + case 4: _flag_age_gender = value; return; + case 5: _flag_cutting_base = value; return; + case 6: _flag_cutting_full = value; return; + case 7: _flag_cutting_token = value; return; + case 8: _flag_points = value; return; + case 9: _flag_face_quality = value; return; case 10: _flag_angles_vectors = value; return; - case 11: _flag_emotions = value; return; 
- case 12: _flag_masked_face = value; return; - case 13: _flag_eyes_openness = value; return; + case 11: _flag_emotions = value; return; + case 12: _flag_masked_face = value; return; + case 13: _flag_eyes_openness = value; return; } } string flag_name(int i) { - switch(i) + switch (i) { case 0: return "rectangles"; case 1: return "angles"; @@ -167,25 +167,25 @@ OpenCvSharp.Rect flag_rect(int i) } // mouse click callback for flags control - static void onMouse(OpenCvSharp.MouseEvent mouseEvent, int x, int y, OpenCvSharp.MouseEvent flags) + static void onMouse(OpenCvSharp.MouseEventTypes MouseEventTypes, int x, int y, OpenCvSharp.MouseEventFlags flags, nint userData) { - if(mouseEvent != OpenCvSharp.MouseEvent.LButtonDown && mouseEvent != OpenCvSharp.MouseEvent.LButtonDown && mouseEvent != OpenCvSharp.MouseEvent.MButtonDown) + if (MouseEventTypes != OpenCvSharp.MouseEventTypes.LButtonDown && MouseEventTypes != OpenCvSharp.MouseEventTypes.LButtonDown && MouseEventTypes != OpenCvSharp.MouseEventTypes.MButtonDown) { return; } // if mouse click is in some flag's rectangle // change flag state - for(int i = 0; i < flags_count; ++i) + for (int i = 0; i < flags_count; ++i) { OpenCvSharp.Rect r = Worker.Instance.flag_rect(i); - if(x >= r.X && y >= r.Y && x <= r.X + r.Width && y <= r.Y + r.Height) + if (x >= r.X && y >= r.Y && x <= r.X + r.Width && y <= r.Y + r.Height) Worker.Instance.set_flag(i, !Worker.Instance.get_flag(i)); } } - void puttext(OpenCvSharp.Mat image, string text, OpenCvSharp.Point2f position) + void puttext(OpenCvSharp.Mat image, string text, OpenCvSharp.Point position) { // twice - for better reading // since we are drawing on the frame from webcam @@ -224,7 +224,7 @@ public void work(OpenCvSharp.Mat frame) // clone the frame for drawing on it OpenCvSharp.Mat draw_image = frame.Clone(); // handle each face on the frame separately - for(int i = 0; i < samples.Count; ++i) + for (int i = 0; i < samples.Count; ++i) { RawSample sample = samples[i]; @@ -232,11 +232,11 @@ public void work(OpenCvSharp.Mat frame) RawSample.Rectangle rectangle = sample.getRectangle(); // set a point to place information for this face - OpenCvSharp.Point2f text_point = new OpenCvSharp.Point2f( + OpenCvSharp.Point text_point = new OpenCvSharp.Point( rectangle.x + rectangle.width + 3, rectangle.y + 10); - const float text_line_height = 22; + const int text_line_height = 22; // draw facial points // red color for all points @@ -244,12 +244,12 @@ public void work(OpenCvSharp.Mat frame) // yellow for right eye // (yes, there is a mess with left and right eyes in face_sdk api, // but if we fix it now we will lose compatibility with previous versions) - if(_flag_points) + if (_flag_points) { List points = sample.getLandmarks(); List iris_points = sample.getIrisLandmarks(); - for(int j = -2; j < points.Count; ++j) + for (int j = -2; j < points.Count; ++j) { Point p = j == -2 ? @@ -268,7 +268,7 @@ public void work(OpenCvSharp.Mat frame) OpenCvSharp.Cv2.Circle( draw_image, - new OpenCvSharp.Point2f(p.x, p.y), + new OpenCvSharp.Point(p.x, p.y), j < 0 ? 4 : 2, color, -1, @@ -276,31 +276,32 @@ public void work(OpenCvSharp.Mat frame) } // draw iris points - for(int j = 0; j < iris_points.Count; ++j) + for (int j = 0; j < iris_points.Count; ++j) { int ms = 1; OpenCvSharp.Scalar color = new OpenCvSharp.Scalar(0, 255, 255); int oi = j - 20 * Convert.ToInt32(j >= 20); Point pt1 = iris_points[j]; Point pt2 = iris_points[(oi < 19 ? 
j : j - 15) + 1]; - OpenCvSharp.Point2f cv_pt1 = new OpenCvSharp.Point2f(pt1.x, pt1.y); - OpenCvSharp.Point2f cv_pt2 = new OpenCvSharp.Point2f(pt2.x, pt2.y); + OpenCvSharp.Point cv_pt1 = new OpenCvSharp.Point(pt1.x, pt1.y); + OpenCvSharp.Point cv_pt2 = new OpenCvSharp.Point(pt2.x, pt2.y); - if(oi < 5) + if (oi < 5) { color = new OpenCvSharp.Scalar(0, 165, 255); - if(oi == 0) + if (oi == 0) { double radius = Math.Sqrt(Math.Pow(pt1.x - pt2.x, 2) + Math.Pow(pt1.y - pt2.y, 2)); OpenCvSharp.Cv2.Circle( draw_image, cv_pt1, - (int) radius, + (int)radius, color, ms, OpenCvSharp.LineTypes.AntiAlias); } - }else + } + else { OpenCvSharp.Cv2.Line( draw_image, @@ -322,7 +323,7 @@ public void work(OpenCvSharp.Mat frame) } // draw rectangle - if(_flag_positions) + if (_flag_positions) { OpenCvSharp.Cv2.Rectangle( @@ -338,18 +339,18 @@ public void work(OpenCvSharp.Mat frame) } // draw age and gender - if( _flag_age_gender ) + if (_flag_age_gender) { - AgeGenderEstimator.AgeGender age_gender = _age_geder_estimator.estimateAgeGender(sample); + AgeGenderEstimator.AgeGender age_gender = _age_geder_estimator.estimateAgeGender(sample); string age_text = "age: "; - switch(age_gender.age) + switch (age_gender.age) { - case AgeGenderEstimator.Age.AGE_KID : age_text += "kid "; break; - case AgeGenderEstimator.Age.AGE_YOUNG : age_text += "young "; break; - case AgeGenderEstimator.Age.AGE_ADULT : age_text += "adult "; break; - case AgeGenderEstimator.Age.AGE_SENIOR : age_text += "senior "; break; + case AgeGenderEstimator.Age.AGE_KID: age_text += "kid "; break; + case AgeGenderEstimator.Age.AGE_YOUNG: age_text += "young "; break; + case AgeGenderEstimator.Age.AGE_ADULT: age_text += "adult "; break; + case AgeGenderEstimator.Age.AGE_SENIOR: age_text += "senior "; break; } age_text += string.Format("years: {0:G3}", age_gender.age_years); @@ -371,12 +372,12 @@ public void work(OpenCvSharp.Mat frame) } // draw emotions - if( _flag_emotions ) + if (_flag_emotions) { List emotions = _emotions_estimator.estimateEmotions(sample); - for(int j = 0; j < emotions.Count; ++j) + for (int j = 0; j < emotions.Count; ++j) { EmotionsEstimator.Emotion emotion = emotions[j].emotion; float confidence = emotions[j].confidence; @@ -388,23 +389,23 @@ public void work(OpenCvSharp.Mat frame) (int)text_point.Y - (int)text_line_height / 2, (int)(100 * confidence), (int)text_line_height), - emotion == EmotionsEstimator.Emotion.EMOTION_NEUTRAL ? new OpenCvSharp.Scalar(255, 0, 0) : - emotion == EmotionsEstimator.Emotion.EMOTION_HAPPY ? new OpenCvSharp.Scalar(0, 255, 0) : - emotion == EmotionsEstimator.Emotion.EMOTION_ANGRY ? new OpenCvSharp.Scalar(0, 0, 255) : + emotion == EmotionsEstimator.Emotion.EMOTION_NEUTRAL ? new OpenCvSharp.Scalar(255, 0, 0) : + emotion == EmotionsEstimator.Emotion.EMOTION_HAPPY ? new OpenCvSharp.Scalar(0, 255, 0) : + emotion == EmotionsEstimator.Emotion.EMOTION_ANGRY ? new OpenCvSharp.Scalar(0, 0, 255) : emotion == EmotionsEstimator.Emotion.EMOTION_SURPRISE ? new OpenCvSharp.Scalar(0, 255, 255) : new OpenCvSharp.Scalar(0, 0, 0), -1); puttext( draw_image, - emotion == EmotionsEstimator.Emotion.EMOTION_NEUTRAL ? "neutral" : - emotion == EmotionsEstimator.Emotion.EMOTION_HAPPY ? "happy" : - emotion == EmotionsEstimator.Emotion.EMOTION_ANGRY ? "angry" : + emotion == EmotionsEstimator.Emotion.EMOTION_NEUTRAL ? "neutral" : + emotion == EmotionsEstimator.Emotion.EMOTION_HAPPY ? "happy" : + emotion == EmotionsEstimator.Emotion.EMOTION_ANGRY ? "angry" : emotion == EmotionsEstimator.Emotion.EMOTION_SURPRISE ? 
"surprise" : emotion == EmotionsEstimator.Emotion.EMOTION_DISGUSTED ? "disgusted" : emotion == EmotionsEstimator.Emotion.EMOTION_SAD ? "sad" : emotion == EmotionsEstimator.Emotion.EMOTION_SCARED ? "scared" : "?", - text_point + new OpenCvSharp.Point2f(100, 0)); + text_point + new OpenCvSharp.Point(100, 0)); text_point.Y += text_line_height; @@ -414,9 +415,9 @@ public void work(OpenCvSharp.Mat frame) // draw angles text - if( _flag_angles ) + if (_flag_angles) { - + string yaw, pitch, roll; yaw = string.Format("yaw: {0}", (0.1f * (int)10 * sample.getAngles().yaw + 0.5f)); pitch = string.Format("pitch: {0}", (0.1f * (int)10 * sample.getAngles().pitch + 0.5f)); @@ -435,36 +436,40 @@ public void work(OpenCvSharp.Mat frame) } // draw angles vectors - if( _flag_angles_vectors ) + if (_flag_angles_vectors) { RawSample.Angles angles = sample.getAngles(); - float cos_a = (float)Math.Cos( angles.yaw * OpenCvSharp.Cv2.PI / 180 ); - float sin_a = (float)Math.Sin( angles.yaw * OpenCvSharp.Cv2.PI / 180 ); + float cos_a = (float)Math.Cos(angles.yaw * OpenCvSharp.Cv2.PI / 180); + float sin_a = (float)Math.Sin(angles.yaw * OpenCvSharp.Cv2.PI / 180); - float cos_b = (float)Math.Cos( angles.pitch * OpenCvSharp.Cv2.PI / 180 ); - float sin_b = (float)Math.Sin( angles.pitch * OpenCvSharp.Cv2.PI / 180 ); + float cos_b = (float)Math.Cos(angles.pitch * OpenCvSharp.Cv2.PI / 180); + float sin_b = (float)Math.Sin(angles.pitch * OpenCvSharp.Cv2.PI / 180); - float cos_c = (float)Math.Cos( angles.roll * OpenCvSharp.Cv2.PI / 180 ); - float sin_c = (float)Math.Sin( angles.roll * OpenCvSharp.Cv2.PI / 180 ); + float cos_c = (float)Math.Cos(angles.roll * OpenCvSharp.Cv2.PI / 180); + float sin_c = (float)Math.Sin(angles.roll * OpenCvSharp.Cv2.PI / 180); OpenCvSharp.Point3f[] xyz = { new OpenCvSharp.Point3f(cos_a * cos_c, -sin_c, -sin_a), new OpenCvSharp.Point3f(sin_c, cos_b * cos_c, -sin_b), new OpenCvSharp.Point3f(sin_a, sin_b, cos_a * cos_b) }; - OpenCvSharp.Point2f center = new OpenCvSharp.Point2f( + OpenCvSharp.Point center = new OpenCvSharp.Point( + (sample.getLeftEye().x + sample.getRightEye().x) * 0.5f, + (sample.getLeftEye().y + sample.getRightEye().y) * 0.5f); + + OpenCvSharp.Point2f centerf = new OpenCvSharp.Point( (sample.getLeftEye().x + sample.getRightEye().x) * 0.5f, (sample.getLeftEye().y + sample.getRightEye().y) * 0.5f); float length = (rectangle.width + rectangle.height) * 0.3f; - for(int c = 0; c < 3; ++c) + for (int c = 0; c < 3; ++c) { OpenCvSharp.Cv2.Line( draw_image, center, - center + new OpenCvSharp.Point2f(xyz[c].X, -xyz[c].Y) * length, + center + new OpenCvSharp.Point(xyz[c].X * length, -xyz[c].Y * length), c == 0 ? new OpenCvSharp.Scalar(50, 255, 255) : c == 1 ? new OpenCvSharp.Scalar(50, 255, 50) : c == 2 ? 
new OpenCvSharp.Scalar(50, 50, 255) : new OpenCvSharp.Scalar(), @@ -474,7 +479,7 @@ public void work(OpenCvSharp.Mat frame) } // draw quality text - if( _flag_quality ) + if (_flag_quality) { QualityEstimator.Quality q = _quality_estimator.estimateQuality(sample); @@ -501,7 +506,7 @@ public void work(OpenCvSharp.Mat frame) } // draw liveness text - if( _flag_liveness ) + if (_flag_liveness) { Liveness2DEstimator.LivenessAndScore liveness_2d_result = _liveness_2d_estimator.estimate(sample); string score_str = Math.Round(liveness_2d_result.score, 3).ToString(); @@ -518,7 +523,7 @@ public void work(OpenCvSharp.Mat frame) } // draw face quality - if( _flag_face_quality ) + if (_flag_face_quality) { float quality = _face_quality_estimator.estimateQuality(sample); @@ -529,7 +534,8 @@ public void work(OpenCvSharp.Mat frame) } // draw face attribute (masked_face) - if(_flag_masked_face) { + if (_flag_masked_face) + { FaceAttributesEstimator.Attribute attr = _face_mask_estimator.estimate(sample); string score_str = Math.Round(attr.score, 3).ToString(); puttext( @@ -541,7 +547,8 @@ public void work(OpenCvSharp.Mat frame) } // draw face attribute (eyes_openness) - if(_flag_eyes_openness) { + if (_flag_eyes_openness) + { FaceAttributesEstimator.Attribute attr = _eyes_openness_estimator.estimate(sample); string left_score_str = Math.Round(attr.left_eye_state.score, 3).ToString(); string right_score_str = Math.Round(attr.right_eye_state.score, 3).ToString(); @@ -570,11 +577,11 @@ public void work(OpenCvSharp.Mat frame) } // draw face cuts - for(int cut_i = 0; cut_i < 3; ++cut_i) + for (int cut_i = 0; cut_i < 3; ++cut_i) { - if( (cut_i == 0 && ! _flag_cutting_base) || - (cut_i == 1 && ! _flag_cutting_full) || - (cut_i == 2 && ! _flag_cutting_token) ) + if ((cut_i == 0 && !_flag_cutting_base) || + (cut_i == 1 && !_flag_cutting_full) || + (cut_i == 2 && !_flag_cutting_token)) { continue; } @@ -616,7 +623,7 @@ public void work(OpenCvSharp.Mat frame) img.Rows - img_rect_y, draw_image.Rows - Math.Max(0, text_point.Y)); - if(img_rect_width <= 0 || img_rect_height <= 0) + if (img_rect_width <= 0 || img_rect_height <= 0) continue; OpenCvSharp.Rect img_rect = new OpenCvSharp.Rect(img_rect_x, img_rect_y, img_rect_width, img_rect_height); @@ -637,7 +644,7 @@ draw_image[new OpenCvSharp.Rect( } // draw checkboxes - for(int i = 0; i < flags_count; ++i) + for (int i = 0; i < flags_count; ++i) { OpenCvSharp.Rect rect = flag_rect(i); OpenCvSharp.Rect rect2 = new OpenCvSharp.Rect(rect.X + 5, rect.Y + 5, rect.Width - 10, rect.Height - 10); @@ -645,7 +652,7 @@ draw_image[new OpenCvSharp.Rect( OpenCvSharp.Cv2.Rectangle(draw_image, rect, OpenCvSharp.Scalar.All(255), -1); OpenCvSharp.Cv2.Rectangle(draw_image, rect, OpenCvSharp.Scalar.All(0), 2, OpenCvSharp.LineTypes.AntiAlias); - if( get_flag(i) ) + if (get_flag(i)) { OpenCvSharp.Cv2.Rectangle(draw_image, rect2, OpenCvSharp.Scalar.All(0), -1, OpenCvSharp.LineTypes.AntiAlias); } @@ -653,15 +660,15 @@ draw_image[new OpenCvSharp.Rect( puttext( draw_image, flag_name(i), - new OpenCvSharp.Point2f(rect.X + rect.Width + 3, rect.Y + rect.Height - 3)); + new OpenCvSharp.Point(rect.X + rect.Width + 3, rect.Y + rect.Height - 3)); } - - + + // show image with drawed information OpenCvSharp.Cv2.ImShow("demo", draw_image); // register callback on mouse events - OpenCvSharp.Cv2.SetMouseCallback("demo", (OpenCvSharp.CvMouseCallback)onMouse); + OpenCvSharp.Cv2.SetMouseCallback("demo", onMouse); } public void dispose() @@ -713,7 +720,7 @@ static int Main(string[] args) // print values of 
arguments Console.WriteLine("Arguments:"); - foreach(var opt in options.GetType().GetProperties()) + foreach (var opt in options.GetType().GetProperties()) { Console.WriteLine("--{0} = {1}", opt.Name, opt.GetValue(options, null)); } @@ -738,17 +745,16 @@ static int Main(string[] args) // VideoCapture camera; OpenCvSharp.VideoCapture camera = new OpenCvSharp.VideoCapture(); - - for(; camera_id < 10; ++camera_id) + for (; camera_id < 10; ++camera_id) { camera.Open(camera_id); - camera.Set(OpenCvSharp.CaptureProperty.FrameWidth, desiredFrameWidht); - camera.Set(OpenCvSharp.CaptureProperty.FrameHeight, desiredFrameHeight); + camera.Set(OpenCvSharp.VideoCaptureProperties.FrameWidth, desiredFrameWidht); + camera.Set(OpenCvSharp.VideoCaptureProperties.FrameHeight, desiredFrameHeight); OpenCvSharp.Mat frame = new OpenCvSharp.Mat(); for (int i = 0; i < 10; i++) frame = camera.RetrieveMat(); - if(frame.Empty()) + if (frame.Empty()) { Console.WriteLine("webcam " + camera_id + " not opened"); camera.Release(); @@ -759,22 +765,22 @@ static int Main(string[] args) } } - for(;;) + for (; ; ) { OpenCvSharp.Mat frame; //camera.Grab(); frame = camera.RetrieveMat(); - if(frame.Empty()) + if (frame.Empty()) break; - + // give a frame to the worker worker.work(frame); - if(27 == (char) OpenCvSharp.Cv2.WaitKey(10)) + if (27 == (char)OpenCvSharp.Cv2.WaitKey(10)) { - camera.Release(); + camera.Release(); break; } } @@ -783,11 +789,11 @@ static int Main(string[] args) // otherwise licence error may occur // when create sdk object in next time worker.dispose(); - } - catch(Exception e) - { - Console.WriteLine("! exception catched: '" + e + "' ... exiting"); + catch (Exception e) + { + Console.WriteLine($"! exception catched: '{e}' ... exiting"); + return 1; } Console.WriteLine("close program"); diff --git a/examples/csharp/processing_block_demo/README.txt b/examples/csharp/processing_block_demo/README.txt new file mode 100644 index 0000000..8fa75df --- /dev/null +++ b/examples/csharp/processing_block_demo/README.txt @@ -0,0 +1,28 @@ +=== Build with Visual Studio === + +1. extract windows_x86_64.zip +1.1 please make sure the libonnxruntime.so library can be found when building the project +2. open examples/csharp/processing_block_demo/vs/csharp_processing_block_demo.sln in Visual Studio, build and run. 
+ +=== Build with dotnet === + +Requirements: + * dotnet https://docs.microsoft.com/en-us/dotnet/core/get-started + * opencvsharp https://github.com/shimat/opencvsharp + * command line parser https://github.com/commandlineparser/commandline + + +Build +cd examples/csharp/demo +dotnet publish -o publish + + +Run + +Windows: +set PATH=%PATH%;..\..\..\bin +dotnet publish\csharp_processing_block_demo.dll --sdk_path=../../../ --unit_type=face --modification uld --min_size 50 + + +Linux: +LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:../../../lib:/path/to/opencvsharp/native/libs dotnet publish/csharp_processing_block_demo.dll --sdk_path=../../../ --unit_type=face --modification uld --min_size 50 diff --git a/examples/csharp/processing_block_demo/csharp_processing_block_demo.csproj b/examples/csharp/processing_block_demo/csharp_processing_block_demo.csproj new file mode 100644 index 0000000..51cdfe9 --- /dev/null +++ b/examples/csharp/processing_block_demo/csharp_processing_block_demo.csproj @@ -0,0 +1,21 @@ + + + + Exe + net6.0 + true + + + + + ..\..\..\bin\FacerecCSharpWrapper.dll + ../../../lib/FacerecCSharpWrapper.dll + + + + + + + + + diff --git a/examples/csharp/processing_block_demo/vs/csharp_processing_block_demo.csproj b/examples/csharp/processing_block_demo/vs/csharp_processing_block_demo.csproj new file mode 100644 index 0000000..b966035 --- /dev/null +++ b/examples/csharp/processing_block_demo/vs/csharp_processing_block_demo.csproj @@ -0,0 +1,48 @@ + + + + Exe + net6.0 + true + {F6F992D0-AF99-4A65-98AF-E7DC073BBA5C} + + + + + ..\..\..\..\bin\FacerecCSharpWrapper.dll + + + + + + PreserveNewest + + + PreserveNewest + + + PreserveNewest + + + PreserveNewest + + + PreserveNewest + + + PreserveNewest + + + PreserveNewest + + + PreserveNewest + + + + + + + + + diff --git a/examples/csharp/processing_block_demo/vs/csharp_processing_block_demo.sln b/examples/csharp/processing_block_demo/vs/csharp_processing_block_demo.sln new file mode 100644 index 0000000..deebd7d --- /dev/null +++ b/examples/csharp/processing_block_demo/vs/csharp_processing_block_demo.sln @@ -0,0 +1,22 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio 14 +VisualStudioVersion = 14.0.25420.1 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "csharp_processing_block_demo", "csharp_processing_block_demo.csproj", "{F6F992D0-AF99-4A65-98AF-E7DC073BBA5C}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|Any CPU = Debug|Any CPU + Release|Any CPU = Release|Any CPU + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {9C504E04-5D81-4B01-8482-3B94D27BED43}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {9C504E04-5D81-4B01-8482-3B94D27BED43}.Debug|Any CPU.Build.0 = Debug|Any CPU + {9C504E04-5D81-4B01-8482-3B94D27BED43}.Release|Any CPU.ActiveCfg = Release|Any CPU + {9C504E04-5D81-4B01-8482-3B94D27BED43}.Release|Any CPU.Build.0 = Release|Any CPU + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection +EndGlobal diff --git a/examples/csharp/processing_block_demo/vs/processing_block_demo.cs b/examples/csharp/processing_block_demo/vs/processing_block_demo.cs new file mode 100644 index 0000000..d03b465 --- /dev/null +++ b/examples/csharp/processing_block_demo/vs/processing_block_demo.cs @@ -0,0 +1,633 @@ +using System; +using System.Collections.Generic; +using System.ComponentModel.Design; +using System.Data.SqlTypes; +using System.Linq; +using 
System.Runtime.InteropServices; +using System.Text; +using CommandLine; +using OpenCvSharp; + +using VDT.FaceRecognition.SDK; + +public class Options +{ + [Option("sdk_path", Default = "../../../", HelpText = "Path to FaceSDK directory.")] + public string SdkPath { get; set; } + + [Option("input_image")] + public string InputImage { get; set; } + + [Option("unit_type", Default = "objects", HelpText = "body|face|face_keypoint|pose|objects|emotions|age|gender|mask|liveness|quality")] + public string UnitType { get; set; } + + [Option("version", Default = 1)] + public int Version { get; set; } + + [Option("modification", Default = "")] + public string Modification { get; set; } + + [Option("use_cuda", Default = false)] + public bool UseCuda { get; set; } +}; + +namespace csharp_processing_block_demo +{ + class Program + { + private static readonly Dictionary unitTypes = new() + { + { "body", "HUMAN_BODY_DETECTOR" }, + { "face", "FACE_DETECTOR" }, + { "objects", "OBJECT_DETECTOR" }, + { "face_keypoint", "FACE_FITTER" }, + { "emotions", "EMOTION_ESTIMATOR" }, + { "age", "AGE_ESTIMATOR" }, + { "gender","GENDER_ESTIMATOR" }, + { "mask", "MASK_ESTIMATOR" }, + { "liveness", "LIVENESS_ESTIMATOR" }, + { "quality", "QUALITY_ASSESSMENT_ESTIMATOR" }, + { "pose", "HUMAN_POSE_ESTIMATOR" }, + }; + + static int Main(string[] args) + { + Console.WriteLine + ( + $@"Usage: dotnet csharp_csharp_processing_block_demo.dll {System.Reflection.Assembly.GetExecutingAssembly().Location} + [--input_image ] + [--unit_type body|face|face_keypoint|pose|objects|emotions|age|gender|mask|liveness|quality] + [--sdk_path ../../../] + [--use_cuda]" + ); + + bool error = false; + Options options = new Options(); + CommandLine.Parser.Default.ParseArguments(args) + .WithParsed(opts => options = opts) + .WithNotParsed(errs => error = true); + + if (error) + { + return 1; + } + + Console.WriteLine("Arguments:"); + foreach (var opt in options.GetType().GetProperties()) + { + Console.WriteLine($"--{ToSnakeCase(opt.Name)} = {opt.GetValue(options, null)}"); + } + Console.WriteLine(); + + try + { + FacerecService service = FacerecService.createService(options.SdkPath + "/conf/facerec/", options.SdkPath + "/license/"); + string libDir = options.SdkPath + (OperatingSystem.IsWindows() ? 
"\\bin" : "/lib"); + + string unitType = options.UnitType; + Dictionary configContext = new() + { + { + "ONNXRuntime", + new Dictionary + { + { "library_path", libDir } + } + } + }; + + Console.WriteLine($"Library version: {service.getVersion()}"); + + configContext["unit_type"] = unitTypes[unitType]; + configContext["use_cuda"] = options.UseCuda; + configContext["version"] = options.Version; + configContext["@sdk_path"] = options.SdkPath; + + if (unitType == "quality") + { + configContext["config_name"] = "quality_assessment.xml"; + } + else if (unitType == "liveness") + { + configContext["config_name"] = "liveness_2d_estimator_v3.xml"; + configContext["sdk_path"] = options.SdkPath; + configContext["facerec_conf_dir"] = options.SdkPath + "/conf/facerec"; + } + + if (options.Modification != "") + { + configContext["modification"] = options.Modification; + } + + ProcessingBlock block = service.CreateProcessingBlock(configContext); + Dictionary imageContext = new(); + Mat image = Cv2.ImRead(options.InputImage); + RawImage rawImage = new(image.Cols, image.Rows, RawImage.Format.FORMAT_BGR, GetImageData(image)); + Mat rgbImage = new(); + + if (image.Empty()) + { + throw new Exception($"Can't read image file: {options.InputImage}"); + } + + Cv2.CvtColor(image, rgbImage, ColorConversionCodes.BGR2RGB); + + MatToBsm(ref imageContext, rgbImage); + + Context ioData = service.CreateContext + ( + new Dictionary + { + { "objects", new Dictionary() }, + { "image", imageContext } + } + ); + + if (unitType == "quality" || (unitType == "liveness" && options.Modification == "2d")) + { + Capturer capturer = service.createCapturer("common_capturer_refa_fda_a.xml"); + + foreach (RawSample sample in capturer.capture(rawImage)) + { + ioData["objects"].PushBack(sample.ToContext()); + } + } + else if (new List { "emotions", "gender", "age", "mask", "face_keypoint" }.Contains(unitType)) + { + ProcessingBlock faceBlock = service.CreateProcessingBlock + ( + new Dictionary + { + { "unit_type", unitTypes["face"] }, + { "modification", "ssyv" }, + { "version", 3 }, + { "use_cuda", options.UseCuda }, + { "ONNXRuntime", configContext["ONNXRuntime"] }, + { "confidence_threshold", 0.4 } + } + ); + + faceBlock.Invoke(ioData); + + if (unitType != "face_keypoint") + { + ProcessingBlock fitter = service.CreateProcessingBlock + ( + new Dictionary + { + { "unit_type", "FACE_FITTER" }, + { "modification", "tddfa" }, + { "use_cuda", options.UseCuda }, + { "ONNXRuntime", configContext["ONNXRuntime"] }, + } + ); + + fitter.Invoke(ioData); + } + } + else if (unitType == "pose") + { + ProcessingBlock bodyDetector = service.CreateProcessingBlock + ( + new Dictionary + { + { "unit_type", unitTypes["body"] }, + { "ONNXRuntime", configContext["ONNXRuntime"] }, + { "confidence_threshold", 0.4 }, + { "iou_threshold", 0.45 }, + { "use_cuda", options.UseCuda } + } + ); + + bodyDetector.Invoke(ioData); + } + + block.Invoke(ioData); + + if (ioData["objects"].Length() == 0) + { + Console.WriteLine("No objects have been detected!"); + + return 0; + } + + switch (unitType) + { + case "body": + case "pose": + DrawObjects(ioData, image, "body"); + + break; + + case "face": + DrawObjects(ioData, image, "face"); + + break; + + case "face_keypoint": + DrawFaceKeypoint(ioData, image); + + break; + + case "objects": + DrawObjects(ioData, image); + + break; + + case "emotions": + DrawEmotions(ioData, image); + + break; + + case "age": + case "gender": + case "mask": + case "quality": + DrawAgeGenderMaskQuality(ioData, image, unitType); + + break; + + case 
"liveness": + DrawLiveness(ioData, image); + + break; + } + + Cv2.ImShow("image", image); + Cv2.WaitKey(); + } + catch (Exception e) + { + Console.WriteLine($"! exception catched: '{e}' ... exiting"); + + return 1; + } + + return 0; + } + + private static void DrawObjects(Context ioData, Mat image, string classFilter = "") + { + List> bone_map = new() + { + new("right_ankle", "right_knee"), + new("right_knee","right_hip"), + new("left_hip","right_hip"), + new("left_shoulder","left_hip"), + new("right_shoulder","right_hip"), + new("left_shoulder","right_shoulder"), + new("left_shoulder","left_elbow"), + new("right_shoulder","right_elbow"), + new("left_elbow","left_wrist"), + new("right_elbow","right_wrist"), + new("left_eye","right_eye"), + new("nose","left_eye"), + new("left_knee", "left_hip"), + new("right_ear", "right_shoulder"), + new("left_ear", "left_shoulder"), + new("right_eye", "right_ear"), + new("left_eye", "left_ear"), + new("nose", "right_eye"), + new("left_ankle", "left_knee") + }; + + int width = image.Cols; + int heigth = image.Rows; + Context objects = ioData["objects"]; + + for (int i = 0; i < (int)objects.Length(); i++) + { + Context obj = objects[i]; + string className = obj["class"].GetString(); + if (classFilter.Length != 0 && className != classFilter) + { + continue; + } + + Context rectCtx = obj["bbox"]; + OpenCvSharp.Point topLeft = new(Math.Max(rectCtx[0].GetDouble() * width, 0), Math.Max(rectCtx[1].GetDouble() * heigth, 0)); + OpenCvSharp.Point bottomRight = new(Math.Min(rectCtx[2].GetDouble() * width, width), Math.Min(rectCtx[3].GetDouble() * heigth, heigth)); + Cv2.Rectangle(image, topLeft, bottomRight, new Scalar(0, 255, 0)); + if (classFilter.Length == 0) + { + PutTextWithRightExpansion(image, className, new(topLeft.X, topLeft.Y + 20), HersheyFonts.HersheyDuplex, 1, new(0, 255, 0)); + } + + if (classFilter == "body" && obj.Contains("keypoints")) + { + Context posesCtx = obj["keypoints"]; + + foreach (string key in posesCtx.Keys()) + { + Context proj = posesCtx[key]["proj"]; + + Cv2.Circle(image, new(proj[0].GetDouble() * width, proj[1].GetDouble() * heigth), 3, new(0, 255, 0)); + } + + foreach (Tuple bone in bone_map) + { + string key1 = bone.Item1; + string key2 = bone.Item2; + int x1 = (int)(posesCtx[key1]["proj"][0].GetDouble() * width); + int y1 = (int)(posesCtx[key1]["proj"][1].GetDouble() * heigth); + int x2 = (int)(posesCtx[key2]["proj"][0].GetDouble() * width); + int y2 = (int)(posesCtx[key2]["proj"][1].GetDouble() * heigth); + + Cv2.Line(image, new(x1, y1), new(x2, y2), new(0, 255, 0), 1, LineTypes.Link4); + } + } + } + } + + private static void DrawFaceKeypoint(Context ioData, Mat image) + { + int width = image.Cols; + int heigth = image.Rows; + Context objects = ioData["objects"]; + + DrawObjects(ioData, image, "face"); + + for (int i = 0; i < (int)objects.Length(); i++) + { + Context obj = objects[i]; + Context points = obj["keypoints"]["points"]; + + for (int j = 0; j < (int)points.Length(); j++) + { + Context point = points[j]; + + Cv2.Circle(image, new(point["proj"][0].GetDouble() * width, point["proj"][1].GetDouble() * heigth), 2, new Scalar(0, 255, 0), 5); + } + } + } + private static void DrawEmotions(Context ioData, Mat image) + { + int width = image.Cols; + int height = image.Rows; + Context objects = ioData["objects"]; + + DrawObjects(ioData, image, "face"); + + for (int i = 0; i < (int)objects.Length(); i++) + { + Context obj = objects[i]; + string className = obj["class"].GetString(); + + if (className != "face") + { + continue; + } + + 
Context emotions = obj["emotions"]; + OpenCvSharp.Point textPoint = new + ( + Math.Min(obj["bbox"][2].GetDouble() * width, width), + Math.Max(obj["bbox"][1].GetDouble() * height, 0) + 15 + ); + + SortedSet> results = new(); + + for (int j = 0; j < (int)emotions.Length(); j++) + { + results.Add(new(emotions[j]["confidence"].GetDouble(), emotions[j]["emotion"].GetString())); + } + + double topShift = results.Last().Item1; + + double lengthCoef = 50; + float fontHeight = 12; + + foreach (Tuple iterator in results.AsEnumerable().Reverse()) + { + string emotion = iterator.Item2; + double confidence = iterator.Item1; + int textLineHeight = (int)(fontHeight * Math.Max(0.5, confidence / topShift)); + + PutTextWithRightExpansion(image, emotion, textPoint + new OpenCvSharp.Point(topShift * lengthCoef, 0), HersheyFonts.HersheyDuplex, textLineHeight / (fontHeight * 2), new(255, 255, 0)); + + Cv2.Rectangle + ( + image, + new Rect(textPoint.X, textPoint.Y - textLineHeight / 2, (int)(confidence * lengthCoef), textLineHeight), + emotion == "NEUTRAL" ? new(255, 0, 0) : + emotion == "HAPPY" ? new(0, 255, 0) : + emotion == "ANGRY" ? new(0, 0, 255) : + emotion == "SURPRISED" ? new(0, 255, 255) : + emotion == "DISGUSTED" ? new(255, 255, 0) : + emotion == "SAD" ? new(255, 0, 255) : + emotion == "SCARED" ? new(127, 127, 127) : + new(0, 0, 0), + -1 + ); + + textPoint.Y += textLineHeight; + textPoint.Y += textLineHeight / 3; + } + } + } + private static void DrawAgeGenderMaskQuality(Context ioData, Mat image, string classFilter = "") + { + int width = image.Cols; + int heigth = image.Rows; + Mat imageCopy = image.Clone(); + Context objects = ioData["objects"]; + + DrawObjects(ioData, image, "face"); + + int objectsCounter = 1; + + for (int i = 0; i < (int)objects.Length(); i++) + { + Context obj = objects[i]; + + if (obj["class"].GetString() != "face") + { + continue; + } + + OpenCvSharp.Point textPoint = new + ( + Math.Min(obj["bbox"][2].GetDouble() * width, width), + Math.Max(obj["bbox"][1].GetDouble() * heigth, 0) + 15 + ); + + if (classFilter == "gender") + { + PutTextWithRightExpansion(image, obj["gender"].GetString(), textPoint, HersheyFonts.HersheyDuplex, 0.5, new(0, 0, 255), 1); + } + else if (classFilter == "age") + { + PutTextWithRightExpansion(image, $"Age: {obj[classFilter].GetLong()}", textPoint, HersheyFonts.HersheyDuplex, 0.5, new(0, 0, 255), 1); + } + else if (classFilter == "mask") + { + PutTextWithRightExpansion(image, $"Mask: {obj["has_medical_mask"]["value"].GetBool()}", textPoint, HersheyFonts.HersheyDuplex, 0.5, new(0, 0, 255), 1); + } + else if (classFilter == "quality") + { + if (objectsCounter <= 6) + { + OpenCvSharp.Point topLeft = new(Math.Max(obj["bbox"][0].GetDouble() * width, 0), Math.Max(obj["bbox"][1].GetDouble() * heigth, 0)); + OpenCvSharp.Point bottomRight = new(Math.Min(obj["bbox"][2].GetDouble() * width, width), Math.Min(obj["bbox"][3].GetDouble() * heigth, heigth)); + Rect faceRect = Rect.FromLTRB(topLeft.X, topLeft.Y, bottomRight.X, bottomRight.Y); + int heightForTextAndCrop = (int)(heigth * 0.15 * objectsCounter - (heigth * 0.05)); + Mat face = imageCopy[faceRect]; + double scaleFactor = heigth / (7.0 * face.Rows); + + Cv2.Resize(face, face, new(), scaleFactor, scaleFactor, InterpolationFlags.Linear); + + Rect roi = new(width + 5, heightForTextAndCrop, face.Cols, face.Rows); + + PutTextWithRightExpansion(image, "Result of 6 first faces:", new OpenCvSharp.Point(width, 20), HersheyFonts.HersheySimplex, + 0.5, new(0, 255, 0), 1); + + PutTextWithRightExpansion(image, "Quality 
:", new OpenCvSharp.Point(width + 5 + roi.Width, heightForTextAndCrop + 30), HersheyFonts.HersheyDuplex, + 0.5, new(0, 255, 0), 1); + + PutTextWithRightExpansion(image, Math.Round(obj["quality"]["total_score"].GetDouble() * 100).ToString(), + new OpenCvSharp.Point(width + 75 + roi.Width, heightForTextAndCrop + 30), HersheyFonts.HersheyDuplex, + 0.5, new(0, 255, 0), 1); + Mat drawRoi = image[roi]; + + face.CopyTo(drawRoi); + + objectsCounter++; + } + } + } + } + + private static void DrawLiveness(Context ioData, Mat image) + { + int width = image.Cols; + int heigth = image.Rows; + Context objects = ioData["objects"]; + + DrawObjects(ioData, image, "face"); + + for (int i = 0; i < (int)objects.Length(); i++) + { + Context obj = objects[i]; + + if (obj["class"].GetString() != "face") + { + continue; + } + + OpenCvSharp.Point textPoint = new(obj["bbox"][2].GetDouble() * width, obj["bbox"][1].GetDouble() * heigth); + + if (obj.Contains("liveness")) + { + Context liveness = obj["liveness"]; + + PutTextWithRightExpansion(image, liveness["value"].GetString(), textPoint, HersheyFonts.HersheyDuplex, 0.5, new(0, 0, 255), 1); + + textPoint.Y += 12; + + PutTextWithRightExpansion(image, liveness["confidence"].GetDouble().ToString(), textPoint, HersheyFonts.HersheyDuplex, 0.5, new(0, 0, 255), 1); + + if (liveness.Contains("info")) + { + textPoint.Y += 12; + + PutTextWithRightExpansion(image, $"info: {liveness["info"].GetString()}", textPoint, HersheyFonts.HersheyDuplex, 0.5, new(0, 0, 255), 1); + } + } + } + } + + private static void PutTextWithRightExpansion(Mat image, string text, OpenCvSharp.Point org, HersheyFonts fontFace, double fontScale, Scalar color, + int thickness = 1, LineTypes lineType = OpenCvSharp.LineTypes.Link8, bool bottomLeftOrigin = false, BorderTypes borderType = BorderTypes.Constant) + { + + Size textSize = Cv2.GetTextSize(text, fontFace, fontScale, thickness, out int baseLine); + int rightBorder = textSize.Width + org.X - image.Cols; + + if (rightBorder > 0) + { + Cv2.CopyMakeBorder(image, image, 0, 0, 0, rightBorder + 2, borderType); + } + + Cv2.PutText(image, text, org, fontFace, fontScale, color, thickness, lineType, bottomLeftOrigin); + } + + private static void MatToBsm(ref Dictionary bsmCtx, Mat img) + { + Dictionary CvTypeToStr = new() + { + {MatType.CV_8U,"uint8_t"}, {MatType.CV_8S, "int8_t"}, + {MatType.CV_16U, "uint16_t"}, {MatType.CV_16S, "int16_t"} , + {MatType.CV_32S, "int32_t"}, {MatType.CV_32F, "float"}, {MatType.CV_64F, "double"} + }; + + var inputImage = img.IsContinuous() ? 
img : img.Clone(); + long size = inputImage.Total() * inputImage.ElemSize(); + List sizes = new(); + byte[] buffer = new byte[size]; + + using (Mat temp = new(inputImage.Rows, inputImage.Cols, inputImage.Type(), buffer)) + { + inputImage.CopyTo(temp); + } + + for (int i = 0; i < inputImage.Dims; ++i) + { + sizes.Add(inputImage.Size(i)); + } + + sizes.Add(inputImage.Channels()); + + bsmCtx["blob"] = buffer; + bsmCtx["format"] = "NDARRAY"; + bsmCtx["shape"] = sizes; + bsmCtx["dtype"] = CvTypeToStr[inputImage.Depth()]; + } + + private static byte[] GetImageData(Mat image) + { + int length = image.Cols * image.Rows * image.Channels(); + byte[] result = new byte[length]; + + unsafe + { + Marshal.Copy((IntPtr)image.DataPointer, result, 0, length); + } + + return result; + } + + private static string ToSnakeCase(string text) + { + if (text == null) + { + throw new ArgumentNullException(nameof(text)); + } + + if (text.Length < 2) + { + return text; + } + + StringBuilder stringBuilder = new(); + + stringBuilder.Append(char.ToLowerInvariant(text[0])); + + for (int i = 1; i < text.Length; ++i) + { + char c = text[i]; + + if (char.IsUpper(c)) + { + stringBuilder.Append('_'); + stringBuilder.Append(char.ToLowerInvariant(c)); + } + else + { + stringBuilder.Append(c); + } + } + + return stringBuilder.ToString(); + } + } +} diff --git a/examples/csharp/video_recognition_demo/csharp_video_recognition_demo.csproj b/examples/csharp/video_recognition_demo/csharp_video_recognition_demo.csproj index 029ce26..8a085c5 100644 --- a/examples/csharp/video_recognition_demo/csharp_video_recognition_demo.csproj +++ b/examples/csharp/video_recognition_demo/csharp_video_recognition_demo.csproj @@ -1,22 +1,20 @@ - + Exe - netcoreapp2.0 + net6.0 true + ..\..\..\bin\FacerecCSharpWrapper.dll ../../../lib/FacerecCSharpWrapper.dll - - - - + - - + + diff --git a/examples/csharp/video_recognition_demo/vs/Properties/launchSettings.json b/examples/csharp/video_recognition_demo/vs/Properties/launchSettings.json new file mode 100644 index 0000000..445e934 --- /dev/null +++ b/examples/csharp/video_recognition_demo/vs/Properties/launchSettings.json @@ -0,0 +1,8 @@ +{ + "profiles": { + "csharp_video_recognition_demo": { + "commandName": "Project", + "commandLineArgs": "0 --config_dir ../../../../../../../conf/facerec --database_dir ../../../../../../../bin/base" + } + } +} \ No newline at end of file diff --git a/examples/csharp/video_recognition_demo/vs/csharp_video_recognition_demo.csproj b/examples/csharp/video_recognition_demo/vs/csharp_video_recognition_demo.csproj index 9671b95..0361df7 100644 --- a/examples/csharp/video_recognition_demo/vs/csharp_video_recognition_demo.csproj +++ b/examples/csharp/video_recognition_demo/vs/csharp_video_recognition_demo.csproj @@ -1,73 +1,22 @@ - - - + + - Debug - x64 Exe - v4.6 - {128CE552-C258-4E73-9B70-BA1C92F429BB} - --config_dir ../../../../../../conf/facerec --database_dir ../../../../../../bin/base 0 + net6.0 true - - x64 - true - full - false - bin\Debug\ - DEBUG;TRACE - prompt - 4 - - - x64 - pdbonly - true - bin\Release\ - TRACE - prompt - 4 - + - ..\..\..\..\bin\FacerecCSharpWrapper.dll - True - - - ..\..\..\..\bin\csharp_demo\deps\CommandLine.dll - True - - - ..\..\..\..\bin\csharp_demo\deps\OpenCvSharp.Blob.dll - True + ..\..\..\..\bin\FacerecCSharpWrapper.dll - - ..\..\..\..\bin\csharp_demo\deps\OpenCvSharp.dll - True - - - - - - - - - - - - - - + PreserveNewest - - PreserveNewest - - + PreserveNewest @@ -82,13 +31,17 @@ PreserveNewest + + PreserveNewest + + + PreserveNewest + + + + 
+ + - - - \ No newline at end of file + + diff --git a/examples/csharp/video_recognition_demo/vs/csharp_video_recognition_demo.sln b/examples/csharp/video_recognition_demo/vs/csharp_video_recognition_demo.sln index 7f1f0b0..c77358d 100644 --- a/examples/csharp/video_recognition_demo/vs/csharp_video_recognition_demo.sln +++ b/examples/csharp/video_recognition_demo/vs/csharp_video_recognition_demo.sln @@ -1,22 +1,22 @@ - -Microsoft Visual Studio Solution File, Format Version 12.00 -# Visual Studio 14 -VisualStudioVersion = 14.0.25420.1 -MinimumVisualStudioVersion = 10.0.40219.1 -Project("{C304A816-92B3-4933-8410-62CB974DBBE4}") = "csharp_video_recognition_demo", "csharp_video_recognition_demo.csproj", "{128CE552-C258-4E73-9B70-BA1C92F429BB}" -EndProject -Global - GlobalSection(SolutionConfigurationPlatforms) = preSolution - Debug|x64 = Debug|x64 - Release|x64 = Release|x64 - EndGlobalSection - GlobalSection(ProjectConfigurationPlatforms) = postSolution - {128CE552-C258-4E73-9B70-BA1C92F429BB}.Debug|x64.ActiveCfg = Debug|x64 - {128CE552-C258-4E73-9B70-BA1C92F429BB}.Debug|x64.Build.0 = Debug|x64 - {128CE552-C258-4E73-9B70-BA1C92F429BB}.Release|x64.ActiveCfg = Release|x64 - {128CE552-C258-4E73-9B70-BA1C92F429BB}.Release|x64.Build.0 = Release|x64 - EndGlobalSection - GlobalSection(SolutionProperties) = preSolution - HideSolutionNode = FALSE - EndGlobalSection -EndGlobal + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio 14 +VisualStudioVersion = 14.0.25420.1 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{C304A816-92B3-4933-8410-62CB974DBBE4}") = "csharp_video_recognition_demo", "csharp_video_recognition_demo.csproj", "{128CE552-C258-4E73-9B70-BA1C92F429BB}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|Any CPU = Debug|Any CPU + Release|Any CPU = Release|Any CPU + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {128CE552-C258-4E73-9B70-BA1C92F429BB}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {128CE552-C258-4E73-9B70-BA1C92F429BB}.Debug|Any CPU.Build.0 = Debug|Any CPU + {128CE552-C258-4E73-9B70-BA1C92F429BB}.Release|Any CPU.ActiveCfg = Release|Any CPU + {128CE552-C258-4E73-9B70-BA1C92F429BB}.Release|Any CPU.Build.0 = Release|Any CPU + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection +EndGlobal diff --git a/examples/csharp/video_recognition_demo/src/Database.cs b/examples/csharp/video_recognition_demo/vs/src/Database.cs similarity index 100% rename from examples/csharp/video_recognition_demo/src/Database.cs rename to examples/csharp/video_recognition_demo/vs/src/Database.cs diff --git a/examples/csharp/video_recognition_demo/src/ImageAndDepthSource.cs b/examples/csharp/video_recognition_demo/vs/src/ImageAndDepthSource.cs similarity index 90% rename from examples/csharp/video_recognition_demo/src/ImageAndDepthSource.cs rename to examples/csharp/video_recognition_demo/vs/src/ImageAndDepthSource.cs index c5a81cf..8d76c42 100644 --- a/examples/csharp/video_recognition_demo/src/ImageAndDepthSource.cs +++ b/examples/csharp/video_recognition_demo/vs/src/ImageAndDepthSource.cs @@ -10,7 +10,7 @@ public class ImageAndDepth { public OpenCvSharp.Mat image; - public OpenCvSharp.MatOfUShort depth; + public OpenCvSharp.Mat depth; public UInt64 image_timestamp_microsec; public UInt64 depth_timestamp_microsec; @@ -30,7 +30,7 @@ public DepthMapRaw make_dmr() public ImageAndDepth() { image = new OpenCvSharp.Mat(); - depth = new 
OpenCvSharp.MatOfUShort(); + depth = new OpenCvSharp.Mat(); depth_opts = new DepthMapRaw(); } }; diff --git a/examples/csharp/video_recognition_demo/src/MAssert.cs b/examples/csharp/video_recognition_demo/vs/src/MAssert.cs similarity index 100% rename from examples/csharp/video_recognition_demo/src/MAssert.cs rename to examples/csharp/video_recognition_demo/vs/src/MAssert.cs diff --git a/examples/csharp/video_recognition_demo/src/Main.cs b/examples/csharp/video_recognition_demo/vs/src/Main.cs similarity index 100% rename from examples/csharp/video_recognition_demo/src/Main.cs rename to examples/csharp/video_recognition_demo/vs/src/Main.cs diff --git a/examples/csharp/video_recognition_demo/src/OpencvSource.cs b/examples/csharp/video_recognition_demo/vs/src/OpencvSource.cs similarity index 93% rename from examples/csharp/video_recognition_demo/src/OpencvSource.cs rename to examples/csharp/video_recognition_demo/vs/src/OpencvSource.cs index 918312c..ff27b27 100644 --- a/examples/csharp/video_recognition_demo/src/OpencvSource.cs +++ b/examples/csharp/video_recognition_demo/vs/src/OpencvSource.cs @@ -47,8 +47,8 @@ public OpencvSource(string cam_or_url) MAssert.Check(capturer.IsOpened(), "webcam not opened"); // set resolution - capturer.Set(OpenCvSharp.CaptureProperty.FrameWidth, 1280); - capturer.Set(OpenCvSharp.CaptureProperty.FrameHeight, 720); + capturer.Set(OpenCvSharp.VideoCaptureProperties.FrameWidth, 1280); + capturer.Set(OpenCvSharp.VideoCaptureProperties.FrameHeight, 720); MAssert.Check(capturer.IsOpened(), "webcam not opened"); } diff --git a/examples/csharp/video_recognition_demo/src/Worker.cs b/examples/csharp/video_recognition_demo/vs/src/Worker.cs similarity index 99% rename from examples/csharp/video_recognition_demo/src/Worker.cs rename to examples/csharp/video_recognition_demo/vs/src/Worker.cs index 1227771..6f20882 100644 --- a/examples/csharp/video_recognition_demo/src/Worker.cs +++ b/examples/csharp/video_recognition_demo/vs/src/Worker.cs @@ -141,7 +141,7 @@ public void Dispose() Dispose(); } - static void puttext(OpenCvSharp.Mat image, string text, OpenCvSharp.Point2f position) + static void puttext(OpenCvSharp.Mat image, string text, OpenCvSharp.Point position) { // twice - for better reading // since we are drawing on the frame from webcam @@ -588,10 +588,10 @@ OpenCvSharp.Mat Draw( RawSample.Rectangle rectangle = face.sample.getRectangle(); // set a point to place information for this face - OpenCvSharp.Point2f text_point = new OpenCvSharp.Point2f( + OpenCvSharp.Point text_point = new OpenCvSharp.Point( rectangle.x + rectangle.width + 3, rectangle.y + 10); - const float text_line_height = 22; + const int text_line_height = 22; // choose color OpenCvSharp.Scalar color = @@ -696,7 +696,7 @@ OpenCvSharp.Mat Draw( emotion == EmotionsEstimator.Emotion.EMOTION_DISGUSTED ? "disgusted" : emotion == EmotionsEstimator.Emotion.EMOTION_SAD ? "sad" : emotion == EmotionsEstimator.Emotion.EMOTION_SCARED ? 
"scared" : "?", - text_point + new OpenCvSharp.Point2f(100, 0)); + text_point + new OpenCvSharp.Point(100, 0)); text_point.Y += text_line_height; diff --git a/examples/csharp/video_recognition_demo_realsense/csharp_video_recognition_demo.csproj b/examples/csharp/video_recognition_demo_realsense/csharp_video_recognition_demo.csproj index 2f28b83..96bedea 100644 --- a/examples/csharp/video_recognition_demo_realsense/csharp_video_recognition_demo.csproj +++ b/examples/csharp/video_recognition_demo_realsense/csharp_video_recognition_demo.csproj @@ -1,8 +1,8 @@ - + Exe - netcoreapp2.0 + net6.0 true @@ -16,13 +16,9 @@ ..\..\..\bin\Intel.RealSense.dll ../../../bin/Intel.RealSense.dll - - - - - - + + diff --git a/examples/csharp/video_recognition_demo_realsense/src/Database.cs b/examples/csharp/video_recognition_demo_realsense/src/Database.cs deleted file mode 100644 index c714ddd..0000000 --- a/examples/csharp/video_recognition_demo_realsense/src/Database.cs +++ /dev/null @@ -1,211 +0,0 @@ -/** - \file video_recognition_demo/src/Database.h -*/ - -using System; -using System.Threading; -using System.IO; -using System.Diagnostics; -using System.Collections.Generic; -using System.Runtime.InteropServices; - -using VDT.FaceRecognition.SDK; - -// database -public class Database -{ - public List vwElements; - public List samples; - public List thumbnails; - public List names; - - // make a thumbnail of a sample - public static OpenCvSharp.Mat makeThumbnail( - RawSample sample, - string name = "") - { - int thumbnail_size = Worker.thumbnail_size; - - // buffer for the cutted image - MemoryStream stream = new MemoryStream(); - - // make a cut in bmp format - // so we don't waste time for encode/decode image - // just copying it few times, which is irrelevant - sample.cutFaceImage( - stream, - RawSample.ImageFormat.IMAGE_FORMAT_BMP, - RawSample.FaceCutType.FACE_CUT_BASE); - - OpenCvSharp.Mat temp = OpenCvSharp.Mat.ImDecode(stream.ToArray(), OpenCvSharp.ImreadModes.Color); - - // so we got an image - - // check it - MAssert.Check(!temp.Empty()); - MAssert.Check(temp.Type() == OpenCvSharp.MatType.CV_8UC3); - - - // and resize to the thumbnail_size - - OpenCvSharp.Rect resRect; - - if(temp.Rows >= temp.Cols) - { - resRect.Height = thumbnail_size; - resRect.Width = temp.Cols * thumbnail_size / temp.Rows; - } - else - { - resRect.Width = thumbnail_size; - resRect.Height = temp.Rows * thumbnail_size / temp.Cols; - } - - resRect.X = (thumbnail_size - resRect.Width) / 2; - resRect.Y = (thumbnail_size - resRect.Height) / 2; - - OpenCvSharp.Mat result = new OpenCvSharp.Mat( - thumbnail_size, - thumbnail_size, - OpenCvSharp.MatType.CV_8UC3, - OpenCvSharp.Scalar.All(0)); - - OpenCvSharp.Cv2.Resize( - temp, - result[resRect], - resRect.Size); - - if(!string.IsNullOrEmpty(name)) - { - result[new OpenCvSharp.Rect(0, result.Rows - 27, result.Cols, 27)] = result.RowRange(result.Rows - 27, result.Rows) * 0.5f; - - OpenCvSharp.Cv2.PutText( - result, - name, - new OpenCvSharp.Point(0, result.Rows - 7), - OpenCvSharp.HersheyFonts.HersheyDuplex, - 0.7, - OpenCvSharp.Scalar.All(255), - 1, - OpenCvSharp.LineTypes.AntiAlias); - } - - return result; - - } - - // create the database - public Database( - string databaseDirPath, - Recognizer recognizer, - Capturer capturer, - float distanceThreshold) - { - vwElements = new List(); - samples = new List(); - thumbnails = new List(); - names = new List(); - // check paths - MAssert.Check(Directory.Exists(databaseDirPath), "database not found"); - - // get directory content - List path_l1 
= new List(Directory.EnumerateDirectories(databaseDirPath)); - - // check every element in that directory - - ulong element_id_counter = 0; - - for(int il1 = 0; il1 < path_l1.Count; ++il1) - { - // ignore files - if(!Directory.Exists(path_l1[il1])) - continue; - // so path_l1[il1] is supposed to be the path to the person directory - - // get files inside i - List path_l2 = new List(Directory.EnumerateFiles(path_l1[il1])); - string name = string.Empty; - - // search for the name.txt file - - for(int il2 = 0; il2 < path_l2.Count; ++il2) - { - if(Path.GetFileName(path_l2[il2]) == "name.txt") - { - // put file content in the name - - using (StreamReader sr = new StreamReader(path_l2[il2])) - { - name = sr.ReadToEnd(); - } - } - } - - // try to open each file as an image - for(int il2 = 0; il2 < path_l2.Count; ++il2) - { - if(Path.GetFileName(path_l2[il2]) == "name.txt") - continue; - - Console.WriteLine("processing '{0}' name: '{1}'", path_l2[il2], name); - - // read image with opencv - - OpenCvSharp.Mat readed_image = OpenCvSharp.Cv2.ImRead(path_l2[il2]); - - if(readed_image.Empty() || readed_image.Type() != OpenCvSharp.MatType.CV_8UC3) - { - Console.WriteLine("\n\nWARNING: can't read image '{0}'\n\n", path_l2[il2]); - continue; - } - - byte[] data = new byte[readed_image.Total() * readed_image.Type().Channels]; - Marshal.Copy(readed_image.DataStart, data, 0, (int)data.Length); - RawImage image = new RawImage(readed_image.Width, readed_image.Height, RawImage.Format.FORMAT_BGR, data); - - // capture the face - List capturedSamples = capturer.capture(image); - - if(capturedSamples.Count != 1) - { - Console.WriteLine("\n\nWARNING: detected {0} faces on '{1}' image instead of one, image ignored \n\n", capturedSamples.Count, path_l2[il2]); - continue; - } - - RawSample sample = capturedSamples[0]; - - // make template - Template templ = recognizer.processing(sample); - - // prepare data for VideoWorker - VideoWorker.DatabaseElement vwElement = new VideoWorker.DatabaseElement(element_id_counter++, (ulong)il1, templ, distanceThreshold); - - vwElements.Add(vwElement); - - samples.Add(sample); - - thumbnails.Add(makeThumbnail(sample, name)); - - names.Add(name); - } - - } - - MAssert.Check((int)element_id_counter == vwElements.Count); - MAssert.Check((int)element_id_counter == samples.Count); - MAssert.Check((int)element_id_counter == thumbnails.Count); - MAssert.Check((int)element_id_counter == names.Count); - } -}; - - - - - - - - - -/** - \file video_recognition_demo/src/Database.cpp -*/ diff --git a/examples/csharp/video_recognition_demo_realsense/src/ImageAndDepthSource.cs b/examples/csharp/video_recognition_demo_realsense/src/ImageAndDepthSource.cs deleted file mode 100644 index c5a81cf..0000000 --- a/examples/csharp/video_recognition_demo_realsense/src/ImageAndDepthSource.cs +++ /dev/null @@ -1,41 +0,0 @@ -/** - \file csharp_demo/video_recognition_demo/src/ImageAndDepthSource.cs -*/ - -using System; - -using VDT.FaceRecognition.SDK; - - -public class ImageAndDepth -{ - public OpenCvSharp.Mat image; - public OpenCvSharp.MatOfUShort depth; - - public UInt64 image_timestamp_microsec; - public UInt64 depth_timestamp_microsec; - - public DepthMapRaw depth_opts; // all but depth_data and depth_data_stride_in_bytes must be set - - public DepthMapRaw make_dmr() - { - MAssert.Check(!depth.Empty()); - - DepthMapRaw r = depth_opts; - r.depth_data_ptr = depth.Data; - r.depth_data_stride_in_bytes = (int)depth.Step(); - - return r; - } - public ImageAndDepth() - { - image = new OpenCvSharp.Mat(); - depth = 
new OpenCvSharp.MatOfUShort(); - depth_opts = new DepthMapRaw(); - } -}; - -abstract class ImageAndDepthSource -{ - public abstract ImageAndDepth Get(); -}; diff --git a/examples/csharp/video_recognition_demo_realsense/src/MAssert.cs b/examples/csharp/video_recognition_demo_realsense/src/MAssert.cs deleted file mode 100644 index 0cc98f3..0000000 --- a/examples/csharp/video_recognition_demo_realsense/src/MAssert.cs +++ /dev/null @@ -1,15 +0,0 @@ -/** - \file csharp_demo/video_recognition_demo/src/MAssert.cs -*/ - -using System; - -class MAssert -{ - static public void Check(bool expr, string description="") - { - if(!expr) - throw new Exception(description); - } - -} diff --git a/examples/csharp/video_recognition_demo_realsense/src/Main.cs b/examples/csharp/video_recognition_demo_realsense/src/Main.cs deleted file mode 100644 index 0b2028f..0000000 --- a/examples/csharp/video_recognition_demo_realsense/src/Main.cs +++ /dev/null @@ -1,267 +0,0 @@ -/** - \file csharp_demo/video_recognition_demo/src/Main.cs -*/ - - -using System; -using System.IO; -using System.Threading; -using System.Collections.Generic; - -using CommandLine; - -using VDT.FaceRecognition.SDK; - - - -class Options -{ - [Option("config_dir", Default = "../../../conf/facerec", HelpText = "Path to config directory.")] - public string config_dir { get; set; } - - [Option("license_dir", Default = null, HelpText = "Path to license directory [optional].")] - public string license_dir { get; set; } - - [Option("database_dir", Default = "../../base", HelpText = "Path to database directory.")] - public string database_dir { get; set; } - - [Option("method_config", Default = "recognizer_latest_v100.xml", HelpText = "Recognizer config file.")] - public string method_config { get; set; } - - [Option("recognition_far_threshold", Default = 1e-6f, HelpText = "Recognition FAR threshold.")] - public float recognition_far_threshold { get; set; } - - [Option("frame_fps_limit", Default = 25f, HelpText = "Frame fps limit.")] - public float frame_fps_limit { get; set; } - - [Value(0, MetaName = "video_sources", HelpText = "List of video sources (id of web-camera, url of rtsp stream or path to video file)")] - public IEnumerable video_sources { get; set; } -}; - -class VideoRecognitionDemo -{ - - static int Main(string[] args) - { - try - { - // print usage - Console.WriteLine("Usage: dotnet csharp_video_recognition_demo.dll [OPTIONS] ..."); - Console.WriteLine("Examples:"); - Console.WriteLine(" Webcam: dotnet csharp_video_recognition_demo.dll --config_dir ../../../conf/facerec 0"); - Console.WriteLine(" RTSP stream: dotnet csharp_video_recognition_demo.dll --config_dir ../../../conf/facerec rtsp://localhost:8554/"); - Console.WriteLine(""); - - // parse arguments - bool error = false; - Options options = new Options(); - CommandLine.Parser.Default.ParseArguments(args) - .WithParsed(opts => options = opts) - .WithNotParsed(errs => error = true); - - // exit if argument parsign error - if (error) return 1; - - // print values of arguments - Console.WriteLine("Arguments:"); - foreach(var opt in options.GetType().GetProperties()) - { - if(opt.Name == "video_sources") - { - Console.Write("video sources = "); - foreach(string vs in options.video_sources){ - Console.Write(vs + " "); - } - Console.WriteLine(); - } else { - Console.WriteLine("--{0} = {1}", opt.Name, opt.GetValue(options, null)); - } - } - Console.WriteLine("\n"); - - //parameters parse - string config_dir = options.config_dir; - string license_dir = options.license_dir; - string 
database_dir = options.database_dir; - string method_config = options.method_config; - float recognition_far_threshold = options.recognition_far_threshold; - float frame_fps_limit = options.frame_fps_limit; - List video_sources = new List(options.video_sources); - - // check params - MAssert.Check(config_dir != string.Empty, "Error! config_dir is empty."); - MAssert.Check(database_dir != string.Empty, "Error! database_dir is empty."); - MAssert.Check(method_config != string.Empty, "Error! method_config is empty."); - MAssert.Check(recognition_far_threshold > 0, "Error! Failed recognition far threshold."); - - List sources = new List(); - List sources_names = new List(); - - - sources_names.Add("RealSense source"); - sources.Add(new RealSenseSource()); - - MAssert.Check(sources_names.Count == 1); - - - MAssert.Check(sources_names.Count == sources.Count); - - // print sources - Console.WriteLine("\n{0} sources: ", sources.Count); - - for(int i = 0; i < sources_names.Count; ++i) - Console.WriteLine(" {0}", sources_names[i]); - Console.WriteLine(""); - - // create facerec servcie - FacerecService service = - FacerecService.createService( - config_dir, - license_dir); - - Console.WriteLine("Library version: {0}\n", service.getVersion()); - - // create database - Recognizer recognizer = service.createRecognizer(method_config, true, false, false); - float recognition_distance_threshold = Convert.ToSingle(recognizer.getROCCurvePointByFAR(recognition_far_threshold).distance); - - Capturer capturer = service.createCapturer("common_capturer4_lbf_singleface.xml"); - Database database = new Database( - database_dir, - recognizer, - capturer, - recognition_distance_threshold); - recognizer.Dispose(); - capturer.Dispose(); - - FacerecService.Config vw_config = new FacerecService.Config("video_worker_fdatracker_blf_fda.xml"); - // vw_config.overrideParameter("single_match_mode", 1); - vw_config.overrideParameter("search_k", 10); - vw_config.overrideParameter("not_found_match_found_callback", 1); - vw_config.overrideParameter("downscale_rawsamples_to_preferred_size", 0); - - //ActiveLiveness.CheckType[] checks = new ActiveLiveness.CheckType[3] - //{ - // ActiveLiveness.CheckType.BLINK, - // ActiveLiveness.CheckType.TURN_RIGHT, - // ActiveLiveness.CheckType.SMILE - //}; - - vw_config.overrideParameter("depth_data_flag", 1); - - vw_config.overrideParameter("good_light_dark_threshold", 1); - vw_config.overrideParameter("good_light_range_threshold", 1); - // vw_config.overrideParameter("good_blur_threshold", 0.2); - - // create one VideoWorker - VideoWorker video_worker = - service.createVideoWorker( - new VideoWorker.Params() - .video_worker_config(vw_config) - .recognizer_ini_file(method_config) - .streams_count(sources.Count) - .age_gender_estimation_threads_count(sources.Count) - .emotions_estimation_threads_count(sources.Count) - //.active_liveness_checks_order(checks) - .processing_threads_count(sources.Count) - .matching_threads_count(sources.Count)); - - // set database - video_worker.setDatabase(database.vwElements, Recognizer.SearchAccelerationType.SEARCH_ACCELERATION_1); - - for(int i = 0; i < sources_names.Count; ++i) - { - OpenCvSharp.Window window = new OpenCvSharp.Window(sources_names[i]); - - OpenCvSharp.Cv2.ImShow(sources_names[i], new OpenCvSharp.Mat(100, 100, OpenCvSharp.MatType.CV_8UC3, OpenCvSharp.Scalar.All(0))); - } - - // prepare buffers for store drawed results - Mutex draw_images_mutex = new Mutex(); - List draw_images = new List(sources.Count); - - - // create one worker per one 
source - List workers = new List(); - - for(int i = 0; i < sources.Count; ++i) - { - draw_images.Add(new OpenCvSharp.Mat(100, 100, OpenCvSharp.MatType.CV_8UC3, OpenCvSharp.Scalar.All(0))); - workers.Add(new Worker( - database, - video_worker, - sources[i], - i, // stream_id - draw_images_mutex, - draw_images[i], - frame_fps_limit - )); - } - - - // draw results until escape presssed - for(;;) - { - { - draw_images_mutex.WaitOne(); - for(int i = 0; i < draw_images.Count; ++i) - { - OpenCvSharp.Mat drawed_im = workers[i]._draw_image; - if(!drawed_im.Empty()) - { - OpenCvSharp.Cv2.ImShow(sources_names[i], drawed_im); - draw_images[i] = new OpenCvSharp.Mat(); - } - } - draw_images_mutex.ReleaseMutex(); - } - - int key = OpenCvSharp.Cv2.WaitKey(20); - if(27 == key) - { - foreach(Worker w in workers) - { - w.Dispose(); - } - break; - } - - if(' ' == key) - { - Console.WriteLine("enable processing 0"); - video_worker.enableProcessingOnStream(0); - } - - if(13 == key) - { - Console.WriteLine("disable processing 0"); - video_worker.disableProcessingOnStream(0); - } - - - if('r' == key) - { - Console.WriteLine("reset trackerOnStream"); - video_worker.resetTrackerOnStream(0); - } - - - // check exceptions in callbacks - video_worker.checkExceptions(); - } - - // force free resources - // otherwise licence error may occur - // when create sdk object in next time - service.Dispose(); - video_worker.Dispose(); - } - catch(Exception e) - { - Console.WriteLine("video_recognition_show exception catched: '{0}'", e.ToString()); - return 1; - } - - return 0; - } -} diff --git a/examples/csharp/video_recognition_demo_realsense/src/RealSenseSource.cs b/examples/csharp/video_recognition_demo_realsense/src/RealSenseSource.cs deleted file mode 100644 index ea2def2..0000000 --- a/examples/csharp/video_recognition_demo_realsense/src/RealSenseSource.cs +++ /dev/null @@ -1,129 +0,0 @@ -/** - \file csharp_demo/video_recognition_demo/src/RealSenseSource.cs -*/ - - -using System; - -using Intel.RealSense; - -using VDT.FaceRecognition.SDK; - -class RealSenseSource : ImageAndDepthSource -{ - Pipeline pipeline; - - Align align_to_color; - - private float depth_scale = 0; - - private long prev_depth_frame_number = -1; - private long prev_video_frame_number = -1; - - public RealSenseSource() - { - using (Context ctx = new Context()) - { - DeviceList devices = ctx.QueryDevices(); - - Console.WriteLine("There are {0} connected RealSense devices.", devices.Count); - - MAssert.Check(devices.Count > 0, "there is no RealSense devices"); - - for(int i = 0; i < devices.Count; ++i) - { - Console.WriteLine( - "Device {0}: name {1}, serial {2}, firmware version: {3}", - i, - devices[i].Info[CameraInfo.Name], - devices[i].Info[CameraInfo.SerialNumber], - devices[i].Info[CameraInfo.FirmwareVersion]); - } - } - - Config config = new Config(); - config.EnableStream(Stream.Depth, 640, 480, Format.Z16); - config.EnableStream(Stream.Color, 640, 480, Format.Bgr8); - - pipeline = new Pipeline(); - - PipelineProfile pipeline_profile = pipeline.Start(config); - - depth_scale = pipeline_profile.Device.Sensors[0].DepthScale * 1000; - - MAssert.Check(depth_scale > 0); - - align_to_color = new Align(Stream.Color); - } - - - public override ImageAndDepth Get() - { - ImageAndDepth res = new ImageAndDepth(); - - using(FrameSet frameset = pipeline.WaitForFrames()) - { - DepthFrame depth_frame = frameset.FirstOrDefault(Stream.Depth, Format.Z16).DisposeWith(frameset); - - if(depth_frame != null && ((long)depth_frame.Number) > prev_depth_frame_number) - { - 
prev_depth_frame_number = (long) depth_frame.Number; - - FrameSet registered = align_to_color.Process(frameset).As().DisposeWith(frameset); - - depth_frame = registered.FirstOrDefault(Stream.Depth, Format.Z16).DisposeWith(frameset); - - MAssert.Check(depth_frame != null); - MAssert.Check(depth_frame.BitsPerPixel == 16); - MAssert.Check(depth_frame.Stride >= depth_frame.Width * 2); - - float[] fov = depth_frame.Profile.As().GetIntrinsics().FOV; - - res.depth_opts.horizontal_fov = fov[0]; - res.depth_opts.vertical_fov = fov[1]; - res.depth_opts.depth_unit_in_millimeters = depth_scale; - - - res.depth_opts.depth_map_2_image_offset_x = 0; - res.depth_opts.depth_map_2_image_offset_y = 0; - res.depth_opts.depth_map_2_image_scale_x = 1; - res.depth_opts.depth_map_2_image_scale_y = 1; - - res.depth_opts.depth_map_rows = depth_frame.Height; - res.depth_opts.depth_map_cols = depth_frame.Width; - - res.depth_timestamp_microsec = (ulong) depth_frame.Timestamp * 1000; - - res.depth = new OpenCvSharp.MatOfUShort( - depth_frame.Height, - depth_frame.Width, - depth_frame.Data, - depth_frame.Stride).Clone(); - } - - - VideoFrame video_frame = frameset.FirstOrDefault(Stream.Color, Format.Bgr8).DisposeWith(frameset); - - if(video_frame != null && ((long)video_frame.Number > prev_video_frame_number)) - { - prev_video_frame_number = (long) video_frame.Number; - - MAssert.Check(video_frame.BitsPerPixel == 24); - MAssert.Check(video_frame.Stride >= video_frame.Width * 3); - - res.image = new OpenCvSharp.Mat( - video_frame.Height, - video_frame.Width, - OpenCvSharp.MatType.CV_8UC3, - video_frame.Data, - video_frame.Stride).Clone(); - - res.image_timestamp_microsec = (ulong) video_frame.Timestamp * 1000; - } - } - - return res; - } - -}; - diff --git a/examples/csharp/video_recognition_demo_realsense/src/Worker.cs b/examples/csharp/video_recognition_demo_realsense/src/Worker.cs deleted file mode 100644 index b82d182..0000000 --- a/examples/csharp/video_recognition_demo_realsense/src/Worker.cs +++ /dev/null @@ -1,891 +0,0 @@ -/** - \file csharp_demo/video_recognition_demo/src/Worker.cs -*/ - - -using System; -using System.Threading; -using System.IO; -using System.Collections.Generic; -using System.Runtime.InteropServices; -using VDT.FaceRecognition.SDK; - - -// data for each person that currently in process -public class FaceData -{ - public RawSample sample; - public bool lost; - public bool weak; - public int frame_id; - public TimeSpan lost_time; - public int match_database_index; - public bool age_gender_set; - public AgeGenderEstimator.AgeGender age_gender; - public bool emotion_set; - public List emotion_confidence; - public ActiveLiveness.ActiveLivenessStatus active_liveness_status; - - public float draw_multilier; - public FaceData() - { - lost = true; - match_database_index = -1; - draw_multilier = 1; - age_gender_set = false; - emotion_set = false; - } -}; - -class Worker: IDisposable -{ - public Mutex _draw_image_mutex = new Mutex(); - public OpenCvSharp.Mat _draw_image; - - public VideoWorker _video_worker; - public ImageAndDepthSource _source; - public int _stream_id; - - public Database _database; - - public int _tracking_callback_id; - public int _tracking_lost_callback_id; - public int _match_found_callback_id; - - - volatile bool _shutdown; - public Thread _camera_thread; - public Thread _drawing_thread; - - public double _frame_min_delay_ms; - - public Queue > _frames = new Queue >(); - - public ImageAndDepth _last_depth_frame = new ImageAndDepth(); - public Mutex _frames_mutex = new Mutex(); - - 
public DrawingData _drawing_data = new DrawingData(); - public Mutex _drawing_data_mutex = new Mutex(); - - // constants for drawing - public const int thumbnail_size = 150; - public const int draw_border = 5; - public const int max_count_in_stripe = 6; - - - public Worker( - Database database, - VideoWorker video_worker, - ImageAndDepthSource source, - int stream_id, - Mutex draw_image_mutex, - OpenCvSharp.Mat draw_image, - float frame_fps_limit) - { - _draw_image_mutex = draw_image_mutex; - _draw_image = draw_image; - _video_worker = video_worker; - _source = source; - _frame_min_delay_ms = 1000f / frame_fps_limit; - _stream_id = stream_id; - _database = database; - - // check arguments - MAssert.Check(video_worker != null); - MAssert.Check(source != null); - MAssert.Check(stream_id >= 0 && stream_id < video_worker.getStreamsCount()); - - // set callbacks arguments - _tracking_callback_id = - video_worker.addTrackingCallbackU( - TrackingCallback, - this); - - _tracking_lost_callback_id = - video_worker.addTrackingLostCallbackU( - TrackingLostCallback, - this); - - _match_found_callback_id = - video_worker.addMatchFoundCallbackU( - MatchFoundCallback, - this); - - // start threads - _shutdown = false; - - _drawing_thread = new Thread(DrawingThreadFunc); - _camera_thread = new Thread(CameraThreadFunc); - _drawing_thread.Start(); - _camera_thread.Start(); - - } - - public void Dispose() - { - _video_worker.removeTrackingCallback(_tracking_callback_id); - _video_worker.removeTrackingLostCallback(_tracking_lost_callback_id); - _video_worker.removeMatchFoundCallback(_match_found_callback_id); - - // stop threads - _shutdown = true; - - _camera_thread.Join(); - _drawing_thread.Join(); - - GC.SuppressFinalize(this); - } - - ~Worker() - { - Dispose(); - } - - static void puttext(OpenCvSharp.Mat image, string text, OpenCvSharp.Point2f position) - { - // twice - for better reading - // since we are drawing on the frame from webcam - - // white background - OpenCvSharp.Cv2.PutText( - image, - text, - position, - OpenCvSharp.HersheyFonts.HersheyDuplex, - 0.7, - OpenCvSharp.Scalar.All(255), - 5, - OpenCvSharp.LineTypes.AntiAlias); - - // black text - OpenCvSharp.Cv2.PutText( - image, - text, - position, - OpenCvSharp.HersheyFonts.HersheyDuplex, - 0.7, - OpenCvSharp.Scalar.All(0), - 1, - OpenCvSharp.LineTypes.AntiAlias); - } - - // data for drawing - public class DrawingData - { - public bool updated; - - public OpenCvSharp.Mat frame; - public OpenCvSharp.Mat depth; - public int frame_id; - - // map - public Dictionary faces; - public DrawingData() - { - updated = false; - faces = new Dictionary(); - } - }; - - // tracking callback function - // userdata is supposed to be pointer to this Worker - private static - void TrackingCallback( - VideoWorker.TrackingCallbackData data, - Object userdata) - { - // check arguments - MAssert.Check(data != null); - MAssert.Check(userdata != null); - - int stream_id = (int)data.stream_id; - int frame_id = (int)data.frame_id; - RawSample[] samples = data.samples; - bool[] samples_weak = data.samples_weak; - float[] samples_quality = data.samples_quality; - - bool[] AG_set = data.samples_track_age_gender_set; - AgeGenderEstimator.AgeGender[] AG = data.samples_track_age_gender; - bool[] emotion_set = data.samples_track_emotions_set; - List [] emotion = data.samples_track_emotions; - - MAssert.Check(samples.Length == samples_weak.Length); - MAssert.Check(samples.Length == samples_quality.Length); - - // userdata is supposed to be pointer to this Worker - // so cast the 
pointer - Worker worker = (Worker)(userdata); - - // we care only about the worker._stream_id source - // so just ignore any others - if(stream_id != worker._stream_id) - return; - - // get the frame with frame_id id - ImageAndDepth frame = new ImageAndDepth(); - - { - worker._frames_mutex.WaitOne(); - - // searching in worker._frames - for(;;) - { - // there already must be some frames - MAssert.Check(worker._frames.Count > 0); - - // this frame_id can't be from future - MAssert.Check(worker._frames.Peek().Item1 <= frame_id); - - if(worker._frames.Peek().Item1 == frame_id) - { - // we found it - frame = worker._frames.Dequeue().Item2; - break; - } - else - { - // this (i.e. the worker._frames.front() ) frame was skipeed by tracking - Console.WriteLine("skiped {0}:{1}", stream_id, worker._frames.Dequeue().Item1); - } - } - worker._frames_mutex.ReleaseMutex(); - } - - // update the data - { - worker._drawing_data_mutex.WaitOne(); - - // frame - - worker._drawing_data.frame = frame.image; - worker._drawing_data.depth = frame.depth; - worker._drawing_data.frame_id = frame_id; - worker._drawing_data.updated = true; - - // and samples - for(int i = 0; i < samples.Length; ++i) - { - if(!worker._drawing_data.faces.ContainsKey(samples[i].getID())) - worker._drawing_data.faces[samples[i].getID()] = new FaceData(); - FaceData face = worker._drawing_data.faces[samples[i].getID()]; - face.frame_id = samples[i].getFrameID(); // it can differ from the frame_id - face.lost = false; - face.weak = samples_weak[i]; - face.sample = samples[i]; - if (AG_set[i]) - { - face.age_gender_set = true; - face.age_gender = AG[i]; - } - if (emotion_set[i]) - { - face.emotion_set = true; - face.emotion_confidence = emotion[i]; - } - face.active_liveness_status = data.samples_active_liveness_status[i]; - } - worker._drawing_data_mutex.ReleaseMutex(); - } - } - - - // tracking lost callback function - // userdata is supposed to be pointer to this Worker - private static - void TrackingLostCallback( - VideoWorker.TrackingLostCallbackData data, - Object userdata) - { - MAssert.Check(data != null); - MAssert.Check(userdata != null); - - int stream_id = (int)data.stream_id; - int track_id = (int)data.track_id; - int first_frame_id = (int)data.first_frame_id; - int last_frame_id = (int)data.last_frame_id; - float best_quality = (float)data.best_quality; - int best_quality_frame_id = (int)data.best_quality_frame_id; - RawSample best_quality_sample = data.best_quality_sample; - Template best_quality_templ = data.best_quality_templ; - - // userdata is supposed to be pointer to this Worker - // so cast the pointer - Worker worker = (Worker)(userdata); - - // we care only about the worker._stream_id source - // so just ignore any others - if(stream_id != worker._stream_id) - return; - - // here we just remember the time when the face was lost - // but the real purpose of this callback if to store faces - // for futher matching or processing - - { - worker._drawing_data_mutex.WaitOne(); - - FaceData face = worker._drawing_data.faces[track_id]; - - MAssert.Check(!face.lost); - - face.lost = true; - face.lost_time = new TimeSpan(DateTime.Now.Ticks); - if (best_quality_sample != null) - { - face.sample = best_quality_sample; - } - - worker._drawing_data_mutex.ReleaseMutex(); - } - } - - - // match found callback function - // userdata is supposed to be pointer to this Worker - private static - void MatchFoundCallback( - VideoWorker.MatchFoundCallbackData data, - Object userdata) - { - MAssert.Check(data != null); - 
MAssert.Check(userdata != null); - - int stream_id = (int)data.stream_id; - int frame_id = (int)data.frame_id; - RawSample sample = data.sample; - float quality = (float)data.quality; - Template templ = data.templ; - VideoWorker.SearchResult[] search_results = data.search_results; - - // userdata is supposed to be pointer to this Worker - // so cast the pointer - Worker worker = (Worker)(userdata); - - // we care only about the worker._stream_id source - // so just ignore any others - if(stream_id != worker._stream_id) - return; - - MAssert.Check(sample != null); - MAssert.Check(templ != null); - MAssert.Check(search_results.Length > 0); - - // just print distances in the console - Console.WriteLine("stream {0} match track {1} : ", stream_id, sample.getID()); - - for(int i = 0; i < search_results.Length; ++i) - { - ulong element_id = search_results[i].element_id; - - if(element_id == (ulong)VideoWorker.MATCH_NOT_FOUND_ID) - { - MAssert.Check(i == 0); - Console.WriteLine(" {0}: MATCH NOT FOUND", i); - } - else - { - MAssert.Check(element_id < (UInt64)worker._database.names.Count); - Console.WriteLine(" {0}: with '{1}' distance: {2}", - i, - worker._database.names[(int)element_id], - search_results[i].match_result.distance); - } - } - Console.WriteLine(""); - - ulong match_element_id = search_results[0].element_id; - - if(match_element_id != (ulong)VideoWorker.MATCH_NOT_FOUND_ID) - { - MAssert.Check((int)match_element_id < worker._database.thumbnails.Count); - - // set the match info in the worker._drawing_data.faces - worker._drawing_data_mutex.WaitOne(); - - FaceData face = worker._drawing_data.faces[sample.getID()]; - - MAssert.Check(!face.lost); - - face.match_database_index = (int)match_element_id; - - worker._drawing_data_mutex.ReleaseMutex(); - } - } - - - // here we grab frames from the sources - // and put them in a VideoWorker - void CameraThreadFunc() - { - try - { - TimeSpan prev_capture_time = new TimeSpan(DateTime.Now.Ticks); - - for(;;) - { - // check for stop - if(_shutdown) - break; - - // sleep some time to get stable frequency - if (_frame_min_delay_ms > 0) - { - TimeSpan now = new TimeSpan(DateTime.Now.Ticks); - - // 40ms - for 24 fps - int sleep_time = - (int)(_frame_min_delay_ms - (now - prev_capture_time).TotalMilliseconds); - - if(sleep_time > 0) - Thread.Sleep(sleep_time); - - prev_capture_time = new TimeSpan(DateTime.Now.Ticks); - } - - - ImageAndDepth frame = _source.Get(); - - if (!frame.depth.Empty()) - { - _video_worker.addDepthFrame(frame.make_dmr(), _stream_id, (long)frame.depth_timestamp_microsec); - - _last_depth_frame = frame; - } - // put a frame in the VideoWorker - if (!frame.image.Empty()) - { - // get the frame - byte[] data = new byte[frame.image.Total() * frame.image.Type().Channels]; - Marshal.Copy(frame.image.DataStart, data, 0, (int)data.Length); - RawImage raw_image = new RawImage(frame.image.Width, frame.image.Height, RawImage.Format.FORMAT_BGR, data); - - _frames_mutex.WaitOne(); - - int frame_id = _video_worker.addVideoFrame(raw_image, _stream_id, (long)frame.image_timestamp_microsec); - - if (frame.depth.Empty()) - frame.depth = _last_depth_frame.depth; - - // and store it here for further drawing - _frames.Enqueue(new Tuple(frame_id, frame)); - _frames_mutex.ReleaseMutex(); - } - } - } - catch(Exception e) - { - Console.WriteLine("\n WCameraThreadFunc exception: '{0}'\n", e.ToString()); - } - } - - // draw results - private static - OpenCvSharp.Mat Draw( - DrawingData data, - Database database) - { - // drawing params of stripe with current 
faces - int stripe_width = thumbnail_size * 2 + draw_border * 2; - - int stripe_height = - (thumbnail_size + draw_border) * max_count_in_stripe - draw_border; - // image for draw the frame and the stripe - OpenCvSharp.Mat result = new OpenCvSharp.Mat( - Math.Max(data.frame.Rows + data.depth.Rows, stripe_height), - Math.Max(data.frame.Cols, data.depth.Cols) + stripe_width, - OpenCvSharp.MatType.CV_8UC3, - OpenCvSharp.Scalar.All(0)); - - // copy the frame - int frame_y_offset = (result.Rows - data.frame.Rows - data.depth.Rows) / 2; - int depth_y_offset = frame_y_offset + data.frame.Rows; - - data.frame.CopyTo( - result[ - new OpenCvSharp.Rect( - 0, - frame_y_offset, - data.frame.Cols, - data.frame.Rows)]); - - for(int i = 0; i < data.depth.Rows; ++i) - { - unsafe - { - UInt16* src_ptr = (UInt16*) data.depth.Ptr(i); - byte* dst_ptr = (byte*) result.Ptr(depth_y_offset + i); - - for(int j = 0; j < data.depth.Cols; ++j, ++src_ptr, dst_ptr += 3) - { - UInt16 depth = *src_ptr; - byte* bgr = dst_ptr; - - bgr[0] = bgr[1] = bgr[2] = 0; - - if(depth == 0) - continue; - - if(depth < 256) - bgr[2] = (byte) ( 256 - depth ); - - if(depth < 256 * 2) - bgr[1] = (byte) ( depth / 2 ); - else - bgr[1] = (byte) ( 255 ); - - if(depth < 256 * 4) - bgr[0] = (byte) ( depth / 4 ); - else - bgr[0] = (byte) ( 255 ); - } - } - } - - // clone faces data for random access - List> faces = new List>(); - foreach(KeyValuePair pair in data.faces) - { - faces.Add(new Tuple(pair.Key, pair.Value)); - } - - - - // make order with recognized first - // pair - List> order = new List>(); - - for(int i = 0; i < faces.Count; ++i) - { - order.Add(new Tuple(faces[i].Item2.match_database_index, i)); - } - - order.Sort(); - - // draw alive faces - for(int order_i = 0; order_i < order.Count; ++order_i) - { - FaceData face = faces[order[order_i].Item2].Item2; - - // draw circles of faces appeared on this frame - if(face.frame_id == data.frame_id && !face.lost) - { - // get points - List points = face.sample.getLandmarks(); - List iris_points = face.sample.getIrisLandmarks(); - - // compute center - OpenCvSharp.Point2f center = new OpenCvSharp.Point2f(0, 0); - - for(int j = 0; j < points.Count; ++j) - { - center.X += points[j].x; - center.Y += points[j].y; - } - center *= 1.0 / points.Count; - - - // compute radius - double radius = 0; - - for(int j = 0; j < points.Count; ++j) - radius += OpenCvSharp.Point2f.Distance(new OpenCvSharp.Point2f(points[j].x, points[j].y), center); - - radius *= 1.5 / points.Count; - - radius *= 2; - - RawSample.Rectangle rectangle = face.sample.getRectangle(); - - // set a point to place information for this face - OpenCvSharp.Point2f text_point = new OpenCvSharp.Point2f( - rectangle.x + rectangle.width + 3, - rectangle.y + 10); - const float text_line_height = 22; - - // choose color - OpenCvSharp.Scalar color = - face.match_database_index < 0 ? - new OpenCvSharp.Scalar(0, 0, 255) : // red color for unrecognized - new OpenCvSharp.Scalar(0, 255, 0); // green color for recognizerd - - for (int k = 0; k < (data.depth.Empty() ? 1 : 2); ++k) - { - int y_offset = (k == 0 ? 
frame_y_offset : depth_y_offset); - - // dashed circle for weak face samples - if(face.weak) - { - // draw dashed cirle for weak samples - int n = 8; - for(int i = 0; i < n; ++i) - OpenCvSharp.Cv2.Ellipse( - result, - (OpenCvSharp.Point)(center + new OpenCvSharp.Point2f(0f, y_offset)), - new OpenCvSharp.Size(radius, radius), - (face.frame_id * 2) % 360, - (i * 2 + 0) * 180 / n, - (i * 2 + 1) * 180 / n, - color, - 3, - OpenCvSharp.LineTypes.AntiAlias); - } - else - { - OpenCvSharp.Cv2.Circle( - result, - (OpenCvSharp.Point)(center + new OpenCvSharp.Point2f(0f, y_offset)), - (int)radius, - color, - 3, - OpenCvSharp.LineTypes.AntiAlias); - } - } - if (face.age_gender_set) - { - // draw - AgeGenderEstimator.AgeGender age_gender = face.age_gender; - - string age_text = "age: "; - - switch (age_gender.age) - { - case AgeGenderEstimator.Age.AGE_KID: age_text += "kid "; break; - case AgeGenderEstimator.Age.AGE_YOUNG: age_text += "young "; break; - case AgeGenderEstimator.Age.AGE_ADULT: age_text += "adult "; break; - case AgeGenderEstimator.Age.AGE_SENIOR: age_text += "senior "; break; - } - - age_text += string.Format("years: {0:G3}", age_gender.age_years); - - puttext( - result, - age_text, - text_point); - - text_point.Y += text_line_height; - - puttext( - result, - age_gender.gender == AgeGenderEstimator.Gender.GENDER_FEMALE ? "gender: female" : - age_gender.gender == AgeGenderEstimator.Gender.GENDER_MALE ? "gender: male" : "?", - text_point); - text_point.Y += text_line_height; - - text_point.Y += text_line_height / 3; - - // Console.WriteLine(face.age_gender.age_years); - } - if (face.emotion_set){ - // draw - List emotions = face.emotion_confidence; - - for (int j = 0; j < emotions.Count; ++j){ - EmotionsEstimator.Emotion emotion = emotions[j].emotion; - float confidence = emotions[j].confidence; - - OpenCvSharp.Cv2.Rectangle( - result, - new OpenCvSharp.Rect( - (int)text_point.X, - (int)text_point.Y - (int)text_line_height / 2, - (int)(100 * confidence), - (int)text_line_height), - emotion == EmotionsEstimator.Emotion.EMOTION_NEUTRAL ? new OpenCvSharp.Scalar(255, 0, 0) : - emotion == EmotionsEstimator.Emotion.EMOTION_HAPPY ? new OpenCvSharp.Scalar(0, 255, 0) : - emotion == EmotionsEstimator.Emotion.EMOTION_ANGRY ? new OpenCvSharp.Scalar(0, 0, 255) : - emotion == EmotionsEstimator.Emotion.EMOTION_SURPRISE ? new OpenCvSharp.Scalar(0, 255, 255) : - new OpenCvSharp.Scalar(0, 0, 0), -1); - - puttext( - result, - emotion == EmotionsEstimator.Emotion.EMOTION_NEUTRAL ? "neutral" : - emotion == EmotionsEstimator.Emotion.EMOTION_HAPPY ? "happy" : - emotion == EmotionsEstimator.Emotion.EMOTION_ANGRY ? "angry" : - emotion == EmotionsEstimator.Emotion.EMOTION_SURPRISE ? "surprise" : - emotion == EmotionsEstimator.Emotion.EMOTION_DISGUSTED ? "disgusted" : - emotion == EmotionsEstimator.Emotion.EMOTION_SAD ? "sad" : - emotion == EmotionsEstimator.Emotion.EMOTION_SCARED ? 
"scared" : "?", - text_point + new OpenCvSharp.Point2f(100, 0)); - - text_point.Y += text_line_height; - - text_point.Y += text_line_height / 3; - } - } - if (face.active_liveness_status.verdict != ActiveLiveness.Liveness.NOT_COMPUTED){ - string active_liveness = ""; - - if (face.active_liveness_status.verdict == ActiveLiveness.Liveness.WAITING_FACE_ALIGN) - active_liveness += face.active_liveness_status.verdict.ToString(); - else { - active_liveness += face.active_liveness_status.check_type.ToString(); - active_liveness += ": "; - active_liveness += face.active_liveness_status.verdict.ToString(); - active_liveness += " " + face.active_liveness_status.progress_level.ToString(); - } - puttext(result, active_liveness, text_point); - - text_point.Y += text_line_height; - - text_point.Y += text_line_height / 3; - - - } - -// // draw iris points -// for(int j = 0; j < iris_points.Count; ++j) -// { -// int ms = 1; -// OpenCvSharp.Scalar icolor = new OpenCvSharp.Scalar(50, 255, 50); -// int oi = j - 20 * Convert.ToInt32(j >= 20); -// Point pt1 = iris_points[j]; -// Point pt2 = iris_points[(oi < 19 ? j : j - 15) + 1]; -// OpenCvSharp.Point2f cv_pt1 = new OpenCvSharp.Point2f(pt1.x, frame_y_offset + pt1.y); -// OpenCvSharp.Point2f cv_pt2 = new OpenCvSharp.Point2f(pt2.x, frame_y_offset + pt2.y); -// -// if(oi < 5) -// { -// icolor = new OpenCvSharp.Scalar(0, 165, 255); -// if(oi == 0) -// { -// double iradius = Math.Sqrt(Math.Pow(pt1.x - pt2.x, 2) + Math.Pow(pt1.y - pt2.y, 2)); -// OpenCvSharp.Cv2.Circle( -// result, -// cv_pt1, -// (int) iradius, -// icolor, -// ms, -// OpenCvSharp.LineTypes.AntiAlias); -// } -// }else -// { -// OpenCvSharp.Cv2.Line( -// result, -// cv_pt1, -// cv_pt2, -// icolor, -// ms, -// OpenCvSharp.LineTypes.AntiAlias); -// } -// -// OpenCvSharp.Cv2.Circle( -// result, -// cv_pt1, -// ms, -// color, -// -1, -// OpenCvSharp.LineTypes.AntiAlias); -// } - } - - // no - draw the stripe - if(order_i < max_count_in_stripe) - { - // place for thumbnail from the frame - OpenCvSharp.Rect sample_rect = new OpenCvSharp.Rect( - data.frame.Cols + draw_border, - (thumbnail_size + draw_border) * order_i, - thumbnail_size, - thumbnail_size); - - // place for thumbnail from the database - OpenCvSharp.Rect match_rect = new OpenCvSharp.Rect( - data.frame.Cols + draw_border * 2 + thumbnail_size, - (thumbnail_size + draw_border) * order_i, - thumbnail_size, - thumbnail_size); - - // make thumbnail from the frame - Database.makeThumbnail(face.sample).CopyTo(result[sample_rect]); - - // fade if image is lost - if(face.draw_multilier < 1) - result[sample_rect] *= face.draw_multilier; - - if(face.match_database_index < 0) - { - // gray color for unrecognized persons - result[match_rect].SetTo(128 * face.draw_multilier); - } - else - { - // thumbnail from the database for recognized persons - database.thumbnails[face.match_database_index].CopyTo(result[match_rect]); - - // fade if image is lost - if(face.draw_multilier < 1) - result[match_rect] *= face.draw_multilier; - } - } - } - - return result; - } - - // here we draw results - void DrawingThreadFunc() - { - for(;;) - { - // check for stop - if(_shutdown) - break; - - // we dont need to redraw too often - Thread.Sleep(20); - - // get the data - DrawingData data = new DrawingData(); - - { - _drawing_data_mutex.WaitOne(); - // remove lost faces after 5 seconds - { - TimeSpan now = new TimeSpan(DateTime.Now.Ticks); - - const float timeout = 5000; // ms - i.e. 
5 seconds - - List removingKeys = new List(); - foreach (KeyValuePair entry in _drawing_data.faces) - { - FaceData face = entry.Value; - if(!face.lost) - continue; - - _drawing_data.updated = true; - - float delay = (float)(now - face.lost_time).TotalMilliseconds; - - if(delay < timeout) - face.draw_multilier = 1 - delay / timeout; - else - removingKeys.Add(entry.Key); - } - - for(int i_key = 0; i_key < removingKeys.Count; i_key++) - _drawing_data.faces.Remove(removingKeys[i_key]); - } - - data = _drawing_data; - - _drawing_data.updated = false; - _drawing_data_mutex.ReleaseMutex(); - } - - // draw - if(data.frame == null) - continue; - OpenCvSharp.Mat drawed = Draw( - data, - _database); - - // and store it (it will be used in imshow in the main) - { - _draw_image_mutex.WaitOne(); - _draw_image = drawed; - _draw_image_mutex.ReleaseMutex(); - } - } - } - -}; diff --git a/examples/csharp/video_recognition_demo_realsense/vs/csharp_video_recognition_demo.csproj b/examples/csharp/video_recognition_demo_realsense/vs/csharp_video_recognition_demo.csproj index b2d409c..cf82b95 100644 --- a/examples/csharp/video_recognition_demo_realsense/vs/csharp_video_recognition_demo.csproj +++ b/examples/csharp/video_recognition_demo_realsense/vs/csharp_video_recognition_demo.csproj @@ -1,81 +1,25 @@ - - - + + - Debug - x64 Exe - v4.6 - {128CE552-C258-4E73-9B70-BA1C92F429BB} - --config_dir ../../../../../../conf/facerec --database_dir ../../../../../../bin/base 0 + net6.0 true - - x64 - true - full - false - bin\Debug\ - DEBUG;TRACE - prompt - 4 - - - x64 - pdbonly - true - bin\Release\ - TRACE - prompt - 4 - + - ..\..\..\..\bin\FacerecCSharpWrapper.dll - True + ..\..\..\..\bin\FacerecCSharpWrapper.dll - ..\..\..\..\bin\Intel.RealSense.dll - True - - - ..\..\..\..\bin\realsense2.dll - True - - - ..\..\..\..\bin\csharp_demo\deps\CommandLine.dll - True - - - ..\..\..\..\bin\csharp_demo\deps\OpenCvSharp.Blob.dll - True + ..\..\..\..\bin\Intel.Realsense.dll - - ..\..\..\..\bin\csharp_demo\deps\OpenCvSharp.dll - True - - - - - - - - - - - - - - + PreserveNewest - - PreserveNewest - - + PreserveNewest @@ -90,18 +34,17 @@ PreserveNewest + + PreserveNewest + + + PreserveNewest + - - - xcopy /y $(SolutionDir)..\..\..\..\bin\realsense2.dll $(SolutionDir)bin\Release -xcopy /y $(SolutionDir)..\..\..\..\bin\realsense2.dll $(SolutionDir)bin\Debug - - - - \ No newline at end of file + + + + + + + diff --git a/examples/csharp/video_recognition_demo_realsense/vs/csharp_video_recognition_demo.sln b/examples/csharp/video_recognition_demo_realsense/vs/csharp_video_recognition_demo.sln index 7f1f0b0..c77358d 100644 --- a/examples/csharp/video_recognition_demo_realsense/vs/csharp_video_recognition_demo.sln +++ b/examples/csharp/video_recognition_demo_realsense/vs/csharp_video_recognition_demo.sln @@ -1,22 +1,22 @@ - -Microsoft Visual Studio Solution File, Format Version 12.00 -# Visual Studio 14 -VisualStudioVersion = 14.0.25420.1 -MinimumVisualStudioVersion = 10.0.40219.1 -Project("{C304A816-92B3-4933-8410-62CB974DBBE4}") = "csharp_video_recognition_demo", "csharp_video_recognition_demo.csproj", "{128CE552-C258-4E73-9B70-BA1C92F429BB}" -EndProject -Global - GlobalSection(SolutionConfigurationPlatforms) = preSolution - Debug|x64 = Debug|x64 - Release|x64 = Release|x64 - EndGlobalSection - GlobalSection(ProjectConfigurationPlatforms) = postSolution - {128CE552-C258-4E73-9B70-BA1C92F429BB}.Debug|x64.ActiveCfg = Debug|x64 - {128CE552-C258-4E73-9B70-BA1C92F429BB}.Debug|x64.Build.0 = Debug|x64 - 
{128CE552-C258-4E73-9B70-BA1C92F429BB}.Release|x64.ActiveCfg = Release|x64 - {128CE552-C258-4E73-9B70-BA1C92F429BB}.Release|x64.Build.0 = Release|x64 - EndGlobalSection - GlobalSection(SolutionProperties) = preSolution - HideSolutionNode = FALSE - EndGlobalSection -EndGlobal + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio 14 +VisualStudioVersion = 14.0.25420.1 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{C304A816-92B3-4933-8410-62CB974DBBE4}") = "csharp_video_recognition_demo", "csharp_video_recognition_demo.csproj", "{128CE552-C258-4E73-9B70-BA1C92F429BB}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|Any CPU = Debug|Any CPU + Release|Any CPU = Release|Any CPU + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {128CE552-C258-4E73-9B70-BA1C92F429BB}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {128CE552-C258-4E73-9B70-BA1C92F429BB}.Debug|Any CPU.Build.0 = Debug|Any CPU + {128CE552-C258-4E73-9B70-BA1C92F429BB}.Release|Any CPU.ActiveCfg = Release|Any CPU + {128CE552-C258-4E73-9B70-BA1C92F429BB}.Release|Any CPU.Build.0 = Release|Any CPU + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection +EndGlobal diff --git a/examples/flutter/demo/ios/Runner/Info.plist b/examples/flutter/demo/ios/Runner/Info.plist index d3b7e3b..61dc22e 100644 --- a/examples/flutter/demo/ios/Runner/Info.plist +++ b/examples/flutter/demo/ios/Runner/Info.plist @@ -43,5 +43,9 @@ NSCameraUsageDescription Capture faces + NSMicrophoneUsageDescription + The application does not use this feature + FLTEnableImpeller + diff --git a/examples/flutter/demo/lib/main.dart b/examples/flutter/demo/lib/main.dart index e6840ca..1e60700 100644 --- a/examples/flutter/demo/lib/main.dart +++ b/examples/flutter/demo/lib/main.dart @@ -195,7 +195,7 @@ class _MyAppState extends State { // debugShowCheckedModeBanner: false, initialRoute: '/', theme: ThemeData( - accentColor: Colors.blue, // accentColor => hintColor + hintColor: Colors.blue, // hintColor => accentColor brightness: Brightness.dark, visualDensity: VisualDensity.adaptivePlatformDensity, ), diff --git a/examples/flutter/demo/pubspec.yaml b/examples/flutter/demo/pubspec.yaml index b556d2e..924923c 100644 --- a/examples/flutter/demo/pubspec.yaml +++ b/examples/flutter/demo/pubspec.yaml @@ -6,7 +6,7 @@ description: Demo demonstrates using of the Face SDK Flutter plugin publish_to: 'none' # Remove this line if you wish to publish to pub.dev environment: - sdk: ">=2.17.0 <=2.18.2" + sdk: ">=2.17.0 <=3.2.3" dependencies: flutter: diff --git a/examples/flutter/processing_block_demo/ios/Runner/Info.plist b/examples/flutter/processing_block_demo/ios/Runner/Info.plist index e7272d0..4e65c52 100644 --- a/examples/flutter/processing_block_demo/ios/Runner/Info.plist +++ b/examples/flutter/processing_block_demo/ios/Runner/Info.plist @@ -49,5 +49,7 @@ NSPhotoLibraryUsageDescription Pick a photo from the gallery + FLTEnableImpeller + diff --git a/examples/flutter/processing_block_demo/pubspec.yaml b/examples/flutter/processing_block_demo/pubspec.yaml index c01290c..80fa1f7 100644 --- a/examples/flutter/processing_block_demo/pubspec.yaml +++ b/examples/flutter/processing_block_demo/pubspec.yaml @@ -6,7 +6,7 @@ description: A new Flutter project. 
publish_to: 'none' # Remove this line if you wish to publish to pub.dev environment: - sdk: ">=2.17.0 <=2.18.2" + sdk: ">=2.17.0 <=3.2.3" dependencies: flutter: @@ -47,7 +47,7 @@ flutter: - assets/license/3divi_face_sdk.lic - assets/share/faceanalysis/ - assets/share/faceattributes/ - - assets/share/processing_block/face_detector/uld/ + - assets/share/processing_block/face_detector/uld/1/ - assets/share/processing_block/quality_face_estimation/estimation/ - assets/share/processing_block/face_fitter/tddfa_faster/ - assets/share/quality_iso/ diff --git a/examples/python/processing_blocks/liveness_estimator_v4.py b/examples/python/processing_blocks/liveness_estimator_2d.py similarity index 95% rename from examples/python/processing_blocks/liveness_estimator_v4.py rename to examples/python/processing_blocks/liveness_estimator_2d.py index 8eddb15..10aaa14 100644 --- a/examples/python/processing_blocks/liveness_estimator_v4.py +++ b/examples/python/processing_blocks/liveness_estimator_2d.py @@ -31,10 +31,8 @@ def liveness_estimator(input_image, sdk_path): sdk_conf_dir = os.path.join(sdk_path, 'conf', 'facerec') if platform == "win32": # for Windows sdk_dll_path = os.path.join(sdk_path, 'bin', 'facerec.dll') - sdk_onnx_path = os.path.join(sdk_path, 'bin') else: # for Linux sdk_dll_path = os.path.join(sdk_path, 'lib', 'libfacerec.so') - sdk_onnx_path = os.path.join(sdk_path, 'lib') service = FacerecService.create_service( # create FacerecService sdk_dll_path, @@ -43,7 +41,7 @@ def liveness_estimator(input_image, sdk_path): liveness_config = { # liveness block configuration parameters "unit_type": "LIVENESS_ESTIMATOR", # required parameter - "modification": "v4" + "modification": "2d", } liveness_block = service.create_processing_block(liveness_config) # create liveness estimation processing block capturer = service.create_capturer("common_capturer_refa_fda_a.xml") @@ -54,7 +52,7 @@ def liveness_estimator(input_image, sdk_path): "blob": image.tobytes(), "dtype": "uint8_t", "format": "NDARRAY", - "shape": [dim for dim in img.shape] + "shape": [dim for dim in image.shape] } raw_image = RawImage(image.shape[1], image.shape[0], Format.FORMAT_RGB, image.tobytes()) ioData = service.create_context( diff --git a/examples/python/processing_blocks/quality_estimator.py b/examples/python/processing_blocks/quality_estimator.py index 2d68823..b5b79bf 100644 --- a/examples/python/processing_blocks/quality_estimator.py +++ b/examples/python/processing_blocks/quality_estimator.py @@ -75,10 +75,10 @@ def quality_estimator(input_image, sdk_path, modification): samples = capturer.capture(img_bytes) # capture faces in an image image_ctx = { # put an image in container - "blob": img.tobytes(), + "blob": image.tobytes(), "dtype": "uint8_t", "format": "NDARRAY", - "shape": [dim for dim in img.shape] + "shape": [dim for dim in image.shape] } ioData = service.create_context({"image": image_ctx}) diff --git a/examples/python/processing_blocks/recognition_demo.py b/examples/python/processing_blocks/recognition_demo.py index b288be9..a0f5fb6 100644 --- a/examples/python/processing_blocks/recognition_demo.py +++ b/examples/python/processing_blocks/recognition_demo.py @@ -14,9 +14,11 @@ def help_message(): " [--input_image2 ]" \ " [--sdk_path ]" \ " [--window ]" \ - " [--output ]" + " [--output ]" \ + " [--modification <30, 50, 100, 1000>]" print(message) + def get_crop(obj, image): img_w = image.shape[1] img_h = image.shape[0] @@ -32,19 +34,22 @@ def get_crop(obj, image): return crop_image + def draw_bbox(obj, img, output, 
color=(0, 255, 0)): rect = obj["bbox"] if output == "yes": - print(f"BBox coordinates: {int(rect[0].get_value() * img.shape[1])}, {int(rect[1].get_value() * img.shape[0])}, {(int(rect[2].get_value() * img.shape[1]))}, {int(rect[3].get_value() * img.shape[0])}") + print( + f"BBox coordinates: {int(rect[0].get_value() * img.shape[1])}, {int(rect[1].get_value() * img.shape[0])}, {(int(rect[2].get_value() * img.shape[1]))}, {int(rect[3].get_value() * img.shape[0])}") return cv2.rectangle(img, (int(rect[0].get_value() * img.shape[1]), int(rect[1].get_value() * img.shape[0])), (int(rect[2].get_value() * img.shape[1]), int(rect[3].get_value() * img.shape[0])), color, 2) + def get_faces(service, face_detector, face_fitter, img): input_image: np.ndarray = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) height, width, _ = input_image.shape imgCtx = {"blob": input_image.tobytes(), "dtype": "uint8_t", "format": "NDARRAY", - "shape": [dim for dim in input_image.shape]} + "shape": [dim for dim in input_image.shape]} ioData = service.create_context({"image": imgCtx}) face_detector(ioData) @@ -52,17 +57,18 @@ def get_faces(service, face_detector, face_fitter, img): return ioData -def recognition_demo(sdk_path, img_path_1, img_path_2, window, output, mode): + +def recognition_demo(sdk_path, img_path_1, img_path_2, window, output, mode, modification): service = Service.create_service(sdk_path) if not os.path.exists(img_path_1): raise Exception(f"not exist file {img_path_1}") if not os.path.exists(img_path_2): raise Exception(f"not exist file {img_path_2}") - face_detector = service.create_processing_block({"unit_type": "FACE_DETECTOR", "confidence_threshold": 0.3}) - face_recognizer = service.create_processing_block({"unit_type": "FACE_RECOGNIZER"}) - face_fitter = service.create_processing_block({"unit_type": "FACE_FITTER", "modification": "tddfa"}) - matcher = service.create_processing_block({"unit_type": "MATCHER_MODULE"}) + face_detector = service.create_processing_block( + {"unit_type": "FACE_DETECTOR", "modification": "uld", "confidence_threshold": 0.6, "precision_level": 3}) + face_template_extractor = service.create_processing_block({"unit_type": "FACE_TEMPLATE_EXTRACTOR", "modification": modification}) + face_fitter = service.create_processing_block({"unit_type": "FACE_FITTER", "modification": "tddfa_faster"}) img1: np.ndarray = cv2.imread(img_path_1, cv2.IMREAD_COLOR) img2: np.ndarray = cv2.imread(img_path_2, cv2.IMREAD_COLOR) @@ -81,19 +87,24 @@ def recognition_demo(sdk_path, img_path_1, img_path_2, window, output, mode): if len(ioData2["objects"]) != 1: raise Exception(f"many faces on {img_path_2} image") - face_recognizer(ioData1) - face_recognizer(ioData2) + face_template_extractor(ioData1) + face_template_extractor(ioData2) if mode == "verify": - matcherData = service.create_context({"verification": {"objects": []}}) - matcherData["verification"]["objects"].push_back(ioData1["objects"][0]) - matcherData["verification"]["objects"].push_back(ioData2["objects"][0]) + verification_module = service.create_processing_block( + {"unit_type": "VERIFICATION_MODULE", "modification": modification}) - matcher(matcherData) - verdict = matcherData["verification"]["result"]["verdict"].get_value() - distance = matcherData["verification"]["result"]["distance"].get_value() + verification_data = service.create_context({ + "template1": ioData1["objects"][0]["template"], + "template2": ioData2["objects"][0]["template"] + }) - color = (0, 255, 0) if verdict else (0, 0, 255) + verification_module(verification_data) + + 
distance = verification_data["result"]["distance"].get_value() + score = verification_data["result"]["score"].get_value() + + color = (0, 255, 0) if score >= 0.85 else (0, 0, 255) img1 = draw_bbox(ioData1["objects"][0], img1, output, color) img2 = draw_bbox(ioData2["objects"][0], img2, output, color) @@ -104,33 +115,44 @@ def recognition_demo(sdk_path, img_path_1, img_path_2, window, output, mode): crop2 = cv2.resize(crop2, (320, 480)) print(f"distance = {distance}") - print(f"verdict = {verdict}") + print(f"score = {score}") + print(f"verdict = {score >= 0.85}") if window == "yes": cv2.imshow("result", np.concatenate((crop1, crop2), axis=1)) cv2.waitKey(0) cv2.destroyAllWindows() else: - matcherData = service.create_context({"search": {"template_index": [], "queries": []}}) - matcherData["search"]["knn"] = 1 - matcherData["search"]["type_index"] = "array" + matcher_module = service.create_processing_block({"unit_type": "MATCHER_MODULE", "modification": modification}) + template_index = service.create_processing_block({"unit_type": "TEMPLATE_INDEX", "modification": modification}) + templates = service.create_context([]) - for obj in ioData1["objects"]: - matcherData["search"]["template_index"].push_back(obj) - matcherData["search"]["queries"].push_back(ioData2["objects"][0]) - matcher(matcherData) + for object in ioData1["objects"]: + templates.push_back(object["template"]) - distance = matcherData["search"]["results"][0]["distance"].get_value() - find_index = matcherData["search"]["results"][0]["index"].get_value() - verdict = matcherData["search"]["results"][0]["verdict"].get_value() + ioData1["templates"] = templates + + template_index(ioData1) + + matcher_data = service.create_context( + {"knn": 1, "template_index": ioData1["template_index"], "queries": []}) + + matcher_data["queries"].push_back(ioData2["objects"][0]) + + matcher_module(matcher_data) + + find_index = matcher_data["results"][0]["index"].get_value() + distance = matcher_data["results"][0]["distance"].get_value() + score = matcher_data["results"][0]["score"].get_value() - print(f"distance = {distance}") print(f"index = {find_index}") - print(f"verdict = {verdict}") + print(f"distance = {distance}") + print(f"score = {score}") + print(f"verdict = {score >= 0.85}") for i in range(len(ioData1["objects"])): obj = ioData1["objects"][i] - color = (0, 255, 0) if i == find_index and verdict else (0, 0, 255) + color = (0, 255, 0) if i == find_index and score >= 0.85 else (0, 0, 255) draw_bbox(obj, img1, output, color) if window == "yes": @@ -138,6 +160,7 @@ def recognition_demo(sdk_path, img_path_1, img_path_2, window, output, mode): cv2.waitKey(0) cv2.destroyAllWindows() + def parse_args(): parser = argparse.ArgumentParser(description='Recognition Demo') parser.add_argument('--mode', default="verify", type=str) @@ -146,6 +169,7 @@ def parse_args(): parser.add_argument('--sdk_path', default="../../../", type=str) parser.add_argument('--window', default="yes", type=str) parser.add_argument('--output', default="no", type=str) + parser.add_argument("--modification", default="1000", type=str) return parser.parse_args() @@ -158,7 +182,8 @@ def parse_args(): try: if args.mode != "verify" and args.mode != "identify": - raise Exception("there is no modifier " + args.mode) - recognition_demo(args.sdk_path, args.input_image, args.input_image2, args.window, args.output, args.mode) + raise Exception("there is no modifier " + args.mode) + recognition_demo(args.sdk_path, args.input_image, args.input_image2, args.window, args.output, args.mode, 
+ args.modification) except Exception as e: - print(f"Error: {e}.") \ No newline at end of file + print(f"Error: {e}.") diff --git a/flutter/face_sdk_3divi/lib/api/context.dart b/flutter/face_sdk_3divi/lib/api/context.dart index 3964586..7c94d06 100644 --- a/flutter/face_sdk_3divi/lib/api/context.dart +++ b/flutter/face_sdk_3divi/lib/api/context.dart @@ -4,7 +4,7 @@ part of face_sdk_3divi; class Context extends _ComplexObject { Context(DynamicLibrary dll_handle, Pointer impl) : super(dll_handle, impl) { - if (impl is Pointer) { + if (_impl.address == Pointer.fromAddress(0).address) { var constructor = dll_handle.lookupFunction< _Context_Construct_c, _Context_Constructor_dart>(_context_namespace + 'create'); @@ -71,7 +71,9 @@ class Context extends _ComplexObject { } void placeValues(data) { - if (data is Map) { + if (data is Context){ + this._insertContext(data); + }else if (data is Map) { this._insertDict(data); } else if ((data is List) & (data is! Uint8List)) { this._insertList(data); @@ -88,6 +90,21 @@ class Context extends _ComplexObject { } } + void _insertContext(Context data){ + if (data != this){ + var exception = _getException(); + + final constructor = _dll_handle.lookupFunction< + _Context_copy_c, + _Context_copy_dart> + (_context_namespace + 'copy'); + + constructor(data._impl, this._impl, exception); + + tdvCheckException(exception, _dll_handle); + } + } + void _insertDict(Map data) { data.forEach((k, v) { this._getOrInsertByKey(k).placeValues(v); diff --git a/flutter/face_sdk_3divi/lib/api/dll_binds.dart b/flutter/face_sdk_3divi/lib/api/dll_binds.dart index df2874f..d615dfa 100644 --- a/flutter/face_sdk_3divi/lib/api/dll_binds.dart +++ b/flutter/face_sdk_3divi/lib/api/dll_binds.dart @@ -299,6 +299,16 @@ typedef _Context_pushBack_dart = void Function( bool, Pointer>); +typedef _Context_copy_c = Void Function( + Pointer, + Pointer, + Pointer>); + +typedef _Context_copy_dart = void Function( + Pointer, + Pointer, + Pointer>); + typedef _Context_putStr_c = Void Function( Pointer, Pointer, diff --git a/flutter/face_sdk_3divi/lib/api/face_attributes_estimator.dart b/flutter/face_sdk_3divi/lib/api/face_attributes_estimator.dart new file mode 100644 index 0000000..38f815d --- /dev/null +++ b/flutter/face_sdk_3divi/lib/api/face_attributes_estimator.dart @@ -0,0 +1,52 @@ +part of face_sdk_3divi; + +class EyesOpennessResult { + bool leftEyeVerdict; + bool rightEyeVerdict; + + double leftEyeScore; + double rightEyeScore; + + EyesOpennessResult(this.leftEyeVerdict, this.rightEyeVerdict, this.leftEyeScore, this.rightEyeScore); +} + +class FaceAttributesEstimator extends _ComplexObject { + FaceAttributesEstimator(DynamicLibrary dll_handle, Pointer impl) : + super(dll_handle, impl); + + /// Create a template from detected face [sample] (from Capturer of VideoWorker). 
+ EyesOpennessResult estimateEyesOpenness(final RawSample sample){ + final process = _dll_handle.lookupFunction<_FaceAttributesEstimator_estimateEyesOpenness, _FaceAttributesEstimator_estimateEyesOpenness> + (_c_namespace + 'FaceAttributesEstimator_estimateEyesOpenness'); + + Pointer leftVerdictPtr = malloc.allocate(sizeOf>()); + Pointer rightVerdictPtr = malloc.allocate(sizeOf>()); + Pointer leftScorePtr = malloc.allocate(sizeOf>()); + Pointer rightScorePtr = malloc.allocate(sizeOf>()); + + final exception = _getException(); + process( + _impl, + sample._impl, + leftVerdictPtr, + rightVerdictPtr, + leftScorePtr, + rightScorePtr, + exception); + + checkException(exception, _dll_handle); + + int leftVerdictValue = leftVerdictPtr.value; + int rightVerdictValue = rightVerdictPtr.value; + + double leftScoreValue = leftScorePtr.value; + double rightScoreValue = rightScorePtr.value; + + malloc.free(leftVerdictPtr); + malloc.free(rightVerdictPtr); + malloc.free(leftScorePtr); + malloc.free(rightScorePtr); + + return EyesOpennessResult(leftVerdictValue == 1, rightVerdictValue == 1, leftScoreValue, rightScoreValue); + } +} \ No newline at end of file diff --git a/flutter/face_sdk_3divi/lib/api/processing_block.dart b/flutter/face_sdk_3divi/lib/api/processing_block.dart index 3bdffa6..2404f1f 100644 --- a/flutter/face_sdk_3divi/lib/api/processing_block.dart +++ b/flutter/face_sdk_3divi/lib/api/processing_block.dart @@ -10,7 +10,7 @@ class ProcessingBlock extends _ComplexObject { final destructor = _dll_handle.lookupFunction< _ProcessingBlock_destroyProcessingBlock_c, _ProcessingBlock_destroyProcessingBlock_dart> - ('TDVProcessingBlock_destroy'); + ('TDVProcessingBlock_destroyBlock'); destructor(_impl, exception); @@ -50,4 +50,4 @@ class ProcessingBlock extends _ComplexObject { return ctx; } -} \ No newline at end of file +} diff --git a/flutter/face_sdk_3divi/lib/api/recognizer.dart b/flutter/face_sdk_3divi/lib/api/recognizer.dart index c54b981..4ba157d 100644 --- a/flutter/face_sdk_3divi/lib/api/recognizer.dart +++ b/flutter/face_sdk_3divi/lib/api/recognizer.dart @@ -95,11 +95,12 @@ class Recognizer extends _ComplexObject { get(_impl, template1._impl, template2._impl, distance, far, frr, score, exception); checkException(exception, _dll_handle); + MatchResult result = MatchResult(distance.value, far.value, frr.value, score.value); malloc.free(distance); malloc.free(score); malloc.free(frr); malloc.free(far); - return MatchResult(distance.value, far.value, frr.value, score.value); + return result; } ///Load the template.
///The format is platform-independent.
@@ -133,7 +134,7 @@ class Recognizer extends _ComplexObject { malloc.free(_templatePointerStructure[0]); malloc.free(_templatePointerStructure[1]); malloc.free(_templatePointerStructure); - malloc.free(byteCount); + // byteCount not free return Template(_dll_handle, templPointer); } diff --git a/include/api/Service.h b/include/api/Service.h index 551ee98..19c7026 100644 --- a/include/api/Service.h +++ b/include/api/Service.h @@ -13,7 +13,10 @@ class Service: public pbio::FacerecService { public: +#ifndef WITHOUT_PROCESSING_BLOCK ProcessingBlock createProcessingBlock(const Context& ctx); +#endif + static Service createService(std::string path_to_dir); private: typedef LightSmartPtr::tPtr DHPtr; @@ -25,12 +28,14 @@ class Service: public pbio::FacerecService std::string path_to_dir; }; +#ifndef WITHOUT_PROCESSING_BLOCK inline ProcessingBlock Service::createProcessingBlock(const Context& ctx) { Context new_ctx = ctx; new_ctx["@sdk_path"] = path_to_dir; return FacerecService::createProcessingBlock(new_ctx); } +#endif inline Service Service::createService(std::string path_to_dir) { diff --git a/include/facerec/libfacerec.h b/include/facerec/libfacerec.h index c085123..0acdfaa 100644 --- a/include/facerec/libfacerec.h +++ b/include/facerec/libfacerec.h @@ -11,9 +11,9 @@ #ifndef FACEREC_LIBFACEREC_H_ #define FACEREC_LIBFACEREC_H_ -#define LIBFACEREC_VERSION "3.19.01" +#define LIBFACEREC_VERSION "3.20.00" -#define LIBFACEREC_VERSION_HEX 0x031901 +#define LIBFACEREC_VERSION_HEX 0x032000 #include "../pbio/FacerecService.h" diff --git a/include/pbio/Context.h b/include/pbio/Context.h index 3427585..0ba7727 100644 --- a/include/pbio/Context.h +++ b/include/pbio/Context.h @@ -1,6 +1,8 @@ #ifndef CONTEXT_H #define CONTEXT_H +#ifndef WITHOUT_PROCESSING_BLOCK + #if defined(_WIN32) #define NOMINMAX #endif @@ -686,4 +688,5 @@ inline void putImage(Context& ctx, unsigned char* data, size_t height, size_t wi } } +#endif // WITHOUT_PROCESSING_BLOCK #endif // CONTEXT_H diff --git a/include/pbio/FacerecService.h b/include/pbio/FacerecService.h index 1b38cde..7b88e24 100644 --- a/include/pbio/FacerecService.h +++ b/include/pbio/FacerecService.h @@ -47,8 +47,11 @@ #include "StructStorage.h" #include "Config.h" #include "ProcessingUnit.h" + +#ifndef WITHOUT_PROCESSING_BLOCK #include "Context.h" #include "ProcessingBlock.h" +#endif namespace pbio { @@ -1083,9 +1086,11 @@ class FacerecService : ComplexObject const int block_type, const char* serializedConfig) const; +#ifndef WITHOUT_PROCESSING_BLOCK Context createContext() const; ProcessingBlock createProcessingBlock(const Context& config) const; +#endif //! @endcond @@ -2067,6 +2072,7 @@ ProcessingUnit::Ptr FacerecService::createProcessingUnit( return ProcessingUnit::Ptr::make(_dll_handle, block_type, serializedConfig); } +#ifndef WITHOUT_PROCESSING_BLOCK inline Context FacerecService::createContext() const { return Context(_dll_handle); @@ -2076,6 +2082,7 @@ inline ProcessingBlock FacerecService::createProcessingBlock(const Context& conf { return ProcessingBlock(_impl, _dll_handle, config); } +#endif //! 
@endcond diff --git a/include/pbio/ProcessingBlock.h b/include/pbio/ProcessingBlock.h index 4ee2035..88ff3a5 100644 --- a/include/pbio/ProcessingBlock.h +++ b/include/pbio/ProcessingBlock.h @@ -9,6 +9,8 @@ #ifndef PROCESSINGBLOCK_H #define PROCESSINGBLOCK_H +#ifndef WITHOUT_PROCESSING_BLOCK + #include "Context.h" #include "DllHandle.h" #include "ExceptionCheck.h" @@ -81,4 +83,5 @@ class ProcessingBlock }; } +#endif // WITHOUT_PROCESSING_BLOCK #endif // PROCESSINGBLOCK_H diff --git a/include/pbio/RawSample.h b/include/pbio/RawSample.h index f579352..fd86a12 100644 --- a/include/pbio/RawSample.h +++ b/include/pbio/RawSample.h @@ -16,7 +16,9 @@ #include "ExceptionCheck.h" #include "Point.h" #include "RawImage.h" +#ifndef WITHOUT_PROCESSING_BLOCK #include "Context.h" +#endif #include "SmartPtr.h" #include "stl_wraps_impls/WrapOStreamImpl.h" #include "stl_wraps_impls/WrapVectorImpl.h" @@ -859,7 +861,9 @@ class RawSample : public ComplexObject */ RawSample::Ptr downscaleToPreferredSize() const; +#ifndef WITHOUT_PROCESSING_BLOCK Context toContext() const; +#endif //! @cond IGNORED void* getPtr() const; @@ -1345,6 +1349,7 @@ RawSample::Ptr RawSample::downscaleToPreferredSize() const return RawSample::Ptr::make(_dll_handle, raw_sampl_impl); } +#ifndef WITHOUT_PROCESSING_BLOCK inline Context RawSample::toContext() const { @@ -1358,7 +1363,7 @@ Context RawSample::toContext() const return Context(_dll_handle, handle, false); } - +#endif inline diff --git a/python_api/face_sdk_3divi/modules/context.py b/python_api/face_sdk_3divi/modules/context.py index cf1b96f..81b0d99 100644 --- a/python_api/face_sdk_3divi/modules/context.py +++ b/python_api/face_sdk_3divi/modules/context.py @@ -38,7 +38,7 @@ def __getitem__(self, key): @dispatch(str) def __getitem__(self, key): - return self.__getByKey(key) + return self.__getOrInsertByKey(key) def __setitem__(self, key, value): return self.__getOrInsertByKey(key).parser(value) @@ -241,6 +241,15 @@ def parser(self, ctx: bool): def parser(self, ctx: bytes): self.__setDataPtr(ctx) + @dispatch(object) + def parser(self, ctx): + assert isinstance(ctx, Context), "the object does not belong to the base types or Context" + if ctx is not self: + if self.__weak_: + exception = make_exception() + self._dll_handle.copy(ctx._impl, self._impl, exception) + check_exception(exception, self._dll_handle) + def __get_keys(self) -> list: exception = make_exception() diff --git a/python_api/setup.py b/python_api/setup.py index 74a1484..9024a94 100644 --- a/python_api/setup.py +++ b/python_api/setup.py @@ -1,7 +1,7 @@ from setuptools import setup, find_packages from os import path -__version__ = '3.19.1' +__version__ = '3.20.0' __author__ = '3DiVi' __url__ = 'https://face.3divi.com/'
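
Note on the updated verification flow: the changes to examples/python/processing_blocks/recognition_demo.py above replace the old MATCHER_MODULE-based verification with a FACE_TEMPLATE_EXTRACTOR plus VERIFICATION_MODULE pair. The sketch below condenses that flow for reference only; it assumes a `service` object created the same way as in the demo (Service.create_service), two images readable by OpenCV, and it reuses the 0.85 score threshold from the demo rather than a documented recommendation.

import cv2

def verify(service, img_path_1, img_path_2, modification="1000"):
    # Processing blocks mirroring the updated recognition_demo.py
    detector = service.create_processing_block(
        {"unit_type": "FACE_DETECTOR", "modification": "uld",
         "confidence_threshold": 0.6, "precision_level": 3})
    fitter = service.create_processing_block(
        {"unit_type": "FACE_FITTER", "modification": "tddfa_faster"})
    extractor = service.create_processing_block(
        {"unit_type": "FACE_TEMPLATE_EXTRACTOR", "modification": modification})
    verifier = service.create_processing_block(
        {"unit_type": "VERIFICATION_MODULE", "modification": modification})

    def extract_template(path):
        # Load image, convert BGR -> RGB, and wrap it in a Context container
        img = cv2.cvtColor(cv2.imread(path, cv2.IMREAD_COLOR), cv2.COLOR_BGR2RGB)
        data = service.create_context({"image": {
            "blob": img.tobytes(), "dtype": "uint8_t",
            "format": "NDARRAY", "shape": list(img.shape)}})
        detector(data)   # detect faces
        fitter(data)     # fit landmarks
        extractor(data)  # extract a face template per detected object
        return data["objects"][0]["template"]

    verification_data = service.create_context({
        "template1": extract_template(img_path_1),
        "template2": extract_template(img_path_2)})
    verifier(verification_data)

    score = verification_data["result"]["score"].get_value()
    return score, score >= 0.85  # threshold taken from the demo above

As in the demo, the same FACE_TEMPLATE_EXTRACTOR modification (30, 50, 100, or 1000) must be passed to the VERIFICATION_MODULE (and, for identification, to TEMPLATE_INDEX and MATCHER_MODULE) so that templates and matcher agree.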