diff --git a/CHANGELOG b/CHANGELOG
index 5aea672..48619ae 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,6 +1,16 @@
-Changelog (v0.7.6.2)
- - 3D Body Pose Keypoint Tracking
- - Quality improvement in accuracy and stability
- - Property NvAR_Parameter_Config(NVAR_MODE) is now NvAR_Parameter_Config(Mode)
- - Migrated to TensorRT 8.0.1.6
- - Migrated to CUDA 11.3u1
+Changelog (v0.8.1.0)
+--------------------
+ - NEW! Eye Contact feature: an AI algorithm that helps users keep their gaze engaged in video communication. The feature jointly estimates a user's gaze direction and redirects it to a frontal view in video sequences.
+ - NEW! Face Expression Estimation (Beta) feature estimates facial expression coefficients from video or from provided facial landmarks. ExpressionApp is added to demonstrate the new feature.
+ - NEW! The default face model for the Face 3D mesh and tracking feature, face_model2.nvf, now ships with the SDK. The old SFM-based face_model0.nvf is no longer required.
+ - 3D Body Pose Estimation:
+   - NEW! Added support for multi-person tracking. This feature is supported by the Windows SDK only.
+   - FocalLength is now an NvAR_Parameter_Input. Users can now change FocalLength at every NvAR_Run() without having to call NvAR_Load() (see the usage sketch after this changelog).
+   - The reference pose returned by the feature has been updated.
+ - Facial landmark estimation:
+   - NEW! There are now two modes for facial landmark tracking: {0,1} -> {performance, quality}. Choose the mode that suits your application. The default is 1 (quality) for face mesh fitting and expression estimation, and 0 (performance) for the other features.
+   - Head Pose output from the NvAR_Feature_LandmarkDetection feature is now in the OpenGL convention: changed from X-back (towards the camera), Y-right, Z-down to X-right, Y-up, Z-back (towards the camera).
+   - The sample apps now show the head pose in the OpenGL convention. The color coding of the axes is Red - X, Green - Y, Blue - Z.
+ - NvCVImage_Transfer() now sets alpha to 255 or 1.0f when converting RGB -> RGBA. NvCVImage_CompositeRect() now has a premultiplied-alpha mode.
+ - Migrated to TensorRT 8.4.2.2
+ - Migrated to CUDA 11.6u1
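
A minimal usage sketch of the two 3D Body Pose changes above (per-frame FocalLength and people tracking). It assumes the usual nvAR.h entry points NvAR_Create/NvAR_Load/NvAR_Run/NvAR_Destroy, which are not shown in this diff; the image and output bindings a real app needs (see samples/BodyTrack/BodyEngine.cpp further below) are omitted, and all values are illustrative.

    #include "nvAR.h"
    #include "nvAR_defs.h"

    void sketchBodyPose(float focalLength, unsigned numFrames) {
      NvAR_FeatureHandle bodyPose = nullptr;
      NvAR_Create(NvAR_Feature_BodyPoseEstimation, &bodyPose);
      // People tracking is configured before NvAR_Load() (Windows SDK only).
      // Note: BodyEngine.cpp sets TrackPeople via NvAR_SetF32; an unsigned setter is shown here.
      NvAR_SetU32(bodyPose, NvAR_Parameter_Config(TrackPeople), 1);
      NvAR_SetU32(bodyPose, NvAR_Parameter_Config(ShadowTrackingAge), 90);  // frames a lost target is remembered
      NvAR_SetU32(bodyPose, NvAR_Parameter_Config(MaxTargetsTracked), 8);
      NvAR_Load(bodyPose);                                                  // load once
      for (unsigned i = 0; i < numFrames; ++i) {
        // FocalLength is an input in v0.8.1.0, so it can change on every run
        // without another NvAR_Load(); input image / output bindings omitted here.
        NvAR_SetF32(bodyPose, NvAR_Parameter_Input(FocalLength), focalLength);
        NvAR_Run(bodyPose);
      }
      NvAR_Destroy(bodyPose);
    }
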
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 154af54..f93adab 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -11,19 +11,95 @@ set(CMAKE_CONFIGURATION_TYPES "Release")
set(CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/cmake" ${CMAKE_MODULE_PATH})
-set(CMAKE_CXX_STANDARD 14)
-set(CMAKE_CXX_STANDARD_REQUIRED ON)
-set(CMAKE_CXX_EXTENSIONS OFF)
+if(MSVC)
+ set(CMAKE_CXX_STANDARD 14)
+ set(CMAKE_CXX_STANDARD_REQUIRED ON)
+ set(CMAKE_CXX_EXTENSIONS OFF)
-set(SDK_INCLUDES_PATH ${CMAKE_CURRENT_SOURCE_DIR}/nvar/include)
+ set(SDK_INCLUDES_PATH ${CMAKE_CURRENT_SOURCE_DIR}/nvar/include)
-# Add target for nvARPose
-add_library(nvARPose INTERFACE)
-target_include_directories(nvARPose INTERFACE ${SDK_INCLUDES_PATH})
+ # Add target for nvARPose
+ add_library(nvARPose INTERFACE)
+ target_include_directories(nvARPose INTERFACE ${SDK_INCLUDES_PATH})
-# Add target for NVCVImage
-add_library(NVCVImage INTERFACE)
-target_include_directories(NVCVImage INTERFACE ${SDK_INCLUDES_PATH})
+ # Add target for NVCVImage
+ add_library(NVCVImage INTERFACE)
+ target_include_directories(NVCVImage INTERFACE ${SDK_INCLUDES_PATH})
+
+else()
+ set(CMAKE_CXX_STANDARD 14)
+ set(CMAKE_CXX_STANDARD_REQUIRED ON)
+ set(CMAKE_CXX_EXTENSIONS OFF)
+
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pthread")
+
+ # Add target for ARSDK
+ add_library(nvARPose INTERFACE)
+
+ # found in different locations depending on type of package
+ find_path(ARSDK_INCLUDES
+ NAMES nvAR.h
+ PATHS
+ /usr/local/ARSDK/include
+ /usr/include/x86_64-linux-gnu
+ /usr/include
+ ${CMAKE_CURRENT_SOURCE_DIR}/nvar/include
+ REQUIRED
+ )
+
+ target_include_directories(nvARPose INTERFACE ${ARSDK_INCLUDES})
+
+ find_library(ARSDK_LIB
+ NAMES libnvARPose.so
+ PATHS
+ /usr/local/ARSDK/lib
+ /usr/lib/x86_64-linux-gnu
+ /usr/lib64
+ /usr/lib
+ ${CMAKE_CURRENT_SOURCE_DIR}/bin
+ REQUIRED
+ NO_DEFAULT_PATH)
+
+ target_link_libraries(nvARPose INTERFACE "${ARSDK_LIB}")
+
+ message(STATUS "ARSDK_LIB: ${ARSDK_LIB}")
+
+
+ # Add target for NVCVImage
+ add_library(NVCVImage INTERFACE)
+
+ # found in different locations depending on type of package
+ find_path(NVCVImage_INCLUDES
+ NAMES nvCVImage.h
+ PATHS
+ /usr/local/ARSDK/include
+ /usr/include/x86_64-linux-gnu
+ /usr/include
+ ${CMAKE_CURRENT_SOURCE_DIR}/nvar/include
+ REQUIRED
+ )
+
+ target_include_directories(NVCVImage INTERFACE ${NVCVImage_INCLUDES})
+
+
+ find_library(NVCVImage_LIB
+ NAMES libNVCVImage.so
+ PATHS
+ /usr/local/ARSDK/lib
+ /usr/lib/x86_64-linux-gnu
+ /usr/lib64
+ /usr/lib
+ ${CMAKE_CURRENT_SOURCE_DIR}/bin
+ REQUIRED
+ NO_DEFAULT_PATH)
+
+ target_link_libraries(NVCVImage INTERFACE "${NVCVImage_LIB}")
+
+ message(STATUS "NVCVImage_LIB: ${NVCVImage_LIB}")
+ message(STATUS "NVCVImage_INCLUDES_PATH: ${NVCVImage_INCLUDES}")
+
+
+endif()
add_definitions(-DNOMINMAX -DWIN32_LEAN_AND_MEAN)
diff --git a/LICENSE b/LICENSE
index 58871f4..93bd1cc 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,6 +1,6 @@
The MIT License (MIT)
-Copyright (c) 2021 NVIDIA Corporation
+Copyright (c) 2020 NVIDIA Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
diff --git a/README.MD b/README.MD
index 141c330..58b23bd 100644
--- a/README.MD
+++ b/README.MD
@@ -1,29 +1,37 @@
# README
## NVIDIA MAXINE AR SDK: API Source Code and Sample Applications
-NVIDIA MAXINE AR SDK enables real-time modeling and tracking of human faces from video. The SDK is powered by NVIDIA graphics processing units (GPUs) with Tensor Cores, and as a result, the algorithm throughput is greatly accelerated, and latency is reduced.
+NVIDIA MAXINE AR SDK offers AI-based, real-time 3D face tracking and modeling, as well as body pose estimation based on a standard web camera feed. Developers can create unique AR effects such as overlaying 3D content on a face, driving 3D characters and virtual interactions in real time. The SDK is powered by NVIDIA graphics processing units (GPUs) with Tensor Cores, and as a result, the algorithm throughput is greatly accelerated, and latency is reduced.
The SDK has the following features:
-- **Face detection and tracking**, which detects, localizes, and tracks human faces in images or videos by using bounding boxes.
-- **Facial landmark detection and tracking**, which predicts and tracks the pixel locations of human facial landmark points and head poses in images or videos. It can predict 68 and 126 landmark points. The 68 detected facial landmarks follow the _Multi-PIE 68 point mark-ups_ information in [facial point annotations](https://ibug.doc.ic.ac.uk/resources/facial-point-annotations/). The 126 landmark points detector can predict more points on the cheeks, the eyes, and on laugh lines.
-- **Face 3D mesh and tracking**, which reconstructs and tracks a 3D human face and its head pose from the provided facial landmarks.
-- **3D Body Pose and tracking**, which predicts and tracks the 3D human pose from images or videos. It predicts 34 keypoints of body pose in 2D and 3D.
+- **Face tracking**, which detects, localizes, and tracks human faces in images or videos by using bounding boxes.
+- **Face landmark tracking**, which predicts and tracks the pixel locations of human facial landmark points using 68 or 126 landmark points. The 68 detected facial landmarks follow the Multi-PIE 68 point mark-ups information in [facial point annotations](https://ibug.doc.ic.ac.uk/resources/facial-point-annotations/). The 126 landmark points detector can predict more points on the cheeks, the eyes, and on laugh lines. Additionally, it tracks head pose and facial deformation due to head movement and expression in three degrees of freedom in real time.
+- **Face mesh**, which reconstructs and tracks a human face via a 3D mesh, as well as its head pose, from the provided facial landmarks.
+- **Body Pose Estimation**, which predicts and tracks 34 key points of the human body, with joint angles, in 2D and 3D. It also supports multi-person tracking.
+- **Eye contact**, which simulates eye contact by estimating and aligning gaze with the camera to enhance engagement in video communication.
+- **Face Expression Estimation**, which estimates face expression (blendshape) coefficients from the provided facial landmarks.
-The SDK provides two sample applications that demonstrate the features listed above in real time by using a webcam or offline videos.
-- **FaceTrack App** which demonstrates the face tracking, landmark tracking and 3D mesh tracking features.
-- **BodyTrack App** which demonstrates the 3D Body Pose tracking feature.
+The SDK provides four sample applications that demonstrate the features listed above in real time by using a webcam or offline videos; a minimal call pattern shared by these apps is sketched after the list below.
+- **FaceTrack App** which demonstrates the face tracking, landmark tracking and face mesh tracking features.
+- **BodyTrack App** which demonstrates the Body Pose estimation feature.
+- **GazeRedirect App** which demonstrates the Eye Contact feature.
+- **ExpressionApp** which demonstrates the Face Expression Estimation feature.
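
Not part of the README itself: a rough sketch of the call pattern these apps share, based on the parameter lists in nvar/include/nvAR_defs.h. NvAR_Create, NvAR_SetString and NvAR_Destroy are assumed from nvAR.h (not shown in this diff), the model directory is a hypothetical path, and CUDA-stream setup plus error checking are omitted.

    #include "nvAR.h"
    #include "nvAR_defs.h"
    #include <vector>

    void sketchLandmarkTracking(NvCVImage* frame) {                  // frame: BGR u8 input image
      NvAR_FeatureHandle lm = nullptr;
      NvAR_Create(NvAR_Feature_LandmarkDetection, &lm);
      NvAR_SetString(lm, NvAR_Parameter_Config(ModelDir), "C:/hypothetical/model/dir");
      NvAR_SetU32(lm, NvAR_Parameter_Config(Landmarks_Size), 126);   // 68 or 126 points
      NvAR_Load(lm);

      std::vector<NvAR_Point2f> landmarks(126);
      NvAR_Quaternion headPose{};
      NvAR_SetObject(lm, NvAR_Parameter_Input(Image), frame, sizeof(NvCVImage));
      NvAR_SetObject(lm, NvAR_Parameter_Output(Landmarks), landmarks.data(), sizeof(NvAR_Point2f));
      NvAR_SetObject(lm, NvAR_Parameter_Output(Pose), &headPose, sizeof(NvAR_Quaternion)); // OpenGL convention in this release
      NvAR_Run(lm);                                                  // per frame: rebind Image as needed and run again
      NvAR_Destroy(lm);
    }
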
NVIDIA MAXINE AR SDK is distributed in the following parts:
@@ -33,14 +41,12 @@ NVIDIA MAXINE AR SDK is distributed in the following parts:
Please refer to [SDK System guide](https://docs.nvidia.com/deeplearning/maxine/ar-sdk-system-guide/index.html) for configuring and integrating the SDK, compiling and running the sample applications. Please visit the [NVIDIA MAXINE AR SDK](https://developer.nvidia.com/maxine-getting-started) webpage for more information about the SDK.
## System requirements
-The SDK is supported on NVIDIA GPUs that are based on the NVIDIA® Turing™ or Ampere™ architecture and have Tensor Cores.
+The SDK is supported on NVIDIA GPUs that are based on the NVIDIA® Turing™, Ampere™ or Ada™ architecture and have Tensor Cores.
* Windows OS supported: 64-bit Windows 10 or later
* Microsoft Visual Studio: 2017 (MSVC15.0) or later
* CMake: v3.12 or later
-* NVIDIA Graphics Driver for Windows: 465.89 or later
-* NVIDIA CUDA Toolkit: 11.3.1
-* NVIDIA TensorRT: 8.0.1.6
+* NVIDIA Graphics Driver for Windows: 511.65 or later
## NVIDIA MAXINE Branding Guidelines
If you integrate an NVIDIA MAXINE SDK within your product, please follow the required branding guidelines that are available [here](
@@ -64,7 +70,7 @@ The open source repository includes the source code to build the sample applicat
* To complete configuring the Visual Studio solution file, click Finish.
* To generate the Visual Studio Solution file, click Generate.
* Verify that the build folder contains the NvAR_SDK.sln file.
-3. Use Visual Studio to generate the FaceTrack.exe or BodyTrack.exe file from the NvAR_SDK.sln file.
+3. Use Visual Studio to generate the FaceTrack.exe, BodyTrack.exe, GazeRedirect.exe or ExpressionApp.exe file from the NvAR_SDK.sln file.
* In CMake, to open Visual Studio, click Open Project.
* In Visual Studio, select Build > Build Solution.
diff --git a/nvar/include/nvAR_defs.h b/nvar/include/nvAR_defs.h
index 769e9be..5d8b169 100644
--- a/nvar/include/nvAR_defs.h
+++ b/nvar/include/nvAR_defs.h
@@ -38,6 +38,7 @@
#define NvAR_API
#endif // OS dependencies
+// TODO: Change the representation to x,y,z instead of array
typedef struct NvAR_Vector3f
{
float vec[3];
@@ -57,12 +58,24 @@ typedef struct NvAR_BBoxes {
uint8_t max_boxes;
} NvAR_BBoxes;
+typedef struct NvAR_TrackingBBox {
+ NvAR_Rect bbox;
+ uint16_t tracking_id;
+} NvAR_TrackingBBox;
+
+typedef struct NvAR_TrackingBBoxes {
+ NvAR_TrackingBBox *boxes;
+ uint8_t num_boxes;
+ uint8_t max_boxes;
+} NvAR_TrackingBBoxes;
+
typedef struct NvAR_FaceMesh {
NvAR_Vector3f *vertices; ///< Mesh 3D vertex positions.
size_t num_vertices;
- NvAR_Vector3u16 *tvi; ///< Mesh triangle's vertex indices
- size_t num_tri_idx;
+ NvAR_Vector3u16 *tvi; ///< Mesh triangle's vertex indices
+ size_t num_triangles; ///< The number of triangles (previously num_tri_idx)
} NvAR_FaceMesh;
+#define num_tri_idx num_triangles ///< num_tri_idx is confusing and deprecated
typedef struct NvAR_Frustum {
float left;
@@ -95,124 +108,222 @@ typedef struct NvAR_RenderingParams {
// Parameters provided by client application
typedef const char* NvAR_FeatureID;
-#define NvAR_Feature_FaceBoxDetection "FaceBoxDetection"
-#define NvAR_Feature_FaceDetection "FaceDetection" // deprecated in favor of FaceBox
-#define NvAR_Feature_LandmarkDetection "LandmarkDetection"
-#define NvAR_Feature_Face3DReconstruction "Face3DReconstruction"
-#define NvAR_Feature_BodyDetection "BodyDetection"
-#define NvAR_Feature_BodyPoseEstimation "BodyPoseEstimation"
+
+#define NvAR_Feature_FaceBoxDetection "FaceBoxDetection" //
+#define NvAR_Feature_FaceDetection "FaceDetection" // // deprecated in favor of FaceBox
+#define NvAR_Feature_LandmarkDetection "LandmarkDetection" //
+#define NvAR_Feature_Face3DReconstruction "Face3DReconstruction" //
+#define NvAR_Feature_BodyDetection "BodyDetection" //
+#define NvAR_Feature_BodyPoseEstimation "BodyPoseEstimation" //
+#define NvAR_Feature_GazeRedirection "GazeRedirection" //
+#define NvAR_Feature_FaceExpressions "FaceExpressions" //
+#define NvAR_Feature_LivePortrait "LivePortrait" //
+#define NvAR_Feature_FrameSelection "FrameSelection" // !FrameSelection!
#define NvAR_Parameter_Input(Name) "NvAR_Parameter_Input_" #Name
#define NvAR_Parameter_Output(Name) "NvAR_Parameter_Output_" #Name
#define NvAR_Parameter_Config(Name) "NvAR_Parameter_Config_" #Name
#define NvAR_Parameter_InOut(Name) "NvAR_Parameter_InOut_" #Name
+#define NVAR_TEMPORAL_FILTER_FACE_BOX (1 << 0) // 0x001
+#define NVAR_TEMPORAL_FILTER_FACIAL_LANDMARKS (1 << 1) // 0x002
+#define NVAR_TEMPORAL_FILTER_FACE_ROTATIONAL_POSE (1 << 2) // 0x004
+#define NVAR_TEMPORAL_FILTER_FACIAL_EXPRESSIONS (1 << 4) // 0x010
+#define NVAR_TEMPORAL_FILTER_FACIAL_GAZE (1 << 5) // 0x020
+#define NVAR_TEMPORAL_FILTER_ENHANCE_EXPRESSIONS (1 << 8) // 0x100
+
/*
Parameters supported by each NvAR_FeatureID
-*******NvAR_Feature_FaceDetection*******
-Config:
-NvAR_Parameter_Config(FeatureDescription)
-NvAR_Parameter_Config(CUDAStream)
-NvAR_Parameter_Config(TRTModelDir)
-NvAR_Parameter_Config(Temporal)
-
-Input:
-NvAR_Parameter_Input(Image)
-
-Output:
-NvAR_Parameter_Output(BoundingBoxes)
-NvAR_Parameter_Output(BoundingBoxesConfidence) - OPTIONAL
-
-*******NvAR_Feature_LandmarkDetection*******
-Config:
-NvAR_Parameter_Config(FeatureDescription)
-NvAR_Parameter_Config(CUDAStream)
-NvAR_Parameter_Config(ModelDir)
-NvAR_Parameter_Config(BatchSize)
-NvAR_Parameter_Config(Landmarks_Size)
-NvAR_Parameter_Config(LandmarksConfidence_Size)
-NvAR_Parameter_Config(Temporal)
-
-Input:
-NvAR_Parameter_Input(Image)
-NvAR_Parameter_Input(BoundingBoxes) - OPTIONAL
-
-Output:
-NvAR_Parameter_Output(BoundingBoxes) - OPTIONAL
-NvAR_Parameter_Output(Landmarks)
-NvAR_Parameter_Output(Pose) - OPTIONAL
-NvAR_Parameter_Output(LandmarksConfidence) - OPTIONAL
-
-*******NvAR_Feature_Face3DReconstruction*******
-Config:
-NvAR_Parameter_Config(FeatureDescription)
-NvAR_Parameter_Config(ModelDir)
-NvAR_Parameter_Config(Landmarks_Size)
-NvAR_Parameter_Config(CUDAStream) -OPTIONAL
-NvAR_Parameter_Config(Temporal) - OPTIONAL
-NvAR_Parameter_Config(ModelName) - OPTIONAL
-NvAR_Parameter_Config(GPU) - OPTIONAL
-NvAR_Parameter_Config(VertexCount) - QUERY
-NvAR_Parameter_Config(TriangleCount) - QUERY
-NvAR_Parameter_Config(ExpressionCount) - QUERY
-NvAR_Parameter_Config(ShapeEigenValueCount) - QUERY
-
-Input:
-NvAR_Parameter_Input(Width)
-NvAR_Parameter_Input(Height)
-NvAR_Parameter_Input(Image) - OPTIONAL
-NvAR_Parameter_Input(Landmarks) - OPTIONAL
-
-Output:
-NvAR_Parameter_Output(FaceMesh)
-NvAR_Parameter_Output(RenderingParams)
-NvAR_Parameter_Output(BoundingBoxes) - OPTIONAL
-NvAR_Parameter_Output(BoundingBoxesConfidence) - OPTIONAL
-NvAR_Parameter_Output(Landmarks) - OPTIONAL
-NvAR_Parameter_Output(Pose) - OPTIONAL
-NvAR_Parameter_Output(LandmarksConfidence) - OPTIONAL
-NvAR_Parameter_Output(ExpressionCoefficients) - OPTIONAL
-NvAR_Parameter_Output(ShapeEigenValues) - OPTIONAL
-
-*******NvAR_Feature_BodyDetection*******
-Config:
-NvAR_Parameter_Config(FeatureDescription)
-NvAR_Parameter_Config(CUDAStream)
-NvAR_Parameter_Config(TRTModelDir)
-NvAR_Parameter_Config(Temporal)
-
-Input:
-NvAR_Parameter_Input(Image)
-
-Output:
-NvAR_Parameter_Output(BoundingBoxes)
-NvAR_Parameter_Output(BoundingBoxesConfidence) - OPTIONAL
-
-*******NvAR_Feature_BodyPoseEstimation*******
-Config:
-NvAR_Parameter_Config(FeatureDescription)
-NvAR_Parameter_Config(CUDAStream)
-NvAR_Parameter_Config(ModelDir)
-NvAR_Parameter_Config(BatchSize)
-NvAR_Parameter_Config(Mode)
-NvAR_Parameter_Config(NumKeyPoints)
-NvAR_Parameter_Config(ReferencePose)
-NvAR_Parameter_Config(Temporal)
-NvAR_Parameter_Config(UseCudaGraph)
-NvAR_Parameter_Config(FocalLength)
-
-Input:
-NvAR_Parameter_Input(Image)
-NvAR_Parameter_Input(BoundingBoxes) - OPTIONAL
-
-Output:
-NvAR_Parameter_Output(BoundingBoxes) - OPTIONAL
-NvAR_Parameter_Output(BoundingBoxesConfidence) - OPTIONAL
-NvAR_Parameter_Output(KeyPoints)
-NvAR_Parameter_Output(KeyPoints3D)
-NvAR_Parameter_Output(JointAngles)
-NvAR_Parameter_Output(KeyPointsConfidence) - OPTIONAL
+*******NvAR_Feature_FaceDetection******* //
+Config: //
+NvAR_Parameter_Config(FeatureDescription) //
+NvAR_Parameter_Config(CUDAStream) //
+NvAR_Parameter_Config(TRTModelDir) //
+NvAR_Parameter_Config(Temporal) //
+ //
+Input: //
+NvAR_Parameter_Input(Image) //
+ //
+Output: //
+NvAR_Parameter_Output(BoundingBoxes) //
+NvAR_Parameter_Output(BoundingBoxesConfidence) - OPTIONAL //
+ //
+*******NvAR_Feature_LandmarkDetection******* //
+Config: //
+NvAR_Parameter_Config(FeatureDescription) //
+NvAR_Parameter_Config(CUDAStream) //
+NvAR_Parameter_Config(ModelDir) //
+NvAR_Parameter_Config(BatchSize) //
+NvAR_Parameter_Config(Landmarks_Size) //
+NvAR_Parameter_Config(LandmarksConfidence_Size) //
+NvAR_Parameter_Config(Temporal) //
+ //
+Input: //
+NvAR_Parameter_Input(Image) //
+NvAR_Parameter_Input(BoundingBoxes) - OPTIONAL //
+ //
+Output: //
+NvAR_Parameter_Output(BoundingBoxes) - OPTIONAL //
+NvAR_Parameter_Output(Landmarks) //
+NvAR_Parameter_Output(Pose) - OPTIONAL //
+NvAR_Parameter_Output(LandmarksConfidence) - OPTIONAL //
+ //
+*******NvAR_Feature_Face3DReconstruction******* //
+Config: //
+NvAR_Parameter_Config(FeatureDescription) //
+NvAR_Parameter_Config(ModelDir) //
+NvAR_Parameter_Config(Landmarks_Size) //
+NvAR_Parameter_Config(CUDAStream) - OPTIONAL //
+NvAR_Parameter_Config(Temporal) - OPTIONAL //
+NvAR_Parameter_Config(GazeMode) - OPTIONAL //
+NvAR_Parameter_Config(ModelName) - OPTIONAL //
+NvAR_Parameter_Config(GPU) - OPTIONAL //
+NvAR_Parameter_Config(VertexCount) - QUERY //
+NvAR_Parameter_Config(TriangleCount) - QUERY //
+NvAR_Parameter_Config(ExpressionCount) - QUERY //
+NvAR_Parameter_Config(ShapeEigenValueCount) - QUERY //
+ //
+Input: //
+NvAR_Parameter_Input(Width) //
+NvAR_Parameter_Input(Height) //
+NvAR_Parameter_Input(Image) - OPTIONAL //
+NvAR_Parameter_Input(Landmarks) - OPTIONAL //
+ //
+Output: //
+NvAR_Parameter_Output(FaceMesh) //
+NvAR_Parameter_Output(RenderingParams) //
+NvAR_Parameter_Output(BoundingBoxes) - OPTIONAL //
+NvAR_Parameter_Output(BoundingBoxesConfidence) - OPTIONAL //
+NvAR_Parameter_Output(Landmarks) - OPTIONAL //
+NvAR_Parameter_Output(Pose) - OPTIONAL //
+NvAR_Parameter_Output(LandmarksConfidence) - OPTIONAL //
+NvAR_Parameter_Output(ExpressionCoefficients) - OPTIONAL //
+NvAR_Parameter_Output(ShapeEigenValues) - OPTIONAL //
+ //
+*******NvAR_Feature_BodyDetection******* //
+Config: //
+NvAR_Parameter_Config(FeatureDescription) //
+NvAR_Parameter_Config(CUDAStream) //
+NvAR_Parameter_Config(TRTModelDir) //
+NvAR_Parameter_Config(Temporal) //
+ //
+Input: //
+NvAR_Parameter_Input(Image) //
+ //
+Output: //
+NvAR_Parameter_Output(BoundingBoxes) //
+NvAR_Parameter_Output(BoundingBoxesConfidence) - OPTIONAL //
+ //
+*******NvAR_Feature_BodyPoseEstimation******* //
+Config: //
+NvAR_Parameter_Config(FeatureDescription) //
+NvAR_Parameter_Config(CUDAStream) //
+NvAR_Parameter_Config(ModelDir) //
+NvAR_Parameter_Config(BatchSize) //
+NvAR_Parameter_Config(Mode) //
+NvAR_Parameter_Config(NumKeyPoints) //
+NvAR_Parameter_Config(ReferencePose) //
+NvAR_Parameter_Config(Temporal) //
+NvAR_Parameter_Config(UseCudaGraph) //
+NvAR_Parameter_Config(FocalLength) //
+NvAR_Parameter_Config(TrackPeople) //
+NvAR_Parameter_Config(ShadowTrackingAge) //
+NvAR_Parameter_Config(MaxTargetsTracked) //
+ //
+Input: //
+NvAR_Parameter_Input(Image) //
+NvAR_Parameter_Input(BoundingBoxes) - OPTIONAL //
+ //
+Output: //
+NvAR_Parameter_Output(BoundingBoxes) - OPTIONAL //
+NvAR_Parameter_Output(TrackingBoundingBoxes) - OPTIONAL //
+NvAR_Parameter_Output(BoundingBoxesConfidence) - OPTIONAL //
+NvAR_Parameter_Output(KeyPoints) //
+NvAR_Parameter_Output(KeyPoints3D) //
+NvAR_Parameter_Output(JointAngles) //
+NvAR_Parameter_Output(KeyPointsConfidence) - OPTIONAL //
+ //
+*******NvAR_Feature_GazeRedirection******* //
+Config: //
+NvAR_Parameter_Config(FeatureDescription) //
+NvAR_Parameter_Config(CUDAStream) //
+NvAR_Parameter_Config(ModelDir) //
+NvAR_Parameter_Config(BatchSize) //
+NvAR_Parameter_Config(Landmarks_Size) //
+NvAR_Parameter_Config(GazeRedirect) //
+NvAR_Parameter_Config(Temporal) //
+NvAR_Parameter_Config(DetectClosure) - OPTIONAL //
+ //
+Input: //
+NvAR_Parameter_Input(Image) //
+NvAR_Parameter_Input(Width) //
+NvAR_Parameter_Input(Height) //
+NvAR_Parameter_Input(Landmarks) - OPTIONAL //
+NvAR_Parameter_Input(LandmarksConfidence) - OPTIONAL //
+ //
+Output: //
+NvAR_Parameter_Output(OutputGazeVector) //
+NvAR_Parameter_Output(OutputHeadTranslation) //
+NvAR_Parameter_Output(HeadPose) //
+NvAR_Parameter_Output(EyeCenters3D) //
+NvAR_Parameter_Output(Image) - OPTIONAL //
+NvAR_Parameter_Output(BoundingBoxes) - OPTIONAL //
+NvAR_Parameter_Output(Landmarks) - OPTIONAL //
+NvAR_Parameter_Output(Pose) - OPTIONAL //
+NvAR_Parameter_Output(LandmarksConfidence) - OPTIONAL //
+ //
+*******NvAR_Feature_FaceExpressions******* //
+Config: //
+NvAR_Parameter_Config(FeatureDescription) //
+NvAR_Parameter_Config(CUDAStream) - OPTIONAL //
+NvAR_Parameter_Config(ModelDir) - OPTIONAL //
+NvAR_Parameter_Config(BatchSize) - OPTIONAL //
+NvAR_Parameter_Config(Temporal) - OPTIONAL //
+NvAR_Parameter_Config(Landmarks_Size) - QUERY //
+NvAR_Parameter_Config(ExpressionCount) - QUERY //
+ //
+Input: //
+NvAR_Parameter_Input(Image) //
+NvAR_Parameter_Input(Landmarks) - OPTIONAL //
+ //
+Output: //
+NvAR_Parameter_Output(ExpressionCoefficients) //
+NvAR_Parameter_Output(Landmarks) - OPTIONAL //
+NvAR_Parameter_Output(LandmarksConfidence) - OPTIONAL //
+NvAR_Parameter_Output(Pose) - OPTIONAL //
+NvAR_Parameter_Output(BoundingBoxes) - OPTIONAL //
+NvAR_Parameter_Output(BoundingBoxesConfidence) - OPTIONAL //
+
+*******NvAR_Feature_LivePortrait******* //
+Config: //
+NvAR_Parameter_Config(FeatureDescription) //
+NvAR_Parameter_Config(CUDAStream) //
+NvAR_Parameter_Config(ModelDir) //
+NvAR_Parameter_Config(Temporal) //
+NvAR_Parameter_Config(Mode) //
+ //
+Input: //
+NvAR_Parameter_Input(SourceImage) //
+NvAR_Parameter_Input(DriveImage) //
+NvAR_Parameter_Input(BackgroundImage) //
+
+Output: //
+NvAR_Parameter_Output(GeneratedImage) //
+
+*******NvAR_Feature_FrameSelection******* // !FrameSelection!
+Config: // !FrameSelection!
+NvAR_Parameter_Config(FeatureDescription) // !FrameSelection!
+NvAR_Parameter_Config(CUDAStream) // !FrameSelection!
+NvAR_Parameter_Config(ModelDir) // !FrameSelection!
+NvAR_Parameter_Config(Mode) // !FrameSelection!
+ // !FrameSelection!
+Input: // !FrameSelection!
+NvAR_Parameter_Input(Image) // !FrameSelection!
+ // !FrameSelection!
+Output: // !FrameSelection!
+NvAR_Parameter_Output(FrameSelected) // !FrameSelection!
+
*/
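
A hedged sketch of wiring the new NvAR_Feature_FaceExpressions parameters listed above. Treating NvAR_Parameter_Config(Temporal) as an OR-ed bitmask of the NVAR_TEMPORAL_FILTER_* flags is an assumption based on this header; NvAR_Create and NvAR_Destroy come from nvAR.h (not shown in this diff), and optional config such as ModelDir is omitted.

    #include "nvAR.h"
    #include "nvAR_defs.h"
    #include <vector>

    void sketchFaceExpressions(NvCVImage* frame) {
      NvAR_FeatureHandle expr = nullptr;
      NvAR_Create(NvAR_Feature_FaceExpressions, &expr);
      // Assumed: the Temporal config accepts an OR-ed combination of the filter flags above.
      NvAR_SetU32(expr, NvAR_Parameter_Config(Temporal),
                  NVAR_TEMPORAL_FILTER_FACE_BOX | NVAR_TEMPORAL_FILTER_FACIAL_LANDMARKS |
                  NVAR_TEMPORAL_FILTER_FACIAL_EXPRESSIONS);
      NvAR_Load(expr);

      unsigned exprCount = 0;
      NvAR_GetU32(expr, NvAR_Parameter_Config(ExpressionCount), &exprCount);  // QUERY parameter
      std::vector<float> coefficients(exprCount);

      NvAR_SetObject(expr, NvAR_Parameter_Input(Image), frame, sizeof(NvCVImage));
      NvAR_SetF32Array(expr, NvAR_Parameter_Output(ExpressionCoefficients), coefficients.data(), (int)exprCount);
      NvAR_Run(expr);
      NvAR_Destroy(expr);
    }
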
diff --git a/nvar/include/nvCVImage.h b/nvar/include/nvCVImage.h
index 014cd94..543df6e 100644
--- a/nvar/include/nvCVImage.h
+++ b/nvar/include/nvCVImage.h
@@ -323,6 +323,12 @@ NvCV_Status NvCV_API NvCVImage_Realloc(NvCVImage *im, unsigned width, unsigned h
void NvCV_API NvCVImage_Dealloc(NvCVImage *im);
+//! Asynchronously deallocate the image buffer from the image on the specified stream. The NvCVImage structure itself is not deallocated.
+//! \param[in,out] im the image whose buffer is to be deallocated.
+//! \param[in] stream the CUDA stream on which the image buffer is to be deallocated.
+void NvCV_API NvCVImage_DeallocAsync(NvCVImage *im, struct CUstream_st *stream);
+
+
//! Allocate a new image, with storage (C-style constructor).
//! \param[in] width the desired width of the image, in pixels.
//! \param[in] height the desired height of the image, in pixels.
@@ -378,7 +384,7 @@ void NvCV_API NvCVImage_ComponentOffsets(NvCVImage_PixelFormat format, int *rOff
//! | RGB --> RGB | X | X | X | X |
//! | RGB --> RGBA | X | X | X | X |
//! | RGBA --> Y | X | X | | |
-//! | RGBA --> A | | X | | |
+//! | RGBA --> A | X | | | |
//! | RGBA --> RGB | X | X | X | X |
//! | RGBA --> RGBA | X | X | X | X |
//! | RGB --> YUV420 | X | | X | |
@@ -559,7 +565,7 @@ NvCV_Status NvCV_API NvCVImage_Composite(const NvCVImage *fg, const NvCVImage *b
//! \param[in] mat the matte image, indicating where the src should come through.
//! This determines the size of the rectangle to be composited.
//! If this is multi-channel, the alpha channel is used as the matte.
-//! \param[in] mode the composition mode. Only 0 (straight alpha over) is implemented at this time.
+//! \param[in] mode the composition mode: 0 (straight alpha over) or 1 (premultiplied alpha over).
//! \param[out] dst the destination image. This can be the same as fg or bg.
//! \param[in] dstOrg the upper-left corner of the dst image to be updated (NULL implies (0,0)).
//! \param[in] stream the CUDA stream on which the composition is to be performed.
@@ -631,6 +637,23 @@ NvCV_Status NvCV_API NvCVImage_GetYUVPointers(NvCVImage *im,
int *yPixBytes, int *cPixBytes, int *yRowBytes, int *cRowBytes);
+//! Sharpen an image.
+//! The src and dst should be the same type - conversions are not performed.
+//! This function is only implemented for NVCV_CHUNKY NVCV_U8 pixels, of format NVCV_RGB or NVCV_BGR.
+//! \param[in] sharpness the sharpness strength, calibrated so that 1 and 2 yield Adobe's Sharpen and Sharpen More, respectively.
+//! \param[in] src the source image to be sharpened.
+//! \param[out] dst the resultant image (may be the same as the src).
+//! \param[in] stream the CUDA stream on which to perform the computations.
+//! \param[in] tmp a temporary working image. This can be NULL, but may result in lower performance.
+//! It is best if it resides on the same processor (CPU or GPU) as the destination.
+//! @return NVCV_SUCCESS if the operation completed successfully.
+//! NVCV_ERR_MISMATCH if the source and destination formats are different.
+//! NVCV_ERR_PIXELFORMAT if the function has not been implemented for the chosen pixel type.
+
+NvCV_Status NvCV_API NvCVImage_Sharpen(float sharpness, const NvCVImage *src, NvCVImage *dst,
+ struct CUstream_st *stream, NvCVImage *tmp);
+
+
#ifdef __cplusplus
} // extern "C"
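
A small usage sketch for the two nvCVImage.h additions above (illustrative only, not taken from the SDK docs): sharpen a chunky u8 RGB/BGR image in place, then release its pixel buffer asynchronously on the same CUDA stream.

    #include "nvCVImage.h"

    NvCV_Status sharpenAndRelease(NvCVImage* img, struct CUstream_st* stream) {
      // NvCVImage_Sharpen is documented above for chunky u8 RGB/BGR only; src and dst may alias.
      // The tmp image may be NULL, possibly at some performance cost.
      NvCV_Status err = NvCVImage_Sharpen(1.0f /* ~ "Sharpen" */, img, img, stream, /*tmp=*/nullptr);
      // New in this release: free the pixel buffer on the stream; the NvCVImage struct itself remains valid.
      NvCVImage_DeallocAsync(img, stream);
      return err;
    }
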
diff --git a/nvar/include/nvCVStatus.h b/nvar/include/nvCVStatus.h
index dd47ba3..4b6dfa6 100644
--- a/nvar/include/nvCVStatus.h
+++ b/nvar/include/nvCVStatus.h
@@ -77,7 +77,11 @@ typedef enum NvCV_Status {
NVCV_ERR_TRT_ENGINE = -30, ///< There was a problem deserializing the inference runtime engine.
NVCV_ERR_NPP = -31, //!< An error has occurred in the NPP library.
NVCV_ERR_CONFIG = -32, //!< No suitable model exists for the specified parameter configuration.
+ NVCV_ERR_TOOSMALL = -33, //!< A supplied parameter or buffer is not large enough.
+ NVCV_ERR_TOOBIG = -34, //!< A supplied parameter is too big.
+ NVCV_ERR_WRONGSIZE = -35, //!< A supplied parameter is not the expected size.
+ NVCV_ERR_OPENGL = -98, //!< An OpenGL error has occurred.
NVCV_ERR_DIRECT3D = -99, //!< A Direct3D error has occurred.
NVCV_ERR_CUDA_BASE = -100, //!< CUDA errors are offset from this value.
diff --git a/nvar/src/VPIProxy.cpp b/nvar/src/VPIProxy.cpp
new file mode 100644
index 0000000..6d3857d
--- /dev/null
+++ b/nvar/src/VPIProxy.cpp
@@ -0,0 +1,203 @@
+#if defined(linux) || defined(unix) || defined(__linux)
+#warning VPIProxy.cpp not ported
+#else
+/*###############################################################################
+#
+# Copyright 2020 NVIDIA Corporation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy of
+# this software and associated documentation files (the "Software"), to deal in
+# the Software without restriction, including without limitation the rights to
+# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+# the Software, and to permit persons to whom the Software is furnished to do so,
+# subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+###############################################################################*/
+#include <string.h>
+#include "../include/vpi/Status.h"
+#include "../include/vpi/VPI.h"
+#include "../include/vpi/CUDAInterop.h"
+#include "../include/vpi/experimental/ColorNames.h"
+#include "../include/vpi/experimental/HOG.h"
+
+#ifdef _WIN32
+#define _WINSOCKAPI_
+#include <windows.h>
+#include <tchar.h>
+#else // !_WIN32
+#include <dlfcn.h>
+typedef void* HMODULE;
+typedef void* HANDLE;
+typedef void* HINSTANCE;
+#endif // _WIN32
+
+// Parameter string does not include the file extension
+#ifdef _WIN32
+#define nvLoadLibrary(library) LoadLibrary(TEXT(library ".dll"))
+#else // !_WIN32
+#define nvLoadLibrary(library) dlopen("lib" library ".so", RTLD_LAZY)
+#endif // _WIN32
+
+
+inline void* nvGetProcAddress(HINSTANCE handle, const char* proc) {
+ if (nullptr == handle) return nullptr;
+#ifdef _WIN32
+ return GetProcAddress(handle, proc);
+#else // !_WIN32
+ return dlsym(handle, proc);
+#endif // _WIN32
+}
+
+inline int nvFreeLibrary(HINSTANCE handle) {
+#ifdef _WIN32
+ return FreeLibrary(handle);
+#else
+ return dlclose(handle);
+#endif
+}
+
+HINSTANCE getVPILib() {
+ TCHAR path[MAX_PATH], tmpPath[MAX_PATH], fullPath[MAX_PATH];
+ static HINSTANCE VPILib = NULL;
+ static bool bSDKPathSet = false;
+ if (!bSDKPathSet) {
+ VPILib = nvLoadLibrary("nvvpi2");
+ if (VPILib) bSDKPathSet = true;
+ }
+ if (!bSDKPathSet) {
+ // There can be multiple apps on the system,
+ // some might include the SDK in the app package and
+ // others might expect the SDK to be installed in Program Files
+ GetEnvironmentVariable(TEXT("NV_VIDEO_EFFECTS_PATH"), path, MAX_PATH);
+ GetEnvironmentVariable(TEXT("NV_AR_SDK_PATH"), tmpPath, MAX_PATH);
+ if (_tcscmp(path, TEXT("USE_APP_PATH")) && _tcscmp(tmpPath, TEXT("USE_APP_PATH"))) {
+ // App has not set environment variable to "USE_APP_PATH"
+ // So pick up the SDK dll and dependencies from Program Files
+ GetEnvironmentVariable(TEXT("ProgramFiles"), path, MAX_PATH);
+ size_t max_len = sizeof(fullPath) / sizeof(TCHAR);
+ _stprintf_s(fullPath, max_len, TEXT("%s\\NVIDIA Corporation\\NVIDIA Video Effects\\"), path);
+ SetDllDirectory(fullPath);
+ VPILib = nvLoadLibrary("nvvpi2");
+ if (!VPILib) {
+ _stprintf_s(fullPath, max_len, TEXT("%s\\NVIDIA Corporation\\NVIDIA AR SDK\\"), path);
+ SetDllDirectory(fullPath);
+ VPILib = nvLoadLibrary("nvvpi2");
+ }
+ }
+ bSDKPathSet = true;
+ }
+ return VPILib;
+}
+
+const char *vpiStatusGetName(VPIStatus code) {
+ static const auto funcPtr = (decltype(vpiStatusGetName) *)nvGetProcAddress(getVPILib(), "vpiStatusGetName");
+
+ if (nullptr == funcPtr) return nullptr;
+ return funcPtr(code);
+}
+
+VPIStatus vpiGetLastStatusMessage(char *msgBuffer, int32_t lenBuffer)
+{
+ static const auto funcPtr = (decltype(vpiGetLastStatusMessage) *)nvGetProcAddress(getVPILib(), "vpiGetLastStatusMessage");
+
+ if (nullptr == funcPtr) return VPI_ERROR_NOT_IMPLEMENTED;
+ return funcPtr(msgBuffer, lenBuffer);
+}
+
+VPIStatus vpiStreamCreate(uint32_t flags, VPIStream *stream) {
+ static const auto funcPtr = (decltype(vpiStreamCreate)*)nvGetProcAddress(getVPILib(), "vpiStreamCreate");
+
+ if (nullptr == funcPtr) return VPI_ERROR_NOT_IMPLEMENTED;
+ return funcPtr(flags, stream);
+}
+
+void vpiStreamDestroy(VPIStream stream) {
+ static const auto funcPtr = (decltype(vpiStreamDestroy)*)nvGetProcAddress(getVPILib(), "vpiStreamDestroy");
+
+ if (nullptr == funcPtr) return;
+ return funcPtr(stream);
+}
+
+VPIStatus vpiStreamSync(VPIStream stream) {
+ static const auto funcPtr = (decltype(vpiStreamSync)*)nvGetProcAddress(getVPILib(), "vpiStreamSync");
+
+ if (nullptr == funcPtr) return VPI_ERROR_NOT_IMPLEMENTED;
+ return funcPtr(stream);
+}
+
+void vpiPayloadDestroy(VPIPayload payload) {
+ static const auto funcPtr = (decltype(vpiPayloadDestroy)*)nvGetProcAddress(getVPILib(), "vpiPayloadDestroy");
+
+ if (nullptr == funcPtr) return;
+ return funcPtr(payload);
+}
+
+void vpiImageDestroy(VPIImage img) {
+ static const auto funcPtr = (decltype(vpiImageDestroy)*)nvGetProcAddress(getVPILib(), "vpiImageDestroy");
+
+ if (nullptr == funcPtr) return;
+ return funcPtr(img);
+}
+
+VPIStatus vpiCreateExtractColorNameFeatures(uint32_t backends, VPIImageFormat outType, VPIPayload *payload) {
+ static const auto funcPtr = (decltype(vpiCreateExtractColorNameFeatures)*)nvGetProcAddress(getVPILib(), "vpiCreateExtractColorNameFeatures");
+
+ if (nullptr == funcPtr) return VPI_ERROR_NOT_IMPLEMENTED;
+ return funcPtr(backends, outType, payload);
+}
+
+VPIStatus vpiSubmitExtractColorNameFeatures(VPIStream stream, uint32_t backend, VPIPayload payload,
+ VPIImage input, VPIImage *output, int32_t numOutputs) {
+ static const auto funcPtr = (decltype(vpiSubmitExtractColorNameFeatures)*)nvGetProcAddress(getVPILib(), "vpiSubmitExtractColorNameFeatures");
+
+ if (nullptr == funcPtr) return VPI_ERROR_NOT_IMPLEMENTED;
+ return funcPtr(stream, backend, payload, input, output, numOutputs);
+}
+
+VPIStatus vpiCreateExtractHOGFeatures(uint32_t backends, int32_t width, int32_t height, int32_t features,
+ int32_t cellSize, int32_t numOrientations, int32_t *outNumFeatures,
+ VPIPayload *payload) {
+ static const auto funcPtr = (decltype(vpiCreateExtractHOGFeatures)*)nvGetProcAddress(getVPILib(), "vpiCreateExtractHOGFeatures");
+
+ if (nullptr == funcPtr) return VPI_ERROR_NOT_IMPLEMENTED;
+ return funcPtr(backends, width, height, features, cellSize, numOrientations, outNumFeatures, payload);
+}
+
+VPIStatus vpiCreateExtractHOGFeaturesBatch(uint32_t backends, int32_t maxBatchWidth, int32_t maxBatchHeight,
+ int32_t imgWidth, int32_t imgHeight, int32_t features, int32_t cellSize,
+ int32_t numOrientations, int32_t *outNumFeatures, VPIPayload *payload)
+{
+ static const auto funcPtr =
+ (decltype(vpiCreateExtractHOGFeaturesBatch) *)nvGetProcAddress(getVPILib(), "vpiCreateExtractHOGFeaturesBatch");
+
+ if (nullptr == funcPtr) return VPI_ERROR_NOT_IMPLEMENTED;
+ return funcPtr(backends, maxBatchWidth, maxBatchHeight, imgWidth, imgHeight, features, cellSize, numOrientations, outNumFeatures, payload);
+}
+
+VPIStatus vpiSubmitExtractHOGFeatures(VPIStream stream, uint32_t backend, VPIPayload payload, VPIImage input,
+ VPIImage *outFeatures, int32_t numFeatures) {
+ static const auto funcPtr = (decltype(vpiSubmitExtractHOGFeatures)*)nvGetProcAddress(getVPILib(), "vpiSubmitExtractHOGFeatures");
+
+ if (nullptr == funcPtr) return VPI_ERROR_NOT_IMPLEMENTED;
+ return funcPtr(stream, backend, payload, input, outFeatures, numFeatures);
+}
+
+VPIStatus vpiImageCreateCUDAMemWrapper(const VPIImageData *cudaData, uint32_t flags, VPIImage *img) {
+ static const auto funcPtr = (decltype(vpiImageCreateCUDAMemWrapper)*)nvGetProcAddress(getVPILib(), "vpiImageCreateCUDAMemWrapper");
+
+ if (nullptr == funcPtr) return VPI_ERROR_NOT_IMPLEMENTED;
+ return funcPtr(cudaData, flags, img);
+}
+
+
+#endif // enabling for this file
diff --git a/nvar/src/nvCVImageProxy.cpp b/nvar/src/nvCVImageProxy.cpp
index 4f5199a..4657939 100644
--- a/nvar/src/nvCVImageProxy.cpp
+++ b/nvar/src/nvCVImageProxy.cpp
@@ -132,10 +132,16 @@ NvCV_Status NvCV_API NvCVImage_Realloc(NvCVImage* im, unsigned width, unsigned h
void NvCV_API NvCVImage_Dealloc(NvCVImage* im) {
static const auto funcPtr = (decltype(NvCVImage_Dealloc)*)nvGetProcAddress(getNvCVImageLib(), "NvCVImage_Dealloc");
-
+
if (nullptr != funcPtr) funcPtr(im);
}
+void NvCV_API NvCVImage_DeallocAsync(NvCVImage* im, CUstream_st* stream) {
+ static const auto funcPtr = (decltype(NvCVImage_DeallocAsync)*)nvGetProcAddress(getNvCVImageLib(), "NvCVImage_DeallocAsync");
+
+ if (nullptr != funcPtr) funcPtr(im, stream);
+}
+
NvCV_Status NvCV_API NvCVImage_Create(unsigned width, unsigned height, NvCVImage_PixelFormat format,
NvCVImage_ComponentType type, unsigned isPlanar, unsigned onGPU,
unsigned alignment, NvCVImage** out) {
@@ -266,6 +272,14 @@ NvCV_Status NvCV_API NvCVImage_FlipY(const NvCVImage *src, NvCVImage *dst) {
return funcPtr(src, dst);
}
+NvCV_Status NvCV_API NvCVImage_Sharpen(float sharpness, const NvCVImage *src, NvCVImage *dst,
+ struct CUstream_st *stream, NvCVImage *tmp) {
+ static const auto funcPtr = (decltype(NvCVImage_Sharpen)*)nvGetProcAddress(getNvCVImageLib(), "NvCVImage_Sharpen");
+
+ if (nullptr == funcPtr) return NVCV_ERR_LIBRARY;
+ return funcPtr(sharpness, src, dst, stream, tmp);
+}
+
#ifdef _WIN32
__declspec(dllexport) const char* __cdecl
#else
diff --git a/resources/ar_006.png b/resources/ar_006.png
new file mode 100644
index 0000000..9d0c1f0
Binary files /dev/null and b/resources/ar_006.png differ
diff --git a/resources/ar_007.png b/resources/ar_007.png
new file mode 100644
index 0000000..9e9fdcf
Binary files /dev/null and b/resources/ar_007.png differ
diff --git a/samples/BodyTrack/BodyEngine.cpp b/samples/BodyTrack/BodyEngine.cpp
index d54994a..c94e536 100644
--- a/samples/BodyTrack/BodyEngine.cpp
+++ b/samples/BodyTrack/BodyEngine.cpp
@@ -110,12 +110,21 @@ BodyEngine::Err BodyEngine::createKeyPointDetectionFeature(const char* modelPath
nvErr = NvAR_SetU32(keyPointDetectHandle, NvAR_Parameter_Config(Temporal), bStabilizeBody);
BAIL_IF_CVERR(nvErr, err, BodyEngine::Err::errParameter);
- nvErr = NvAR_SetF32(keyPointDetectHandle, NvAR_Parameter_Config(FocalLength), bFocalLength);
+ nvErr = NvAR_SetF32(keyPointDetectHandle, NvAR_Parameter_Config(UseCudaGraph), bUseCudaGraph);
+ BAIL_IF_CVERR(nvErr, err, BodyEngine::Err::errParameter);
+#if NV_MULTI_OBJECT_TRACKER
+ nvErr = NvAR_SetF32(keyPointDetectHandle, NvAR_Parameter_Config(TrackPeople), bEnablePeopleTracking);
BAIL_IF_CVERR(nvErr, err, BodyEngine::Err::errParameter);
- nvErr = NvAR_SetF32(keyPointDetectHandle, NvAR_Parameter_Config(UseCudaGraph), bUseCudaGraph);
+ nvErr = NvAR_SetU32(keyPointDetectHandle, NvAR_Parameter_Config(ShadowTrackingAge), shadowTrackingAge);
+ BAIL_IF_CVERR(nvErr, err, BodyEngine::Err::errParameter);
+
+ nvErr = NvAR_SetU32(keyPointDetectHandle, NvAR_Parameter_Config(ProbationAge), probationAge);
BAIL_IF_CVERR(nvErr, err, BodyEngine::Err::errParameter);
+ nvErr = NvAR_SetU32(keyPointDetectHandle, NvAR_Parameter_Config(MaxTargetsTracked), maxTargetsTracked);
+ BAIL_IF_CVERR(nvErr, err, BodyEngine::Err::errParameter);
+#endif
nvErr = NvAR_Load(keyPointDetectHandle);
BAIL_IF_CVERR(nvErr, err, BodyEngine::Err::errInitialization);
@@ -176,18 +185,23 @@ BodyEngine::Err BodyEngine::initKeyPointDetectionIOParams(NvCVImage* inBuf) {
NvCV_Status nvErr = NVCV_SUCCESS;
BodyEngine::Err err = BodyEngine::Err::errNone;
uint output_bbox_size;
-
+#if NV_MULTI_OBJECT_TRACKER
+ uint output_tracking_bbox_size;
+#endif
nvErr = NvAR_SetObject(keyPointDetectHandle, NvAR_Parameter_Input(Image), inBuf, sizeof(NvCVImage));
BAIL_IF_CVERR(nvErr, err, BodyEngine::Err::errParameter);
+ nvErr = NvAR_SetF32(keyPointDetectHandle, NvAR_Parameter_Input(FocalLength), bFocalLength);
+ BAIL_IF_CVERR(nvErr, err, BodyEngine::Err::errParameter);
+
nvErr = NvAR_GetU32(keyPointDetectHandle, NvAR_Parameter_Config(NumKeyPoints), &numKeyPoints);
BAIL_IF_CVERR(nvErr, err, BodyEngine::Err::errParameter);
- keypoints.assign(batchSize * numKeyPoints, {0.f, 0.f});
- keypoints3D.assign(batchSize * numKeyPoints, {0.f, 0.f, 0.f});
- jointAngles.assign(batchSize * numKeyPoints, {0.f, 0.f, 0.f, 1.f});
+ keypoints.assign(batchSize * numKeyPoints, { 0.f, 0.f });
+ keypoints3D.assign(batchSize * numKeyPoints, { 0.f, 0.f, 0.f });
+ jointAngles.assign(batchSize * numKeyPoints, { 0.f, 0.f, 0.f, 1.f });
keypoints_confidence.assign(batchSize * numKeyPoints, 0.f);
- referencePose.assign(numKeyPoints, {0.f, 0.f, 0.f});
+ referencePose.assign(numKeyPoints, { 0.f, 0.f, 0.f });
const void* pReferencePose;
nvErr = NvAR_GetObject(keyPointDetectHandle, NvAR_Parameter_Config(ReferencePose), &pReferencePose,
@@ -208,18 +222,46 @@ BodyEngine::Err BodyEngine::initKeyPointDetectionIOParams(NvCVImage* inBuf) {
BAIL_IF_CVERR(nvErr, err, BodyEngine::Err::errParameter);
nvErr = NvAR_SetF32Array(keyPointDetectHandle, NvAR_Parameter_Output(KeyPointsConfidence),
- keypoints_confidence.data(), batchSize * numKeyPoints);
+ keypoints_confidence.data(), sizeof(float));
BAIL_IF_CVERR(nvErr, err, BodyEngine::Err::errParameter);
- output_bbox_size = batchSize;
- if (!bStabilizeBody) output_bbox_size = 25;
- output_bbox_data.assign(output_bbox_size, {0.f, 0.f, 0.f, 0.f});
+#if NV_MULTI_OBJECT_TRACKER
+ if (bEnablePeopleTracking) {
+ output_tracking_bbox_size = maxTargetsTracked;
+ output_tracking_bbox_data.assign(output_tracking_bbox_size, { 0.f, 0.f, 0.f, 0.f, 0 });
+ output_tracking_bboxes.boxes = output_tracking_bbox_data.data();
+ output_tracking_bboxes.max_boxes = (uint8_t)output_tracking_bbox_size;
+ output_tracking_bboxes.num_boxes = 0;
+ nvErr =
+ NvAR_SetObject(keyPointDetectHandle, NvAR_Parameter_Output(TrackingBoundingBoxes), &output_tracking_bboxes, sizeof(NvAR_TrackingBBoxes));
+ BAIL_IF_CVERR(nvErr, err, BodyEngine::Err::errParameter);
+ }
+ else {
+ output_bbox_data.assign(25, { 0.f, 0.f, 0.f, 0.f });
+ output_bbox_conf_data.assign(25, 0.f);
+ output_bboxes.boxes = output_bbox_data.data();
+ output_bboxes.max_boxes = (uint8_t)output_bbox_data.size();
+ output_bboxes.num_boxes = 0;
+ nvErr = NvAR_SetObject(keyPointDetectHandle, NvAR_Parameter_Output(BoundingBoxes), &output_bboxes, sizeof(NvAR_BBoxes));
+ BAIL_IF_CVERR(nvErr, err, BodyEngine::Err::errParameter);
+
+ nvErr = NvAR_SetF32Array(keyPointDetectHandle, NvAR_Parameter_Output(BoundingBoxesConfidence),
+ output_bbox_conf_data.data(), output_bboxes.max_boxes);
+ BAIL_IF_CVERR(nvErr, err, BodyEngine::Err::errParameter);
+ }
+#else
+ output_bbox_data.assign(25, { 0.f, 0.f, 0.f, 0.f });
+ output_bbox_conf_data.assign(25, 0.f);
output_bboxes.boxes = output_bbox_data.data();
- output_bboxes.max_boxes = (uint8_t)output_bbox_size;
- output_bboxes.num_boxes = (uint8_t)output_bbox_size;
- nvErr =
- NvAR_SetObject(keyPointDetectHandle, NvAR_Parameter_Output(BoundingBoxes), &output_bboxes, sizeof(NvAR_BBoxes));
+ output_bboxes.max_boxes = (uint8_t)output_bbox_data.size();
+ output_bboxes.num_boxes = 0;
+ nvErr = NvAR_SetObject(keyPointDetectHandle, NvAR_Parameter_Output(BoundingBoxes), &output_bboxes, sizeof(NvAR_BBoxes));
+ BAIL_IF_CVERR(nvErr, err, BodyEngine::Err::errParameter);
+
+ nvErr = NvAR_SetF32Array(keyPointDetectHandle, NvAR_Parameter_Output(BoundingBoxesConfidence),
+ output_bbox_conf_data.data(), output_bboxes.max_boxes);
BAIL_IF_CVERR(nvErr, err, BodyEngine::Err::errParameter);
+#endif
bail:
return err;
@@ -262,6 +304,9 @@ void BodyEngine::releaseBodyDetectionIOParams() {
void BodyEngine::releaseKeyPointDetectionIOParams() {
NvCVImage_Dealloc(&inputImageBuffer);
if (!output_bbox_data.empty()) output_bbox_data.clear();
+#if NV_MULTI_OBJECT_TRACKER
+ if (!output_tracking_bbox_data.empty()) output_tracking_bbox_data.clear();
+#endif
if (!keypoints.empty()) keypoints.clear();
if (!keypoints3D.empty()) keypoints3D.clear();
if (!jointAngles.empty()) jointAngles.clear();
@@ -338,21 +383,6 @@ void BodyEngine::enlargeAndSquarifyImageBox(float enlarge, NvAR_Rect& box, int F
std::cout << "[bodypose] > NvAR_Run(keyPointDetectHandle): " << duration.count() << " microseconds" << std::endl;
#endif
- if (getAverageKeyPointsConfidence() < confidenceThreshold) {
- return NVCV_ERR_GENERAL;
- } else {
- NvAR_Point2f *pt, *endPt;
- int i = 0;
- for (endPt = (pt = getKeyPoints()) + numKeyPoints; pt != endPt; ++pt, i += 2) {
- for (int j = 1; j < batchSize; j++) {
- pt->x += pt[j * numKeyPoints].x;
- pt->y += pt[j * numKeyPoints].y;
- }
- // average batch of inferences to generate final result keypoints
- pt->x /= batchSize;
- pt->y /= batchSize;
- }
- }
#ifdef DEBUG_PERF_RUNTIME
end = std::chrono::high_resolution_clock::now();
   duration = std::chrono::duration_cast<std::chrono::microseconds>(end - start);
@@ -367,17 +397,21 @@ NvAR_Point3f* BodyEngine::getKeyPoints3D() { return keypoints3D.data(); }
NvAR_Quaternion* BodyEngine::getJointAngles() { return jointAngles.data(); }
+NvAR_BBoxes* BodyEngine::getBBoxes(){ return &output_bboxes; }
+#if NV_MULTI_OBJECT_TRACKER
+NvAR_TrackingBBoxes* BodyEngine::getTrackingBBoxes() { return &output_tracking_bboxes; }
+#endif
float* BodyEngine::getKeyPointsConfidence() { return keypoints_confidence.data(); }
float BodyEngine::getAverageKeyPointsConfidence() {
float average_confidence = 0.0f;
float* keypoints_confidence_all = getKeyPointsConfidence();
- for (int i = 0; i < batchSize; i++) {
- for (unsigned int j = 0; j < numKeyPoints; j++) {
+ for (int i = 0; i < output_bboxes.num_boxes; i++) {
+ for (unsigned int j = 0; j < numKeyPoints; j++) {
average_confidence += keypoints_confidence_all[i * numKeyPoints + j];
}
}
- average_confidence /= batchSize * numKeyPoints;
+ average_confidence /= output_bboxes.num_boxes * numKeyPoints;
return average_confidence;
}
@@ -413,7 +447,7 @@ unsigned BodyEngine::acquireBodyBox(cv::Mat& src, NvAR_Rect& bodyBox, int varian
}
unsigned BodyEngine::acquireBodyBoxAndKeyPoints(cv::Mat& src, NvAR_Point2f* refMarks, NvAR_Point3f* refKeyPoints3D,
- NvAR_Quaternion* refJointAngles, NvAR_Rect& bodyBox, int /*variant*/) {
+ NvAR_Quaternion* refJointAngles, NvAR_BBoxes* refBodyBoxes, int /*variant*/) {
unsigned n = 0;
NvCVImage fxSrcChunkyCPU;
(void)NVWrapperForCVMat(&src, &fxSrcChunkyCPU);
@@ -426,14 +460,15 @@ unsigned BodyEngine::acquireBodyBoxAndKeyPoints(cv::Mat& src, NvAR_Point2f* refM
auto start = std::chrono::high_resolution_clock::now();
#endif
if (findKeyPoints() != NVCV_SUCCESS) return 0;
- bodyBox = output_bboxes.boxes[0];
+ memcpy(refBodyBoxes, getBBoxes(), sizeof(NvAR_BBoxes) );
n = 1;
#ifdef DEBUG_PERF_RUNTIME
auto start2 = std::chrono::high_resolution_clock::now();
#endif
- memcpy(refMarks, getKeyPoints(), sizeof(NvAR_Point2f) * numKeyPoints);
- memcpy(refKeyPoints3D, getKeyPoints3D(), sizeof(NvAR_Point3f) * numKeyPoints);
- memcpy(refJointAngles, getJointAngles(), sizeof(NvAR_Quaternion) * numKeyPoints);
+ memcpy(refMarks, getKeyPoints(), sizeof(NvAR_Point2f) * numKeyPoints * batchSize);
+ memcpy(refKeyPoints3D, getKeyPoints3D(), sizeof(NvAR_Point3f) * numKeyPoints * batchSize);
+ memcpy(refJointAngles, getJointAngles(), sizeof(NvAR_Quaternion) * numKeyPoints * batchSize);
+
#ifdef DEBUG_PERF_RUNTIME
auto end = std::chrono::high_resolution_clock::now();
@@ -446,13 +481,63 @@ unsigned BodyEngine::acquireBodyBoxAndKeyPoints(cv::Mat& src, NvAR_Point2f* refM
#endif
return n;
}
+#if NV_MULTI_OBJECT_TRACKER
+unsigned BodyEngine::acquireBodyBoxAndKeyPoints(cv::Mat& src, NvAR_Point2f* refMarks, NvAR_Point3f* refKeyPoints3D,
+ NvAR_Quaternion* refJointAngles, NvAR_TrackingBBoxes* refBodyBoxes, int /*variant*/) {
+ unsigned n = 0;
+ NvCVImage fxSrcChunkyCPU;
+ (void)NVWrapperForCVMat(&src, &fxSrcChunkyCPU);
+ NvCV_Status cvErr = NvCVImage_Transfer(&fxSrcChunkyCPU, &inputImageBuffer, 1.0f, stream, &tmpImage);
+
+ if (NVCV_SUCCESS != cvErr) {
+ return n;
+ }
+#ifdef DEBUG_PERF_RUNTIME
+ auto start = std::chrono::high_resolution_clock::now();
+#endif
+ if (findKeyPoints() != NVCV_SUCCESS) return 0;
+ memcpy(refBodyBoxes, getTrackingBBoxes(), sizeof(NvAR_TrackingBBoxes));
+ n = 1;
+#ifdef DEBUG_PERF_RUNTIME
+ auto start2 = std::chrono::high_resolution_clock::now();
+#endif
+ memcpy(refMarks, getKeyPoints(), sizeof(NvAR_Point2f) * numKeyPoints * batchSize);
+ memcpy(refKeyPoints3D, getKeyPoints3D(), sizeof(NvAR_Point3f) * numKeyPoints * batchSize);
+ memcpy(refJointAngles, getJointAngles(), sizeof(NvAR_Quaternion) * numKeyPoints * batchSize);
+
+#ifdef DEBUG_PERF_RUNTIME
+ auto end = std::chrono::high_resolution_clock::now();
+  auto duration3 = std::chrono::duration_cast<std::chrono::microseconds>(start2 - start);
+  std::cout << "[bodypose] run findKeyPoints(): " << duration3.count() << " microseconds" << std::endl;
+  auto duration2 = std::chrono::duration_cast<std::chrono::microseconds>(end - start2);
+  std::cout << "[bodypose] keypoint copy time: " << duration2.count() << " microseconds" << std::endl;
+  auto duration = std::chrono::duration_cast<std::chrono::microseconds>(end - start);
+ std::cout << "[bodypose] end-to-end time: " << duration.count() << " microseconds" << std::endl;
+#endif
+ return n;
+}
+#endif
void BodyEngine::setBodyStabilization(bool _bStabilizeBody) { bStabilizeBody = _bStabilizeBody; }
void BodyEngine::setMode(int _mode) { nvARMode = _mode; }
-void BodyEngine::setFocalLength(float _bFocalLength) { bFocalLength = _bFocalLength; }
+BodyEngine::Err BodyEngine::setFocalLength(float _bFocalLength) {
+ bFocalLength = _bFocalLength;
+ NvCV_Status nvErr = NvAR_SetF32(keyPointDetectHandle, NvAR_Parameter_Input(FocalLength), bFocalLength);
+ BodyEngine::Err err = BodyEngine::Err::errNone;
+ if (nvErr != NVCV_SUCCESS) err = BodyEngine::Err::errParameter;
+ return err;
+
+}
void BodyEngine::useCudaGraph(bool _bUseCudaGraph) { bUseCudaGraph = _bUseCudaGraph; }
-
+#if NV_MULTI_OBJECT_TRACKER
+void BodyEngine::enablePeopleTracking(bool _bEnablePeopleTracking, unsigned int _shadowTrackingAge, unsigned int _probationAge, unsigned int _maxTargetsTracked) {
+ bEnablePeopleTracking = _bEnablePeopleTracking;
+ shadowTrackingAge = _shadowTrackingAge;
+ probationAge = _probationAge;
+ maxTargetsTracked = _maxTargetsTracked;
+}
+#endif
void BodyEngine::setAppMode(BodyEngine::mode _mode) { appMode = _mode; }
diff --git a/samples/BodyTrack/BodyEngine.h b/samples/BodyTrack/BodyEngine.h
index 68aa4c3..a1669f0 100644
--- a/samples/BodyTrack/BodyEngine.h
+++ b/samples/BodyTrack/BodyEngine.h
@@ -141,17 +141,28 @@ class BodyEngine {
NvAR_Point2f* getKeyPoints();
NvAR_Point3f* getKeyPoints3D();
NvAR_Quaternion* getJointAngles();
+ NvAR_BBoxes* getBBoxes();
+#if NV_MULTI_OBJECT_TRACKER
+ NvAR_TrackingBBoxes* getTrackingBBoxes();
+#endif
float* getKeyPointsConfidence();
float getAverageKeyPointsConfidence();
void enlargeAndSquarifyImageBox(float enlarge, NvAR_Rect& box, int FLAG_variant);
unsigned findLargestBodyBox(NvAR_Rect& bodyBox, int variant = 0);
unsigned acquireBodyBox(cv::Mat& src, NvAR_Rect& bodyBox, int variant = 0);
unsigned acquireBodyBoxAndKeyPoints(cv::Mat& src, NvAR_Point2f* refMarks, NvAR_Point3f* refKeyPoints3D,
- NvAR_Quaternion* refJointAngles, NvAR_Rect& bodyBox, int variant = 0);
+ NvAR_Quaternion* refJointAngles, NvAR_BBoxes* refBodyBoxes, int variant = 0);
+#if NV_MULTI_OBJECT_TRACKER
+ unsigned acquireBodyBoxAndKeyPoints(cv::Mat& src, NvAR_Point2f* refMarks, NvAR_Point3f* refKeyPoints3D,
+ NvAR_Quaternion* refJointAngles, NvAR_TrackingBBoxes* refBodyBoxes, int variant = 0);
+#endif
void setBodyStabilization(bool);
void setMode(int);
- void setFocalLength(float);
+ BodyEngine::Err setFocalLength(float);
void useCudaGraph(bool); // Using cuda graph improves model latency
+#if NV_MULTI_OBJECT_TRACKER
+ void enablePeopleTracking(bool _bEnablePeopleTracking, unsigned int _shadowTrackingAge = 90, unsigned int _probationAge = 10, unsigned int _maxTargetsTracked = 30);
+#endif
int getNumKeyPoints() { return numKeyPoints; }
  std::vector<NvAR_Point3f> getReferencePose() { return referencePose; }
@@ -165,6 +176,11 @@ class BodyEngine {
  std::vector<NvAR_Rect> output_bbox_data;
  std::vector<float> output_bbox_conf_data;
NvAR_BBoxes output_bboxes{};
+#if NV_MULTI_OBJECT_TRACKER
+ NvAR_TrackingBBoxes output_tracking_bboxes{};
+  std::vector<NvAR_TrackingBBox> output_tracking_bbox_data;
+#endif
+
int batchSize;
int nvARMode;
std::mt19937 ran;
@@ -178,12 +194,23 @@ class BodyEngine {
char *bdOTAModelPath, *ldOTAModelPath;
float bFocalLength;
bool bUseCudaGraph;
-
+#if NV_MULTI_OBJECT_TRACKER
+ bool bEnablePeopleTracking;
+ unsigned int shadowTrackingAge;
+ unsigned int probationAge;
+ unsigned int maxTargetsTracked;
+#endif
BodyEngine() {
batchSize = 1;
nvARMode = 1;
bStabilizeBody = true;
bUseCudaGraph = true;
+#if NV_MULTI_OBJECT_TRACKER
+ bEnablePeopleTracking = false;
+ shadowTrackingAge = 90;
+ probationAge = 10;
+ maxTargetsTracked = 30;
+#endif
bFocalLength = FOCAL_LENGTH_DEFAULT;
confidenceThreshold = 0.f;
appMode = keyPointDetection;
diff --git a/samples/BodyTrack/BodyTrack.cpp b/samples/BodyTrack/BodyTrack.cpp
index 794bf67..b3d3eed 100644
--- a/samples/BodyTrack/BodyTrack.cpp
+++ b/samples/BodyTrack/BodyTrack.cpp
@@ -37,6 +37,13 @@
#include "nvAR_defs.h"
#include "opencv2/opencv.hpp"
+#if CV_MAJOR_VERSION >= 4
+#define CV_CAP_PROP_FRAME_WIDTH cv::CAP_PROP_FRAME_WIDTH
+#define CV_CAP_PROP_FRAME_HEIGHT cv::CAP_PROP_FRAME_HEIGHT
+#define CV_CAP_PROP_FPS cv::CAP_PROP_FPS
+#define CV_CAP_PROP_FRAME_COUNT cv::CAP_PROP_FRAME_COUNT
+#endif
+
#ifndef M_PI
#define M_PI 3.1415926535897932385
#endif /* M_PI */
@@ -62,6 +69,8 @@
#define DEBUG_RUNTIME
+#define PEOPLE_TRACKING_BATCH_SIZE 8
+
/********************************************************************************
* Command-line arguments
********************************************************************************/
@@ -71,7 +80,10 @@ bool FLAG_debug = false, FLAG_verbose = false, FLAG_temporal = true, FLAG_captur
std::string FLAG_outDir, FLAG_inFile, FLAG_outFile, FLAG_modelPath, FLAG_captureCodec = "avc1",
FLAG_camRes, FLAG_bodyModel;
unsigned int FLAG_appMode = 1, FLAG_mode = 1, FLAG_camindex=0;
-
+#if NV_MULTI_OBJECT_TRACKER
+bool FLAG_enablePeopleTracking = false;
+unsigned int FLAG_shadowTrackingAge = 90, FLAG_probationAge = 10, FLAG_maxTargetsTracked = 30;
+#endif
/********************************************************************************
* Usage
********************************************************************************/
@@ -95,6 +107,12 @@ static void Usage() {
" --model_path= specify the directory containing the TRT models\n"
" --mode[=0|1] Model Mode. 0: High Quality, 1: High Performance\n"
" --app_mode[=(0|1)] App mode. 0: Body detection, 1: Keypoint detection "
+#if NV_MULTI_OBJECT_TRACKER
+ " --enable_people_tracking[=(0|1)] Enables people tracking "
+ " --shadow_tracking_age Shadow Tracking Age after which tracking information of a person is removed. Measured in frames"
+ " --probation_age Length of probationary period. Measured in frames"
+ " --max_targets_tracked Maximum number of targets to be tracked "
+#endif
"(Default).\n"
" --benchmarks[=] run benchmarks\n");
}
@@ -194,6 +212,12 @@ static int ParseMyArgs(int argc, char **argv) {
GetFlagArgVal("mode", arg, &FLAG_mode) ||
GetFlagArgVal("camindex", arg, &FLAG_camindex) ||
GetFlagArgVal("use_cuda_graph", arg, &FLAG_useCudaGraph) ||
+#if NV_MULTI_OBJECT_TRACKER
+ GetFlagArgVal("enable_people_tracking", arg, &FLAG_enablePeopleTracking) ||
+ GetFlagArgVal("shadow_tracking_age", arg, &FLAG_shadowTrackingAge) ||
+ GetFlagArgVal("probation_age", arg, &FLAG_probationAge) ||
+ GetFlagArgVal("max_targets_tracked", arg, &FLAG_maxTargetsTracked) ||
+#endif
GetFlagArgVal("temporal", arg, &FLAG_temporal))) {
continue;
} else if (GetFlagArgVal("help", arg, &help)) {
@@ -231,10 +255,11 @@ enum {
#if 1
class MyTimer {
public:
- void start() { t0 = std::chrono::high_resolution_clock::now(); } /**< Start the timer. */
- void pause() { dt = std::chrono::high_resolution_clock::now() - t0; } /**< Pause the timer. */
- void resume() { t0 = std::chrono::high_resolution_clock::now() - dt; } /**< Resume the timer. */
- void stop() { pause(); } /**< Stop the timer. */
+ MyTimer() { dt = dt.zero(); } /**< Clear the duration to 0. */
+ void start() { t0 = std::chrono::high_resolution_clock::now(); } /**< Start the timer. */
+ void pause() { dt = std::chrono::high_resolution_clock::now() - t0; } /**< Pause the timer. */
+ void resume() { t0 = std::chrono::high_resolution_clock::now() - dt; } /**< Resume the timer. */
+ void stop() { pause(); } /**< Stop the timer. */
double elapsedTimeFloat() const {
    return std::chrono::duration<double>(dt).count();
} /**< Report the elapsed time as a float. */
@@ -310,14 +335,23 @@ class DoApp {
Err run();
void drawFPS(cv::Mat &img);
void DrawBBoxes(const cv::Mat &src, NvAR_Rect *output_bbox);
+ //TODO: Look into ways of simplifying the app for these functions.
+ void DrawBBoxes(const cv::Mat &src, NvAR_BBoxes *output_bbox);
+ void DrawBBoxes(const cv::Mat &src, NvAR_TrackingBBoxes *output_bbox);
void DrawKeyPointLine(const cv::Mat& src, NvAR_Point2f* keypoints, int point1, int point2, int color);
- void DrawKeyPointsAndEdges(const cv::Mat &src, NvAR_Point2f *keypoints, int numKeyPoints, NvAR_Rect* output_bbox);
+ void DrawKeyPointsAndEdges(const cv::Mat &src, NvAR_Point2f *keypoints, int numKeyPoints, NvAR_BBoxes* output_bbox);
void drawKalmanStatus(cv::Mat &img);
void drawVideoCaptureStatus(cv::Mat &img);
void processKey(int key);
void writeVideoAndEstResults(const cv::Mat &frame, NvAR_BBoxes output_bboxes, NvAR_Point2f *keypoints = NULL);
void writeFrameAndEstResults(const cv::Mat &frame, NvAR_BBoxes output_bboxes, NvAR_Point2f *keypoints = NULL);
void writeEstResults(std::ofstream &outputFile, NvAR_BBoxes output_bboxes, NvAR_Point2f *keypoints = NULL);
+#if NV_MULTI_OBJECT_TRACKER
+ void DrawKeyPointsAndEdges(const cv::Mat &src, NvAR_Point2f *keypoints, int numKeyPoints, NvAR_TrackingBBoxes* output_bbox);
+ void writeVideoAndEstResults(const cv::Mat &frame, NvAR_TrackingBBoxes output_bboxes, NvAR_Point2f *keypoints = NULL);
+ void writeFrameAndEstResults(const cv::Mat &frame, NvAR_TrackingBBoxes output_bboxes, NvAR_Point2f *keypoints = NULL);
+ void writeEstResults(std::ofstream &outputFile, NvAR_TrackingBBoxes output_bboxes, NvAR_Point2f *keypoints = NULL);
+#endif
void getFPS();
static const char *errorStringFromCode(Err code);
@@ -337,6 +371,10 @@ class DoApp {
float expr[6];
bool drawVisualization, showFPS, captureVideo, captureFrame;
float scaleOffsetXY[4];
+#if NV_MULTI_OBJECT_TRACKER
+  std::vector<cv::Scalar> colorCodes = { cv::Scalar(255,255,255) };
+  const unsigned int peopleTrackingBatchSize = 8;  // batch size must be 8 when people tracking is enabled
+#endif
};
DoApp *gApp = nullptr;
@@ -347,13 +385,17 @@ void DoApp::processKey(int key) {
case '2':
body_ar_engine.destroyFeatures();
body_ar_engine.setAppMode(BodyEngine::mode::keyPointDetection);
- body_ar_engine.createFeatures(FLAG_modelPath.c_str());
+#if NV_MULTI_OBJECT_TRACKER
+ if (FLAG_enablePeopleTracking) body_ar_engine.createFeatures(FLAG_modelPath.c_str());
+ else
+#endif
+ body_ar_engine.createFeatures(FLAG_modelPath.c_str(), 1);
body_ar_engine.initFeatureIOParams();
break;
case '1':
body_ar_engine.destroyFeatures();
body_ar_engine.setAppMode(BodyEngine::mode::bodyDetection);
- body_ar_engine.createFeatures(FLAG_modelPath.c_str());
+ body_ar_engine.createFeatures(FLAG_modelPath.c_str(), 1);
body_ar_engine.initFeatureIOParams();
break;
case 'C':
@@ -381,8 +423,11 @@ DoApp::Err DoApp::initBodyEngine(const char *modelPath) {
if (!cap.isOpened()) return errVideo;
int numKeyPoints = body_ar_engine.getNumKeyPoints();
-
- nvErr = body_ar_engine.createFeatures(modelPath);
+#if NV_MULTI_OBJECT_TRACKER
+ if (FLAG_enablePeopleTracking) nvErr = body_ar_engine.createFeatures(modelPath, peopleTrackingBatchSize);
+ else
+#endif
+ nvErr = body_ar_engine.createFeatures(modelPath, 1);
#ifdef DEBUG
detector->setOutputLocation(outputDir);
@@ -425,6 +470,51 @@ void DoApp::DrawBBoxes(const cv::Mat &src, NvAR_Rect *output_bbox) {
if (FLAG_offlineMode) bodyDetectOutputVideo.write(frm);
}
+void DoApp::DrawBBoxes(const cv::Mat &src, NvAR_BBoxes *output_bbox) {
+ cv::Mat frm;
+ if (FLAG_offlineMode)
+ frm = src.clone();
+ else
+ frm = src;
+
+ if (output_bbox) {
+ for (int i = 0; i < output_bbox->num_boxes; i++) {
+ auto color = cv::Scalar(255, 255, 255);
+ cv::rectangle(frm, cv::Point(lround(output_bbox->boxes[i].x), lround(output_bbox->boxes[i].y)),
+ cv::Point(lround(output_bbox->boxes[i].x + output_bbox->boxes[i].width), lround(output_bbox->boxes[i].y + output_bbox->boxes[i].height)),
+ cv::Scalar(255, 0, 0), 2);
+ }
+
+ }
+
+ if (FLAG_offlineMode) bodyDetectOutputVideo.write(frm);
+}
+#if NV_MULTI_OBJECT_TRACKER
+void DoApp::DrawBBoxes(const cv::Mat &src, NvAR_TrackingBBoxes *output_bbox) {
+ cv::Mat frm;
+ if (FLAG_offlineMode)
+ frm = src.clone();
+ else
+ frm = src;
+
+ if (output_bbox) {
+ for (int i = 0; i < output_bbox->num_boxes; i++) {
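+      // Grow the color table on demand so that each tracking_id keeps a stable, randomly chosen color.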
+ if (colorCodes.size() <= output_bbox->boxes[i].tracking_id)
+ colorCodes.push_back(cv::Scalar(rand() & 0xFF, rand() & 0xFF, rand() & 0xFF));
+ auto color = colorCodes[output_bbox->boxes[i].tracking_id];
+ std::string text = "ID: " + std::to_string(output_bbox->boxes[i].tracking_id);
+
+ cv::rectangle(frm, cv::Point(lround(output_bbox->boxes[i].bbox.x), lround(output_bbox->boxes[i].bbox.y)),
+ cv::Point(lround(output_bbox->boxes[i].bbox.x + output_bbox->boxes[i].bbox.width), lround(output_bbox->boxes[i].bbox.y + output_bbox->boxes[i].bbox.height)),
+ color, 2);
+ cv::putText(frm, text, cv::Point(lround(output_bbox->boxes[i].bbox.x), lround(output_bbox->boxes[i].bbox.y) - 10), cv::FONT_HERSHEY_SIMPLEX, 0.9, color, 2);
+ }
+
+ }
+
+ if (FLAG_offlineMode) bodyDetectOutputVideo.write(frm);
+}
+#endif
void DoApp::writeVideoAndEstResults(const cv::Mat &frm, NvAR_BBoxes output_bboxes, NvAR_Point2f* keypoints) {
if (captureVideo) {
if (!capturedVideo.isOpened()) {
@@ -469,7 +559,54 @@ void DoApp::writeVideoAndEstResults(const cv::Mat &frm, NvAR_BBoxes output_bboxe
}
}
}
-
+#if NV_MULTI_OBJECT_TRACKER
+void DoApp::writeVideoAndEstResults(const cv::Mat &frm, NvAR_TrackingBBoxes output_bboxes, NvAR_Point2f* keypoints) {
+ if (captureVideo) {
+ if (!capturedVideo.isOpened()) {
+ const std::string currentCalendarTime = getCalendarTime();
+ const std::string capturedOutputFileName = currentCalendarTime + ".mp4";
+ getFPS();
+ if (frameTime) {
+ float fps = (float)(1.0 / frameTime);
+ capturedVideo.open(capturedOutputFileName, StringToFourcc(FLAG_captureCodec), fps,
+ cv::Size(frm.cols, frm.rows));
+ if (!capturedVideo.isOpened()) {
+ std::cout << "Error: Could not open video: \"" << capturedOutputFileName << "\"\n";
+ return;
+ }
+ if (FLAG_verbose) {
+ std::cout << "Capturing video started" << std::endl;
+ }
+ }
+ else { // If frameTime is 0.f, returns without writing the frame to the Video
+ return;
+ }
+ const std::string outputsFileName = currentCalendarTime + ".txt";
+ bodyEngineVideoOutputFile.open(outputsFileName, std::ios_base::out);
+ if (!bodyEngineVideoOutputFile.is_open()) {
+ std::cout << "Error: Could not open file: \"" << outputsFileName << "\"\n";
+ return;
+ }
+ std::string keyPointDetectionMode = (keypoints == NULL) ? "Off" : "On";
+ bodyEngineVideoOutputFile << "// BodyDetectOn, KeyPointDetect" << keyPointDetectionMode << "\n ";
+ bodyEngineVideoOutputFile
+ << "// kNumPeople, (bbox_x, bbox_y, bbox_w, bbox_h){ kNumPeople}, kNumLMs, [lm_x, lm_y]{kNumLMs}\n";
+ }
+ // Write each frame to the Video
+ capturedVideo << frm;
+ writeEstResults(bodyEngineVideoOutputFile, output_bboxes, keypoints);
+ }
+ else {
+ if (capturedVideo.isOpened()) {
+ if (FLAG_verbose) {
+ std::cout << "Capturing video ended" << std::endl;
+ }
+ capturedVideo.release();
+ if (bodyEngineVideoOutputFile.is_open()) bodyEngineVideoOutputFile.close();
+ }
+ }
+}
+#endif
void DoApp::writeEstResults(std::ofstream &outputFile, NvAR_BBoxes output_bboxes, NvAR_Point2f* keypoints) {
/**
* Output File Format :
@@ -512,7 +649,53 @@ void DoApp::writeEstResults(std::ofstream &outputFile, NvAR_BBoxes output_bboxes
outputFile << "\n";
}
-
+#if NV_MULTI_OBJECT_TRACKER
+void DoApp::writeEstResults(std::ofstream &outputFile, NvAR_TrackingBBoxes output_bboxes, NvAR_Point2f* keypoints) {
+ /**
+ * Output File Format :
+ * BodyDetectOn, KeyPointDetectOn
+   * kNumPeople, (bbox_x, bbox_y, bbox_w, bbox_h, tracking_id){ kNumPeople}, kNumKPs, [j_x, j_y]{kNumKPs}
+ */
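+  // Illustrative sample (two people, keypoint detection off; all values are made up):
+  //   1,0
+  //   2,100,80,60,120,0,220,85,58,118,1,0,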
+
+ int bodyDetectOn = (body_ar_engine.appMode == BodyEngine::mode::bodyDetection ||
+ body_ar_engine.appMode == BodyEngine::mode::keyPointDetection)
+ ? 1
+ : 0;
+ int keyPointDetectOn = (body_ar_engine.appMode == BodyEngine::mode::keyPointDetection)
+ ? 1
+ : 0;
+ outputFile << bodyDetectOn << "," << keyPointDetectOn << "\n";
+
+ if (bodyDetectOn && output_bboxes.num_boxes) {
+ // Append number of bodies detected in the current frame
+ outputFile << unsigned(output_bboxes.num_boxes) << ",";
+ // write outputbboxes to outputFile
+ for (size_t i = 0; i < output_bboxes.num_boxes; i++) {
+ int x1 = (int)output_bboxes.boxes[i].bbox.x, y1 = (int)output_bboxes.boxes[i].bbox.y,
+ width = (int)output_bboxes.boxes[i].bbox.width, height = (int)output_bboxes.boxes[i].bbox.height;
+ unsigned int tracking_id = output_bboxes.boxes[i].tracking_id;
+ outputFile << x1 << "," << y1 << "," << width << "," << height << "," << tracking_id << ",";
+ }
+ }
+ else {
+ outputFile << "0,";
+ }
+ if (keyPointDetectOn && output_bboxes.num_boxes) {
+ int numKeyPoints = body_ar_engine.getNumKeyPoints();
+ // Append number of keypoints
+ outputFile << numKeyPoints << ",";
+ // Append 2 * number of keypoint values
+ NvAR_Point2f *pt, *endPt;
+ for (endPt = (pt = (NvAR_Point2f *)keypoints) + numKeyPoints; pt < endPt; ++pt)
+ outputFile << pt->x << "," << pt->y << ",";
+ }
+ else {
+ outputFile << "0,";
+ }
+
+ outputFile << "\n";
+}
+#endif
void DoApp::writeFrameAndEstResults(const cv::Mat &frm, NvAR_BBoxes output_bboxes, NvAR_Point2f* keypoints) {
if (captureFrame) {
const std::string currentCalendarTime = getCalendarTime();
@@ -537,28 +720,47 @@ void DoApp::writeFrameAndEstResults(const cv::Mat &frm, NvAR_BBoxes output_bboxe
captureFrame = false;
}
}
-
+#if NV_MULTI_OBJECT_TRACKER
+void DoApp::writeFrameAndEstResults(const cv::Mat &frm, NvAR_TrackingBBoxes output_bboxes, NvAR_Point2f* keypoints) {
+ if (captureFrame) {
+ const std::string currentCalendarTime = getCalendarTime();
+ const std::string capturedFrame = currentCalendarTime + ".png";
+ cv::imwrite(capturedFrame, frm);
+ if (FLAG_verbose) {
+ std::cout << "Captured the frame" << std::endl;
+ }
+ // Write Body Engine Outputs
+ const std::string outputFilename = currentCalendarTime + ".txt";
+ std::ofstream outputFile;
+ outputFile.open(outputFilename, std::ios_base::out);
+ if (!outputFile.is_open()) {
+ std::cout << "Error: Could not open file: \"" << outputFilename << "\"\n";
+ return;
+ }
+ std::string keyPointDetectionMode = (keypoints == NULL) ? "Off" : "On";
+ outputFile << "// BodyDetectOn, KeyPointDetect" << keyPointDetectionMode << "\n";
+ outputFile << "// kNumPeople, (bbox_x, bbox_y, bbox_w, bbox_h){ kNumPeople}, kNumLMs, [lm_x, lm_y]{kNumLMs}\n";
+ writeEstResults(outputFile, output_bboxes, keypoints);
+ if (outputFile.is_open()) outputFile.close();
+ captureFrame = false;
+ }
+}
+#endif
void DoApp::DrawKeyPointLine(const cv::Mat& src, NvAR_Point2f* keypoints, int point1, int point2, int color) {
NvAR_Point2f point1_pos = *(keypoints + point1);
NvAR_Point2f point2_pos = *(keypoints + point2);
cv::line(src, cv::Point((int)point1_pos.x, (int)point1_pos.y), cv::Point((int)point2_pos.x, (int)point2_pos.y), cv_colors[color], 2);
}
-
-void DoApp::DrawKeyPointsAndEdges(const cv::Mat& src, NvAR_Point2f* keypoints, int numKeyPoints, NvAR_Rect* output_bbox) {
+#if NV_MULTI_OBJECT_TRACKER
+void DoApp::DrawKeyPointsAndEdges(const cv::Mat& src, NvAR_Point2f* keypoints, int numKeyPoints, NvAR_TrackingBBoxes* output_bbox) {
cv::Mat frm;
if (FLAG_offlineMode)
frm = src.clone();
else
frm = src;
NvAR_Point2f *pt, *endPt;
- for (endPt = (pt = (NvAR_Point2f *)keypoints) + numKeyPoints; pt < endPt; ++pt)
- cv::circle(frm, cv::Point(lround(pt->x), lround(pt->y)), 4, cv::Scalar(180, 180, 180), -1);
-
- if (output_bbox)
- cv::rectangle(frm, cv::Point(lround(output_bbox->x), lround(output_bbox->y)),
- cv::Point(lround(output_bbox->x + output_bbox->width), lround(output_bbox->y + output_bbox->height)),
- cv::Scalar(255, 0, 0), 2);
+ NvAR_Point2f* keypointsBatch8 = keypoints;
int pelvis = 0;
int left_hip = 1;
@@ -595,56 +797,187 @@ void DoApp::DrawKeyPointsAndEdges(const cv::Mat& src, NvAR_Point2f* keypoints, i
int left_thumb_tip = 32;
int right_thumb_tip = 33;
- // center body
- DrawKeyPointLine(frm, keypoints, pelvis, torso, kColorGreen);
- DrawKeyPointLine(frm, keypoints, torso, neck, kColorGreen);
- DrawKeyPointLine(frm, keypoints, neck, pelvis, kColorGreen);
-
- // right side
- DrawKeyPointLine(frm, keypoints, right_ankle, right_knee, kColorRed);
- DrawKeyPointLine(frm, keypoints, right_knee, right_hip, kColorRed);
- DrawKeyPointLine(frm, keypoints, right_hip, pelvis, kColorRed);
- DrawKeyPointLine(frm, keypoints, right_hip, right_shoulder, kColorRed);
- DrawKeyPointLine(frm, keypoints, right_shoulder, right_elbow, kColorRed);
- DrawKeyPointLine(frm, keypoints, right_elbow, right_wrist, kColorRed);
- DrawKeyPointLine(frm, keypoints, right_shoulder, neck, kColorRed);
-
- // right side hand and feet
- DrawKeyPointLine(frm, keypoints, right_wrist, right_pinky_knuckle, kColorRed);
- DrawKeyPointLine(frm, keypoints, right_wrist, right_middle_tip, kColorRed);
- DrawKeyPointLine(frm, keypoints, right_wrist, right_index_knuckle, kColorRed);
- DrawKeyPointLine(frm, keypoints, right_wrist, right_thumb_tip, kColorRed);
- DrawKeyPointLine(frm, keypoints, right_ankle, right_heel, kColorRed);
- DrawKeyPointLine(frm, keypoints, right_ankle, right_big_toe, kColorRed);
- DrawKeyPointLine(frm, keypoints, right_big_toe, right_small_toe, kColorRed);
-
- //left side
- DrawKeyPointLine(frm, keypoints, left_ankle, left_knee, kColorBlue);
- DrawKeyPointLine(frm, keypoints, left_knee, left_hip, kColorBlue);
- DrawKeyPointLine(frm, keypoints, left_hip, pelvis, kColorBlue);
- DrawKeyPointLine(frm, keypoints, left_hip, left_shoulder, kColorBlue);
- DrawKeyPointLine(frm, keypoints, left_shoulder, left_elbow, kColorBlue);
- DrawKeyPointLine(frm, keypoints, left_elbow, left_wrist, kColorBlue);
- DrawKeyPointLine(frm, keypoints, left_shoulder, neck, kColorBlue);
-
- // left side hand and feet
- DrawKeyPointLine(frm, keypoints, left_wrist, left_pinky_knuckle, kColorBlue);
- DrawKeyPointLine(frm, keypoints, left_wrist, left_middle_tip, kColorBlue);
- DrawKeyPointLine(frm, keypoints, left_wrist, left_index_knuckle, kColorBlue);
- DrawKeyPointLine(frm, keypoints, left_wrist, left_thumb_tip, kColorBlue);
- DrawKeyPointLine(frm, keypoints, left_ankle, left_heel, kColorBlue);
- DrawKeyPointLine(frm, keypoints, left_ankle, left_big_toe, kColorBlue);
- DrawKeyPointLine(frm, keypoints, left_big_toe, left_small_toe, kColorBlue);
-
- // head
- DrawKeyPointLine(frm, keypoints, neck, nose, kColorGreen);
- DrawKeyPointLine(frm, keypoints, nose, right_eye, kColorGreen);
- DrawKeyPointLine(frm, keypoints, right_eye, right_ear, kColorGreen);
- DrawKeyPointLine(frm, keypoints, nose, left_eye, kColorGreen);
- DrawKeyPointLine(frm, keypoints, left_eye, left_ear, kColorGreen);
+ for (int i = 0; i < body_ar_engine.output_tracking_bboxes.num_boxes; i++) {
+
+ keypoints = keypointsBatch8 + (i * 34);
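+    // Keypoints are packed per person: the 34 joints of person i start at offset i * 34.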
+
+ for (endPt = (pt = (NvAR_Point2f*)keypoints) + numKeyPoints; pt < endPt; ++pt)
+ cv::circle(frm, cv::Point(lround(pt->x), lround(pt->y)), 4, cv::Scalar(180, 180, 180), -1);
+
+ if (output_bbox) {
+ while (colorCodes.size()<= output_bbox->boxes[i].tracking_id)
+ colorCodes.push_back(cv::Scalar(rand() & 0xFF, rand() & 0xFF, rand() & 0xFF));
+ auto color = colorCodes[output_bbox->boxes[i].tracking_id];
+ std::string text = "ID: "+std::to_string(output_bbox->boxes[i].tracking_id);
+ cv::rectangle(frm, cv::Point(lround(output_bbox->boxes[i].bbox.x), lround(output_bbox->boxes[i].bbox.y)),
+ cv::Point(lround(output_bbox->boxes[i].bbox.x + output_bbox->boxes[i].bbox.width), lround(output_bbox->boxes[i].bbox.y + output_bbox->boxes[i].bbox.height)),
+ color, 2);
+ cv::putText(frm, text, cv::Point(lround(output_bbox->boxes[i].bbox.x), lround(output_bbox->boxes[i].bbox.y) - 10), cv::FONT_HERSHEY_SIMPLEX, 0.5, color, 2);
+ }
+
+
+ // center body
+ DrawKeyPointLine(frm, keypoints, pelvis, torso, kColorGreen);
+ DrawKeyPointLine(frm, keypoints, torso, neck, kColorGreen);
+ DrawKeyPointLine(frm, keypoints, neck, pelvis, kColorGreen);
+
+ // right side
+ DrawKeyPointLine(frm, keypoints, right_ankle, right_knee, kColorRed);
+ DrawKeyPointLine(frm, keypoints, right_knee, right_hip, kColorRed);
+ DrawKeyPointLine(frm, keypoints, right_hip, pelvis, kColorRed);
+ DrawKeyPointLine(frm, keypoints, right_hip, right_shoulder, kColorRed);
+ DrawKeyPointLine(frm, keypoints, right_shoulder, right_elbow, kColorRed);
+ DrawKeyPointLine(frm, keypoints, right_elbow, right_wrist, kColorRed);
+ DrawKeyPointLine(frm, keypoints, right_shoulder, neck, kColorRed);
+
+ // right side hand and feet
+ DrawKeyPointLine(frm, keypoints, right_wrist, right_pinky_knuckle, kColorRed);
+ DrawKeyPointLine(frm, keypoints, right_wrist, right_middle_tip, kColorRed);
+ DrawKeyPointLine(frm, keypoints, right_wrist, right_index_knuckle, kColorRed);
+ DrawKeyPointLine(frm, keypoints, right_wrist, right_thumb_tip, kColorRed);
+ DrawKeyPointLine(frm, keypoints, right_ankle, right_heel, kColorRed);
+ DrawKeyPointLine(frm, keypoints, right_ankle, right_big_toe, kColorRed);
+ DrawKeyPointLine(frm, keypoints, right_big_toe, right_small_toe, kColorRed);
+
+ //left side
+ DrawKeyPointLine(frm, keypoints, left_ankle, left_knee, kColorBlue);
+ DrawKeyPointLine(frm, keypoints, left_knee, left_hip, kColorBlue);
+ DrawKeyPointLine(frm, keypoints, left_hip, pelvis, kColorBlue);
+ DrawKeyPointLine(frm, keypoints, left_hip, left_shoulder, kColorBlue);
+ DrawKeyPointLine(frm, keypoints, left_shoulder, left_elbow, kColorBlue);
+ DrawKeyPointLine(frm, keypoints, left_elbow, left_wrist, kColorBlue);
+ DrawKeyPointLine(frm, keypoints, left_shoulder, neck, kColorBlue);
+
+ // left side hand and feet
+ DrawKeyPointLine(frm, keypoints, left_wrist, left_pinky_knuckle, kColorBlue);
+ DrawKeyPointLine(frm, keypoints, left_wrist, left_middle_tip, kColorBlue);
+ DrawKeyPointLine(frm, keypoints, left_wrist, left_index_knuckle, kColorBlue);
+ DrawKeyPointLine(frm, keypoints, left_wrist, left_thumb_tip, kColorBlue);
+ DrawKeyPointLine(frm, keypoints, left_ankle, left_heel, kColorBlue);
+ DrawKeyPointLine(frm, keypoints, left_ankle, left_big_toe, kColorBlue);
+ DrawKeyPointLine(frm, keypoints, left_big_toe, left_small_toe, kColorBlue);
+
+ // head
+ DrawKeyPointLine(frm, keypoints, neck, nose, kColorGreen);
+ DrawKeyPointLine(frm, keypoints, nose, right_eye, kColorGreen);
+ DrawKeyPointLine(frm, keypoints, right_eye, right_ear, kColorGreen);
+ DrawKeyPointLine(frm, keypoints, nose, left_eye, kColorGreen);
+ DrawKeyPointLine(frm, keypoints, left_eye, left_ear, kColorGreen);
+ }
if (FLAG_offlineMode) keyPointsOutputVideo.write(frm);
}
+#endif
+void DoApp::DrawKeyPointsAndEdges(const cv::Mat& src, NvAR_Point2f* keypoints, int numKeyPoints, NvAR_BBoxes* output_bbox) {
+ cv::Mat frm;
+ if (FLAG_offlineMode)
+ frm = src.clone();
+ else
+ frm = src;
+ NvAR_Point2f *pt, *endPt;
+ NvAR_Point2f* keypointsBatch8 = keypoints;
+
+ int pelvis = 0;
+ int left_hip = 1;
+ int right_hip = 2;
+ int torso = 3;
+ int left_knee = 4;
+ int right_knee = 5;
+ int neck = 6;
+ int left_ankle = 7;
+ int right_ankle = 8;
+ int left_big_toe = 9;
+ int right_big_toe = 10;
+ int left_small_toe = 11;
+ int right_small_toe = 12;
+ int left_heel = 13;
+ int right_heel = 14;
+ int nose = 15;
+ int left_eye = 16;
+ int right_eye = 17;
+ int left_ear = 18;
+ int right_ear = 19;
+ int left_shoulder = 20;
+ int right_shoulder = 21;
+ int left_elbow = 22;
+ int right_elbow = 23;
+ int left_wrist = 24;
+ int right_wrist = 25;
+ int left_pinky_knuckle = 26;
+ int right_pinky_knuckle = 27;
+ int left_middle_tip = 28;
+ int right_middle_tip = 29;
+ int left_index_knuckle = 30;
+ int right_index_knuckle = 31;
+ int left_thumb_tip = 32;
+ int right_thumb_tip = 33;
+
+ for (int i = 0; i < body_ar_engine.output_bboxes.num_boxes; i++) {
+
+ keypoints = keypointsBatch8 + (i * 34);
+
+ for (endPt = (pt = (NvAR_Point2f*)keypoints) + numKeyPoints; pt < endPt; ++pt)
+ cv::circle(frm, cv::Point(lround(pt->x), lround(pt->y)), 4, cv::Scalar(180, 180, 180), -1);
+
+ if (output_bbox) {
+ cv::rectangle(frm, cv::Point(lround(output_bbox->boxes[i].x), lround(output_bbox->boxes[i].y)),
+ cv::Point(lround(output_bbox->boxes[i].x + output_bbox->boxes[i].width), lround(output_bbox->boxes[i].y + output_bbox->boxes[i].height)),
+ cv::Scalar(255, 0, 0), 2);
+ }
+
+
+ // center body
+ DrawKeyPointLine(frm, keypoints, pelvis, torso, kColorGreen);
+ DrawKeyPointLine(frm, keypoints, torso, neck, kColorGreen);
+ DrawKeyPointLine(frm, keypoints, neck, pelvis, kColorGreen);
+
+ // right side
+ DrawKeyPointLine(frm, keypoints, right_ankle, right_knee, kColorRed);
+ DrawKeyPointLine(frm, keypoints, right_knee, right_hip, kColorRed);
+ DrawKeyPointLine(frm, keypoints, right_hip, pelvis, kColorRed);
+ DrawKeyPointLine(frm, keypoints, right_hip, right_shoulder, kColorRed);
+ DrawKeyPointLine(frm, keypoints, right_shoulder, right_elbow, kColorRed);
+ DrawKeyPointLine(frm, keypoints, right_elbow, right_wrist, kColorRed);
+ DrawKeyPointLine(frm, keypoints, right_shoulder, neck, kColorRed);
+
+ // right side hand and feet
+ DrawKeyPointLine(frm, keypoints, right_wrist, right_pinky_knuckle, kColorRed);
+ DrawKeyPointLine(frm, keypoints, right_wrist, right_middle_tip, kColorRed);
+ DrawKeyPointLine(frm, keypoints, right_wrist, right_index_knuckle, kColorRed);
+ DrawKeyPointLine(frm, keypoints, right_wrist, right_thumb_tip, kColorRed);
+ DrawKeyPointLine(frm, keypoints, right_ankle, right_heel, kColorRed);
+ DrawKeyPointLine(frm, keypoints, right_ankle, right_big_toe, kColorRed);
+ DrawKeyPointLine(frm, keypoints, right_big_toe, right_small_toe, kColorRed);
+
+ //left side
+ DrawKeyPointLine(frm, keypoints, left_ankle, left_knee, kColorBlue);
+ DrawKeyPointLine(frm, keypoints, left_knee, left_hip, kColorBlue);
+ DrawKeyPointLine(frm, keypoints, left_hip, pelvis, kColorBlue);
+ DrawKeyPointLine(frm, keypoints, left_hip, left_shoulder, kColorBlue);
+ DrawKeyPointLine(frm, keypoints, left_shoulder, left_elbow, kColorBlue);
+ DrawKeyPointLine(frm, keypoints, left_elbow, left_wrist, kColorBlue);
+ DrawKeyPointLine(frm, keypoints, left_shoulder, neck, kColorBlue);
+
+ // left side hand and feet
+ DrawKeyPointLine(frm, keypoints, left_wrist, left_pinky_knuckle, kColorBlue);
+ DrawKeyPointLine(frm, keypoints, left_wrist, left_middle_tip, kColorBlue);
+ DrawKeyPointLine(frm, keypoints, left_wrist, left_index_knuckle, kColorBlue);
+ DrawKeyPointLine(frm, keypoints, left_wrist, left_thumb_tip, kColorBlue);
+ DrawKeyPointLine(frm, keypoints, left_ankle, left_heel, kColorBlue);
+ DrawKeyPointLine(frm, keypoints, left_ankle, left_big_toe, kColorBlue);
+ DrawKeyPointLine(frm, keypoints, left_big_toe, left_small_toe, kColorBlue);
+
+ // head
+ DrawKeyPointLine(frm, keypoints, neck, nose, kColorGreen);
+ DrawKeyPointLine(frm, keypoints, nose, right_eye, kColorGreen);
+ DrawKeyPointLine(frm, keypoints, right_eye, right_ear, kColorGreen);
+ DrawKeyPointLine(frm, keypoints, nose, left_eye, kColorGreen);
+ DrawKeyPointLine(frm, keypoints, left_eye, left_ear, kColorGreen);
+
+ }
+
+ if (FLAG_offlineMode) keyPointsOutputVideo.write(frm);
+}
DoApp::Err DoApp::acquireFrame() {
Err err = errNone;
@@ -682,8 +1015,20 @@ DoApp::Err DoApp::acquireBodyBox() {
printf("]\n");
}
if (FLAG_captureOutputs) {
- writeFrameAndEstResults(frame, body_ar_engine.output_bboxes);
- writeVideoAndEstResults(frame, body_ar_engine.output_bboxes);
+#if NV_MULTI_OBJECT_TRACKER
+ if (FLAG_enablePeopleTracking) {
+ writeFrameAndEstResults(frame, body_ar_engine.output_tracking_bboxes);
+ writeVideoAndEstResults(frame, body_ar_engine.output_tracking_bboxes);
+ }
+ else {
+ writeFrameAndEstResults(frame, body_ar_engine.output_bboxes);
+ writeVideoAndEstResults(frame, body_ar_engine.output_bboxes);
+ }
+#else
+ writeFrameAndEstResults(frame, body_ar_engine.output_bboxes);
+ writeVideoAndEstResults(frame, body_ar_engine.output_bboxes);
+#endif
+
}
if (0 == n) return errNoBody;
@@ -701,18 +1046,29 @@ DoApp::Err DoApp::acquireBodyBox() {
DoApp::Err DoApp::acquireBodyBoxAndKeyPoints() {
Err err = errNone;
int numKeyPoints = body_ar_engine.getNumKeyPoints();
- NvAR_Rect output_bbox;
-  std::vector<NvAR_Point2f> keypoints2D(numKeyPoints);
-  std::vector<NvAR_Point3f> keypoints3D(numKeyPoints);
-  std::vector<NvAR_Quaternion> jointAngles(numKeyPoints);
+ NvAR_BBoxes output_bbox;
+ NvAR_TrackingBBoxes output_tracking_bbox;
+  std::vector<NvAR_Point2f> keypoints2D(numKeyPoints * 8);
+  std::vector<NvAR_Point3f> keypoints3D(numKeyPoints * 8);
+  std::vector<NvAR_Quaternion> jointAngles(numKeyPoints * 8);
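+  // Buffers are sized for the full tracking batch of 8 people, even when tracking is disabled and only one person is returned.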
+
#ifdef DEBUG_PERF_RUNTIME
auto start = std::chrono::high_resolution_clock::now();
#endif
+ unsigned n;
// get keypoints in original image resolution coordinate space
- unsigned n = body_ar_engine.acquireBodyBoxAndKeyPoints(frame, keypoints2D.data(), keypoints3D.data(),
- jointAngles.data(), output_bbox, 0);
+#if NV_MULTI_OBJECT_TRACKER
+ if (FLAG_enablePeopleTracking)
+ n = body_ar_engine.acquireBodyBoxAndKeyPoints(frame, keypoints2D.data(), keypoints3D.data(),
+ jointAngles.data(), &output_tracking_bbox, 0);
+
+ else
+#endif
+ n = body_ar_engine.acquireBodyBoxAndKeyPoints(frame, keypoints2D.data(), keypoints3D.data(),
+ jointAngles.data(), &output_bbox, 0);
+
#ifdef DEBUG_PERF_RUNTIME
auto end = std::chrono::high_resolution_clock::now();
@@ -734,17 +1090,37 @@ DoApp::Err DoApp::acquireBodyBoxAndKeyPoints() {
printf("]\n");
}
if (FLAG_captureOutputs) {
- writeFrameAndEstResults(frame, body_ar_engine.output_bboxes, keypoints2D.data());
- writeVideoAndEstResults(frame, body_ar_engine.output_bboxes, keypoints2D.data());
+#if NV_MULTI_OBJECT_TRACKER
+ if (FLAG_enablePeopleTracking) {
+ writeFrameAndEstResults(frame, body_ar_engine.output_tracking_bboxes, keypoints2D.data());
+ writeVideoAndEstResults(frame, body_ar_engine.output_tracking_bboxes, keypoints2D.data());
+ }
+ else {
+ writeFrameAndEstResults(frame, body_ar_engine.output_bboxes, keypoints2D.data());
+ writeVideoAndEstResults(frame, body_ar_engine.output_bboxes, keypoints2D.data());
+ }
+#else
+ writeFrameAndEstResults(frame, body_ar_engine.output_bboxes, keypoints2D.data());
+ writeVideoAndEstResults(frame, body_ar_engine.output_bboxes, keypoints2D.data());
+#endif
+
}
if (0 == n) return errNoBody;
#ifdef VISUALIZE
if (drawVisualization) {
- DrawKeyPointsAndEdges(frame, keypoints2D.data(), numKeyPoints, &output_bbox);
+#if NV_MULTI_OBJECT_TRACKER
+ if (FLAG_enablePeopleTracking) DrawKeyPointsAndEdges(frame, keypoints2D.data(), numKeyPoints, &output_tracking_bbox);
+ else
+#endif
+ DrawKeyPointsAndEdges(frame, keypoints2D.data(), numKeyPoints, &output_bbox);
if (FLAG_offlineMode) {
- DrawBBoxes(frame, &output_bbox);
+#if NV_MULTI_OBJECT_TRACKER
+ if (FLAG_enablePeopleTracking) DrawBBoxes(frame, &output_tracking_bbox);
+ else
+#endif
+ DrawBBoxes(frame, &output_bbox);
}
}
#endif // VISUALIZE
@@ -920,6 +1296,8 @@ DoApp::Err DoApp::run() {
}
cv::imshow(windowTitle, frame);
}
+
+
if (!FLAG_offlineMode) {
int n = cv::waitKey(1);
@@ -991,7 +1369,9 @@ int main(int argc, char **argv) {
if (FLAG_useCudaGraph) printf("Enable capturing cuda graph = %d\n", FLAG_useCudaGraph);
app.body_ar_engine.useCudaGraph(FLAG_useCudaGraph);
-
+#if NV_MULTI_OBJECT_TRACKER
+ app.body_ar_engine.enablePeopleTracking(FLAG_enablePeopleTracking, FLAG_shadowTrackingAge, FLAG_probationAge, FLAG_maxTargetsTracked);
+#endif
doErr = DoApp::errBodyModelInit;
if (FLAG_modelPath.empty()) {
printf("WARNING: Model path not specified. Please set --model_path=/path/to/trt/and/body/models, "
diff --git a/samples/BodyTrack/BodyTrack.exe b/samples/BodyTrack/BodyTrack.exe
index 6257e1e..cf24b32 100644
Binary files a/samples/BodyTrack/BodyTrack.exe and b/samples/BodyTrack/BodyTrack.exe differ
diff --git a/samples/BodyTrack/CMakeLists.txt b/samples/BodyTrack/CMakeLists.txt
index a023f8c..8bb2db9 100644
--- a/samples/BodyTrack/CMakeLists.txt
+++ b/samples/BodyTrack/CMakeLists.txt
@@ -4,12 +4,14 @@ set(SOURCE_FILES BodyEngine.cpp
)
set(HEADER_FILES BodyEngine.h)
-set(SOURCE_FILES ${SOURCE_FILES}
- ../../nvar/src/nvARProxy.cpp
- ../../nvar/src/nvCVImageProxy.cpp)
+if(MSVC)
+ set(SOURCE_FILES ${SOURCE_FILES}
+ ../../nvar/src/nvARProxy.cpp
+ ../../nvar/src/nvCVImageProxy.cpp)
-set(HEADER_FILES ${HEADER_FILES}
- ../utils/RenderingUtils.h)
+ set(HEADER_FILES ${HEADER_FILES}
+ ../utils/RenderingUtils.h)
+endif(MSVC)
# Set Visual Studio source filters
source_group("Source Files" FILES ${SOURCE_FILES})
@@ -21,6 +23,7 @@ target_include_directories(BodyTrack PUBLIC
${SDK_INCLUDES_PATH}
)
+if(MSVC)
target_link_libraries(BodyTrack PUBLIC
opencv346
utils_sample
@@ -29,11 +32,21 @@ target_link_libraries(BodyTrack PUBLIC
set(ARSDK_PATH_STR ${CMAKE_CURRENT_SOURCE_DIR}/../../bin)
set(OPENCV_PATH_STR ${CMAKE_CURRENT_SOURCE_DIR}/../external/opencv/bin)
set(PATH_STR "PATH=%PATH%" ${OPENCV_PATH_STR})
-set(CMD_ARG_STR "")
-set_target_properties(BodyTrack PROPERTIES
- FOLDER SampleApps
- VS_DEBUGGER_ENVIRONMENT "${PATH_STR}"
- VS_DEBUGGER_COMMAND_ARGUMENTS "${CMD_ARG_STR}"
+  set(CMD_ARG_STR "--model_path=\"${CMAKE_CURRENT_SOURCE_DIR}/../../bin/models\"")
+ set_target_properties(BodyTrack PROPERTIES
+ FOLDER SampleApps
+ VS_DEBUGGER_ENVIRONMENT "${PATH_STR}"
+ VS_DEBUGGER_COMMAND_ARGUMENTS "${CMD_ARG_STR}"
+ )
+elseif(UNIX)
+ find_package(PNG REQUIRED)
+ find_package(JPEG REQUIRED)
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pthread")
+ target_link_libraries(BodyTrack PUBLIC
+ nvARPose
+ NVCVImage
+ OpenCV
+ utils_sample
)
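+  # Note: the Linux build links the SDK's nvARPose/NVCVImage targets directly, whereas the MSVC
+  # branch above compiles the nvARProxy/nvCVImageProxy sources instead.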
-
+endif(MSVC)
diff --git a/samples/CMakeLists.txt b/samples/CMakeLists.txt
index fa5fdf2..e22fe00 100644
--- a/samples/CMakeLists.txt
+++ b/samples/CMakeLists.txt
@@ -6,3 +6,5 @@ target_link_libraries(utils_sample INTERFACE GLM)
add_subdirectory(external)
add_subdirectory(FaceTrack)
add_subdirectory(BodyTrack)
+add_subdirectory(GazeRedirect)
+add_subdirectory(ExpressionApp)
diff --git a/samples/ExpressionApp/BackEndOpenGL/FaceIO.cpp b/samples/ExpressionApp/BackEndOpenGL/FaceIO.cpp
new file mode 100644
index 0000000..c97e577
--- /dev/null
+++ b/samples/ExpressionApp/BackEndOpenGL/FaceIO.cpp
@@ -0,0 +1,2378 @@
+/*###############################################################################
+#
+# Copyright 2019-2021 NVIDIA Corporation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy of
+# this software and associated documentation files (the "Software"), to deal in
+# the Software without restriction, including without limitation the rights to
+# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+# the Software, and to permit persons to whom the Software is furnished to do so,
+# subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+###############################################################################*/
+
+#include "FaceIO.h"
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifdef _MSC_VER
+ #define strcasecmp _stricmp
+ #define strncasecmp _strnicmp
+#endif /* _MSC_VER */
+
+#define BAIL_IF_ERR(err) do { if ((err) != 0) { goto bail; } } while(0)
+#define BAIL_IF_ZERO(x, err, code) do { if ((x) == 0) { err = code; goto bail; } } while(0)
+#define BAIL_IF_NONZERO(x, err, code) do { if ((x) != 0) { err = code; goto bail; } } while(0)
+#define BAIL_IF_FALSE(x, err, code) do { if (!(x)) { err = code; goto bail; } } while(0)
+#define BAIL_IF_TRUE(x, err, code) do { if ((x)) { err = code; goto bail; } } while(0)
+#define BAIL_IF_NULL(x, err, code) do { if ((void*)(x) == NULL) { err = code; goto bail; } } while(0)
+#define BAIL_IF_NEGATIVE(x, err, code) do { if ((x) < 0) { err = code; goto bail; } } while(0)
+#define BAIL_IF_NONPOSITIVE(x, err, code) do { if (!((x) > 0)) { err = code; goto bail; } } while(0)
+#define BAIL(err, code) do { err = code; goto bail; } while(0)
+
+#ifndef __BYTE_ORDER__ /* How bytes are packed into a 32 bit word */
+ #define __ORDER_LITTLE_ENDIAN__ 3210 /* First byte in the least significant position */
+ #define __ORDER_BIG_ENDIAN__ 0123 /* First byte in the most significant position */
+ #ifdef _MSC_VER
+ #define __BYTE_ORDER__ __ORDER_LITTLE_ENDIAN__
+ #endif /* _MSC_VER */
+#endif /* __BYTE_ORDER__ */
+
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ #define IOFOURCC(w, o, r, d) (((w) << 24) | ((o) << 16) | ((r) << 8) | ((d) << 0))
+#elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ #define IOFOURCC(w, o, r, d) (((d) << 24) | ((r) << 16) | ((o) << 8) | ((w) << 0))
+#endif /* __BYTE_ORDER__ */
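+// For example, IOFOURCC('N', 'F', 'A', 'C') always places 'N' in the byte that is written first,
+// so the tag reads "NFAC" when the file is inspected as raw bytes, on either endianness.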
+
+/********************************************************************************
+ ********************************************************************************
+ ********************************************************************************
+ ***** Utilities *****
+ ********************************************************************************
+ ********************************************************************************
+ ********************************************************************************/
+
+/********************************************************************************
+ * HasSuffix
+ ********************************************************************************/
+
+static bool HasSuffix(const char *str, const char *suf) {
+ size_t strSize = strlen(str),
+ sufSize = strlen(suf);
+ if (strSize < sufSize)
+ return false;
+ return (0 == strcasecmp(suf, str + strSize - sufSize));
+}
+
+/********************************************************************************
+ * CopyDoubleToSingleVector
+ * This works in-place.
+ ********************************************************************************/
+
+static void CopyDoubleToSingleVector(const double *d, float *s, uint32_t n) {
+ for (float *send = s + n; s != send;) *s++ = (float)(*d++);
+}
+
+/********************************************************************************
+ * CopyUInt32to16Vector
+ * This works in-place.
+ ********************************************************************************/
+
+static void CopyUInt32to16Vector(const uint32_t *fr, uint16_t *to, uint32_t n) {
+ for (uint16_t *toEnd = to + n; to != toEnd;) *to++ = (uint16_t)(*fr++);
+}
+
+/********************************************************************************
+ * FaceIOErrorStringFromCode
+ ********************************************************************************/
+
+const char* FaceIOErrorStringFromCode(FaceIOErr err) {
+ struct ErrStr { FaceIOErr err; const char *str; };
+ static const ErrStr lut[] = {
+ { kIOErrNone, "no error" },
+ { kIOErrFileNotFound, "The file was not found" },
+ { kIOErrFileOpen, "The file could not be opened" },
+ { kIOErrEOF, "A premature end-of-file was encountered" },
+ { kIOErrRead, "An error occurred while reading a file" },
+ { kIOErrWrite, "An error occurred while writing a file" },
+ { kIOErrSyntax, "A parsing syntax error has been encountered" },
+ { kIOErrFormat, "The file has an unknown format" },
+ { kIOErrParameter, "The parameter has an invalid value" },
+ };
+ for (const ErrStr *p = lut; p != &lut[sizeof(lut)/sizeof(lut[0])]; ++p)
+ if (p->err == err)
+ return p->str;
+ static char msg[16];
+ snprintf(msg, sizeof(msg), "ERR#%d", (int)err);
+ return msg;
+}
+
+/********************************************************************************
+ * PrintIOError
+ ********************************************************************************/
+
+static void PrintIOError(const char *file, FaceIOErr err) {
+ if (kIOErrNone == err) return;
+ printf("\"%s\": %s\n", file, FaceIOErrorStringFromCode(err));
+}
+
+/********************************************************************************
+ * PrintUnknownFormatMessage
+ ********************************************************************************/
+
+static void PrintUnknownFormatMessage(const char *file, const char *msg = nullptr) {
+ if (!msg) msg = "";
+ printf("\"%s\": Unknown format %s\n", file, msg);
+}
+
+/********************************************************************************
+ * ReadFileIntoString
+ ********************************************************************************/
+
+static FaceIOErr ReadFileIntoString(const char *file, bool isText, std::string &str) {
+ FaceIOErr err = kIOErrNone;
+ FILE *fd = NULL;
+ long z;
+ size_t y;
+ #ifndef _MSC_VER
+ fd = fopen(file, (isText ? "r" : "rb"));
+ #else /* _MSC_VER */
+ BAIL_IF_NONZERO(fopen_s(&fd, file, (isText ? "r" : "rb")), err, kIOErrFileNotFound);
+ #endif /* _MSC_VER */
+ BAIL_IF_NULL(fd, err, kIOErrFileNotFound);
+ fseek(fd, 0L, SEEK_END);
+ z = ftell(fd);
+ BAIL_IF_NEGATIVE(z, err, kIOErrRead); /* If there was a seek error, return an appropriate error code */
+ fseek(fd, 0L, SEEK_SET);
+ str.resize((unsigned long)z);
+ y = fread(&str[0], 1, z, fd); /* y differs from z when reading text on Windows, .. */
+ str.resize(y); /* ... because CRLF --> NL */
+bail:
+ if (fd) fclose(fd);
+ return err;
+}
+
+typedef struct IbugSfmMapper {
+ unsigned numLandmarks;
+ uint16_t landmarkMap[50][2];
+ uint16_t rightContour[8];
+ uint16_t leftContour[8];
+}IbugSfmMapper;
+
+const IbugSfmMapper ibugMapping = {
+ 68,
+ // Landmarks Mapping for 68 points
+ {
+ // 1 to 8 are the right contour landmarks
+ {9, 33}, // chin bottom
+ // 10 to 17 are the left contour landmarks
+ {18, 225}, // right eyebrow outer-corner (18)
+ {19, 229}, // right eyebrow between middle and outer corner
+ {20, 233}, // right eyebrow middle, vertical middle (20)
+ {21, 2086}, // right eyebrow between middle and inner corner
+ {22, 157}, // right eyebrow inner-corner (19)
+ {23, 590}, // left eyebrow inner-corner (23)
+ {24, 2091}, // left eyebrow between inner corner and middle
+ {25, 666}, // left eyebrow middle (24)
+ {26, 662}, // left eyebrow between middle and outer corner
+ {27, 658}, // left eyebrow outer-corner (22)
+ {28, 2842}, // bridge of the nose (parallel to upper eye lids)
+ {29, 379}, // middle of the nose, a bit below the lower eye lids
+ {30, 272}, // above nose-tip (1cm or so)
+ {31, 114}, // nose-tip (3)
+ {32, 100}, // right nostril, below nose, nose-lip junction
+ {33, 2794}, // nose-lip junction
+ {34, 270}, // nose-lip junction (28)
+ {35, 2797}, // nose-lip junction
+ {36, 537}, // left nostril, below nose, nose-lip junction
+ {37, 177}, // right eye outer-corner (1)
+ {38, 172}, // right eye pupil top right (from subject's perspective)
+ {39, 191}, // right eye pupil top left
+ {40, 181}, // right eye inner-corner (5)
+ {41, 173}, // right eye pupil bottom left
+ {42, 174}, // right eye pupil bottom right
+ {43, 614}, // left eye inner-corner (8)
+ {44, 624}, // left eye pupil top right
+ {45, 605}, // left eye pupil top left
+ {46, 610}, // left eye outer-corner (2)
+ {47, 607}, // left eye pupil bottom left
+ {48, 606}, // left eye pupil bottom right
+ {49, 398}, // right mouth corner (12)
+ {50, 315}, // upper lip right top outer
+ {51, 413}, // upper lip middle top right
+ {52, 329}, // upper lip middle top (14)
+ {53, 825}, // upper lip middle top left
+ {54, 736}, // upper lip left top outer
+ {55, 812}, // left mouth corner (13)
+ {56, 841}, // lower lip left bottom outer
+ {57, 693}, // lower lip middle bottom left
+ {58, 411}, // lower lip middle bottom (17)
+ {59, 264}, // lower lip middle bottom right
+ {60, 431}, // lower lip right bottom outer
+ // 61 not defined - would be right inner corner of the mouth
+ {62, 416}, // upper lip right bottom outer
+ {63, 423}, // upper lip middle bottom
+ {64, 828}, // upper lip left bottom outer
+ // 65 not defined - would be left inner corner of the mouth
+ {66, 817}, // lower lip left top outer
+ {67, 442}, // lower lip middle top
+ {68, 404}, // lower lip right top outer
+ },
+ {1, 2, 3, 4, 5, 6, 7, 8}, // ibug right contour points
+ {10, 11, 12, 13, 14, 15, 16, 17} // ibug left contour points
+};
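+// Each landmarkMap pair is {iBUG 68-point landmark number, face-model vertex index}; the jaw-contour
+// landmarks 1-8 and 10-17 have no fixed vertex and are listed in rightContour/leftContour instead.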
+
+/********************************************************************************
+ ********************************************************************************
+ * NVF Encapsulated object definitions
+ ********************************************************************************
+ ********************************************************************************/
+
+/********************************************************************************
+ * Object encapsulation. The type is usually a FOURCC.
+ ********************************************************************************/
+
+struct EOTypeSize {
+ uint32_t type, size;
+};
+
+/********************************************************************************
+ * FOURCC types
+ ********************************************************************************/
+
+#define FOURCC_ADJACENT_FACES IOFOURCC('A', 'J', 'F', 'C')
+#define FOURCC_ADJACENT_VERTICES IOFOURCC('A', 'J', 'V', 'X')
+#define FOURCC_BASIS IOFOURCC('B', 'S', 'I', 'S')
+#define FOURCC_BLEND_SHAPES IOFOURCC('B', 'L', 'N', 'D')
+#define FOURCC_COLOR IOFOURCC('C', 'O', 'L', 'R')
+#define FOURCC_EIGENVALUES IOFOURCC('E', 'I', 'V', 'L')
+#define FOURCC_FILE_TYPE IOFOURCC('N', 'F', 'A', 'C')
+#define FOURCC_IBUG IOFOURCC('I', 'B', 'U', 'G')
+#define FOURCC_LANDMARK_MAP IOFOURCC('L', 'M', 'R', 'K')
+#define FOURCC_LEFT_CONTOUR IOFOURCC('L', 'C', 'T', 'R')
+#define FOURCC_MEAN IOFOURCC('M', 'E', 'A', 'N')
+#define FOURCC_MODEL IOFOURCC('M', 'O', 'D', 'L')
+#define FOURCC_MODEL_CONTOUR IOFOURCC('M', 'C', 'T', 'R')
+#define FOURCC_NAME IOFOURCC('N', 'A', 'M', 'E')
+#define FOURCC_RIGHT_CONTOUR IOFOURCC('R', 'C', 'T', 'R')
+#define FOURCC_SHAPE IOFOURCC('S', 'H', 'A', 'P')
+#define FOURCC_TEXTURE_COORDS IOFOURCC('T', 'X', 'C', 'O')
+#define FOURCC_EOTOC IOFOURCC('T', 'O', 'C', '0')
+#define FOURCC_TOPOLOGY IOFOURCC('T', 'O', 'P', 'O')
+#define FOURCC_TRIANGLE_LIST IOFOURCC('T', 'R', 'N', 'G')
+#define FOURCC_NVLM IOFOURCC('N', 'V', 'L', 'M')
+#define FOURCC_PARTITIONS IOFOURCC('P', 'R', 'T', 'S')
+#define FOURCC_PART IOFOURCC('P', 'A', 'R', 'T')
+#define FOURCC_MATERIAL IOFOURCC('M', 'T', 'R', 'L')
+
+
+/********************************************************************************
+ * NVFFileHeader
+ ********************************************************************************/
+
+struct NVFFileHeader {
+ enum { // Illustrate 2 bit packets packed into a byte by the specified scheme
+ LITTLE_ENDIAN_CODE = 0xE4, // = 11 10 01 00 // or 0x10 = 0001 0000 -- 4 bit packets
+ BIG_ENDIAN_CODE = 0x1B // = 00 01 10 11 // or 0x01 = 0000 0001
+ };
+ unsigned type;
+ unsigned size;
+ unsigned char endian, sizeBits, indexBits, zero;
+ unsigned tocLoc;
+
+ NVFFileHeader() {
+ type = FOURCC_FILE_TYPE;
+ size = sizeof(*this) - sizeof(type) - sizeof(size);
+ endian = LITTLE_ENDIAN_CODE;
+ sizeBits = 32; // 32 bits for the object size
+ indexBits = 16; // 16 bits for indices
+ zero = 0;
+ tocLoc = 0;
+ }
+ void clear() { memset(this, 0, sizeof(*this)); }
+ static unsigned EOTOCOffset() {
+ return (char *)(&((NVFFileHeader *)nullptr)->tocLoc) - (char *)(&((NVFFileHeader *)nullptr)->type);
+ }
+};
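+// EOTOCOffset() is the byte offset of tocLoc within the header; EOWriter::writeTocAndClose() (below)
+// seeks back to that offset to patch in the final location of the table of contents.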
+namespace { // anonymous
+
+/********************************************************************************
+ ********************************************************************************
+ * EOWriter - Encapsulated Object Writer
+ ********************************************************************************
+ ********************************************************************************/
+
+class EOWriter
+{
+public:
+
+ EOWriter();
+ ~EOWriter();
+
+ /// Open a file for writing.
+ /// @param[in] fileName the name of the file to be written to.
+ /// @return kIOErrNone if the file was opened successfully.
+ /// @return kIOErrWrite if there were problems opening the file for writing.
+ FaceIOErr open(const char *fileName);
+
+ /// Close the file without writing the table of contents, and flush the output.
+ FaceIOErr close();
+
+ /// Write the table of contents and close the file.
+ /// @param[in] tocRefOffset the offset from the beginning of the file where the location of the TOC is stored.
+ /// @return kIOErrNone if the operation was completed successfully.
+ FaceIOErr writeTocAndClose(unsigned tocRefOffset);
+
+ /// Get the file descriptor.
+ /// @return the file descriptor, or NULL if there is no open file.
+ FILE* fileDescriptor();
+
+ /// Write the object encapsulation header.
+ /// @param[in] type a FOURCC 4-byte code.
+ /// @param[in] size a 4-byte size for the remainder of the object.
+ /// @param[in] tag a tag for the TOC. 0 makes no entry into the TOC.
+ /// Usually the outer objects are in the TOC. But one may also include
+ /// subobjects in the TOC, or no objects at all.
+ /// @return kIOErrNone if the operation was completed successfully.
+ FaceIOErr writeEncapsulationHeader(unsigned type, unsigned size, unsigned tag = 0);
+
+ /// Write an encapsulated opaque object.
+ /// @param[in] type a FOURCC 4-byte code.
+ /// @param[in] size the size in bytes of the data to be written.
+ /// @param[in] data the data to be written.
+ /// @param[in] tag a tag for the TOC. 0 makes no entry into the TOC.
+ /// Usually the outer objects are in the TOC. But one may also include
+ /// subobjects in the TOC, or no objects at all.
+ /// @return kIOErrNone if the operation was completed successfully.
+ FaceIOErr writeOpaqueObject(unsigned type, unsigned size, const void *data, unsigned tag = 0);
+ FaceIOErr writeOpaqueObject(unsigned type, size_t size, const void *data, unsigned tag = 0)
+ { return writeOpaqueObject(type, (unsigned)size, data, tag); }
+
+ /// Write opaque data
+ /// @param[in] size the size in bytes of the data to be written.
+ /// @param[in] data the data to be written.
+ /// @return kIOErrNone if the operation was completed successfully.
+ FaceIOErr writeData(unsigned size, const void *data);
+ FaceIOErr writeData(size_t size, const void *data) { return writeData((unsigned)size, data); }
+
+private:
+ struct Impl;
+ Impl *pimpl;
+};
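+// Minimal usage sketch (illustrative only; the file name and payload are placeholders):
+//   EOWriter w;
+//   if (kIOErrNone == w.open("face.nvf")) {
+//     NVFFileHeader hdr;
+//     w.writeData(sizeof(hdr), &hdr);                            // header; tocLoc is patched later
+//     w.writeOpaqueObject(FOURCC_NAME, strlen("face"), "face", FOURCC_NAME);
+//     w.writeTocAndClose(NVFFileHeader::EOTOCOffset());          // append TOC and back-patch tocLoc
+//   }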
+
+struct EOTOC {
+ unsigned _tag;
+ unsigned _offset;
+ EOTOC(unsigned tag, unsigned offset) {
+ _tag = tag;
+ _offset = offset;
+ }
+};
+
+struct EOEncapsulation {
+ unsigned _type, _size;
+ EOEncapsulation(unsigned type, unsigned size) {
+ _type = type;
+ _size = size;
+ }
+};
+
+struct EOWriter::Impl {
+ FILE *fd;
+  std::vector<EOTOC> toc;
+
+ void addToEOTOC(unsigned tag) {
+ if (tag) toc.emplace_back(tag, (unsigned)ftell(fd));
+ }
+};
+
+EOWriter::EOWriter() {
+ pimpl = new EOWriter::Impl;
+ pimpl->fd = nullptr;
+}
+
+EOWriter::~EOWriter() {
+ if (pimpl->fd) fclose(pimpl->fd);
+ delete pimpl;
+}
+
+FILE *EOWriter::fileDescriptor() { return pimpl->fd; }
+
+FaceIOErr EOWriter::open(const char *fileName) {
+ pimpl->fd = nullptr;
+ #ifndef _MSC_VER
+ pimpl->fd = fopen(fileName, "wb");
+ #else /* _MSC_VER */
+ if (0 != fopen_s(&pimpl->fd, fileName, "wb"))
+ pimpl->fd = nullptr;
+ #endif /* _MSC_VER */
+ return pimpl->fd ? kIOErrNone : kIOErrWrite;
+}
+
+FaceIOErr EOWriter::close() {
+ fclose(pimpl->fd);
+ pimpl->fd = nullptr;
+ return kIOErrNone;
+}
+
+FaceIOErr EOWriter::writeEncapsulationHeader(unsigned type, unsigned size, unsigned tag) {
+ EOEncapsulation eo(type, size);
+ pimpl->addToEOTOC(tag);
+ size_t z = fwrite(&eo, sizeof(eo._type), sizeof(eo) / sizeof(eo._type), pimpl->fd);
+ return (z == (sizeof(eo) / sizeof(eo._type))) ? kIOErrNone : kIOErrWrite;
+}
+
+FaceIOErr EOWriter::writeData(unsigned size, const void *data) {
+ return (1 == fwrite(data, size, 1, pimpl->fd)) ? kIOErrNone : kIOErrWrite;
+}
+
+FaceIOErr EOWriter::writeOpaqueObject(unsigned type, unsigned size, const void *data, unsigned tag) {
+ FaceIOErr err = writeEncapsulationHeader(type, size, tag);
+ if (!err) err = writeData(size, data);
+ return err;
+}
+
+FaceIOErr EOWriter::writeTocAndClose(unsigned tocRefOffset) {
+ FaceIOErr err = kIOErrNone;
+ unsigned numRecords = (unsigned)pimpl->toc.size();
+ if (numRecords) {
+ unsigned offset = ftell(pimpl->fd),
+ size = sizeof(unsigned) + numRecords * sizeof(EOTOC);
+ err = writeEncapsulationHeader(FOURCC_EOTOC, size, 0);
+ if (kIOErrNone == err) {
+ unsigned recordSize = sizeof(EOTOC);
+ BAIL_IF_ERR(
+ err = (1 == fwrite(&recordSize, sizeof(recordSize), 1, pimpl->fd))
+ ? kIOErrNone
+ : kIOErrWrite);
+ BAIL_IF_ERR(err = (numRecords == fwrite(pimpl->toc.data(), sizeof(EOTOC),
+ numRecords, pimpl->fd))
+ ? kIOErrNone
+ : kIOErrWrite);
+ BAIL_IF_NONZERO(fseek(pimpl->fd, tocRefOffset, SEEK_SET), err,
+ kIOErrWrite);
+ BAIL_IF_ERR(err = (1 == fwrite(&offset, sizeof(offset), 1, pimpl->fd))
+ ? kIOErrNone
+ : kIOErrWrite);
+ }
+ }
+ fclose(pimpl->fd);
+ pimpl->fd = nullptr;
+bail:
+ return err;
+}
+
+/********************************************************************************
+ ********************************************************************************
+ * JSON reader and writer
+ ********************************************************************************
+ ********************************************************************************/
+
+enum JSONNodeType {
+ kJSONObject,
+ kJSONArray,
+ kJSONString,
+ kJSONNumber,
+ kJSONMember,
+ kJSONBoolean,
+ kJSONNull
+};
+
+struct JSONInfo {
+ void *userData;
+ JSONNodeType type;
+ const char *value;
+ double number;
+};
+
+typedef FaceIOErr (*JSONOpenNodeProc)(JSONInfo *info);
+typedef FaceIOErr (*JSONCloseNodeProc)(JSONInfo *info);
+
+class JSONReader {
+ public:
+ JSONReader(JSONOpenNodeProc openNode, JSONCloseNodeProc closeNode);
+ ~JSONReader() {}
+
+ /// Parse from a string.
+ /// @param[in] str the string to be parsed.
+ /// @param[in] len the length of the string to be parsed; if 0, it is
+ /// assumed to be NULL-terminated.
+ /// @param[in,out] state the parser state.
+ /// @return kIOErrNone if the operation was completed successfully.
+ /// @return kIOErrSyntax if a syntax error occurred during parsing.
+ /// @return kIOErrEOF if the file ended sooner than expected.
+ FaceIOErr parse(const char *str, size_t len, void *state);
+
+ /// Parse from a file.
+ /// @param[in] fileName the file to be parsed.
+ /// @param[in,out] state the parser state.
+ /// @return kIOErrNone if the operation was completed successfully.
+ /// @return kIOErrSyntax if a syntax error occurred during parsing.
+ /// @return kIOErrEOF if the file ended sooner than expected.
+ FaceIOErr parse(const char *fileName, void *state);
+
+ private:
+ JSONOpenNodeProc _openNode;
+ JSONCloseNodeProc _closeNode;
+ const char *_jsonStr;
+ size_t _jsonLen;
+ JSONInfo _infoBack;
+ void skipWhiteSpace();
+ FaceIOErr readNumber();
+ FaceIOErr readString();
+ FaceIOErr readValue();
+ FaceIOErr readArray();
+ FaceIOErr readObject();
+ FaceIOErr getString(std::string *str);
+};
+
+class JSONWriter {
+ public:
+ JSONWriter();
+ ~JSONWriter();
+ FaceIOErr open(const char *file);
+ FaceIOErr close();
+ void openObject(const char *tag = nullptr);
+ void closeObject();
+ void openArray(const char *tag = nullptr);
+ void closeArray();
+ void writeNumericArray(unsigned n, const float *v, unsigned maxRow = 0, const char *tag = nullptr);
+ void writeNumericArray(unsigned n, const double *v, unsigned maxRow = 0, const char *tag = nullptr);
+ void writeNumericArray(unsigned n, const int *v, unsigned maxRow = 0, const char *tag = nullptr);
+ void writeNumericArray(unsigned n, const unsigned short *v, unsigned maxRow = 0, const char *tag = nullptr);
+ void writeNumericArray(unsigned n, const unsigned char *v, unsigned maxRow = 0, const char *tag = nullptr);
+ void writeNumber(double number, const char *tag = nullptr);
+ void writeString(const char *str, const char *tag = nullptr);
+ void writeBool(bool value, const char *tag = nullptr);
+ void writeNull(const char *tag = nullptr);
+ private:
+ class Impl;
+ Impl *pimpl;
+};
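+// Minimal usage sketch (illustrative only; the file name and values are placeholders):
+//   JSONWriter jw;
+//   if (kIOErrNone == jw.open("expressions.json")) {
+//     float coeffs[3] = { 0.1f, 0.2f, 0.3f };
+//     jw.openObject();
+//     jw.writeString("faceExpression", "type");
+//     jw.writeNumericArray(3, coeffs, 0, "coefficients");
+//     jw.closeObject();
+//     jw.close();
+//   }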
+
+
+JSONReader::JSONReader(JSONOpenNodeProc openNode, JSONCloseNodeProc closeNode) {
+ _openNode = openNode;
+ _closeNode = closeNode;
+ _jsonStr = nullptr;
+ _jsonLen = 0;
+ _infoBack.userData = nullptr;
+ _infoBack.type = kJSONNull;
+ _infoBack.value = nullptr;
+ _infoBack.number = NAN;
+}
+
+void JSONReader::skipWhiteSpace() {
+ for (; _jsonLen && isspace(*_jsonStr); ++_jsonStr, --_jsonLen) {}
+}
+
+FaceIOErr JSONReader::getString(std::string *str) {
+ FaceIOErr err = kIOErrNone;
+ const char *s;
+ char c, quote;
+ size_t n;
+
+ str->clear();
+ BAIL_IF_FALSE(_jsonLen > 2, err, kIOErrEOF); /* not enough characters */
+ n = _jsonLen;
+ c = *(s = _jsonStr);
+ BAIL_IF_FALSE(('"' == c || '\'' == c), err, kIOErrSyntax); /* missing quotes */
+ quote = c;
+ for (--n, ++s; n--; s++)
+ if (quote == *s) /* TODO: check for escapes */
+ break;
+ n = s - _jsonStr + 1;
+ BAIL_IF_FALSE(n >= 2, err, kIOErrEOF); /* not enough characters */
+ str->assign(_jsonStr + 1, n - 2);
+ _jsonStr += n;
+ _jsonLen -= n;
+
+bail:
+ if (kIOErrSyntax == err) printf("Expecting a string, but found no quotes\n");
+ return err;
+}
+
+FaceIOErr JSONReader::readString() {
+ FaceIOErr err;
+ std::string str;
+
+ BAIL_IF_ERR(err = getString(&str));
+ _infoBack.type = kJSONString;
+ _infoBack.value = str.c_str();
+ BAIL_IF_ERR(err = (*_openNode)(&_infoBack));
+ err = (*_closeNode)(&_infoBack);
+
+bail:
+ _infoBack.value = nullptr; /* Make Coverity happy */
+ return err;
+}
+
+FaceIOErr JSONReader::readObject() {
+ FaceIOErr err;
+ std::string str;
+
+ _infoBack.type = kJSONObject; /* Open an object node */
+ _infoBack.value = nullptr;
+ BAIL_IF_ERR(err = (*_openNode)(&_infoBack));
+ BAIL_IF_FALSE('{' == _jsonStr[0], err, kIOErrSyntax); /* This is where we always start */
+ --_jsonLen;
+ BAIL_IF_ZERO(_jsonLen, err, kIOErrEOF);
+ ++_jsonStr;
+ skipWhiteSpace();
+
+ if ('}' != _jsonStr[0]) {
+ do {
+ skipWhiteSpace();
+ BAIL_IF_ERR(err = getString(&str)); /* Every member must have a name */
+ skipWhiteSpace();
+ BAIL_IF_ZERO(_jsonLen, err, kIOErrEOF);
+ BAIL_IF_FALSE(':' == _jsonStr[0], err, kIOErrSyntax); /* Followed by a colon */
+ --_jsonLen;
+ BAIL_IF_ZERO(_jsonLen, err, kIOErrEOF);
+ ++_jsonStr;
+ skipWhiteSpace();
+ _infoBack.type = kJSONMember; /* Start a new member */
+ _infoBack.value = str.c_str();
+ BAIL_IF_ERR(err = (*_openNode)(&_infoBack));
+ BAIL_IF_ERR(err = readValue()); /* Get the member */
+ _infoBack.type = kJSONMember; /* Close the member */
+ BAIL_IF_ERR(err = (*_closeNode)(&_infoBack));
+ skipWhiteSpace();
+ BAIL_IF_ZERO(_jsonLen, err, kIOErrEOF);
+ if ('}' == _jsonStr[0]) /* Look for ending */
+ break; /* Normal object exit */
+ BAIL_IF_FALSE(',' == _jsonStr[0], err, kIOErrNotValue); /* Comma-separated members */
+ --_jsonLen;
+ BAIL_IF_ZERO(_jsonLen, err, kIOErrEOF);
+ ++_jsonStr;
+ } while (1);
+ }
+
+ --_jsonLen; /* Always ('}' == _jsonStr[0]) here */
+ ++_jsonStr;
+ _infoBack.type = kJSONObject; /* Close the object node */
+ _infoBack.value = nullptr;
+ err = (*_closeNode)(&_infoBack);
+
+bail:
+ _infoBack.value = nullptr; /* Make Coverity happy */
+ return err;
+}
+
+FaceIOErr JSONReader::readArray() {
+ FaceIOErr err;
+
+ _infoBack.type = kJSONArray;
+ _infoBack.value = nullptr;
+ BAIL_IF_ERR(err = (*_openNode)(&_infoBack));
+ BAIL_IF_FALSE('[' == _jsonStr[0], err, kIOErrSyntax); /* We always enter in this state */
+ --_jsonLen;
+ BAIL_IF_ZERO(_jsonLen, err, kIOErrEOF);
+ ++_jsonStr;
+ skipWhiteSpace();
+
+ if (']' != _jsonStr[0]) {
+ do {
+ skipWhiteSpace();
+ BAIL_IF_ERR(err = readValue());
+ skipWhiteSpace();
+ BAIL_IF_ZERO(_jsonLen, err, kIOErrEOF);
+ if (']' == _jsonStr[0]) break;
+ BAIL_IF_FALSE(',' == _jsonStr[0], err, kIOErrEOF);
+ --_jsonLen;
+ BAIL_IF_ZERO(_jsonLen, err, kIOErrEOF);
+ ++_jsonStr;
+ } while (1);
+ }
+
+ --_jsonLen; /* Always (']' == _jsonStr[0]) here */
+ ++_jsonStr;
+ _infoBack.type = kJSONArray;
+ _infoBack.value = nullptr;
+ err = (*_closeNode)(&_infoBack);
+
+bail:
+ return err;
+}
+
+FaceIOErr JSONReader::readNumber() {
+ FaceIOErr err = kIOErrNone;
+ char *endPtr;
+ size_t n;
+ std::string str;
+
+ _infoBack.number = strtod(_jsonStr, &endPtr); /* Parse the number */
+ n = endPtr - _jsonStr;
+ BAIL_IF_FALSE(n <= _jsonLen, err, kIOErrEOF);
+ BAIL_IF_ZERO(n, err, kIOErrSyntax);
+ str.assign(_jsonStr, n);
+ _jsonStr += n;
+ _infoBack.type = kJSONNumber; /* Open bracket */
+ _infoBack.value = str.c_str();
+ BAIL_IF_ERR(err = (*_openNode)(&_infoBack));
+ err = (*_closeNode)(&_infoBack); /* Close bracket */
+
+bail:
+ _infoBack.value = nullptr; /* Make Coverity happy */
+ return err;
+}
+
+FaceIOErr JSONReader::readValue() {
+ static const char kTrue[] = "true", kFalse[] = "false", kNull[] = "null";
+ FaceIOErr err = kIOErrNotValue;
+
+ skipWhiteSpace();
+ BAIL_IF_ZERO(_jsonLen, err, kIOErrEOF);
+
+ if ('{' == _jsonStr[0]) // Object
+ return readObject();
+
+ if ('[' == _jsonStr[0]) // Array
+ return readArray();
+
+ if ('"' == _jsonStr[0] || '\'' == _jsonStr[0]) // String
+ return readString();
+
+ if (('0' <= _jsonStr[0] && _jsonStr[0] <= '9') || // Number
+ '-' == _jsonStr[0] || '+' == _jsonStr[0] || '.' == _jsonStr[0])
+ return readNumber();
+
+ if (_jsonLen >= 4 && !strncasecmp(_jsonStr, kTrue, 4)) // Boolean true
+  { // TODO: reject identifiers that merely begin with "true"
+ _jsonStr += 4;
+ _jsonLen -= 4;
+ _infoBack.type = kJSONBoolean;
+ _infoBack.value = kTrue;
+ BAIL_IF_ERR(err = (*_openNode)(&_infoBack));
+ return (*_closeNode)(&_infoBack);
+ }
+
+ if (_jsonLen >= 5 && !strncasecmp(_jsonStr, kFalse, 5)) // Boolean false
+  { // TODO: reject identifiers that merely begin with "false"
+ _jsonStr += 5;
+ _jsonLen -= 5;
+ _infoBack.type = kJSONBoolean;
+ _infoBack.value = kFalse;
+ BAIL_IF_ERR(err = (*_openNode)(&_infoBack));
+ return (*_closeNode)(&_infoBack);
+ }
+
+ if (_jsonLen >= 4 && !strncasecmp(_jsonStr, kNull, 4)) // null
+  { // TODO: reject identifiers that merely begin with "null"
+ _jsonStr += 4;
+ _jsonLen -= 4;
+ _infoBack.type = kJSONNull;
+ _infoBack.value = kNull;
+ BAIL_IF_ERR(err = (*_openNode)(&_infoBack));
+ return (*_closeNode)(&_infoBack);
+ }
+
+bail:
+ return err;
+}
+
+FaceIOErr JSONReader::parse(const char *str, size_t len, void *userState) {
+ FaceIOErr err;
+
+ _jsonStr = str;
+ _jsonLen = len ? len : strlen(str);
+ _infoBack.userData = userState;
+ err = readValue();
+ if (kIOErrNone != err) printf("Syntax error at \"%.32s...\"\n", _jsonStr);
+ return err;
+}
+
+FaceIOErr JSONReader::parse(const char *fileName, void *userState) {
+ std::string str;
+ FaceIOErr err = ReadFileIntoString(fileName, true, str);
+ if (kIOErrNone == err) err = parse(str.c_str(), str.size(), userState);
+ return err;
+}
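+
+/* Illustrative sketch of the callback interface (CountOpen/CountClose are hypothetical;
+ * EOSContoursOpenNode()/EOSContoursCloseNode() below are a production example):
+ *
+ *   static FaceIOErr CountOpen(JSONInfo *info)  { ++*(unsigned*)info->userData; return kIOErrNone; }
+ *   static FaceIOErr CountClose(JSONInfo*)      { return kIOErrNone; }
+ *   ...
+ *   unsigned numNodes = 0;
+ *   JSONReader rdr(&CountOpen, &CountClose);
+ *   FaceIOErr err = rdr.parse("{\"a\": [1, 2, 3]}", 0, &numNodes);  // len 0 -> strlen()
+ */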
+
+
+class JSONWriter::Impl {
+ public:
+ int level;
+ FILE *fd;
+  std::stack<int> count;
+ void doIndent();
+ void maybeComma();
+ void maybeTag(const char *tag);
+ void commaIndentTag(const char *tag);
+  template <typename T> void writeArray(unsigned n, const T *v, unsigned maxRow, const char *fmt, const char *tag);
+ void openObject(const char *tag);
+ void closeObject();
+ void openArray(const char *tag);
+ void closeArray();
+};
+
+void JSONWriter::Impl::doIndent() {
+ for (int i = level * 2; i-- > 0;) putc(' ', fd);
+}
+
+void JSONWriter::Impl::maybeComma() {
+ if (count.top())
+ putc(',', fd);
+ count.top()++;
+}
+
+void JSONWriter::Impl::maybeTag(const char *tag) {
+ if (!tag) return;
+ fprintf(fd, "\"%s\": ", tag);
+}
+
+void JSONWriter::Impl::commaIndentTag(const char *tag) {
+ maybeComma();
+ if (level) putc('\n', fd);
+ doIndent();
+ maybeTag(tag);
+}
+void JSONWriter::Impl::openObject(const char *tag)
+{
+ commaIndentTag(tag);
+ putc('{', fd);
+ ++(level);
+ count.push(0);
+}
+
+void JSONWriter::Impl::closeObject()
+{
+ --(level);
+ putc('\n', fd);
+ doIndent();
+ putc('}', fd);
+ count.pop();
+}
+
+void JSONWriter::Impl::openArray(const char *tag)
+{
+ commaIndentTag(tag);
+ putc('[', fd);
+ ++(level);
+ count.push(0);
+}
+
+void JSONWriter::Impl::closeArray()
+{
+ --(level);
+ putc('\n', fd);
+ doIndent();
+ putc(']', fd);
+ count.pop();
+}
+
+JSONWriter::JSONWriter() {
+ pimpl = new Impl();
+ pimpl->level = 0;
+ pimpl->fd = nullptr;
+ pimpl->count.push(0);
+}
+
+JSONWriter::~JSONWriter() {
+ if (pimpl->fd && pimpl->fd != stdout) fclose(pimpl->fd);
+ delete pimpl;
+}
+
+FaceIOErr JSONWriter::open(const char *file) {
+ if (pimpl->fd && pimpl->fd != stdout) (void)(this->close());
+ if (file && file[0]) {
+ #ifndef _MSC_VER
+ pimpl->fd = fopen(file, "w");
+ #else /* _MSC_VER */
+ if (0 != fopen_s(&pimpl->fd, file, "w"))
+ pimpl->fd = nullptr;
+ #endif /* _MSC_VER */
+ } else {
+ pimpl->fd = stdout;
+ }
+ return pimpl->fd ? kIOErrNone : kIOErrWrite;
+}
+
+FaceIOErr JSONWriter::close() {
+  if (pimpl->fd) putc('\n', pimpl->fd); /* Guard against close() without a successful open() */
+  if (pimpl->fd && pimpl->fd != stdout) (void)fclose(pimpl->fd);
+  pimpl->fd = nullptr;
+  return kIOErrNone;
+}
+
+void JSONWriter::openObject(const char *tag) {
+ pimpl->openObject(tag);
+}
+
+void JSONWriter::closeObject() {
+ pimpl->closeObject();
+}
+
+void JSONWriter::openArray(const char *tag) {
+ pimpl->openArray(tag);
+}
+
+void JSONWriter::closeArray() {
+ pimpl->closeArray();
+}
+
+template <typename T>
+void JSONWriter::Impl::writeArray(unsigned n, const T *v, unsigned maxRow, const char *fmt, const char *tag)
+{
+  openArray(tag);
+  putc('\n', fd);
+  if (n) {
+    for (; n > maxRow; n -= maxRow) {   /* Full rows of maxRow elements */
+      doIndent();
+      for (unsigned i = maxRow; i--; ++v) {
+        fprintf(fd, fmt, *v);
+        putc(',', fd);
+        putc((i ? ' ' : '\n'), fd);
+      }
+    }
+    doIndent();
+    for (; n--; ++v) {                  /* Last (possibly partial) row */
+      fprintf(fd, fmt, *v);
+      if (n) fprintf(fd, ", ");
+    }
+  }
+  closeArray();
+}
+
+
+void JSONWriter::writeNumericArray(unsigned n, const float *v, unsigned maxRow, const char *tag) {
+ pimpl->writeArray(n, v, maxRow, "%.8g", tag);
+}
+
+void JSONWriter::writeNumericArray(unsigned n, const double *v, unsigned maxRow, const char *tag) {
+ pimpl->writeArray(n, v, maxRow, "%.17g", tag);
+}
+
+void JSONWriter::writeNumericArray(unsigned n, const int *v, unsigned maxRow, const char *tag) {
+ pimpl->writeArray(n, v, maxRow, "%d", tag);
+}
+
+void JSONWriter::writeNumericArray(unsigned n, const unsigned short *v, unsigned maxRow, const char *tag) {
+ pimpl->writeArray(n, v, maxRow, "%u", tag);
+}
+
+void JSONWriter::writeNumericArray(unsigned n, const unsigned char *v, unsigned maxRow, const char *tag) {
+ pimpl->writeArray(n, v, maxRow, "%u", tag);
+}
+
+void JSONWriter::writeNumber(double number, const char *tag) {
+ pimpl->commaIndentTag(tag);
+ fprintf(pimpl->fd, "%.17g", number);
+}
+
+void JSONWriter::writeString(const char *str, const char *tag) {
+ pimpl->commaIndentTag(tag);
+ fprintf(pimpl->fd, "\"%s\"", str);
+}
+
+void JSONWriter::writeBool(bool value, const char *tag) {
+ pimpl->commaIndentTag(tag);
+ fprintf(pimpl->fd, "%s", value ? "true" : "false");
+}
+
+void JSONWriter::writeNull(const char *tag) {
+ pimpl->commaIndentTag(tag);
+ fprintf(pimpl->fd, "null");
+}
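+
+/* Illustrative usage sketch (PrintJSONFaceModel() below is the real client): emits
+ * {"answer": 42, "values": [1, 2, 3]}, pretty-printed, to stdout, since an empty or
+ * null file name selects stdout.
+ *
+ *   JSONWriter wtr;
+ *   if (kIOErrNone == wtr.open(nullptr)) {
+ *     wtr.openObject();
+ *     wtr.writeNumber(42, "answer");
+ *     const int v[3] = { 1, 2, 3 };
+ *     wtr.writeNumericArray(3, v, 3, "values");
+ *     wtr.closeObject();
+ *     wtr.close();
+ *   }
+ */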
+
+
+} // namespace anonymous
+
+
+/*******************************************************************************
+********************************************************************************
+********************************************************************************
+***** NVF Output *****
+********************************************************************************
+********************************************************************************
+********************************************************************************/
+
+namespace /* anonymous */ {
+
+template <typename ElementType>
+uint32_t NVFSize(uint32_t n, const ElementType *v) {
+ return (uint32_t)(n * sizeof(*v));
+}
+
+template <typename ElementType>
+FaceIOErr NVFWriteOpaqueObject(uint32_t type, uint32_t size, const ElementType *v, EOWriter &wtr, unsigned tag = 0) {
+ return wtr.writeOpaqueObject(type, size * sizeof(*v), v, tag);
+}
+
+} // namespace
+
+static uint32_t NVFSizeShapeModel(const FaceIOAdapter *fac) {
+ uint32_t size = 8 + NVFSize(fac->getShapeMeanSize(), fac->getShapeMean())
+ + 8 + NVFSize(fac->getShapeModesSize(), fac->getShapeModes())
+ + sizeof(uint32_t)
+ + 8 + NVFSize(fac->getShapeEigenvaluesSize(), fac->getShapeEigenvalues())
+ + 8 + NVFSize(fac->getTriangleListSize(), fac->getTriangleList());
+ return size;
+}
+
+static FaceIOErr NVFWriteShapeModel(const FaceIOAdapter *fac, EOWriter &wtr) {
+ FaceIOErr err;
+ uint32_t numModes = fac->getShapeNumModes(), sizeNumMode = sizeof(uint32_t),
+ sizeMean = NVFSize(fac->getShapeMeanSize(), fac->getShapeMean()),
+ sizeModes = NVFSize(fac->getShapeModesSize(), fac->getShapeModes()),
+ sizeEigen = NVFSize(fac->getShapeEigenvaluesSize(), fac->getShapeEigenvalues()),
+ sizeTriList = NVFSize(fac->getTriangleListSize(), fac->getTriangleList()),
+ size = 8 + sizeMean + 8 + sizeModes + sizeNumMode + 8 + sizeEigen + 8 + sizeTriList;
+ BAIL_IF_ERR(err = wtr.writeEncapsulationHeader(FOURCC_SHAPE, size, 0));
+ BAIL_IF_ERR(err = wtr.writeOpaqueObject(FOURCC_MEAN, sizeMean, fac->getShapeMean(), 0));
+ BAIL_IF_ERR(err = wtr.writeEncapsulationHeader(FOURCC_BASIS, sizeModes + sizeNumMode, 0));
+ BAIL_IF_ERR(err = wtr.writeData(sizeNumMode, &numModes));
+ BAIL_IF_ERR(err = wtr.writeData(sizeModes, fac->getShapeModes()));
+ BAIL_IF_ERR(err = wtr.writeOpaqueObject(FOURCC_EIGENVALUES, sizeEigen, fac->getShapeEigenvalues(), 0));
+ BAIL_IF_ERR(err = wtr.writeOpaqueObject(FOURCC_TRIANGLE_LIST, sizeTriList, fac->getTriangleList(), 0));
+bail:
+ return err;
+}
+
+static uint32_t NVFSizeColorModel(const FaceIOAdapter *fac) {
+ uint32_t sizeMean = NVFSize(fac->getColorMeanSize(), fac->getColorMean()),
+ sizeModes = NVFSize(fac->getColorModesSize(), fac->getColorModes()),
+ sizeEigen = NVFSize(fac->getColorEigenvaluesSize(), fac->getColorEigenvalues()),
+ sizeTriList = 0, // NVFSize(fac->getTriangleListSize(), fac->getTriangleList());
+ size = sizeMean + sizeModes + sizeEigen + sizeTriList;
+  if (size) size += 8 + 8 + 8 + 8 + sizeof(uint32_t); // 4 sub-object headers plus numModes, matching NVFWriteColorModel()
+ return size;
+}
+
+static FaceIOErr NVFWriteColorModel(const FaceIOAdapter *fac, EOWriter &wtr) {
+ FaceIOErr err = kIOErrNone;
+ uint32_t numModes = fac->getColorNumModes(), sizeNumMode = sizeof(uint32_t),
+ sizeMean = NVFSize(fac->getColorMeanSize(), fac->getColorMean()),
+ sizeModes = NVFSize(fac->getColorModesSize(), fac->getColorModes()),
+ sizeEigen = NVFSize(fac->getColorEigenvaluesSize(), fac->getColorEigenvalues()),
+ sizeTriList = 0, // NVFSize(fac->getTriangleListSize(), fac->getTriangleList());
+ size = 8 + sizeMean + 8 + sizeModes + sizeNumMode + 8 + sizeEigen + 8 + sizeTriList;
+ if (sizeMean + sizeModes + sizeEigen != 0) {
+ BAIL_IF_ERR(err = wtr.writeEncapsulationHeader(FOURCC_COLOR, size, 0));
+ BAIL_IF_ERR(err = wtr.writeOpaqueObject(FOURCC_MEAN, sizeMean, fac->getColorMean(), 0));
+ BAIL_IF_ERR(err = wtr.writeEncapsulationHeader(FOURCC_BASIS, sizeModes + sizeNumMode, 0));
+ BAIL_IF_ERR(err = wtr.writeData(sizeNumMode, &numModes));
+ BAIL_IF_ERR(err = wtr.writeData(sizeModes, fac->getColorModes()));
+ BAIL_IF_ERR(err = wtr.writeOpaqueObject(FOURCC_EIGENVALUES, sizeEigen, fac->getColorEigenvalues(), 0));
+ BAIL_IF_ERR(err = wtr.writeOpaqueObject(FOURCC_TRIANGLE_LIST, sizeTriList, fac->getTriangleList(), 0));
+ }
+bail:
+ return err;
+}
+
+static uint32_t NVFSizeMorphableModel(const FaceIOAdapter *fac) {
+ uint32_t size = 8 + NVFSizeShapeModel(fac),
+ textureSize = NVFSize(fac->getTextureCoordinatesSize(), fac->getTextureCoordinates()),
+ colorSize = NVFSizeColorModel(fac);
+ if (textureSize) size += 8 + textureSize;
+ if (colorSize) size += 8 + colorSize;
+ return size;
+}
+
+static FaceIOErr NVFWriteMorphableModel(const FaceIOAdapter *fac, EOWriter &wtr) {
+ FaceIOErr err;
+ uint32_t size;
+
+ /* Write the shape model */
+ BAIL_IF_ERR(err = NVFWriteShapeModel(fac, wtr));
+
+ /* Write the color model */
+ BAIL_IF_ERR(err = NVFWriteColorModel(fac, wtr));
+
+ /* Write the texture coordinates */
+ if (0 != (size = NVFSize(fac->getTextureCoordinatesSize(), fac->getTextureCoordinates())))
+ BAIL_IF_ERR(err = wtr.writeOpaqueObject(FOURCC_TEXTURE_COORDS, size, fac->getTextureCoordinates()));
+
+bail:
+ return err;
+}
+
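+/* NVF strings are stored without a terminating NUL and are zero-padded up to a multiple
+ * of 4 bytes; NVFSizeString() returns that padded on-disk size, NVFWriteString() writes
+ * the padding, and NVFReadString() strips it off again when reading. */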
+static uint32_t NVFSizeString(const char *str) {
+  return (uint32_t)((strlen(str) + 3) & ~3); // bump up to a multiple of 4
+}
+
+static FaceIOErr NVFWriteString(const char *str, EOWriter &wtr) {
+ uint32_t strSize = (uint32_t)strlen(str);
+ FaceIOErr err = wtr.writeData(strSize, str);
+ uint32_t pad = (uint32_t)(NVFSizeString(str) - strSize);
+ BAIL_IF_ERR(err);
+ if (pad) err = wtr.writeData(pad, "\0\0\0");
+bail:
+ return err;
+}
+
+static uint32_t NVFSizeBlendShapes(const FaceIOAdapter *fac) {
+ uint32_t size = sizeof(uint32_t), // sizeof(numShapes)
+ numShapes = fac->getNumBlendShapes(), i;
+ for (i = 0; i < numShapes; ++i) {
+ size += 8 + NVFSizeString(fac->getBlendShapeName(i));
+ size += 8 + NVFSize(fac->getBlendShapeSize(i), fac->getBlendShape(i));
+ }
+ return size;
+}
+
+static FaceIOErr NVFWriteBlendShapes(const FaceIOAdapter *fac, EOWriter &wtr) {
+ FaceIOErr err = kIOErrNone;
+ uint32_t numShapes = fac->getNumBlendShapes();
+ uint32_t i;
+
+ BAIL_IF_ERR(err = wtr.writeData(sizeof(numShapes), &numShapes));
+ for (i = 0; i < numShapes; ++i) {
+ const char *str = fac->getBlendShapeName(i);
+ BAIL_IF_ERR(err = wtr.writeEncapsulationHeader(FOURCC_NAME, NVFSizeString(str)));
+ BAIL_IF_ERR(err = NVFWriteString(str, wtr));
+ BAIL_IF_ERR(err = NVFWriteOpaqueObject(FOURCC_SHAPE, fac->getBlendShapeSize(i), fac->getBlendShape(i), wtr));
+ }
+bail:
+ return err;
+}
+
+static uint32_t NVFSizeIbugMappings(const FaceIOAdapter *fac) {
+ uint32_t size;
+ size = 8 + NVFSize(fac->getIbugLandmarkMappingsSize(), fac->getIbugLandmarkMappings())
+ + 8 + NVFSize(fac->getIbugRightContourSize(), fac->getIbugRightContour())
+ + 8 + NVFSize(fac->getIbugLeftContourSize(), fac->getIbugLeftContour());
+ return size;
+}
+
+static FaceIOErr NVFWriteIbugMappings(const FaceIOAdapter *fac, EOWriter &wtr) {
+ FaceIOErr err;
+ BAIL_IF_ERR(err = NVFWriteOpaqueObject(FOURCC_LANDMARK_MAP, fac->getIbugLandmarkMappingsSize(),
+ fac->getIbugLandmarkMappings(), wtr));
+ BAIL_IF_ERR(err = NVFWriteOpaqueObject(FOURCC_RIGHT_CONTOUR, fac->getIbugRightContourSize(),
+ fac->getIbugRightContour(), wtr));
+ BAIL_IF_ERR(
+ err = NVFWriteOpaqueObject(FOURCC_LEFT_CONTOUR, fac->getIbugLeftContourSize(), fac->getIbugLeftContour(), wtr));
+bail:
+ return err;
+}
+
+static uint32_t NVFSizeModelContours(const FaceIOAdapter *fac) {
+ uint32_t size = 8 + NVFSize(fac->getModelRightContourSize(), fac->getModelRightContour())
+ + 8 + NVFSize(fac->getModelLeftContourSize(), fac->getModelLeftContour());
+ return size;
+}
+
+static FaceIOErr NVFWriteModelContours(const FaceIOAdapter *fac, EOWriter &wtr) {
+ FaceIOErr err;
+ BAIL_IF_ERR(err = NVFWriteOpaqueObject(FOURCC_RIGHT_CONTOUR, fac->getModelRightContourSize(),
+ fac->getModelRightContour(), wtr));
+ BAIL_IF_ERR(
+ err = NVFWriteOpaqueObject(FOURCC_LEFT_CONTOUR, fac->getModelLeftContourSize(), fac->getModelLeftContour(), wtr));
+bail:
+ return err;
+}
+
+static uint32_t NVFSizeTopology(const FaceIOAdapter *fac) {
+ uint32_t size = 8 + NVFSize(fac->getAdjacentFacesSize(), fac->getAdjacentFaces()) + 8 +
+ NVFSize(fac->getAdjacentVerticesSize(), fac->getAdjacentVertices());
+ return size;
+}
+
+static FaceIOErr NVFWriteTopology(const FaceIOAdapter *fac, EOWriter &wtr) {
+ FaceIOErr err;
+ BAIL_IF_ERR(
+ err = NVFWriteOpaqueObject(FOURCC_ADJACENT_FACES, fac->getAdjacentFacesSize(), fac->getAdjacentFaces(), wtr));
+ BAIL_IF_ERR(err = NVFWriteOpaqueObject(FOURCC_ADJACENT_VERTICES, fac->getAdjacentVerticesSize(),
+ fac->getAdjacentVertices(), wtr));
+bail:
+ return err;
+}
+
+
+static uint32_t NVFSizeNVLM(const FaceIOAdapter *fac) {
+ uint32_t size = 8 + NVFSize(fac->getNvlmLandmarksSize(), fac->getNvlmLandmarks())
+ + 8 + NVFSize(fac->getNvlmRightContourSize(), fac->getNvlmRightContour())
+ + 8 + NVFSize(fac->getNvlmLeftContourSize(), fac->getNvlmLeftContour());
+ return size;
+}
+
+static FaceIOErr NVFWriteNVLM(const FaceIOAdapter *fac, EOWriter &wtr) {
+ FaceIOErr err;
+ BAIL_IF_ERR(
+ err = NVFWriteOpaqueObject(FOURCC_LANDMARK_MAP, fac->getNvlmLandmarksSize(), fac->getNvlmLandmarks(), wtr));
+ BAIL_IF_ERR(
+ err = NVFWriteOpaqueObject(FOURCC_RIGHT_CONTOUR, fac->getNvlmRightContourSize(), fac->getNvlmRightContour(), wtr));
+ BAIL_IF_ERR(
+ err = NVFWriteOpaqueObject(FOURCC_LEFT_CONTOUR, fac->getNvlmLeftContourSize(), fac->getNvlmLeftContour(), wtr));
+
+bail:
+ return err;
+}
+
+struct TPart {
+ uint32_t partitionIndex, faceIndex, numFaces, vertexIndex, numVertices;
+ int32_t smoothingGroup;
+};
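+/* Note: TPart mirrors the on-disk partition record exactly; it is written with a single
+ * writeData(sizeof(part), &part) in NVFWritePartitions() and read back with one fread()
+ * in NVFReadPart(), so the field order and 32-bit widths must not change. */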
+
+static uint32_t NVFSizePartitions(const FaceIOAdapter *fac) {
+ uint32_t numPartitions = fac->getNumPartitions(),
+ size = sizeof(numPartitions),
+ i, z;
+ for (i = 0; i < numPartitions; ++i) {
+ size += 8; // type, size
+ size += sizeof(TPart); // partitionIndex, faceIndex, numFaces, vertexIndex, numVertices, smoothingGroup
+ if (0 != (z = NVFSizeString(fac->getPartitionName(i)))) size += 8 + z;
+ if (0 != (z = NVFSizeString(fac->getPartitionMaterialName(i)))) size += 8 + z;
+ }
+ return size;
+}
+
+
+static FaceIOErr NVFWritePartitions(const FaceIOAdapter *fac, EOWriter &wtr) {
+ FaceIOErr err = kIOErrNone;
+ uint32_t numPartitions = fac->getNumPartitions(), i;
+ EOTypeSize ts;
+ TPart part;
+ const char *name, *mtrl;
+ uint32_t nameSize, mtrlSize;
+
+ ts.type = FOURCC_PART;
+ BAIL_IF_ERR(err = wtr.writeData(sizeof(numPartitions), &numPartitions));
+
+ for (i = 0; i < numPartitions; ++i) {
+ part.partitionIndex = fac->getPartition(
+ i, &part.faceIndex, &part.numFaces, &part.vertexIndex, &part.numVertices, &part.smoothingGroup);
+ nameSize = (nullptr != (name = fac->getPartitionName(i)) && name[0]) ? NVFSizeString(name) : 0;
+ mtrlSize = (nullptr != (mtrl = fac->getPartitionMaterialName(i)) && mtrl[0]) ? NVFSizeString(mtrl) : 0;
+ ts.size = sizeof(part);
+ if (nameSize) ts.size += 8 + nameSize;
+ if (mtrlSize) ts.size += 8 + mtrlSize;
+ BAIL_IF_ERR(err = wtr.writeData(sizeof(ts), &ts)); // {type,size}
+    BAIL_IF_ERR(err = wtr.writeData(sizeof(part), &part)); // unencapsulated: six 32-bit fields
+ if (nameSize) {
+ BAIL_IF_ERR(err = wtr.writeEncapsulationHeader(FOURCC_NAME, nameSize));
+ BAIL_IF_ERR(err = NVFWriteString(name, wtr));
+ }
+ if (mtrlSize) {
+ BAIL_IF_ERR(err = wtr.writeEncapsulationHeader(FOURCC_MATERIAL, mtrlSize));
+ BAIL_IF_ERR(err = NVFWriteString(mtrl, wtr));
+ }
+ }
+
+bail:
+ return err;
+}
+
+
+/********************************************************************************
+ * API WriteNVFFaceModel API *
+ ********************************************************************************/
+
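+/* Typical use (illustrative sketch; MyFaceModelAdapter is a hypothetical FaceIOAdapter
+ * subclass whose getters expose an in-memory face model):
+ *
+ *   MyFaceModelAdapter adapter;
+ *   FaceIOErr err = WriteNVFFaceModel(&adapter, "face_model.nvf");
+ *   if (kIOErrNone != err) PrintIOError("face_model.nvf", err);
+ */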
+FaceIOErr WriteNVFFaceModel(FaceIOAdapter *fac, const char *fileName) {
+ FaceIOErr err;
+ EOWriter wtr;
+ NVFFileHeader header;
+
+ err = wtr.open(fileName);
+ if (kIOErrNone != err) {
+ PrintIOError(fileName, err);
+ goto bail;
+ }
+
+ BAIL_IF_ERR(err = wtr.writeData(sizeof(header), &header));
+
+ BAIL_IF_ERR(err = wtr.writeEncapsulationHeader(FOURCC_MODEL, NVFSizeMorphableModel(fac), FOURCC_MODEL));
+ BAIL_IF_ERR(err = NVFWriteMorphableModel(fac, wtr));
+
+ BAIL_IF_ERR(err = wtr.writeEncapsulationHeader(FOURCC_IBUG, NVFSizeIbugMappings(fac), FOURCC_IBUG));
+ BAIL_IF_ERR(err = NVFWriteIbugMappings(fac, wtr));
+
+ BAIL_IF_ERR(err = wtr.writeEncapsulationHeader(FOURCC_BLEND_SHAPES, NVFSizeBlendShapes(fac), FOURCC_BLEND_SHAPES));
+ BAIL_IF_ERR(err = NVFWriteBlendShapes(fac, wtr));
+
+ BAIL_IF_ERR(err =
+ wtr.writeEncapsulationHeader(FOURCC_MODEL_CONTOUR, NVFSizeModelContours(fac), FOURCC_MODEL_CONTOUR));
+ BAIL_IF_ERR(err = NVFWriteModelContours(fac, wtr));
+
+ BAIL_IF_ERR(err = wtr.writeEncapsulationHeader(FOURCC_TOPOLOGY, NVFSizeTopology(fac), FOURCC_TOPOLOGY));
+ BAIL_IF_ERR(err = NVFWriteTopology(fac, wtr));
+
+ if (fac->getNvlmLandmarksSize()) {
+ BAIL_IF_ERR(err = wtr.writeEncapsulationHeader(FOURCC_NVLM, NVFSizeNVLM(fac), FOURCC_NVLM));
+ BAIL_IF_ERR(err = NVFWriteNVLM(fac, wtr));
+ }
+
+ if (fac->getNumPartitions()) {
+ BAIL_IF_ERR(err = wtr.writeEncapsulationHeader(FOURCC_PARTITIONS, NVFSizePartitions(fac), FOURCC_PARTITIONS));
+ BAIL_IF_ERR(err = NVFWritePartitions(fac, wtr));
+ }
+
+ BAIL_IF_ERR(err = wtr.writeTocAndClose(NVFFileHeader::EOTOCOffset()));
+
+bail:
+ return err;
+}
+
+/********************************************************************************
+ ********************************************************************************
+ ********************************************************************************
+ ***** NVF Input *****
+ ********************************************************************************
+ ********************************************************************************
+ ********************************************************************************/
+
+
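+/* Read num items of the given size into ptr; when ptr is null (the adapter declined to
+ * supply storage) the data is skipped with fseek() instead. Returns num on success. */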
+static size_t SafeRead(void *ptr, size_t size, size_t num, FILE *fd) {
+ if (ptr) return(fread(ptr, size, num, fd));
+ else return fseek(fd, (long)(size * num), SEEK_CUR) ? 0 : num;
+}
+
+static FaceIOErr NVFReadShapeModel(FaceIOAdapter *fac, uint32_t size, FILE *fd) {
+ FaceIOErr err = kIOErrNone;
+ EOTypeSize ts;
+ uint32_t n, numModes;
+
+ while (size >= sizeof(ts)) {
+ BAIL_IF_FALSE(1 == fread(&ts, sizeof(ts), 1, fd), err, kIOErrEOF);
+ size -= sizeof(ts);
+ BAIL_IF_FALSE(ts.size <= size, err, kIOErrEOF);
+ size -= ts.size;
+ switch (ts.type) {
+ case FOURCC_MEAN:
+ n = ts.size / sizeof(*fac->getShapeMean());
+ BAIL_IF_FALSE(1 == SafeRead(fac->getShapeMean(n), ts.size, 1, fd), err, kIOErrRead);
+ break;
+ case FOURCC_BASIS:
+        BAIL_IF_FALSE(1 == fread(&numModes, sizeof(numModes), 1, fd), err, kIOErrRead); // The number of modes
+        ts.size -= sizeof(numModes);                  // The byte size of all modes
+        n = ts.size / sizeof(*fac->getShapeModes());  // The element count of all modes
+ BAIL_IF_FALSE(1 == SafeRead(fac->getShapeModes(n / numModes, numModes), ts.size, 1, fd), err, kIOErrRead);
+ break;
+ case FOURCC_EIGENVALUES:
+ n = ts.size / sizeof(*fac->getShapeEigenvalues());
+ BAIL_IF_FALSE(1 == SafeRead(fac->getShapeEigenvalues(n), ts.size, 1, fd), err, kIOErrRead);
+ break;
+ case FOURCC_TRIANGLE_LIST:
+ n = ts.size / sizeof(*fac->getTriangleList());
+ BAIL_IF_FALSE(1 == SafeRead(fac->getTriangleList(n), ts.size, 1, fd), err, kIOErrRead);
+ break;
+ default:
+ BAIL_IF_NONZERO(fseek(fd, ts.size, SEEK_CUR), err, kIOErrEOF); // We skip over objects we don't understand
+ break;
+ }
+ }
+bail:
+ return err;
+}
+
+static FaceIOErr NVFReadColorModel(FaceIOAdapter *fac, uint32_t size, FILE *fd) {
+ FaceIOErr err = kIOErrNone;
+ EOTypeSize ts;
+ uint32_t n, numModes;
+
+ while (size >= sizeof(ts)) {
+ BAIL_IF_FALSE(1 == fread(&ts, sizeof(ts), 1, fd), err, kIOErrEOF);
+ size -= sizeof(ts);
+ BAIL_IF_FALSE(ts.size <= size, err, kIOErrEOF);
+ size -= ts.size;
+ switch (ts.type) {
+ case FOURCC_MEAN:
+ n = ts.size / sizeof(*fac->getColorMean());
+ BAIL_IF_FALSE(1 == SafeRead(fac->getColorMean(n), ts.size, 1, fd), err, kIOErrRead);
+ break;
+ case FOURCC_BASIS:
+        BAIL_IF_FALSE(1 == fread(&numModes, sizeof(numModes), 1, fd), err, kIOErrRead); // The number of modes
+        ts.size -= sizeof(numModes);                  // The byte size of all modes
+        n = ts.size / sizeof(*fac->getColorModes());  // The element count of all modes
+ BAIL_IF_FALSE(1 == SafeRead(fac->getColorModes(n / numModes, numModes), ts.size, 1, fd), err, kIOErrRead);
+ break;
+ case FOURCC_EIGENVALUES:
+ n = ts.size / sizeof(*fac->getColorEigenvalues());
+ BAIL_IF_FALSE(1 == SafeRead(fac->getColorEigenvalues(n), ts.size, 1, fd), err, kIOErrRead);
+ break;
+ case FOURCC_TRIANGLE_LIST:
+ (void)fseek(fd, ts.size, SEEK_CUR); // Skip color triangle list -- it doesn't make sense
+ break;
+ default:
+ BAIL_IF_NONZERO(fseek(fd, ts.size, SEEK_CUR), err, kIOErrEOF); // We skip over objects we don't understand
+ break;
+ }
+ }
+bail:
+ return err;
+}
+
+static FaceIOErr NVFReadMorphableModel(FaceIOAdapter *fac, uint32_t size, FILE *fd) {
+ FaceIOErr err = kIOErrNone;
+ EOTypeSize ts;
+ uint32_t n;
+
+ while (size >= sizeof(ts)) {
+ BAIL_IF_FALSE(1 == fread(&ts, sizeof(ts), 1, fd), err, kIOErrEOF);
+ size -= sizeof(ts);
+ BAIL_IF_FALSE(ts.size <= size, err, kIOErrEOF);
+ size -= ts.size;
+ switch (ts.type) {
+ case FOURCC_SHAPE:
+ BAIL_IF_ERR(err = NVFReadShapeModel(fac, ts.size, fd));
+ break;
+ case FOURCC_COLOR:
+ BAIL_IF_ERR(err = NVFReadColorModel(fac, ts.size, fd));
+ break;
+ case FOURCC_TEXTURE_COORDS:
+ n = ts.size / sizeof(*fac->getTextureCoordinates());
+ BAIL_IF_FALSE(1 == SafeRead(fac->getTextureCoordinates(n), ts.size, 1, fd), err, kIOErrRead);
+ break;
+ default:
+ BAIL_IF_NONZERO(fseek(fd, ts.size, SEEK_CUR), err, kIOErrEOF); // We skip over objects we don't understand
+ break;
+ }
+ }
+bail:
+ return err;
+}
+
+static FaceIOErr NVFReadString(std::string &str, uint32_t size, FILE *fd) {
+ str.resize(size);
+ FaceIOErr err = (size == fread(&str[0], 1, size, fd)) ? kIOErrNone : kIOErrRead;
+ BAIL_IF_ERR(err);
+ for (; size; --size)
+ if (str[size - 1]) break;
+ str.resize(size); // remove pad
+bail:
+ return err;
+}
+
+static FaceIOErr NVFReadBlendShapes(FaceIOAdapter *fac, uint32_t size, FILE *fd) {
+ uint32_t numShapes;
+ FaceIOErr err = kIOErrNone;
+ EOTypeSize ts;
+ std::string name;
+ float *shape;
+
+  BAIL_IF_FALSE(1 == fread(&numShapes, sizeof(numShapes), 1, fd), err, kIOErrEOF);
+  size -= sizeof(numShapes);
+  fac->setNumBlendShapes(numShapes);
+
+ for (uint32_t idxShape = 0, idxName = 0; size >= sizeof(ts);) {
+ BAIL_IF_FALSE(1 == fread(&ts, sizeof(ts), 1, fd), err, kIOErrEOF);
+ size -= sizeof(ts);
+ BAIL_IF_FALSE(ts.size <= size, err, kIOErrEOF);
+ size -= ts.size;
+ switch (ts.type) {
+ case FOURCC_NAME:
+ BAIL_IF_FALSE(idxName < numShapes, err, kIOErrRead);
+ BAIL_IF_ERR(err = NVFReadString(name, ts.size, fd));
+ fac->setBlendShapeName(idxName++, name.c_str());
+ break;
+ case FOURCC_SHAPE:
+ BAIL_IF_FALSE(idxShape < numShapes, err, kIOErrRead);
+ shape = fac->getBlendShape(idxShape++, ts.size / sizeof(*shape));
+ BAIL_IF_FALSE(1 == SafeRead(shape, ts.size, 1, fd), err, kIOErrRead);
+ break;
+ default:
+ BAIL_IF_NONZERO(fseek(fd, ts.size, SEEK_CUR), err, kIOErrEOF); // We skip over objects we don't understand
+ break;
+ }
+ }
+bail:
+ return err;
+}
+
+static FaceIOErr NVFReadIbugMappings(FaceIOAdapter *fac, uint32_t size, FILE *fd) {
+ FaceIOErr err = kIOErrNone;
+ EOTypeSize ts;
+ uint32_t n;
+
+ while (size >= sizeof(ts)) {
+ BAIL_IF_FALSE(1 == fread(&ts, sizeof(ts), 1, fd), err, kIOErrEOF);
+ size -= sizeof(ts);
+ BAIL_IF_FALSE(ts.size <= size, err, kIOErrEOF);
+ size -= ts.size;
+ switch (ts.type) {
+ case FOURCC_LANDMARK_MAP:
+ n = ts.size / sizeof(*fac->getIbugLandmarkMappings());
+ BAIL_IF_FALSE(1 == SafeRead(fac->getIbugLandmarkMappings(n), ts.size, 1, fd), err, kIOErrRead);
+ break;
+ case FOURCC_RIGHT_CONTOUR:
+ n = ts.size / sizeof(*fac->getIbugRightContour());
+ BAIL_IF_FALSE(1 == SafeRead(fac->getIbugRightContour(n), ts.size, 1, fd), err, kIOErrRead);
+ break;
+ case FOURCC_LEFT_CONTOUR:
+ n = ts.size / sizeof(*fac->getIbugLeftContour());
+ BAIL_IF_FALSE(1 == SafeRead(fac->getIbugLeftContour(n), ts.size, 1, fd), err, kIOErrRead);
+ break;
+ default:
+ BAIL_IF_NONZERO(fseek(fd, ts.size, SEEK_CUR), err, kIOErrEOF); // We skip over objects we don't understand
+ break;
+ }
+ }
+bail:
+ return err;
+}
+
+static FaceIOErr NVFReadModelContours(FaceIOAdapter *fac, uint32_t size, FILE *fd) {
+  FaceIOErr err = kIOErrNone;
+  EOTypeSize ts;
+ uint32_t n;
+
+ while (size >= sizeof(ts)) {
+ BAIL_IF_FALSE(1 == fread(&ts, sizeof(ts), 1, fd), err, kIOErrEOF);
+ size -= sizeof(ts);
+ BAIL_IF_FALSE(ts.size <= size, err, kIOErrEOF);
+ size -= ts.size;
+ switch (ts.type) {
+ case FOURCC_RIGHT_CONTOUR:
+ n = ts.size / sizeof(*fac->getModelRightContour());
+ BAIL_IF_FALSE(1 == SafeRead(fac->getModelRightContour(n), ts.size, 1, fd), err, kIOErrRead);
+ break;
+ case FOURCC_LEFT_CONTOUR:
+ n = ts.size / sizeof(*fac->getModelLeftContour());
+ BAIL_IF_FALSE(1 == SafeRead(fac->getModelLeftContour(n), ts.size, 1, fd), err, kIOErrRead);
+ break;
+ default:
+ BAIL_IF_NONZERO(fseek(fd, ts.size, SEEK_CUR), err, kIOErrEOF); // We skip over objects we don't understand
+ break;
+ }
+ }
+bail:
+ return err;
+}
+
+static FaceIOErr NVFReadTopology(FaceIOAdapter *fac, uint32_t size, FILE *fd) {
+ FaceIOErr err = kIOErrNone;
+ EOTypeSize ts;
+ uint32_t n;
+
+ while (size >= sizeof(ts)) {
+ BAIL_IF_FALSE(1 == fread(&ts, sizeof(ts), 1, fd), err, kIOErrEOF);
+ size -= sizeof(ts);
+ BAIL_IF_FALSE(ts.size <= size, err, kIOErrEOF);
+ size -= ts.size;
+ switch (ts.type) {
+ case FOURCC_ADJACENT_FACES:
+ n = ts.size / sizeof(*fac->getAdjacentFaces());
+ BAIL_IF_FALSE(1 == SafeRead(fac->getAdjacentFaces(n), ts.size, 1, fd), err, kIOErrRead);
+ break;
+ case FOURCC_ADJACENT_VERTICES:
+ n = ts.size / sizeof(*fac->getAdjacentVertices());
+ BAIL_IF_FALSE(1 == SafeRead(fac->getAdjacentVertices(n), ts.size, 1, fd), err, kIOErrRead);
+ break;
+ default:
+ BAIL_IF_NONZERO(fseek(fd, ts.size, SEEK_CUR), err, kIOErrEOF); // We skip over objects we don't understand
+ break;
+ }
+ }
+bail:
+ return err;
+}
+
+static FaceIOErr NVFReadNvlm(FaceIOAdapter *fac, uint32_t size, FILE *fd) {
+ FaceIOErr err = kIOErrNone;
+ EOTypeSize ts;
+ uint32_t n;
+
+ while (size >= sizeof(ts)) {
+ BAIL_IF_FALSE(1 == fread(&ts, sizeof(ts), 1, fd), err, kIOErrEOF);
+ size -= sizeof(ts);
+ BAIL_IF_FALSE(ts.size <= size, err, kIOErrEOF);
+ size -= ts.size;
+ switch (ts.type) {
+ case FOURCC_LANDMARK_MAP:
+ n = ts.size / sizeof(*fac->getNvlmLandmarks());
+ BAIL_IF_FALSE(1 == SafeRead(fac->getNvlmLandmarks(n), ts.size, 1, fd), err, kIOErrRead);
+ break;
+ case FOURCC_RIGHT_CONTOUR:
+        n = ts.size / sizeof(*fac->getNvlmRightContour());
+ BAIL_IF_FALSE(1 == SafeRead(fac->getNvlmRightContour(n), ts.size, 1, fd), err, kIOErrRead);
+ break;
+ case FOURCC_LEFT_CONTOUR:
+        n = ts.size / sizeof(*fac->getNvlmLeftContour());
+ BAIL_IF_FALSE(1 == SafeRead(fac->getNvlmLeftContour(n), ts.size, 1, fd), err, kIOErrRead);
+ break;
+ default:
+ BAIL_IF_NONZERO(fseek(fd, ts.size, SEEK_CUR), err, kIOErrEOF); // We skip over objects we don't understand
+ break;
+ }
+ }
+bail:
+ return err;
+}
+
+static FaceIOErr NVFReadPart(uint32_t i, FaceIOAdapter *fac, uint32_t size, FILE *fd) {
+ FaceIOErr err = kIOErrNone;
+ EOTypeSize ts;
+ TPart part;
+ std::string name;
+
+ BAIL_IF_FALSE(sizeof(part) <= size, err, kIOErrEOF);
+ BAIL_IF_FALSE(1 == fread(&part, sizeof(part), 1, fd), err, kIOErrEOF);
+ size -= sizeof(part);
+ fac->setPartition(i, part.faceIndex, part.numFaces, part.vertexIndex, part.numVertices, part.smoothingGroup);
+ while (size >= sizeof(ts)) {
+ BAIL_IF_FALSE(1 == fread(&ts, sizeof(ts), 1, fd), err, kIOErrEOF);
+ size -= sizeof(ts);
+ BAIL_IF_FALSE(ts.size <= size, err, kIOErrEOF);
+ size -= ts.size;
+ switch (ts.type) {
+ case FOURCC_NAME:
+ BAIL_IF_ERR(err = NVFReadString(name, ts.size, fd));
+ fac->setPartitionName(i, name.c_str());
+ break;
+ case FOURCC_MATERIAL:
+ BAIL_IF_ERR(err = NVFReadString(name, ts.size, fd));
+ fac->setPartitionMaterialName(i, name.c_str());
+ break;
+ default:
+ BAIL_IF_NONZERO(fseek(fd, ts.size, SEEK_CUR), err, kIOErrEOF); // We skip over objects we don't understand
+ break;
+ }
+ }
+ BAIL_IF_NONZERO(size, err, kIOErrRead); // We are out-of-sync if size != 0
+bail:
+ return err;
+}
+
+static FaceIOErr NVFReadPartitions(FaceIOAdapter *fac, uint32_t size, FILE *fd) {
+ FaceIOErr err = kIOErrNone;
+ EOTypeSize ts;
+ uint32_t numPartitions, partIx = 0;
+
+ BAIL_IF_FALSE(sizeof(numPartitions) <= size, err, kIOErrEOF);
+ BAIL_IF_FALSE(1 == fread(&numPartitions, sizeof(numPartitions), 1, fd), err, kIOErrEOF);
+ size -= sizeof(numPartitions);
+ fac->setNumPartitions(numPartitions);
+
+ for (; size >= sizeof(ts); ++partIx) {
+ BAIL_IF_FALSE(1 == fread(&ts, sizeof(ts), 1, fd), err, kIOErrEOF);
+ size -= sizeof(ts);
+ BAIL_IF_FALSE(ts.size <= size, err, kIOErrEOF);
+ size -= ts.size;
+ switch (ts.type) {
+ case FOURCC_PART:
+ BAIL_IF_ERR(err = NVFReadPart(partIx, fac, ts.size, fd));
+ break;
+ default:
+ BAIL_IF_NONZERO(fseek(fd, ts.size, SEEK_CUR), err, kIOErrEOF); // We skip over objects we don't understand
+ break;
+ }
+ }
+ BAIL_IF_NONZERO(size, err, kIOErrRead); // We are out-of-sync if size != 0
+bail:
+ return err;
+}
+
+/********************************************************************************
+ * API ReadNVFFaceModel API *
+ ********************************************************************************/
+
+FaceIOErr ReadNVFFaceModel(const char *fileName, FaceIOAdapter *fac) {
+ FaceIOErr err = kIOErrNone;
+ FILE *fd = nullptr;
+ EOTypeSize ts;
+ NVFFileHeader header;
+ uint32_t size;
+
+#ifndef _MSC_VER
+ fd = fopen(fileName, "rb");
+#else /* _MSC_VER */
+ if (0 != fopen_s(&fd, fileName, "rb"))
+ fd = nullptr;
+#endif /* _MSC_VER */
+ if (!fd) {
+ PrintIOError(fileName, err = kIOErrFileOpen);
+ goto bail;
+ }
+
+ /* Find the length of the file */
+ BAIL_IF_NEGATIVE(fseek(fd, 0L, SEEK_END), err, kIOErrRead);
+ size = ftell(fd);
+ BAIL_IF_NEGATIVE(fseek(fd, 0L, SEEK_SET), err, kIOErrRead);
+ BAIL_IF_FALSE(size >= sizeof(header), err, kIOErrEOF);
+
+ /* Validate header */
+ BAIL_IF_FALSE(1 == fread(&header, sizeof(header), 1, fd), err, kIOErrRead);
+ BAIL_IF_FALSE(FOURCC_FILE_TYPE == header.type, err, kIOErrFormat);
+ BAIL_IF_FALSE(8 <= header.size, err, kIOErrFormat); // TODO: robustify
+ BAIL_IF_FALSE(NVFFileHeader::LITTLE_ENDIAN_CODE == header.endian, err, kIOErrFormat); // We only handle little-endian
+ BAIL_IF_FALSE(32 == header.sizeBits, err, kIOErrFormat); // We only use 32 bit sizes
+ BAIL_IF_FALSE(16 == header.indexBits, err, kIOErrFormat); // We only use 16 bit indices
+ size -= sizeof(header);
+ if (header.size > 8) { /* Forward compatibility */
+ uint32_t extra = header.size - 8;
+ BAIL_IF_FALSE(size >= extra, err, kIOErrEOF);
+ size -= extra;
+ BAIL_IF_NEGATIVE(fseek(fd, extra, SEEK_CUR), err, kIOErrRead);
+ }
+
+ while (size >= sizeof(ts)) {
+ BAIL_IF_FALSE(1 == fread(&ts, sizeof(ts), 1, fd), err, kIOErrEOF);
+ size -= sizeof(ts);
+ BAIL_IF_FALSE(ts.size <= size, err, kIOErrEOF);
+ size -= ts.size;
+ switch (ts.type) {
+ case FOURCC_MODEL:
+ BAIL_IF_ERR(err = NVFReadMorphableModel(fac, ts.size, fd));
+ break;
+ case FOURCC_IBUG:
+ BAIL_IF_ERR(err = NVFReadIbugMappings(fac, ts.size, fd));
+ break;
+ case FOURCC_BLEND_SHAPES:
+ BAIL_IF_ERR(err = NVFReadBlendShapes(fac, ts.size, fd));
+ break;
+ case FOURCC_MODEL_CONTOUR:
+ BAIL_IF_ERR(err = NVFReadModelContours(fac, ts.size, fd));
+ break;
+ case FOURCC_TOPOLOGY:
+ BAIL_IF_ERR(err = NVFReadTopology(fac, ts.size, fd));
+ break;
+ case FOURCC_NVLM:
+ BAIL_IF_ERR(err = NVFReadNvlm(fac, ts.size, fd));
+ break;
+ case FOURCC_PARTITIONS:
+ BAIL_IF_ERR(err = NVFReadPartitions(fac, ts.size, fd));
+ break;
+ case FOURCC_EOTOC:
+ BAIL_IF_NEGATIVE(fseek(fd, ts.size, SEEK_CUR), err, kIOErrRead); // Skip the EOTOC
+ break;
+ default:
+ BAIL(err, kIOErrSyntax); // TODO: We should just skip over objects we don't understand
+ }
+ }
+
+bail:
+ if (fd) fclose(fd);
+ return err;
+}
+
+/********************************************************************************
+ ********************************************************************************
+ ********************************************************************************
+ ***** EOS Input *****
+ ********************************************************************************
+ ********************************************************************************
+ ********************************************************************************/
+
+#define DEBUG_PARSER 0
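+/* Set DEBUG_PARSER to 1 to trace the file offset of each field while parsing the EOS binaries below. */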
+
+static FaceIOErr EOSReadModesSize(uint32_t *oneModeSize, uint32_t *numModes, FILE *fd) {
+ FaceIOErr err = kIOErrNone;
+ BAIL_IF_FALSE(1 == fread(oneModeSize, sizeof(*oneModeSize), 1, fd), err, kIOErrRead);
+ BAIL_IF_FALSE(1 == fread(numModes, sizeof(*numModes), 1, fd), err, kIOErrRead);
+bail:
+ return err;
+}
+
+static FaceIOErr EOSReadShapeModel(FaceIOAdapter *fac, FILE *fd) {
+ FaceIOErr err = kIOErrNone;
+ uint32_t modeSize, numModes;
+ uint16_t *tri;
+ float *data;
+ long long z;
+
+ if (DEBUG_PARSER) printf("Reading Shape Model @ %ld (%#lx)\n", ftell(fd), ftell(fd));
+ if (DEBUG_PARSER) printf("Reading mean @ %ld (%#lx)\n", ftell(fd), ftell(fd));
+ BAIL_IF_ERR(err = EOSReadModesSize(&modeSize, &numModes, fd)); // Coverity thinks "read" means "taint"
+ if (0 != (modeSize *= numModes)) {
+ BAIL_IF_NULL(data = fac->getShapeMean(modeSize), err, kIOErrNullPointer);
+ BAIL_IF_FALSE(1 == fread(data, modeSize * sizeof(*data), 1, fd), err, kIOErrRead);
+ }
+
+ if (DEBUG_PARSER) printf("Reading pca_basis @ %ld (%#lx)\n", ftell(fd), ftell(fd));
+ BAIL_IF_ERR(err = EOSReadModesSize(&modeSize, &numModes, fd));
+ if (0 != (modeSize * numModes)) {
+ BAIL_IF_NULL(data = fac->getShapeModes(modeSize, numModes), err, kIOErrNullPointer);
+ BAIL_IF_FALSE(1 == fread(data, modeSize * numModes * sizeof(*data), 1, fd), err, kIOErrRead);
+ }
+
+ if (DEBUG_PARSER) printf("Reading eigenvalues @ %ld (%#lx)\n", ftell(fd), ftell(fd));
+ BAIL_IF_ERR(err = EOSReadModesSize(&modeSize, &numModes, fd));
+ if (modeSize *= numModes) {
+ BAIL_IF_NULL(data = fac->getShapeEigenvalues(modeSize), err, kIOErrNullPointer);
+ BAIL_IF_FALSE(1 == fread(data, modeSize * sizeof(*data), 1, fd), err, kIOErrRead);
+ }
+
+  BAIL_IF_FALSE(1 == fread(&z, sizeof(z), 1, fd), err, kIOErrRead); /* Read the count before printing it */
+  if (DEBUG_PARSER) printf("Reading triangle_list(%lld) @ %ld (%#lx)\n", z, ftell(fd), ftell(fd));
+  z *= 3;
+ tri = fac->getTriangleList((uint32_t)z * 2); // Big enough for uint32_t now, we resize to uint16_t later
+ BAIL_IF_FALSE(z == (long long)fread(tri, sizeof(uint32_t), z, fd), err, kIOErrRead);
+ CopyUInt32to16Vector((uint32_t *)tri, tri, (uint32_t)z);
+ fac->setTriangleListSize((uint32_t)z); // Now it is the actual size
+
+bail:
+ if (err) printf("Error reading Shape Model\n");
+ return err;
+}
+
+static FaceIOErr EOSReadColorModel(FaceIOAdapter *fac, FILE *fd) {
+ FaceIOErr err = kIOErrNone;
+ uint32_t modeSize, numModes;
+ float *data;
+ long long z;
+
+ if (DEBUG_PARSER) printf("Reading Color Model @ %ld (%#lx)\n", ftell(fd), ftell(fd));
+ if (DEBUG_PARSER) printf("Reading mean @ %ld (%#lx)\n", ftell(fd), ftell(fd));
+ BAIL_IF_ERR(err = EOSReadModesSize(&modeSize, &numModes, fd));
+ if (modeSize *= numModes) {
+ BAIL_IF_NULL(data = fac->getColorMean(modeSize), err, kIOErrNullPointer);
+ BAIL_IF_FALSE(1 == fread(data, modeSize * sizeof(*data), 1, fd), err, kIOErrRead);
+ }
+
+ if (DEBUG_PARSER) printf("Reading pca_basis @ %ld (%#lx)\n", ftell(fd), ftell(fd));
+ BAIL_IF_ERR(err = EOSReadModesSize(&modeSize, &numModes, fd));
+ if (modeSize * numModes) {
+ BAIL_IF_NULL(data = fac->getColorModes(modeSize, numModes), err, kIOErrNullPointer);
+ BAIL_IF_FALSE(1 == fread(data, modeSize * numModes * sizeof(*data), 1, fd), err, kIOErrRead);
+ }
+
+ if (DEBUG_PARSER) printf("Reading eigenvalues @ %ld (%#lx)\n", ftell(fd), ftell(fd));
+ BAIL_IF_ERR(err = EOSReadModesSize(&modeSize, &numModes, fd));
+ if (modeSize *= numModes) {
+ BAIL_IF_NULL(data = fac->getColorEigenvalues(modeSize), err, kIOErrNullPointer);
+ BAIL_IF_FALSE(1 == fread(data, modeSize * sizeof(*data), 1, fd), err, kIOErrRead);
+ }
+
+  BAIL_IF_FALSE(1 == fread(&z, sizeof(z), 1, fd), err, kIOErrRead); /* Read the count before printing it */
+  if (DEBUG_PARSER) printf("Skipping triangle_list(%lld) @ %ld (%#lx)\n", z, ftell(fd), ftell(fd));
+  z *= 3 * sizeof(uint32_t);
+ (void)fseek(fd, (long)z, SEEK_CUR);
+
+bail:
+ if (err) printf("Error reading Color Model\n");
+ return err;
+}
+
+static FaceIOErr EOSReadMorphableModel(FaceIOAdapter *fac, FILE *fd) {
+ FaceIOErr err = kIOErrNone;
+ int version;
+ size_t n1;
+ long long z;
+ float *tex;
+
+ if (DEBUG_PARSER) printf("Reading TmorphableModel @ %ld (%#lx)\n", ftell(fd), ftell(fd));
+ n1 = fread(&version, sizeof(version), 1, fd);
+ BAIL_IF_FALSE(n1 == 1, err, kIOErrRead);
+ if (DEBUG_PARSER) printf("Version(%d)\n", version);
+ if (DEBUG_PARSER) printf("Reading shape_model @ %ld (%#lx)\n", ftell(fd), ftell(fd));
+ BAIL_IF_ERR(err = EOSReadShapeModel(fac, fd));
+ if (DEBUG_PARSER) printf("Reading color_model @ %ld (%#lx)\n", ftell(fd), ftell(fd));
+ BAIL_IF_ERR(err = EOSReadColorModel(fac, fd));
+ BAIL_IF_FALSE(1 == fread(&z, sizeof(z), 1, fd), err, kIOErrRead);
+ z *= 2; /* 2 floats per texture coordinate */
+ tex = fac->getTextureCoordinates((uint32_t)z * 2); /* allocate enough space for double precision */
+ if (DEBUG_PARSER) printf("Reading texture_coordinates(%lld) @ %ld (%#lx)\n", z, ftell(fd), ftell(fd));
+ BAIL_IF_FALSE(1 == fread(tex, sizeof(double) * z, 1, fd), err, kIOErrRead);
+ CopyDoubleToSingleVector((double *)tex, tex, (uint32_t)z);
+ fac->setTextureCoordinatesSize((uint32_t)z); /* resize for single precision */
+bail:
+ if (err) printf("Error reading TmorphableModel\n");
+ return err;
+}
+
+static FaceIOErr EOSReadString(std::string &str, FILE *fd) {
+ FaceIOErr err = kIOErrNone;
+ long long z;
+ if (DEBUG_PARSER) printf("Reading string @ %ld (%#lx)\n", ftell(fd), ftell(fd));
+ BAIL_IF_FALSE(1 == fread(&z, sizeof(z), 1, fd), err, kIOErrRead);
+ str.resize(z);
+ BAIL_IF_FALSE(z == (long long)fread(&str[0], sizeof(str[0]), z, fd), err, kIOErrRead);
+bail:
+ if (err) printf("Error reading EOS string\n");
+ return err;
+}
+
+static FaceIOErr EOSReadBlendShapes(FaceIOAdapter *fac, FILE *fd) {
+ FaceIOErr err = kIOErrNone;
+ size_t numShapes;
+ uint32_t i, modeSize, numModes;
+ float *modes;
+ long long z;
+ std::string name;
+
+ BAIL_IF_FALSE(1 == fread(&z, sizeof(z), 1, fd), err, kIOErrRead);
+ numShapes = z;
+ fac->setNumBlendShapes((uint32_t)numShapes);
+ for (i = 0; i < (uint32_t)numShapes; ++i) {
+ BAIL_IF_ERR(err = EOSReadString(name, fd));
+ fac->setBlendShapeName(i, name.c_str());
+ BAIL_IF_ERR(err = EOSReadModesSize(&modeSize, &numModes, fd));
+ modes = fac->getBlendShape(i, modeSize *= numModes);
+ BAIL_IF_FALSE(1 == fread(modes, modeSize * sizeof(*modes), 1, fd), err, kIOErrRead);
+ }
+
+bail:
+ if (err) printf("Error reading TblendShapes\n");
+ return err;
+}
+
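+/* The contours JSON is expected to look roughly like (sketch):
+ *   { "model_contour": { "right_contour": [ v0, v1, ... ],
+ *                        "left_contour":  [ v0, v1, ... ] } }
+ * where each entry is a vertex index into the face mesh. */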
+struct EOSContoursReaderState {
+ enum {
+ STATE_NULL,
+ STATE_MODEL_CONTOUR,
+ STATE_RIGHT_CONTOUR,
+ STATE_LEFT_CONTOUR,
+ STATE_RIGHT_CONTOUR_ARRAY,
+ STATE_LEFT_CONTOUR_ARRAY,
+ STATE_ERROR
+ };
+ int state, nest;
+ FaceIOAdapter *fac;
+ EOSContoursReaderState() {
+ state = STATE_NULL;
+ nest = 0;
+ fac = nullptr;
+ };
+};
+
+static FaceIOErr EOSContoursOpenNode(JSONInfo *info) {
+ EOSContoursReaderState *st = (EOSContoursReaderState *)(info->userData);
+ switch (info->type) {
+ case kJSONObject:
+ ++(st->nest);
+ break;
+ case kJSONArray:
+ ++(st->nest);
+ switch (st->state) {
+ case EOSContoursReaderState::STATE_RIGHT_CONTOUR:
+ st->state = EOSContoursReaderState::STATE_RIGHT_CONTOUR_ARRAY;
+ break;
+ case EOSContoursReaderState::STATE_LEFT_CONTOUR:
+ st->state = EOSContoursReaderState::STATE_LEFT_CONTOUR_ARRAY;
+ break;
+ default:
+ st->state = EOSContoursReaderState::STATE_ERROR;
+ break;
+ }
+ break;
+ case kJSONNumber:
+ switch (st->state) {
+ case EOSContoursReaderState::STATE_RIGHT_CONTOUR_ARRAY:
+ st->fac->appendModelRightContour((uint16_t)info->number);
+ break;
+ case EOSContoursReaderState::STATE_LEFT_CONTOUR_ARRAY:
+ st->fac->appendModelLeftContour((uint16_t)info->number);
+ break;
+ default:
+ st->state = EOSContoursReaderState::STATE_ERROR;
+ break;
+ }
+ break;
+ case kJSONMember:
+ if (!strcmp(info->value, "model_contour"))
+ st->state = EOSContoursReaderState::STATE_MODEL_CONTOUR;
+ else if (!strcmp(info->value, "right_contour"))
+ st->state = EOSContoursReaderState::STATE_RIGHT_CONTOUR;
+ else if (!strcmp(info->value, "left_contour"))
+ st->state = EOSContoursReaderState::STATE_LEFT_CONTOUR;
+ else
+ st->state = EOSContoursReaderState::STATE_ERROR;
+ break;
+ }
+ return (EOSContoursReaderState::STATE_ERROR == st->state) ? kIOErrSyntax : kIOErrNone;
+}
+
+static FaceIOErr EOSContoursCloseNode(JSONInfo *info) {
+ EOSContoursReaderState *st = (EOSContoursReaderState *)(info->userData);
+ switch (info->type) {
+ case kJSONObject:
+ --(st->nest);
+ break;
+ case kJSONArray:
+ --(st->nest);
+ st->state = EOSContoursReaderState::STATE_NULL;
+ break;
+ }
+ return kIOErrNone;
+}
+
+static FaceIOErr EOSReadContours(FaceIOAdapter *fac, const char *fileName) {
+ JSONReader rdr(&EOSContoursOpenNode, &EOSContoursCloseNode);
+ EOSContoursReaderState st;
+ FaceIOErr err;
+
+ fac->setModelRightContourSize(0);
+ fac->setModelLeftContourSize(0);
+ st.fac = fac;
+ err = rdr.parse(fileName, &st);
+ return err;
+}
+
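+/* The edge topology JSON is expected to look roughly like (sketch):
+ *   { "edge_topology": { "adjacent_faces":    [ { "value0": f0, "value1": f1 }, ... ],
+ *                        "adjacent_vertices": [ { "value0": v0, "value1": v1 }, ... ] } }
+ * Bare numbers in the arrays are accepted as well. */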
+struct EOSTopologyReaderState {
+ enum {
+ STATE_NULL,
+ STATE_EDGES,
+ STATE_FACES,
+ STATE_VERTICES,
+ STATE_FACES_ARRAY,
+ STATE_VERTICES_ARRAY,
+ STATE_FACES_ARRAY_OBJ,
+ STATE_VERTICES_ARRAY_OBJ,
+ STATE_ERROR
+ };
+ int state, nest;
+ FaceIOAdapter *fac;
+ EOSTopologyReaderState() {
+ state = STATE_NULL;
+ nest = 0;
+ fac = nullptr;
+ };
+};
+
+static FaceIOErr EOSTopologyOpenNode(JSONInfo *info) {
+ EOSTopologyReaderState *st = (EOSTopologyReaderState *)(info->userData);
+ switch (info->type) {
+ case kJSONObject:
+ ++(st->nest);
+ switch (st->state) {
+ case EOSTopologyReaderState::STATE_FACES_ARRAY:
+ st->state = EOSTopologyReaderState::STATE_FACES_ARRAY_OBJ;
+ break;
+ case EOSTopologyReaderState::STATE_VERTICES_ARRAY:
+ st->state = EOSTopologyReaderState::STATE_VERTICES_ARRAY_OBJ;
+ break;
+ default:
+ break;
+ }
+ break;
+ case kJSONArray:
+ ++(st->nest);
+ switch (st->state) {
+ case EOSTopologyReaderState::STATE_FACES:
+ st->state = EOSTopologyReaderState::STATE_FACES_ARRAY;
+ break;
+ case EOSTopologyReaderState::STATE_VERTICES:
+ st->state = EOSTopologyReaderState::STATE_VERTICES_ARRAY;
+ break;
+ default:
+ st->state = EOSTopologyReaderState::STATE_ERROR;
+ break;
+ }
+ break;
+ case kJSONNumber:
+ switch (st->state) {
+ case EOSTopologyReaderState::STATE_FACES_ARRAY:
+ case EOSTopologyReaderState::STATE_FACES_ARRAY_OBJ:
+ st->fac->appendAdjacentFace((uint16_t)info->number);
+ break;
+ case EOSTopologyReaderState::STATE_VERTICES_ARRAY:
+ case EOSTopologyReaderState::STATE_VERTICES_ARRAY_OBJ:
+ st->fac->appendAdjacentVertex((uint16_t)info->number);
+ break;
+ default:
+ st->state = EOSTopologyReaderState::STATE_ERROR;
+ break;
+ }
+ break;
+ case kJSONMember:
+ if (!strcmp(info->value, "edge_topology"))
+ st->state = EOSTopologyReaderState::STATE_EDGES;
+ else if (!strcmp(info->value, "adjacent_faces"))
+ st->state = EOSTopologyReaderState::STATE_FACES;
+ else if (!strcmp(info->value, "adjacent_vertices"))
+ st->state = EOSTopologyReaderState::STATE_VERTICES;
+ else if ((!strcmp(info->value, "value0") || !strcmp(info->value, "value1")) &&
+ (EOSTopologyReaderState::STATE_FACES_ARRAY_OBJ == st->state ||
+ EOSTopologyReaderState::STATE_VERTICES_ARRAY_OBJ == st->state)) {
+ } else
+ st->state = EOSTopologyReaderState::STATE_ERROR;
+ break;
+ }
+ return (EOSTopologyReaderState::STATE_ERROR == st->state) ? kIOErrSyntax : kIOErrNone;
+}
+
+static FaceIOErr EOSTopologyCloseNode(JSONInfo *info) {
+ EOSTopologyReaderState *st = (EOSTopologyReaderState *)(info->userData);
+ switch (info->type) {
+ case kJSONObject:
+ --(st->nest);
+ switch (st->state) {
+ case EOSTopologyReaderState::STATE_FACES_ARRAY_OBJ:
+ st->state = EOSTopologyReaderState::STATE_FACES_ARRAY;
+ break;
+ case EOSTopologyReaderState::STATE_VERTICES_ARRAY_OBJ:
+ st->state = EOSTopologyReaderState::STATE_VERTICES_ARRAY;
+ break;
+ default:
+ st->state = EOSTopologyReaderState::STATE_ERROR;
+ break;
+ }
+ break;
+ case kJSONArray:
+ --(st->nest);
+ st->state = EOSTopologyReaderState::STATE_NULL;
+ break;
+ }
+ return kIOErrNone;
+}
+
+static FaceIOErr EOSReadTopology(FaceIOAdapter *fac, const char *fileName) {
+ JSONReader rdr(&EOSTopologyOpenNode, &EOSTopologyCloseNode);
+ EOSTopologyReaderState st;
+ FaceIOErr err;
+
+ fac->setAdjacentFacesSize(0);
+ fac->setAdjacentVerticesSize(0);
+ st.fac = fac;
+ err = rdr.parse(fileName, &st);
+ return err;
+}
+
+/********************************************************************************
+ * API ReadEOSFaceModel API *
+ ********************************************************************************/
+
+FaceIOErr ReadEOSFaceModel(const char *shape, unsigned int /*ibugNumLandmarks*/, const char *blendShapes, const char *contours,
+ const char *topology, FaceIOAdapter *fac) {
+ FaceIOErr err = kIOErrNone;
+ FILE *fd;
+
+ if (shape) /* Shape */
+ {
+ if (!HasSuffix(shape, ".bin")) {
+ PrintUnknownFormatMessage(shape, "for shape");
+ BAIL(err, kIOErrFormat);
+ }
+#ifndef _MSC_VER
+ fd = fopen(shape, "rb");
+#else /* _MSC_VER */
+ if (0 != fopen_s(&fd, shape, "rb"))
+ fd = nullptr;
+#endif /* _MSC_VER */
+ if (!fd) {
+ PrintIOError(shape, err = kIOErrFileOpen);
+ goto bail;
+ }
+ err = EOSReadMorphableModel(fac, fd);
+ fclose(fd);
+ BAIL_IF_ERR(err);
+ }
+
+ if (0) /* IBUG */
+ {
+ /* Insert ibug mappings reading code here */
+ } else {
+
+ unsigned n = sizeof(ibugMapping.landmarkMap) / sizeof(ibugMapping.landmarkMap[0][0]);
+ memcpy(fac->getIbugLandmarkMappings(n), &ibugMapping.landmarkMap[0][0],
+ n * sizeof(*fac->getIbugLandmarkMappings()));
+ n = sizeof(ibugMapping.rightContour) / sizeof(ibugMapping.rightContour[0]);
+ memcpy(fac->getIbugRightContour(n), ibugMapping.rightContour, n * sizeof(*fac->getIbugRightContour()));
+ n = sizeof(ibugMapping.leftContour) / sizeof(ibugMapping.leftContour[0]);
+ memcpy(fac->getIbugLeftContour(n), ibugMapping.leftContour, n * sizeof(*fac->getIbugLeftContour()));
+ }
+
+ if (blendShapes) /* Blend Shapes */
+ {
+ if (!HasSuffix(blendShapes, ".bin")) {
+ PrintUnknownFormatMessage(blendShapes, "for blend shapes");
+ BAIL(err, kIOErrFormat);
+ }
+#ifndef _MSC_VER
+ fd = fopen(blendShapes, "rb");
+#else /* _MSC_VER */
+ if (0 != fopen_s(&fd, blendShapes, "rb"))
+ fd = nullptr;
+#endif /* _MSC_VER */
+ if (!fd) {
+ PrintIOError(blendShapes, err = kIOErrFileOpen);
+ goto bail;
+ }
+ err = EOSReadBlendShapes(fac, fd);
+ fclose(fd);
+ BAIL_IF_ERR(err);
+ }
+
+ if (contours) /* Contours */
+ {
+ if (!HasSuffix(contours, ".json")) {
+ PrintUnknownFormatMessage(contours, "for contours");
+ BAIL(err, kIOErrFormat);
+ }
+ err = EOSReadContours(fac, contours);
+ if (kIOErrNone != err) {
+ PrintIOError(contours, err);
+ goto bail;
+ }
+ }
+
+ if (topology) /* Topology */
+ {
+ if (!HasSuffix(topology, ".json")) {
+ PrintUnknownFormatMessage(topology, "for topology");
+ BAIL(err, kIOErrFormat);
+ }
+ err = EOSReadTopology(fac, topology);
+ if (kIOErrNone != err) {
+ PrintIOError(topology, err);
+ goto bail;
+ }
+ }
+bail:
+ return err;
+}
+
+/********************************************************************************
+ ********************************************************************************
+ ********************************************************************************
+ ***** JSON Output *****
+ ********************************************************************************
+ ********************************************************************************
+ ********************************************************************************/
+
+/* The maximum number of elements to print in one row when printing an array */
+#define MAX_ROW_SIZE 12
+
+static void JSONPrintModes(uint32_t size, uint32_t numModes, const float *data, JSONWriter &wtr, const char *tag) {
+ uint32_t modeSize = size / numModes;
+ uint32_t maxCols = (modeSize < MAX_ROW_SIZE) ? modeSize : MAX_ROW_SIZE;
+ wtr.openObject(tag);
+ if (modeSize > 1) wtr.writeNumber(modeSize, "mode_size");
+ if (numModes > 1) wtr.writeNumber(numModes, "num_modes");
+ wtr.writeNumericArray(size, data, maxCols, "data");
+ wtr.closeObject();
+}
+
+static void JSONPrintShapeModel(const FaceIOAdapter *fac, JSONWriter &wtr, const char *tag) {
+ wtr.openObject(tag);
+ JSONPrintModes(fac->getShapeMeanSize(), 1, fac->getShapeMean(), wtr, "mean"); // This is really a vector
+ JSONPrintModes(fac->getShapeModesSize(), fac->getShapeNumModes(), fac->getShapeModes(), wtr, "pca_basis");
+ JSONPrintModes(fac->getShapeEigenvaluesSize(), fac->getShapeEigenvaluesSize(), fac->getShapeEigenvalues(), wtr,
+ "eigenvalues");
+ wtr.writeNumericArray(fac->getTriangleListSize(), fac->getTriangleList(), 3, "triangles");
+ wtr.closeObject();
+}
+
+static void JSONPrintColorModel(const FaceIOAdapter *fac, JSONWriter &wtr, const char *tag) {
+ wtr.openObject(tag);
+ JSONPrintModes(fac->getColorMeanSize(), 1, fac->getColorMean(), wtr, "mean"); // This is really a vector
+ JSONPrintModes(fac->getColorModesSize(), fac->getColorNumModes(), fac->getColorModes(), wtr, "pca_basis");
+ JSONPrintModes(fac->getColorEigenvaluesSize(), fac->getColorEigenvaluesSize(), fac->getColorEigenvalues(), wtr,
+ "eigenvalues");
+ wtr.writeNumericArray(fac->getTriangleListSize(), fac->getTriangleList(), 3, "triangles");
+ wtr.closeObject();
+}
+
+static void JSONPrintMorphableModel(const FaceIOAdapter *fac, JSONWriter &wtr, const char *tag) {
+ wtr.openObject(tag);
+ JSONPrintShapeModel(fac, wtr, "shape_model");
+ if (fac->getColorMeanSize() + fac->getColorModesSize() + fac->getColorEigenvaluesSize() != 0)
+ JSONPrintColorModel(fac, wtr, "color_model"); // Don't print color unless there is something there
+ if (fac->getTextureCoordinatesSize())
+ wtr.writeNumericArray(fac->getTextureCoordinatesSize(), fac->getTextureCoordinates(), 2, "texture_coordinates");
+ wtr.closeObject();
+}
+
+static void JSONPrintBlendShapes(const FaceIOAdapter *fac, JSONWriter &wtr, const char *tag) {
+ uint32_t i, n;
+ wtr.openArray(tag);
+ for (i = 0, n = fac->getNumBlendShapes(); i < n; ++i) {
+ wtr.openObject(nullptr);
+ wtr.writeString(fac->getBlendShapeName(i), "name");
+ JSONPrintModes(fac->getBlendShapeSize(i), 1, fac->getBlendShape(i), wtr, "blend_shape"); // This is really a vector
+ wtr.closeObject();
+ }
+ wtr.closeArray();
+}
+
+static void JSONPrintIbugMappings(const FaceIOAdapter *fac, JSONWriter &wtr, const char *tag) {
+ uint32_t n;
+ wtr.openObject(tag);
+ wtr.writeNumericArray(fac->getIbugLandmarkMappingsSize(), fac->getIbugLandmarkMappings(), 2, "landmark_mappings");
+ n = (fac->getIbugRightContourSize());
+ wtr.writeNumericArray(n, fac->getIbugRightContour(), n, "right_contour"); // straight across
+ n = fac->getIbugLeftContourSize();
+ wtr.writeNumericArray(n, fac->getIbugLeftContour(), n, "left_contour"); // straight across
+ wtr.closeObject();
+}
+
+static FaceIOErr JSONPrintContours(const FaceIOAdapter *fac, JSONWriter &wtr, const char *tag) {
+ wtr.openObject(tag);
+ wtr.writeNumericArray(fac->getModelRightContourSize(), fac->getModelRightContour(),
+ fac->getModelRightContourSize(), "right_contour");
+ wtr.writeNumericArray(fac->getModelLeftContourSize(), fac->getModelLeftContour(),
+ fac->getModelLeftContourSize(), "left_contour");
+ wtr.closeObject();
+ return kIOErrNone;
+}
+
+static FaceIOErr JSONPrintTopology(const FaceIOAdapter *fac, JSONWriter &wtr, const char *tag) {
+ if (!tag) tag = "edge_topology";
+ wtr.openObject(tag);
+ wtr.writeNumericArray(fac->getAdjacentFacesSize(), fac->getAdjacentFaces(), 2, "adjacent_faces");
+ wtr.writeNumericArray(fac->getAdjacentVerticesSize(), fac->getAdjacentVertices(), 2, "adjacent_vertices");
+ wtr.closeObject();
+ return kIOErrNone;
+}
+
+static void JSONPrintNvlm(const FaceIOAdapter *fac, JSONWriter &wtr, const char *tag) {
+ uint32_t n;
+ wtr.openObject(tag);
+ wtr.writeNumericArray(fac->getNvlmLandmarksSize(), fac->getNvlmLandmarks(), 2, "landmark_mapping");
+ n = (fac->getNvlmRightContourSize());
+ wtr.writeNumericArray(n, fac->getNvlmRightContour(), n, "right_contour"); // straight across
+ n = fac->getNvlmLeftContourSize();
+ wtr.writeNumericArray(n, fac->getNvlmLeftContour(), n, "left_contour"); // straight across
+ wtr.closeObject();
+}
+
+static void JSONPrintPartitions(const FaceIOAdapter *fac, JSONWriter &wtr, const char *tag) {
+ uint32_t i, n;
+ wtr.openArray(tag);
+ for (i = 0, n = fac->getNumPartitions(); i < n; ++i) {
+ uint32_t partitionIndex, faceIndex, numFaces, vertexIndex, numVertices;
+ int32_t smoothingGroup;
+ const char *str;
+ wtr.openObject(nullptr);
+ partitionIndex = fac->getPartition(i, &faceIndex, &numFaces, &vertexIndex, &numVertices, &smoothingGroup);
+ wtr.writeNumber(partitionIndex, "partition_index");
+ wtr.writeNumber(faceIndex, "face_index");
+ wtr.writeNumber(numFaces, "num_faces");
+ wtr.writeNumber(vertexIndex, "vertex_index");
+ wtr.writeNumber(numVertices, "num_vertices");
+ wtr.writeNumber(smoothingGroup, "smoothing_group");
+ if (nullptr != (str = fac->getPartitionName(i)) && str[0])
+ wtr.writeString(str, "name");
+ if (nullptr != (str = fac->getPartitionMaterialName(i)) && str[0])
+ wtr.writeString(str, "material");
+ wtr.closeObject();
+ }
+ wtr.closeArray();
+}
+
+
+/********************************************************************************
+ * API PrintJSONFaceModel API *
+ ********************************************************************************/
+
+FaceIOErr PrintJSONFaceModel(FaceIOAdapter *fac, const char *file) {
+ FaceIOErr err;
+ JSONWriter wtr;
+
+ err = wtr.open(file);
+ if (kIOErrNone != err) {
+ PrintIOError(file, err);
+ return err;
+ }
+
+ wtr.openObject();
+ JSONPrintMorphableModel(fac, wtr, "morphable_model");
+ JSONPrintIbugMappings(fac, wtr, "ibug_mappings");
+ JSONPrintBlendShapes(fac, wtr, "blend_shapes");
+ JSONPrintContours(fac, wtr, "contours");
+ JSONPrintTopology(fac, wtr, "edge_topology");
+ if (fac->getNvlmLandmarksSize())
+ JSONPrintNvlm(fac, wtr, "nvidia_mappings");
+ if (fac->getNumPartitions())
+ JSONPrintPartitions(fac, wtr, "partitions");
+ wtr.closeObject();
+
+ return err;
+}
diff --git a/samples/ExpressionApp/BackEndOpenGL/FaceIO.h b/samples/ExpressionApp/BackEndOpenGL/FaceIO.h
new file mode 100644
index 0000000..2c42dd0
--- /dev/null
+++ b/samples/ExpressionApp/BackEndOpenGL/FaceIO.h
@@ -0,0 +1,231 @@
+/*###############################################################################
+#
+# Copyright 2019-2021 NVIDIA Corporation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy of
+# this software and associated documentation files (the "Software"), to deal in
+# the Software without restriction, including without limitation the rights to
+# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+# the Software, and to permit persons to whom the Software is furnished to do so,
+# subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+###############################################################################*/
+
+#ifndef __FACE_IO__
+#define __FACE_IO__
+
+#include <cstdint>
+
+enum FaceIOErr {
+ kIOErrNone,
+ kIOErrFileNotFound,
+ kIOErrFileOpen,
+ kIOErrEOF,
+ kIOErrRead,
+ kIOErrWrite,
+ kIOErrSyntax,
+ kIOErrFormat,
+ kIOErrNotValue,
+ kIOErrNullPointer,
+ kIOErrParameter,
+};
+
+const char* FaceIOErrorStringFromCode(FaceIOErr err);
+
+/********************************************************************************
+ ********************************************************************************
+ ********************************************************************************
+ ***** IO Adapter *****
+ ********************************************************************************
+ ********************************************************************************
+ ********************************************************************************/
+
+/********************************************************************************
+ * FaceIOAdapter.
+ * Subclass from this and supply the accessors.
+ ********************************************************************************/
+
+class FaceIOAdapter {
+public:
+  virtual uint32_t getShapeMeanSize() const { return 0; } /* The size of the shape mean, in elements. */
+ virtual uint32_t getShapeModesSize() const { return 0; } /* The total size of all shape modes (numModes*modeSize) */
+ virtual uint32_t getShapeNumModes() const { return 0; } /* The number of shape modes. */
+ virtual uint32_t getShapeEigenvaluesSize() const { return 0; } /* The number of shape eigenvalues
+ (should equal the number of modes) */
+ virtual float* getShapeMean(uint32_t /*size*/) { return nullptr; } /* Get a pointer to the shape mean.
+                                                            If a nonzero size is supplied, it is resized first. */
+ virtual float* getShapeModes(uint32_t /*modeSize*/, uint32_t /*numModes*/) { return nullptr; } /* Get a pointer to the
+ shape modes, resizing first, if the parameters are nonzero. */
+ virtual float* getShapeEigenvalues(uint32_t /*numModes*/) { return nullptr; } /* Get a pointer to the shape eigenvalues,
+ resizing first if numModes is nonzero. */
+
+ virtual uint32_t getColorMeanSize() const { return 0; } /* The color mean ... */
+ virtual uint32_t getColorModesSize() const { return 0; } /* ... and modes */
+ virtual uint32_t getColorNumModes() const { return 0; }
+ virtual uint32_t getColorEigenvaluesSize() const { return 0; }
+ virtual float* getColorMean(uint32_t /*size*/) { return nullptr; }
+ virtual float* getColorModes(uint32_t /*modeSize*/, uint32_t /*numModes*/) { return nullptr; }
+ virtual float* getColorEigenvalues(uint32_t /*numModes*/) { return nullptr; }
+
+ virtual void setTriangleListSize(uint32_t /*size*/) {} /* The triangle list */
+ virtual uint32_t getTriangleListSize() const = 0;
+ virtual uint16_t* getTriangleList(uint32_t /*size*/) { return nullptr; }
+
+ virtual void setTextureCoordinatesSize(uint32_t /*size*/) {} /* The texture coordinates */
+ virtual uint32_t getTextureCoordinatesSize() const { return 0; }
+ virtual float* getTextureCoordinates(uint32_t /*size*/) { return nullptr; }
+
+ virtual void setNumBlendShapes(uint32_t /*numShapes*/) {} /* The blend shapes */
+ virtual void setBlendShapeName(uint32_t /*i*/, const char* /*name*/) {}
+ virtual uint32_t getNumBlendShapes() const { return 0; }
+ virtual const char* getBlendShapeName(uint32_t /*i*/) const { return nullptr; }
+ virtual uint32_t getBlendShapeSize(uint32_t /*i*/) const { return 0; }
+ virtual float* getBlendShape(uint32_t /*i*/, uint32_t /*size*/) { return nullptr; }
+
+ virtual void setIbugLandmarkMappingsSize(uint32_t /*n*/) {} /* The mappings from IBUG landmarks to vertex index */
+ virtual uint32_t getIbugLandmarkMappingsSize() const { return 0; }
+ virtual uint16_t* getIbugLandmarkMappings(uint32_t /*size*/) { return nullptr; }
+ virtual void appendIbugLandmarkMapping(uint16_t /*i*/) {}
+ virtual void appendIbugLandmarkMapping(uint16_t /*i*/, uint16_t /*j*/) {}
+
+ virtual void setIbugRightContourSize(uint32_t /*n*/) {} /* The IBUG contour on the right side of the face */
+ virtual uint32_t getIbugRightContourSize() const { return 0; }
+ virtual uint16_t* getIbugRightContour(uint32_t /*size*/) { return nullptr; }
+ virtual void appendIbugRightContour(uint16_t /*i*/) {}
+
+ virtual void setIbugLeftContourSize(uint32_t /*n*/) {} /* The IBUG contour on the left side of the face */
+ virtual uint32_t getIbugLeftContourSize() const { return 0; }
+ virtual uint16_t* getIbugLeftContour(uint32_t /*size*/) { return nullptr; }
+ virtual void appendIbugLeftContour(uint16_t /*i*/) {}
+
+ virtual void setModelRightContourSize(uint32_t /*n*/) {} /* The right contour of our model */
+ virtual uint32_t getModelRightContourSize() const { return 0; }
+ virtual uint16_t* getModelRightContour(uint32_t /*size*/) { return nullptr; }
+ virtual void appendModelRightContour(uint16_t /*i*/) {}
+
+ virtual void setModelLeftContourSize(uint32_t /*n*/) {} /* The left contour of our model */
+ virtual uint32_t getModelLeftContourSize() const { return 0; }
+ virtual uint16_t* getModelLeftContour(uint32_t /*size*/) { return nullptr; }
+ virtual void appendModelLeftContour(uint16_t /*i*/) {}
+
+ virtual void setAdjacentFacesSize(uint32_t /*n*/) {} /* The topology of adjacent faces to each edge */
+ virtual uint32_t getAdjacentFacesSize() const { return 0; }
+ virtual uint16_t* getAdjacentFaces(uint32_t /*size*/) { return nullptr; }
+ virtual void appendAdjacentFace(uint16_t /*i*/) {}
+ virtual void appendAdjacentFaces(uint16_t /*i*/, uint16_t /*j*/) {}
+
+ virtual void setAdjacentVerticesSize(uint32_t /*n*/) {} /* The topology of adjacent vertices to each edge */
+ virtual uint32_t getAdjacentVerticesSize() const { return 0; }
+ virtual uint16_t* getAdjacentVertices(uint32_t /*size*/) { return nullptr; }
+ virtual void appendAdjacentVertex(uint16_t /*i*/) {}
+ virtual void appendAdjacentVertices(uint16_t /*i*/, uint16_t /*j*/) {}
+
+ virtual void setNvlmLandmarksSize(uint32_t /*n*/) {} /* The tracked landmarks */
+ virtual uint32_t getNvlmLandmarksSize() const { return 0; }
+ virtual uint16_t* getNvlmLandmarks(uint32_t /*size*/) { return nullptr; }
+ virtual void appendNvlmLandmark(uint16_t /*i*/) {}
+
+ virtual void setNvlmRightContourSize(uint32_t /*n*/) {} /* The tracked right jawline contour */
+ virtual uint32_t getNvlmRightContourSize() const { return 0; }
+ virtual uint16_t* getNvlmRightContour(uint32_t /*size*/) { return nullptr; }
+ virtual void appendNvlmRightContour(uint16_t /*i*/) {}
+
+ virtual void setNvlmLeftContourSize(uint32_t /*n*/) {}; /* The tracked left jawline contour */
+ virtual uint32_t getNvlmLeftContourSize() const { return 0; }
+ virtual uint16_t* getNvlmLeftContour(uint32_t /*size*/) { return nullptr; }
+ virtual void appendNvlmLeftContour(uint16_t /*i*/) {}
+
+ virtual void setNumPartitions(uint32_t /*n*/) {}
+ virtual void setPartitionName(uint32_t /*i*/, const char* /*name*/) {}
+ virtual void setPartitionMaterialName(uint32_t /*i*/, const char* /*name*/) {}
+ virtual void setPartition(uint32_t /*i*/, uint32_t /*faceIndex*/, uint32_t /*numFaces*/,
+ uint32_t /*vertexIndex*/, uint32_t /*numVertices*/, int32_t /*smoothingGroup*/) {}
+ virtual uint32_t getNumPartitions() const { return 0; }
+ virtual const char* getPartitionName(uint32_t /*i*/) const { return nullptr; }
+ virtual const char* getPartitionMaterialName(uint32_t /*i*/) const { return nullptr; }
+ virtual int16_t getPartition(uint32_t /*i*/, uint32_t* faceIndex, uint32_t* numFaces, uint32_t* vertexIndex,
+ uint32_t* numVertices, int32_t* smoothingGroup) const
+ { if (faceIndex) *faceIndex = 0u; if (numFaces) *numFaces = 0u; if (vertexIndex) *vertexIndex = 0u;
+ if (numVertices) *numVertices = 0u; if (smoothingGroup) *smoothingGroup = -1; return /*partitionIndex*/-1;
+ }
+
+
+ /* Const accessors do not have the ability to resize. */
+  const float* getShapeMean() const { return const_cast<FaceIOAdapter*>(this)->getShapeMean(0); }
+  const float* getShapeModes() const { return const_cast<FaceIOAdapter*>(this)->getShapeModes(0, 0); }
+  const float* getShapeEigenvalues() const { return const_cast<FaceIOAdapter*>(this)->getShapeEigenvalues(0); }
+  const float* getColorMean() const { return const_cast<FaceIOAdapter*>(this)->getColorMean(0); }
+  const float* getColorModes() const { return const_cast<FaceIOAdapter*>(this)->getColorModes(0, 0); }
+  const float* getColorEigenvalues() const { return const_cast<FaceIOAdapter*>(this)->getColorEigenvalues(0); }
+  const float* getTextureCoordinates() const { return const_cast<FaceIOAdapter*>(this)->getTextureCoordinates(0); }
+  const uint16_t* getTriangleList() const { return const_cast<FaceIOAdapter*>(this)->getTriangleList(0); }
+  const float* getBlendShape(uint32_t i) const { return const_cast<FaceIOAdapter*>(this)->getBlendShape(i, 0); }
+  const uint16_t* getIbugLandmarkMappings() const { return const_cast<FaceIOAdapter*>(this)->getIbugLandmarkMappings(0); }
+  const uint16_t* getIbugRightContour() const { return const_cast<FaceIOAdapter*>(this)->getIbugRightContour(0); }
+  const uint16_t* getIbugLeftContour() const { return const_cast<FaceIOAdapter*>(this)->getIbugLeftContour(0); }
+  const uint16_t* getModelRightContour() const { return const_cast<FaceIOAdapter*>(this)->getModelRightContour(0); }
+  const uint16_t* getModelLeftContour() const { return const_cast<FaceIOAdapter*>(this)->getModelLeftContour(0); }
+  const uint16_t* getAdjacentFaces() const { return const_cast<FaceIOAdapter*>(this)->getAdjacentFaces(0); }
+  const uint16_t* getAdjacentVertices() const { return const_cast<FaceIOAdapter*>(this)->getAdjacentVertices(0); }
+  const uint16_t* getNvlmLandmarks() const { return const_cast<FaceIOAdapter*>(this)->getNvlmLandmarks(0); }
+  const uint16_t* getNvlmRightContour() const { return const_cast<FaceIOAdapter*>(this)->getNvlmRightContour(0); }
+  const uint16_t* getNvlmLeftContour() const { return const_cast<FaceIOAdapter*>(this)->getNvlmLeftContour(0); }
+};
+
+/** Write the face model as an NVF model.
+ * @param[in] fac the face I/O adapter for the target data structure.
+ * @param[in] fileName the desired name of the output file.
+ * @return kIOErrNone if the file was written successfully.
+ * @return kIOErrFileOpen if the file could not be opened.
+ * @return kIOErrWrite if an error occurred while writing the file.
+ */
+FaceIOErr WriteNVFFaceModel(FaceIOAdapter* fac, const char* fileName);
+
+/** Read a face model from an NVF file.
+ * @param[in] fileName the name of the file to be read.
+ * @param[in,out] fac the face I/O adapter for the target data structure.
+ * @return kIOErrNone if the file was read successfully.
+ * @return kIOErrFileNotFound if the file was not found.
+ * @return kIOErrFileOpen if the file could not be opened.
+ * @return kIOErrRead if an error occurred while reading the file.
+ * @return kIOErrSyntax if a syntax error has been encountered while reading the file.
+ */
+FaceIOErr ReadNVFFaceModel(const char* fileName, FaceIOAdapter* fac);
+
+/** Read a face model from four EOS files.
+ * @param[in] shape the name of the shape file to be read.
+ * @param[in] ibugNumLandmarks the number of IBUG landmarks.
+ * @param[in] blendShapes the name of the blend shapes file to be read.
+ * @param[in] contours the name of the contours file to be read.
+ * @param[in] topology the name of the topology file to be read.
+ * @param[in,out] fac the face I/O adapter for the target data structure.
+ * @return kIOErrNone if the file was read successfully.
+ * @return kIOErrFileNotFound if the file was not found.
+ * @return kIOErrFileOpen if the file could not be opened.
+ * @return kIOErrRead if an error occurred while reading the file.
+ * @return kIOErrSyntax if a syntax error has been encountered while reading the file.
+ */
+FaceIOErr ReadEOSFaceModel(const char* shape, const unsigned ibugNumLandmarks, const char* blendShapes,
+ const char* contours, const char* topology, FaceIOAdapter* fac);
+
+/** Write the face model as a JSON model.
+ * @param[in] fac the face I/O adapter for the target data structure.
+ * @param[in] fileName the desired name of the output file.
+ * If NULL is supplied, it is written to the standard output.
+ * @return kIOErrNone if the file was written successfully.
+ * @return kIOErrFileOpen if the file could not be opened.
+ * @return kIOErrWrite if an error occurred while writing the file.
+ */
+FaceIOErr PrintJSONFaceModel(FaceIOAdapter* fac, const char* fileName);
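+
+/* A usage sketch, not part of the SDK API: subclass FaceIOAdapter, expose only the data
+ * you need (the MyFace type below is hypothetical), and hand the adapter to a reader and
+ * a writer. Only getTriangleListSize() is pure virtual; every other accessor has a
+ * do-nothing default.
+ *
+ *   #include <vector>
+ *
+ *   class MyFace : public FaceIOAdapter {
+ *   public:
+ *     std::vector<float>    shapeMean;  // xyz triples of the mean shape
+ *     std::vector<uint16_t> triangles;  // vertex indices, 3 per triangle
+ *     uint32_t  getShapeMeanSize() const override { return (uint32_t)shapeMean.size(); }
+ *     float*    getShapeMean(uint32_t size) override {
+ *       if (size) shapeMean.resize(size);        // a nonzero size requests a resize
+ *       return shapeMean.data();
+ *     }
+ *     uint32_t  getTriangleListSize() const override { return (uint32_t)triangles.size(); }
+ *     uint16_t* getTriangleList(uint32_t size) override {
+ *       if (size) triangles.resize(size);
+ *       return triangles.data();
+ *     }
+ *   };
+ *
+ *   MyFace face;
+ *   if (kIOErrNone == ReadNVFFaceModel("model.nvf", &face))  // "model.nvf" is a placeholder path
+ *     PrintJSONFaceModel(&face, nullptr);                    // NULL file name => standard output
+ */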
+
+#endif /* __FACE_IO__ */
diff --git a/samples/ExpressionApp/BackEndOpenGL/GLMaterial.cpp b/samples/ExpressionApp/BackEndOpenGL/GLMaterial.cpp
new file mode 100644
index 0000000..7ca2578
--- /dev/null
+++ b/samples/ExpressionApp/BackEndOpenGL/GLMaterial.cpp
@@ -0,0 +1,314 @@
+/*###############################################################################
+#
+# Copyright 2016-2021 NVIDIA Corporation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy of
+# this software and associated documentation files (the "Software"), to deal in
+# the Software without restriction, including without limitation the rights to
+# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+# the Software, and to permit persons to whom the Software is furnished to do so,
+# subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+###############################################################################*/
+
+#include "GLMaterial.h"
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <fstream>
+#include <sstream>
+#include <vector>
+
+
+#ifdef _MSC_VER
+ #define strcasecmp _stricmp
+#endif // _MSC_VER
+
+
+ ////////////////////////////////////////////////////////////////////////////////
+ ///// UTILITY FUNCTIONS /////
+ ////////////////////////////////////////////////////////////////////////////////
+
+
+static void SplitString(const std::string &s, std::vector<std::string> &tokens) {
+ tokens.clear();
+ std::string token;
+ std::istringstream tokenStream(s);
+ while (std::getline(tokenStream, token, ' ')) {
+ if (0 == token.size())
+ continue;
+ tokens.push_back(token);
+ }
+}
+
+
+static void SetColorFromStringArray(GLSpectrum3f& color, std::string* strs) {
+ color.r = strtof(strs[0].c_str(), nullptr);
+ color.g = strtof(strs[1].c_str(), nullptr);
+ color.b = strtof(strs[2].c_str(), nullptr);
+}
+
+
+static bool StrToBool(const char* str) {
+ return strcasecmp(str, "true") == 0 ||
+ strcasecmp(str, "on") == 0 ||
+ strcasecmp(str, "yes") == 0 ||
+ strcasecmp(str, "1") == 0;
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+///// GLMaterial /////
+////////////////////////////////////////////////////////////////////////////////
+
+
+GLMaterial::~GLMaterial() {
+ //if (diffuseTexture)
+ // delete diffuseTexture;
+}
+
+
+GLMaterial::GLMaterial() {
+ clear();
+}
+
+
+GLMaterial::GLMaterial(const GLMaterial& mtl) {
+ *this = mtl;
+}
+
+
+void GLMaterial::setTextureFile(const char* file) {
+ if (file) diffuseTextureFile = file;
+ else diffuseTextureFile.clear();
+}
+
+
+void GLMaterial::clear() {
+ diffuseColor.set(1.f, 1.f, 1.f);
+ ambientColor.set(0.f, 0.f, 0.f);
+ specularColor.set(0.f, 0.f, 0.f);
+ transmissionColor.set(0.f, 0.f, 0.f);
+ specularExponent = 0.f;
+ opacity = 1.f;
+ diffuseTexture = nullptr;
+ diffuseTextureFile.clear();
+ illuminationModel = kUnspecifiedIlluminationModel;
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+///// GLMaterialLibrary /////
+////////////////////////////////////////////////////////////////////////////////
+
+
+struct GLMaterialName {
+ GLMaterial mtl;
+ std::string name;
+ GLMaterialName() {}
+ GLMaterialName(const GLMaterial& matParam, const char* nameParam) {
+ mtl = matParam;
+ name = nameParam;
+ }
+};
+
+
+struct GLMaterialLibrary::Impl {
+  std::vector<GLMaterialName> lib;
+};
+
+
+GLMaterialLibrary::GLMaterialLibrary() {
+ pimpl = new Impl;
+}
+
+
+GLMaterialLibrary::~GLMaterialLibrary() {
+ delete pimpl;
+}
+
+
+void GLMaterialLibrary::clear() {
+ pimpl->lib.clear();
+}
+
+
+unsigned GLMaterialLibrary::numMaterials() const {
+ return unsigned(pimpl->lib.size());
+}
+
+
+NvCV_Status GLMaterialLibrary::addMaterial(const GLMaterial& mtrl, const char* name) {
+ if (getMaterial(name))
+ return NVCV_ERR_SELECTOR;
+ pimpl->lib.emplace_back(mtrl, name);
+ return NVCV_SUCCESS;
+}
+
+
+NvCV_Status GLMaterialLibrary::addDiffuseMaterial(const GLSpectrum3f& color, const char* name) {
+ GLMaterial mtrl;
+ if (getMaterial(name))
+ return NVCV_ERR_SELECTOR;
+ mtrl.diffuseColor = color;
+ pimpl->lib.emplace_back(mtrl, name);
+ return NVCV_SUCCESS;
+}
+
+
+NvCV_Status GLMaterialLibrary::removeMaterial(const char* name) {
+ unsigned i, n;
+ for (i = 0, n = unsigned(pimpl->lib.size()); i < n; ++i) {
+ if (name == pimpl->lib[i].name) {
+ pimpl->lib.erase(pimpl->lib.begin() + i);
+ return NVCV_SUCCESS;
+ }
+ }
+ return NVCV_ERR_FEATURENOTFOUND;
+}
+
+
+GLMaterial* GLMaterialLibrary::newMaterial(const char* name) {
+ if (getMaterial(name))
+ return nullptr;
+ size_t z = pimpl->lib.size();
+ pimpl->lib.resize(z + 1);
+ GLMaterialName* mtn = &pimpl->lib[z];
+ mtn->name = name;
+ return &mtn->mtl;
+}
+
+
+const GLMaterial* GLMaterialLibrary::getMaterial(const char* name) const {
+ GLMaterialName *mp, *mEnd;
+
+ for (mEnd = (mp = pimpl->lib.data()) + pimpl->lib.size(); mp < mEnd; ++mp)
+ if (name == mp->name)
+ return &mp->mtl;
+ return nullptr;
+}
+
+
+const GLMaterial* GLMaterialLibrary::getMaterial(unsigned i, const char** name) const {
+ if (i < pimpl->lib.size()) {
+ const GLMaterialName& matn = pimpl->lib[i];
+ if (name)
+ *name = matn.name.c_str();
+ return &matn.mtl;
+ }
+ if (name)
+    *name = nullptr;
+ return nullptr;
+}
+
+
+NvCV_Status GLMaterialLibrary::read(const char* name) {
+ unsigned lineNum;
+  std::vector<std::string> tokens;
+ GLMaterial *mtl = nullptr;
+
+ clear();
+ std::ifstream fd(name);
+ if (!fd.is_open())
+ return NVCV_ERR_READ;
+ std::string line;
+
+ for (lineNum = 1; std::getline(fd, line); ++lineNum) {
+ SplitString(line, tokens);
+ if (!tokens.size())
+ continue;
+ if (tokens[0][0] == '#') {
+ continue;
+ }
+ if (tokens[0] == "newmtl" && 2 == tokens.size()) {
+ mtl = newMaterial(tokens[1].c_str());
+ continue;
+ }
+ if (tokens[0] == "Ka" && 4 == tokens.size()) {
+ SetColorFromStringArray(mtl->ambientColor, &tokens[1]);
+ continue;
+ }
+ if (tokens[0] == "Kd" && 4 == tokens.size()) {
+ SetColorFromStringArray(mtl->diffuseColor, &tokens[1]);
+ continue;
+ }
+ if (tokens[0] == "Ks" && 4 == tokens.size()) {
+ SetColorFromStringArray(mtl->specularColor, &tokens[1]);
+ continue;
+ }
+ if (tokens[0] == "Tf" && 4 == tokens.size()) {
+ SetColorFromStringArray(mtl->transmissionColor, &tokens[1]);
+ continue;
+ }
+ if (tokens[0] == "illum" && 2 == tokens.size()) {
+ mtl->illuminationModel = (unsigned char)strtol(tokens[1].c_str(), nullptr, 10);
+ continue;
+ }
+ if (tokens[0] == "d" && 2 == tokens.size()) {
+ /* We don't support "_d" */
+ mtl->opacity = strtof(tokens[1].c_str(), nullptr);
+ continue;
+ }
+ if (tokens[0] == "Ns" && 2 == tokens.size()) { /* We don't support "-d" */
+ mtl->specularExponent = strtof(tokens[1].c_str(), nullptr);
+ continue;
+ }
+ if (tokens[0] == "sharpness") { /* We don't support sharpness */
+ continue;
+ }
+ if (tokens[0] == "Ni") { /* We don't support index of refraction */
+ continue;
+ }
+ if (tokens[0] == "map_Kd") {
+ for (unsigned i = 1; i < tokens.size(); ++i) {
+ if (tokens[i] == "-blendu") {
+ ++i;
+ }
+ else if (tokens[i] == "-blendv") {
+ ++i;
+ }
+ else if (tokens[i] == "-cc") {
+ ++i;
+ }
+ else if (tokens[i] == "-clamp") {
+ ++i;
+ }
+ else if (tokens[i] == "-mm") {
+ ++i;
+ }
+ else if (tokens[i] == "-o") {
+ i += 3;
+ }
+ else if (tokens[i] == "-s") {
+ i += 3;
+ }
+ else if (tokens[i] == "-t") {
+ i += 3;
+ }
+ else if (tokens[i] == "-texres") {
+ ++i;
+ }
+ else if (tokens[i][0] == '-') {
+ printf("Unknown option: \"%s\"\n", tokens[i].c_str());
+ }
+ else {
+ mtl->diffuseTextureFile = tokens[i];
+ }
+ }
+ if (mtl->diffuseTextureFile.empty())
+ printf("No diffuse texture given on line %u\n", lineNum);
+ continue;
+ }
+ }
+
+ return NVCV_SUCCESS;
+}
\ No newline at end of file
diff --git a/samples/ExpressionApp/BackEndOpenGL/GLMaterial.h b/samples/ExpressionApp/BackEndOpenGL/GLMaterial.h
new file mode 100644
index 0000000..76653ca
--- /dev/null
+++ b/samples/ExpressionApp/BackEndOpenGL/GLMaterial.h
@@ -0,0 +1,143 @@
+/*###############################################################################
+#
+# Copyright 2016-2021 NVIDIA Corporation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy of
+# this software and associated documentation files (the "Software"), to deal in
+# the Software without restriction, including without limitation the rights to
+# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+# the Software, and to permit persons to whom the Software is furnished to do so,
+# subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+###############################################################################*/
+
+#ifndef __GLMATERIAL_H
+#define __GLMATERIAL_H
+
+#include <string>
+
+#include "GLSpectrum.h"
+#include "nvCVStatus.h"
+
+////////////////////////////////////////////////////////////////////////////////
+/// Specification for light transport on the surfaces of objects.
+/// @todo Store the provenance of the material?
+/// @todo Should we store the name, too?
+////////////////////////////////////////////////////////////////////////////////
+
+class GLMaterial {
+public:
+ /// Default constructor.
+ GLMaterial();
+
+ /// Copy constructor.
+ /// @param[in] mtl the material to copy.
+ /// @note a copy is made of the diffuseTextureFile, if not NULL.
+ GLMaterial(const GLMaterial& mtl);
+
+ /// Destructor.
+ /// @note the diffuseTextureFile string is disposed.
+ /// @note the opaque diffuseTexture is *not* disposed.
+ ~GLMaterial();
+
+ /// Assignment.
+  /// This copies the diffuseTextureFile, if not NULL.
+ /// @param[in] mtl the material to copy (RHS).
+ /// @return a reference to the LHS of the assignment.
+ //GLMaterial& operator=(const GLMaterial& mtl); // default implementation
+
+  /// Reset to the default values set in the constructor.
+ void clear();
+
+ /// Use this to set the diffuseTextureFile, by making a copy of the specified string.
+ /// @param[in] fileName the file name of the texture file. A copy of the string is made.
+ void setTextureFile(const char* fileName);
+
+ GLSpectrum3f ambientColor; ///< The ambient color, in [0,1].
+ GLSpectrum3f diffuseColor; ///< The diffuse color, in [0,1].
+ GLSpectrum3f specularColor; ///< The specular color, in [0,1].
+ GLSpectrum3f transmissionColor; ///< The transmission color, in [0,1].
+ float specularExponent; ///< The specular exponent, in [1, 10000]
+ float opacity; ///< The opacity, in [0,1].
+ std::string diffuseTextureFile; ///< The name of the diffuse texture file. Set through setTextureFile().
+ void *diffuseTexture; ///< User-defined texture representation -- unmanaged.
+ unsigned char illuminationModel; ///< The illumination model, in [0,10], or kUnspecifiedIlluminationModel.
+ static const int kUnspecifiedIlluminationModel = 255; ///< The value to be used for an unspecified illumination model.
+};
+
+
+////////////////////////////////////////////////////////////////////////////////
+/// Library of material specifications for surface light transport.
+////////////////////////////////////////////////////////////////////////////////
+
+class GLMaterialLibrary {
+public:
+ /// Constructor.
+ GLMaterialLibrary();
+
+ /// Destructor.
+ ~GLMaterialLibrary();
+
+ /// Reset as it was in the constructor: 0 materials.
+ void clear();
+
+  /// Read materials from a Wavefront .mtl file.
+  /// @param[in] name the path of the .mtl file to be read.
+  /// @return NVCV_SUCCESS if the file was parsed successfully.
+  /// @return NVCV_ERR_READ if the file could not be opened.
+ NvCV_Status read(const char* name);
+
+  /// Add a new material to the library. A copy is made of both the material and the name.
+  /// @param[in] mtrl the material to be added to the library.
+  /// @param[in] name the name of the material, for future access.
+  /// @return NVCV_SUCCESS if the operation was completed successfully.
+  /// @return NVCV_ERR_SELECTOR if a material of the same name is already found in the library.
+  NvCV_Status addMaterial(const GLMaterial& mtrl, const char* name);
+
+  /// Add a new diffuse material to the library. A copy is made of the name.
+  /// @param[in] color the diffuse color to be added to the library.
+  /// @param[in] name the name of the material, for future access.
+  /// @return NVCV_SUCCESS if the operation was completed successfully.
+  /// @return NVCV_ERR_SELECTOR if a material of the same name is already found in the library.
+  NvCV_Status addDiffuseMaterial(const GLSpectrum3f& color, const char* name);
+
+  /// Remove a material.
+  /// @param[in] name the name of the material to remove.
+  /// @return NVCV_SUCCESS if the operation was completed successfully.
+  /// @return NVCV_ERR_FEATURENOTFOUND if no material with the given name was found.
+  NvCV_Status removeMaterial(const char* name);
+
+ /// Create a new material with the given name.
+ /// @param[in] name the name of the material, for future access.
+ /// @return a pointer to the new material in the database, if the operation was completed successfully.
+ /// @return NULL, if a material of the same name is already found in the library.
+ GLMaterial* newMaterial(const char* name);
+
+ /// Get the number of materials in the material library.
+ /// @return the number of materials.
+ unsigned numMaterials() const;
+
+ /// Get the material with the specified name.
+ /// @param[in] name the name of the material to get.
+ /// @return the specified material, or NULL if the material was not found.
+ const GLMaterial* getMaterial(const char* name) const;
+
+ /// Get the material with the specified index.
+ /// @param[in] i the index of the material to get.
+ /// @param[out] name the name of the material with the specified index (can be NULL).
+ /// @return the specified material, or NULL if the material was not found.
+ const GLMaterial* getMaterial(unsigned i, const char** name = nullptr) const;
+
+private:
+ struct Impl;
+ Impl *pimpl;
+};
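+
+/* A usage sketch (not part of the SDK API; the file and material names are hypothetical):
+ * load a Wavefront-style .mtl file and look up a material by name.
+ *
+ *   GLMaterialLibrary lib;
+ *   if (NVCV_SUCCESS == lib.read("assets/face.mtl")) {
+ *     const GLMaterial* skin = lib.getMaterial("skin");
+ *     if (skin && !skin->diffuseTextureFile.empty()) {
+ *       // e.g. load the texture named by diffuseTextureFile and keep the handle
+ *       // in diffuseTexture, which the library leaves unmanaged.
+ *     }
+ *   }
+ *
+ * read() recognizes a subset of the MTL statements (newmtl, Ka, Kd, Ks, Tf, illum,
+ * d, Ns, map_Kd); anything else is skipped.
+ */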
+
+
+#endif /* __GLMATERIAL_H */
diff --git a/samples/ExpressionApp/BackEndOpenGL/GLMesh.cpp b/samples/ExpressionApp/BackEndOpenGL/GLMesh.cpp
new file mode 100644
index 0000000..f69446a
--- /dev/null
+++ b/samples/ExpressionApp/BackEndOpenGL/GLMesh.cpp
@@ -0,0 +1,624 @@
+/*###############################################################################
+#
+# Copyright 2021 NVIDIA Corporation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy of
+# this software and associated documentation files (the "Software"), to deal in
+# the Software without restriction, including without limitation the rights to
+# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+# the Software, and to permit persons to whom the Software is furnished to do so,
+# subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+###############################################################################*/
+
+#include "GLMesh.h"
+
+#include <algorithm>
+#include <cstring>
+
+////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
+//// ////
+//// GLMesh ////
+//// ////
+////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
+
+
+
+// Note: we assume that the transformation is affine, i.e. that M[3] = M[7] = M[11] = 0 and M[15] = 1.
+static void TransformPoints(const glm::mat4x4& M, unsigned numPts, const glm::vec3 *pts, glm::vec3 *xPts) {
+ for (; numPts--; ++pts, ++xPts) { // NB it is better to do the dot products in double precision
+ glm::vec3 q; // Use an intermediate variable to allow transformation in-place.
+ q.x = M[0][0] * pts->x + M[1][0] * pts->y + M[2][0] * pts->z + M[3][0];
+ q.y = M[0][1] * pts->x + M[1][1] * pts->y + M[2][1] * pts->z + M[3][1];
+ q.z = M[0][2] * pts->x + M[1][2] * pts->y + M[2][2] * pts->z + M[3][2];
+ *xPts = q;
+ }
+}
+
+
+// Note: We assume here that the transformation is isotropic;
+// otherwise we would need to transform by the inverse transpose of the upper left.
+static void TransformNormals(const glm::mat4x4& M, unsigned numPts, const glm::vec3 *pts, glm::vec3 *xPts) {
+ for (; numPts--; ++pts, ++xPts) { // NB it is better to do the dot products in double precision
+ glm::vec3 q; // Use an intermediate variable to allow transformation in-place.
+ q.x = M[0][0] * pts->x + M[1][0] * pts->y + M[2][0] * pts->z;
+ q.y = M[0][1] * pts->x + M[1][1] * pts->y + M[2][1] * pts->z;
+ q.z = M[0][2] * pts->x + M[1][2] * pts->y + M[2][2] * pts->z;
+ *xPts = glm::normalize(q);
+ }
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+// GLMesh API
+////////////////////////////////////////////////////////////////////////////////
+
+
+
+GLMesh::GLMesh(const GLMesh& mesh) {
+  m_faceVertexCount = mesh.m_faceVertexCount;
+  m_vertices = mesh.m_vertices;
+  m_vertexIndices = mesh.m_vertexIndices;
+  m_texCoords = mesh.m_texCoords;
+  m_textureIndices = mesh.m_textureIndices;
+  m_normals = mesh.m_normals;
+  m_normalIndices = mesh.m_normalIndices;
+  m_faceNormals = mesh.m_faceNormals;
+  m_vertexFaceCount = mesh.m_vertexFaceCount;  // also copy the dual topology ...
+  m_dualIndices = mesh.m_dualIndices;
+  m_partitions = mesh.m_partitions;            // ... and the partitions
+}
+
+GLMesh::GLMesh() { initPartitions(); }  // keep the invariant of at least one partition
+GLMesh::~GLMesh() { }
+void GLMesh::resizeVertices(unsigned n) { m_vertices.resize(n); }
+void GLMesh::resizeTexCoords(unsigned n) {
+ m_texCoords.resize(n);
+ m_textureIndices.resize(n ? unsigned(m_vertexIndices.size()) : 0);
+ }
+void GLMesh::resizeNormals(unsigned n) {
+ m_normals.resize(n);
+ m_normalIndices.resize(n ? unsigned(m_vertexIndices.size()) : 0);
+ }
+void GLMesh::resizeFaces(unsigned n) { m_faceVertexCount.resize(n); }
+void GLMesh::resizeTriangles(unsigned n) { m_faceVertexCount.clear(); m_faceVertexCount.resize(n, 3); }
+void GLMesh::resizeVertexIndices(unsigned n) {
+ m_vertexIndices.resize(n);
+ m_textureIndices.resize(m_texCoords.size() ? n : 0);
+ m_normalIndices.resize(m_normals.size() ? n : 0);
+ }
+void GLMesh::resizeDualIndices(unsigned n) {
+ m_dualIndices.resize(n);
+ m_vertexFaceCount.resize(m_vertices.size());
+ }
+void GLMesh::useFaceNormals(bool yes) { m_faceNormals.resize(yes ? numFaces() : 0); }
+
+unsigned GLMesh::numVertices() const { return unsigned(m_vertices.size()); }
+unsigned GLMesh::numTexCoords() const { return unsigned(m_texCoords.size()); }
+unsigned GLMesh::numNormals() const { return unsigned(m_normals.size()); }
+unsigned GLMesh::numFaces() const { return unsigned(m_faceVertexCount.size()); }
+unsigned GLMesh::numIndices() const { return unsigned(m_vertexIndices.size()); }
+
+
+void GLMesh::initPartitions() {
+ m_partitions.resize(1);
+ Partition& pt = m_partitions[0];
+ pt.faceIndex = 0;
+ pt.vertexIndex = 0;
+ pt.name.clear();
+ pt.materialName.clear();
+}
+
+
+NvCV_Status GLMesh::startPartition(const char *name, const char *material, int smooth) {
+ if (name)
+ for (const GLMesh::Partition& p : m_partitions)
+ if (p.name == name)
+ return NVCV_ERR_SELECTOR;
+
+ unsigned i = unsigned(m_partitions.size());
+ GLMesh::Partition *pt = &m_partitions[i - 1];
+
+ if (m_faceVertexCount.size() != pt->faceIndex) {
+ m_partitions.resize(i + 1);
+ pt = &m_partitions[i];
+ pt->faceIndex = unsigned(m_faceVertexCount.size());
+ pt->vertexIndex = unsigned(m_vertexIndices.size());
+ }
+ if (name) pt->name = name;
+ if (material) pt->materialName = material;
+ if (smooth >= 0) pt->smooth = smooth;
+ return NVCV_SUCCESS;
+}
+
+
+NvCV_Status GLMesh::partitionMesh(unsigned numPartitions, const GLMesh::Partition *srcPartition) {
+ m_partitions.resize(numPartitions);
+ for (unsigned i = 0; i < numPartitions; ++i, ++srcPartition) {
+ if (srcPartition->faceIndex >= m_faceVertexCount.size()) {
+ initPartitions();
+ return NVCV_ERR_MISMATCH;
+ }
+
+ GLMesh::Partition& pt = m_partitions[i];
+ pt.faceIndex = srcPartition->faceIndex;
+ pt.vertexIndex = srcPartition->vertexIndex;
+ pt.numFaces = srcPartition->numFaces;
+ pt.numVertexIndices = srcPartition->numVertexIndices;
+ pt.name = srcPartition->name;
+ pt.materialName = srcPartition->materialName;
+ }
+ //computeStartingVertexIndices();
+
+ return NVCV_SUCCESS;
+}
+
+
+NvCV_Status GLMesh::updatePartition(unsigned i, const GLMesh::Partition& update) {
+ if (i >= m_partitions.size())
+ return NVCV_ERR_FEATURENOTFOUND;
+
+ GLMesh::Partition& pt = m_partitions[i];
+ pt.faceIndex = update.faceIndex;
+ pt.vertexIndex = update.vertexIndex;
+ if (update.name.empty()) pt.name.clear();
+ else pt.name = update.name;
+ if (update.materialName.empty()) pt.materialName.clear();
+ else pt.materialName = update.materialName;
+ return NVCV_SUCCESS;
+}
+
+
+void GLMesh::computeStartingVertexIndices() {
+ unsigned vertIx;
+ std::sort(m_partitions.begin(), m_partitions.end());
+ const unsigned short *faceCount = m_faceVertexCount.data(), *lastFace;
+ GLMesh::Partition *pt = m_partitions.data(), *lastPt = pt + m_partitions.size() - 1;
+ for (vertIx = 0; pt != lastPt; ++pt) {
+ pt->vertexIndex = vertIx;
+ for (lastFace = faceCount + (pt[1].faceIndex - pt[0].faceIndex); faceCount != lastFace; ++faceCount)
+ vertIx += *faceCount;
+ }
+ pt->vertexIndex = vertIx;
+}
+
+
+NvCV_Status GLMesh::getPartition(unsigned i, GLMesh::Partition& pt) const {
+  if (i >= m_partitions.size())
+ return NVCV_ERR_FEATURENOTFOUND;
+ pt = m_partitions[i];
+ return NVCV_SUCCESS;
+}
+
+
+bool GLMesh::indicesMatch(const std::vector<unsigned short>& ivecA, const std::vector<unsigned short>& ivecB) {
+ size_t n = ivecA.size();
+ const unsigned short *a = ivecA.data(),
+ *b = ivecB.data();
+
+ if (ivecB.size() != n)
+ return false;
+ for (; n--; ++a, ++b)
+ if (*a != *b)
+ return false;
+ return true;
+}
+
+
+void GLMesh::assureConsistency() {
+ if (m_texCoords.size() && (m_textureIndices.size() != m_vertexIndices.size()))
+ m_textureIndices.resize(m_vertexIndices.size());
+ if (m_normals.size() && (m_normalIndices.size() != m_vertexIndices.size()))
+ m_normalIndices.resize(m_vertexIndices.size());
+ if (m_faceNormals.size() && (m_faceNormals.size() != (m_vertexIndices.size() / 3)))
+ m_faceNormals.resize(m_vertexIndices.size() / 3);
+}
+
+
+void GLMesh::clear() {
+ m_faceVertexCount.resize(0);
+ m_vertices.resize(0);
+ m_texCoords.resize(0);
+ m_normals.resize(0);
+ m_faceNormals.resize(0);
+ m_vertexIndices.resize(0);
+ m_textureIndices.resize(0);
+ m_normalIndices.resize(0);
+ initPartitions();
+}
+
+
+glm::vec3* GLMesh::getVertices() {
+ return m_vertices.data();
+}
+
+glm::vec2* GLMesh::getTexCoords() {
+ assureConsistency();
+ return m_texCoords.size() ? m_texCoords.data() : nullptr;
+}
+
+glm::vec3* GLMesh::getNormals() {
+ assureConsistency();
+ return m_normals.size() ? m_normals.data() : nullptr;
+}
+
+glm::vec3* GLMesh::getFaceNormals() {
+ assureConsistency();
+ return m_faceNormals.size() ? m_faceNormals.data() : nullptr;
+}
+
+const glm::vec3* GLMesh::getVertices() const { return const_cast<GLMesh*>(this)->getVertices(); }
+const glm::vec2* GLMesh::getTexCoords() const { return const_cast<GLMesh*>(this)->getTexCoords(); }
+const glm::vec3* GLMesh::getNormals() const { return const_cast<GLMesh*>(this)->getNormals(); }
+const glm::vec3* GLMesh::getFaceNormals() const { return const_cast<GLMesh*>(this)->getFaceNormals(); }
+
+unsigned short* GLMesh::getFaceVertexCounts() { return m_faceVertexCount.data(); }
+unsigned short* GLMesh::getVertexIndices() { return m_vertexIndices.data(); }
+unsigned short* GLMesh::getTextureIndices() {
+ return m_textureIndices.size() ? m_textureIndices.data() : nullptr;
+}
+unsigned short* GLMesh::getNormalIndices() {
+ return m_normalIndices.size() ? m_normalIndices.data() : nullptr;
+}
+const unsigned short* GLMesh::getFaceVertexCounts() const { return m_faceVertexCount.data(); }
+const unsigned short* GLMesh::getVertexIndices() const { return m_vertexIndices.data(); }
+const unsigned short* GLMesh::getTextureIndices() const { return const_cast<GLMesh*>(this)->getTextureIndices(); }
+const unsigned short* GLMesh::getNormalIndices() const { return const_cast<GLMesh*>(this)->getNormalIndices(); }
+
+unsigned short* GLMesh::getVertexFaceCounts() { return m_vertexFaceCount.data(); }
+unsigned short* GLMesh::getDualIndices() {
+ return m_dualIndices.size() ? m_dualIndices.data() : nullptr;
+}
+const unsigned short* GLMesh::getVertexFaceCounts() const { return const_cast<GLMesh*>(this)->getVertexFaceCounts(); }
+const unsigned short* GLMesh::getDualIndices() const { return const_cast<GLMesh*>(this)->getDualIndices(); }
+
+
+void GLMesh::addVertex(float x, float y, float z) { m_vertices.emplace_back(glm::vec3{ x, y, z }); }
+void GLMesh::addTexCoord(float u, float v) { m_texCoords.emplace_back(glm::vec2{ u, v }); }
+void GLMesh::addNormal(float x, float y, float z) { m_normals.emplace_back(glm::vec3{ x, y, z }); }
+
+void GLMesh::addVertices(unsigned numVertices, const float *vertices) {
+ size_t preVertices = m_vertices.size();
+ m_vertices.resize(preVertices + numVertices);
+ memcpy(m_vertices.data() + preVertices, vertices, numVertices * sizeof(*m_vertices.data()));
+}
+
+void GLMesh::addTexCoords(unsigned numTexCoords, const float *texCoords) {
+ size_t preTexCoords = m_texCoords.size();
+ m_texCoords.resize(preTexCoords + numTexCoords);
+ memcpy(m_texCoords.data() + preTexCoords, texCoords, numTexCoords * sizeof(*m_texCoords.data()));
+}
+
+void GLMesh::addNormals(unsigned numNormals, const float *normals) {
+ size_t preNormals = m_normals.size();
+ m_normals.resize(preNormals + numNormals);
+ memcpy(m_normals.data() + preNormals, normals, numNormals * sizeof(*m_normals.data()));
+}
+
+
+void GLMesh::addFace(unsigned numVertices, const unsigned short *vertexIndices,
+ const unsigned short *textureIndices, const unsigned short *normalIndices)
+{
+ size_t n;
+ m_faceVertexCount.push_back((unsigned short)numVertices);
+ if (vertexIndices) {
+ n = m_vertexIndices.size();
+ m_vertexIndices.resize(n + numVertices);
+ memcpy(m_vertexIndices.data() + n, vertexIndices, numVertices * sizeof(*vertexIndices));
+ }
+ if (textureIndices) {
+ n = m_textureIndices.size();
+ m_textureIndices.resize(n + numVertices);
+ memcpy(m_textureIndices.data() + n, textureIndices, numVertices * sizeof(*textureIndices));
+ }
+ if (normalIndices) {
+ n = m_normalIndices.size();
+ m_normalIndices.resize(n + numVertices);
+ memcpy(m_normalIndices.data() + n, normalIndices, numVertices * sizeof(*normalIndices));
+ }
+}
+
+
+void GLMesh::addFaces(unsigned numFaces, unsigned numVerticesPerFace, const unsigned short *vertexIndices,
+ const unsigned short *textureIndices, const unsigned short *normalIndices)
+{
+ size_t numIndices = numFaces * numVerticesPerFace,
+ indexBytes = numIndices * sizeof(*vertexIndices);
+ size_t n;
+
+ n = m_faceVertexCount.size();
+ m_faceVertexCount.resize(n + numFaces, (unsigned short)numVerticesPerFace);
+
+ if (vertexIndices) {
+ n = m_vertexIndices.size();
+ m_vertexIndices.resize(n + numIndices);
+ memcpy(m_vertexIndices.data() + n, vertexIndices, indexBytes);
+ }
+ if (textureIndices) {
+ n = m_textureIndices.size();
+ m_textureIndices.resize(n + numIndices);
+ memcpy(m_textureIndices.data() + n, textureIndices, indexBytes);
+ }
+ if (normalIndices) {
+ n = m_normalIndices.size();
+ m_normalIndices.resize(n + numIndices);
+ memcpy(m_normalIndices.data() + n, normalIndices, indexBytes);
+ }
+}
+
+
+bool GLMesh::isTriMesh() const {
+ unsigned n;
+ const unsigned short *ix;
+
+ for (n = numFaces(), ix = getFaceVertexCounts(); n--; ++ix)
+ if (*ix != 3)
+ return false;
+ return true;
+}
+
+
+bool GLMesh::isQuadMesh() const {
+ unsigned n;
+ const unsigned short *ix;
+
+ for (n = numFaces(), ix = getFaceVertexCounts(); n--; ++ix)
+ if (*ix != 4)
+ return false;
+ return true;
+}
+
+
+bool GLMesh::isTriQuadMesh() const {
+ unsigned n;
+ const unsigned short *ix;
+
+ for (n = numFaces(), ix = getFaceVertexCounts(); n--; ++ix)
+ if (*ix > 4)
+ return false;
+ return true;
+}
+
+
+void GLMesh::transform(const glm::mat4x4& M) {
+ TransformPoints (M, unsigned(m_vertices.size()), m_vertices.data(), m_vertices.data());
+ TransformNormals(M, unsigned(m_normals.size()), m_normals.data(), m_normals.data());
+ TransformNormals(M, unsigned(m_faceNormals.size()), m_faceNormals.data(), m_faceNormals.data());
+}
+
+
+unsigned GLMesh::numPartitions() const {
+ return unsigned(m_partitions.size());
+}
+
+
+void GLMesh::finishPartitioning() {
+ computeStartingVertexIndices();
+}
+
+
+NvCV_Status GLMesh::setMaterial(const char *name) {
+ Partition pt{};
+ pt.materialName = name;
+ return partitionMesh(1, &pt);
+}
+
+
+void GLMesh::computeFaceNormals(int weighted) {
+ const glm::vec3 *vertices = m_vertices.data();
+ const unsigned short *numVertices = m_faceVertexCount.data();
+ glm::vec3 *nrm, *nrmEnd;
+ const unsigned short *ix;
+ const glm::vec3 *p0, *p1, *p2;
+ glm::vec3 n;
+ float mag;
+
+ useFaceNormals(true);
+ nrm = getFaceNormals();
+ nrmEnd = nrm + numFaces();
+ ix = m_vertexIndices.data();
+
+ for (; nrm != nrmEnd; ++nrm, ix += *numVertices++) {
+ if (3 == *numVertices) {
+ p0 = &vertices[ix[0]];
+ p1 = &vertices[ix[1]];
+ p2 = &vertices[ix[2]];
+ n = glm::cross((*p1 - *p0), (*p2 - *p0));
+ }
+ else {
+ unsigned numPts = *numVertices;
+ unsigned i;
+ p0 = &vertices[ix[numPts - 1]];
+ n = { 0.f, 0.f, 0.f };
+ for (i = 0, p0 = &vertices[ix[numPts - 1]]; i < numPts; ++i, p0 = p1) {
+ p1 = &vertices[ix[i]];
+ n.x -= (p1->y - p0->y) * (p1->z + p0->z);
+ n.y -= (p1->z - p0->z) * (p1->x + p0->x);
+ n.z -= (p1->x - p0->x) * (p1->y + p0->y);
+ }
+ }
+ mag = glm::length(n);
+ if (weighted == 0) { if (mag) n /= mag; } // Unit vector
+ else if (weighted > 0) { n *= 0.5f; } // Area-weighted normal
+ else /* weighted < 0 */ { if (mag) n /= mag * mag * 0.25f; } // Inverse-area-weighted vector
+ *nrm = n;
+ }
+}
+
+
+void GLMesh::computeVertexNormals(int weighted) {
+ glm::vec3 nrm;
+
+ computeFaceNormals(weighted);
+ if (m_normals.size() != m_vertices.size()) {
+ m_normals.resize(m_vertices.size());
+ m_normalIndices = m_vertexIndices;
+ }
+
+ if (m_vertexFaceCount.size() == m_vertices.size()) { // We already have the dual topology
+ glm::vec3 *n, *nEnd;
+ unsigned short *numPolys, *ix, *ixEnd;
+ for (nEnd = (n = m_normals.data()) + m_vertexFaceCount.size(), numPolys = m_vertexFaceCount.data(), ix = m_dualIndices.data(); n != nEnd; ++n, ++numPolys) {
+ for (ixEnd = ix + *numPolys, nrm = { 0.f, 0.f, 0.f }; ix != ixEnd; ++ix)
+ nrm += m_faceNormals[*ix];
+ *n = glm::normalize(nrm);
+ }
+ }
+}
+
+
+void GLMesh::BoundingBox::unionPoint(const glm::vec3& pt) { /* This works with NaN's */
+ if (!(_box[0].x < pt.x)) _box[0].x = pt.x; if (!(_box[1].x > pt.x)) _box[1].x = pt.x;
+ if (!(_box[0].y < pt.y)) _box[0].y = pt.y; if (!(_box[1].y > pt.y)) _box[1].y = pt.y;
+ if (!(_box[0].z < pt.z)) _box[0].z = pt.z; if (!(_box[1].z > pt.z)) _box[1].z = pt.z;
+}
+
+void GLMesh::BoundingBox::set(unsigned numPts, const glm::vec3 *pts) {
+ _box[0] = pts[0];
+ _box[1] = pts[0];
+ for (++pts; --numPts; ++pts)
+ unionPoint(*pts);
+}
+
+void GLMesh::BoundingBox::set(unsigned numPts, const glm::vec3 *pts, const glm::mat4x4& M) {
+ memset(this, -1, sizeof(*this)); // Set to NaN
+ for (; numPts--; ++pts) {
+ glm::vec3 q;
+ TransformPoints(M, 1, pts, &q);
+ unionPoint(q);
+ }
+}
+
+void GLMesh::BoundingSphere::set(unsigned numPts, const glm::vec3 *pts) {
+ /* Ritter algorithm */
+ float d, d0;
+ glm::vec3 p0, p1;
+ const glm::vec3 *pp, *pEnd = pts + numPts;
+
+ p0 = p1 = pts[0]; // Choose one point
+ for (pp = pts + 1, d0 = 0; pp != pEnd; ++pp) {
+ if (!(d0 > (d = glm::distance(p0, *pp)))) {
+ d0 = d;
+ p1 = *pp; // Find the furthest point
+ }
+ }
+ p0 = p1; // Choose that furthest point
+ for (pp = pts, d0 = 0; pp != pEnd; ++pp) {
+ if (!(d0 > (d = glm::distance(p0, *pp)))) {
+ d0 = d;
+ p1 = *pp; // Find the furthest point from that
+ }
+ }
+
+ _radius = d0 * .5f; // Make a sphere ...
+ _center = (p1 - p0) * .5f + p0; // ... from these furthest points
+
+ bool done;
+ // Accommodate every outlier as we encounter them
+ do {
+ done = true;
+ for (pp = pts; pp != pEnd; ++pp) { // Check that all points are in this sphere
+ glm::vec3 v = *pp - _center;
+ d0 = glm::length(v);
+ if (d0 > _radius) { // If not, ...
+ d = (d0 - _radius) * .5f;
+ _center += v * (d / d0); // ... adjust the sphere center ...
+ _radius += d; // ... and radius to accommodate this new point
+ done = false;
+ }
+ }
+ } while (!done);
+}
+
+
+void GLMesh::BoundingSphere::set(unsigned numPts, const glm::vec3 *pts, const glm::mat4x4& M) {
+  std::vector<glm::vec3> xPts(numPts);
+ TransformPoints(M, numPts, pts, xPts.data());
+ set(numPts, xPts.data());
+}
+
+void GLMesh::getBoundingBox(BoundingBox *bbox, const glm::mat4x4 *M) const {
+ if (M) bbox->set(unsigned(m_vertices.size()), m_vertices.data(), *M);
+ else bbox->set(unsigned(m_vertices.size()), m_vertices.data());
+}
+
+
+void GLMesh::getBoundingSphere(BoundingSphere *bsph, const glm::mat4x4 *M) const {
+ if (M) bsph->set(unsigned(m_vertices.size()), m_vertices.data(), *M);
+ else bsph->set(unsigned(m_vertices.size()), m_vertices.data());
+}
+
+
+unsigned GLMesh::notRenderable(unsigned /*options*/) const {
+ unsigned result = RENDERABLE;
+
+ if (!isTriMesh())
+ result |= NOT_TRIMESH;
+ if (0 != m_textureIndices.size() && !indicesMatch(m_vertexIndices, m_textureIndices))
+ result |= COMPLEX_TOPOLOGY;
+ if (0 != m_normalIndices.size() && !indicesMatch(m_vertexIndices, m_normalIndices))
+ result |= COMPLEX_TOPOLOGY;
+
+ return result;
+}
+
+
+NvCV_Status GLMesh::append(const GLMesh& other, const glm::mat4x4 *M) {
+ if ((!numTexCoords() != !other.numTexCoords()) || (!numNormals() != !other.numNormals()))
+ return NVCV_ERR_MISMATCH;
+
+ unsigned indexOffset = numVertices(),
+ thisCount = numIndices(),
+ otherCount = other.numIndices(),
+ i;
+
+ // Add vertices
+ addVertices(other.numVertices(), &other.getVertices()->x);
+ if (M)
+ TransformPoints(*M, other.numVertices(), getVertices() + indexOffset, getVertices() + indexOffset);
+ if (0 != (i = other.numTexCoords()))
+ addTexCoords(i, &other.getTexCoords()->x);
+ if (0 != (i = other.numNormals())) {
+ addNormals(i, &other.getNormals()->x);
+ if (M)
+ TransformNormals(*M, other.numNormals(), getNormals() + indexOffset, getNormals() + indexOffset);
+ }
+
+ { // Add indices
+ const unsigned short *nvx = other.getFaceVertexCounts(),
+ *vix = other.getVertexIndices(),
+ *tix = other.getTextureIndices(),
+ *nix = other.getNormalIndices();
+ for (i = other.numFaces(); i--; ++nvx) {
+ addFace(*nvx, vix, tix, nix);
+ vix += *nvx;
+ if (tix) tix += *nvx;
+ if (nix) nix += *nvx;
+ }
+ }
+
+
+ { // Offset the new indices
+ unsigned short *ix;
+ for (i = otherCount, ix = getVertexIndices() + thisCount; i--; ++ix)
+ *ix += (unsigned short)indexOffset;
+ if (nullptr != (ix = getTextureIndices()))
+ for (i = otherCount, ix += thisCount; i--; ++ix)
+ *ix += (unsigned short)indexOffset;
+ if (nullptr != (ix = getNormalIndices()))
+ for (i = otherCount, ix += thisCount; i--; ++ix)
+ *ix += (unsigned short)indexOffset;
+ }
+
+ return NVCV_SUCCESS;
+}
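+
+/* A usage sketch, illustrative only: merge a transformed copy of one mesh into another.
+ * Both meshes must agree on whether they carry texture coordinates and normals;
+ * otherwise append() returns NVCV_ERR_MISMATCH.
+ *
+ *   GLMesh head, jaw;                  // hypothetical meshes, filled via addVertices()/addFaces()
+ *   glm::mat4x4 M(1.0f);               // identity; any affine transform works
+ *   if (NVCV_SUCCESS == head.append(jaw, &M))
+ *     head.computeFaceNormals();       // refresh the face normals of the merged mesh
+ */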
diff --git a/samples/ExpressionApp/BackEndOpenGL/GLMesh.h b/samples/ExpressionApp/BackEndOpenGL/GLMesh.h
new file mode 100644
index 0000000..6ee7ae3
--- /dev/null
+++ b/samples/ExpressionApp/BackEndOpenGL/GLMesh.h
@@ -0,0 +1,280 @@
+/*###############################################################################
+#
+# Copyright 2021 NVIDIA Corporation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy of
+# this software and associated documentation files (the "Software"), to deal in
+# the Software without restriction, including without limitation the rights to
+# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+# the Software, and to permit persons to whom the Software is furnished to do so,
+# subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+###############################################################################*/
+
+#ifndef __GLMESH_H
+#define __GLMESH_H
+
+#include <string>
+#include <vector>
+
+#include "glm/glm.hpp"
+#include "nvCVStatus.h"
+
+
+class GLMesh {
+public:
+ struct Partition {
+ unsigned faceIndex; ///< The index of the first face in the partition.
+ unsigned numFaces; ///< The number of faces in the partition.
+ unsigned vertexIndex; ///< The index of the first topological vertex in the partition.
+ unsigned numVertexIndices; ///< The number of topological vertices in the partition.
+ std::string name; ///< The name of the partition.
+ std::string materialName; ///< The name of the material assigned to the partition.
+ int smooth; ///< The smoothing group > 0; no smoothing == 0; unassigned < 0.
+
+ Partition() { smooth = -1; }
+ bool operator<(const Partition& pt) const { return faceIndex < pt.faceIndex; }
+ void finishPartitioning();
+ };
+ class BoundingBox {
+ public:
+ void unionPoint(const glm::vec3& pt);
+ void set(unsigned numPts, const glm::vec3* pts);
+ void set(unsigned numPts, const glm::vec3* pts, const glm::mat4x4& M);
+ glm::vec3& min() { return _box[0]; }
+ glm::vec3& max() { return _box[1]; }
+ const glm::vec3& min() const { return _box[0]; }
+ const glm::vec3& max() const { return _box[1]; }
+ glm::vec3 center() const { return (_box[0] + _box[1]) * 0.5f; }
+ private:
+ glm::vec3 _box[2];
+ };
+ class BoundingSphere {
+ public:
+ void set(unsigned numPts, const glm::vec3* pts);
+ void set(unsigned numPts, const glm::vec3* pts, const glm::mat4x4& M);
+ glm::vec3& center() { return _center; }
+ const glm::vec3& center() const { return _center; }
+ float& radius() { return _radius; }
+ float radius() const { return _radius; }
+
+ private:
+ glm::vec3 _center;
+ float _radius;
+ };
+ enum { BOUNDARY = 0xFFFFu }; ///< An index that indicates the boundary
+
+ GLMesh();
+ GLMesh(const GLMesh& mesh);
+ ~GLMesh();
+
+
+ /// Get the number of faces.
+ /// @return the number of faces.
+ unsigned numFaces() const;
+
+ /// Get the number of XYZ vertices.
+ /// @return the number of XYZ vertices.
+ unsigned numVertices() const;
+
+ /// Get the number of UV texture coordinates.
+ /// @return the number of UV texture coordinates.
+ unsigned numTexCoords() const;
+
+ /// Get the number of XYZ normals.
+ /// @return the number of XYZ normals.
+ unsigned numNormals() const;
+
+ /// Get the number of vertex indices.
+ /// @return the number of vertex indices.
+ unsigned numIndices() const;
+
+ /// Evaluate whether the mesh is composed only of triangles.
+ /// @return true if all faces have 3 vertices; false otherwise.
+ bool isTriMesh() const;
+
+ /// Evaluate whether the mesh is composed only of quadrilaterals.
+ /// @return true if all faces have 4 vertices; false otherwise.
+ bool isQuadMesh() const;
+
+ /// Evaluate whether the mesh is composed only of triangles and quadrilaterals.
+ /// @return true if no face has greater than 4 vertices; false otherwise.
+ bool isTriQuadMesh() const;
+
+ void resizeVertices(unsigned numVert);
+ void resizeTexCoords(unsigned numTexCoord);
+ void resizeNormals(unsigned numNorm);
+ void resizeFaces(unsigned numFace);
+ void resizeTriangles(unsigned numTriangles);
+ void resizeVertexIndices(unsigned numIndices);
+ void resizeDualIndices(unsigned numIndices);
+ void clear();
+
+ glm::vec3* getVertices(); ///< Get the vertices. @return a pointer to the vertices.
+ const glm::vec3* getVertices() const; ///< Get the vertices. @return a pointer to the vertices.
+ glm::vec2* getTexCoords(); ///< Get the texture coordinates. @return a pointer to the texture coordinates.
+ const glm::vec2* getTexCoords() const; ///< Get the texture coordinates. @return a pointer to the texture coordinates.
+ glm::vec3* getNormals(); ///< Get the vertex normals. @return a pointer to the vertex normals.
+ const glm::vec3* getNormals() const; ///< Get the vertex normals. @return a pointer to the vertex normals.
+  glm::vec3* getFaceNormals(); ///< Get the face normals, computed with computeFaceNormals(). @return a pointer to the face normals.
+  const glm::vec3* getFaceNormals() const; ///< Get the face normals, computed with computeFaceNormals(). @return a pointer to the face normals.
+ unsigned short* getFaceVertexCounts(); ///< Get the vertex counts for each face, in the primal topology. @return an array of vertex counts, one per face.
+ const unsigned short* getFaceVertexCounts() const; ///< Get the vertex counts for each face, in the primal topology. @return an array of vertex counts, one per face.
+ unsigned short* getVertexIndices(); ///< Get the vertex indices for each face: the primal topology. @return a pointer to the vertex indices.
+ const unsigned short* getVertexIndices() const; ///< Get the vertex indices for each face: the primal topology. @return a pointer to the vertex indices.
+ unsigned short* getTextureIndices(); ///< Get the texture indices for each face: the primal topology. @return a pointer to the texture indices.
+ const unsigned short* getTextureIndices() const; ///< Get the texture indices for each face: the primal topology. @return a pointer to the texture indices.
+ unsigned short* getNormalIndices(); ///< Get the normal indices for each face: the primal topology. @return a pointer to the normal indices.
+ const unsigned short* getNormalIndices() const; ///< Get the normal indices for each face: the primal topology. @return a pointer to the normal indices.
+ unsigned short* getVertexFaceCounts(); ///< Get the face counts for each vertex, in the dual topology. @return an array of face counts, one per vertex.
+ const unsigned short* getVertexFaceCounts() const; ///< Get the face counts for each vertex, in the dual topology. @return an array of face counts, one per vertex.
+ unsigned short* getDualIndices(); ///< Get the face indices for each vertex: the dual topology. @return a pointer to the dual face indices.
+ const unsigned short* getDualIndices() const; ///< Get the face indices for each vertex: the dual topology. @return a pointer to the dual face indices.
+
+ void addVertex(float x, float y, float z);
+ void addTexCoord(float u, float v);
+ void addNormal(float x, float y, float z);
+
+ void addVertices(unsigned numVertices, const float* vertices);
+ void addTexCoords(unsigned numTexCoords, const float* texCoords);
+ void addNormals(unsigned numNormals, const float* normals);
+
+ void addFace(unsigned numVertices, const unsigned short* vertexIndices,
+ const unsigned short* textureIndices, const unsigned short* normalIndices);
+
+ void addFaces(unsigned numFaces, unsigned numVerticesPerFace, const unsigned short* vertexIndices,
+ const unsigned short* textureIndices, const unsigned short* normalIndices);
+
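+  // Illustrative sketch (assumed usage; in particular, passing nullptr for absent normal
+  // indices is an assumption): building a single textured quad.
+  //
+  //   GLMesh mesh;
+  //   mesh.addVertex(0.f, 0.f, 0.f);  mesh.addTexCoord(0.f, 0.f);
+  //   mesh.addVertex(1.f, 0.f, 0.f);  mesh.addTexCoord(1.f, 0.f);
+  //   mesh.addVertex(1.f, 1.f, 0.f);  mesh.addTexCoord(1.f, 1.f);
+  //   mesh.addVertex(0.f, 1.f, 0.f);  mesh.addTexCoord(0.f, 1.f);
+  //   static const unsigned short idx[4] = { 0, 1, 2, 3 };
+  //   mesh.addFace(4, idx, idx, nullptr);     // vertex and texture indices coincide here
+  //   mesh.computeVertexNormals();            // also computes the face normals
+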
+ /// Compute the normals per face.
+  /// @param[in] weighted specifies the weighting for the normals. In all cases, the zero vector will
+ /// be returned for faces with zero area.
+ /// 0: unit vectors.
+ /// +1: vectors weighted by the area.
+ /// -1: vectors weighted by the reciprocal of the area.
+ void computeFaceNormals(int weighted = 0);
+
+ /// Compute the vertex normals. The face normals will be computed in the process.
+ /// @param[in] weighted Determines the weighting used to combine the face normals:
+  ///                 0: all incident face normals will have the same weight.
+ /// -1: the normals will be weighted by inverse area of the face.
+ void computeVertexNormals(int weighted = 0);
+
+ void transform(const glm::mat4x4& M);
+
+ /// Get the number of partitions.
+  /// There is always at least one, which may have neither a name nor a material.
+ /// @return the number of partitions.
+ unsigned numPartitions() const;
+
+ /// The easiest way to partition a mesh: call this after all vertices and attributes
+ /// are recorded, and before the first face of each partition is recorded.
+ /// @param[in] name the name of the new partition.
+ /// @param[in] material the name of the material to be used in the new partition.
+ /// @param[in] smooth {-1, 0, 1} means {unspecified, not smooth, smooth}.
+  /// @return NvCV_StatusNone if the partition was created successfully.
+ /// @return NvCV_StatusDuplicate if a partition with the same name already exists.
+ NvCV_Status startPartition(const char* name, const char* material, int smooth = -1);
+
+ /// Get the specified partition.
+ /// @param[in] i the index of the partition to retrieve.
+ /// @param[out] pt a place to store the specified partition.
+ /// @return NvCV_StatusNone if the partition was retrieved successfully.
+  /// @return NvCV_StatusTooBig if i was >= the number of partitions.
+ NvCV_Status getPartition(unsigned i, Partition& pt) const;
+
+ /// Update the specified partition.
+ /// The function finishPartitioning() should be called
+ /// after the last updatePartition() has been called.
+ /// @param[in] i the index of the partition.
+ /// @param[in] partition the desired value for the specified partition.
+ /// @return NvCV_StatusNone if the partition was updated successfully.
+ /// @return NvCV_StatusTooBig if the face index was >= the number of faces.
+ NvCV_Status updatePartition(unsigned i, const Partition& partition);
+
+ /// Partition the mesh.
+ /// @param[in] numPartitions the number of partitions.
+  /// @param[in] partitions the array of partitions. Only { faceIndex, name, materialName }
+  ///                   need be supplied; the rest are computed.
+ /// @return NvCV_StatusNone if the partition was executed successfully.
+ /// @return NvCV_StatusTooBig if any faceIndex was >= the number of faces.
+ NvCV_Status partitionMesh(unsigned numPartitions, const Partition* partitions);
+
+ /// The last step after partitioning with updatePartition().
+ /// This is not needed if the partitions were created solely with the use of
+  /// startPartition() or partitionMesh().
+ /// @note The partitions may be reordered (sorted) after calling finishPartitioning().
+ void finishPartitioning();
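+
+  // Illustrative sketch of the intended call order (an assumption drawn from the comments above,
+  // not a normative example):
+  //
+  //   mesh.startPartition("head", "skin");      // then add the faces of the "head" partition
+  //   mesh.startPartition("teeth", "enamel");   // then add the faces of the "teeth" partition
+  //   ...
+  //   mesh.updatePartition(i, editedPartition); // later edits go through updatePartition() ...
+  //   mesh.finishPartitioning();                // ... and end with finishPartitioning()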
+
+ /// Set a single material for the whole mesh.
+ /// @param[in] name the name of the material.
+ NvCV_Status setMaterial(const char* name);
+
+ /// Get the bounding box, optionally with an affine transformation.
+ /// @param[out] bbox a place to store the bounding box.
+ /// @param[in] M pointer to a modeling matrix; NULL implies the identity.
+ void getBoundingBox(BoundingBox* bbox, const glm::mat4x4* M = nullptr) const;
+
+  /// Get the bounding sphere, optionally with an affine transformation.
+  /// @param[out] bsph a place to store the bounding sphere.
+ /// @param[in] M pointer to a modeling matrix; NULL implies the identity.
+ void getBoundingSphere(BoundingSphere* bsph, const glm::mat4x4* M = nullptr) const;
+
+  /// Query whether the PolyMesh is not easily renderable by OpenGL.
+  /// Since the more typical query would be whether it is renderable,
+  /// this seems like negative logic, but it allows a bit vector to be returned
+  /// indicating the reasons that the PolyMesh is not renderable.
+ /// @param[in] options Rendering options; currently ignored.
+ /// @return RENDERABLE if the PolyMesh is renderable. Otherwise a bit vector of:
+ /// NOT_TRIMESH if some faces are not triangular;
+ /// COMPLEX_TOPOLOGY if the vertex attribute topology is inconsistent;
+ unsigned notRenderable(unsigned options) const;
+
+ /// Append another mesh.
+ /// @param[in] mesh the other mesh.
+ /// @param[in] M an optional affine transform
+ NvCV_Status append(const GLMesh& mesh, const glm::mat4x4* M = nullptr);
+
+ /// Bit vector components indicating non-renderability.
+ enum {
+ RENDERABLE = 0x0, ///< The PolyMesh is renderable.
+ NOT_TRIMESH = 0x1, ///< Some faces are not triangular.
+ COMPLEX_TOPOLOGY = 0x2 ///< The vertex topology is not consistent.
+ };
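+
+  // Illustrative sketch (assumed usage): callers typically test the returned bit vector, e.g.
+  //
+  //   unsigned why = mesh.notRenderable(0);                 // options are currently ignored
+  //   if (why & GLMesh::NOT_TRIMESH)      { /* triangulate first */ }
+  //   if (why & GLMesh::COMPLEX_TOPOLOGY) { /* unify the attribute topology first */ }
+  //   if (why == GLMesh::RENDERABLE)      { /* safe to hand to the OpenGL renderers */ }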
+
+private:
+ void initPartitions();
+ void computeStartingVertexIndices();
+  static bool indicesMatch(const std::vector<unsigned short>& ivecA, const std::vector<unsigned short>& ivecB);
+ void assureConsistency();
+ void useFaceNormals(bool yes);
+
+  std::vector<unsigned short> m_faceVertexCount;
+
+  std::vector<glm::vec3> m_vertices;
+  std::vector<unsigned short> m_vertexIndices;
+
+  std::vector<glm::vec2> m_texCoords;
+  std::vector<unsigned short> m_textureIndices;
+
+  std::vector<glm::vec3> m_normals;
+  std::vector<unsigned short> m_normalIndices;
+
+  std::vector<glm::vec3> m_faceNormals;
+
+  std::vector<Partition> m_partitions;
+
+  std::vector<unsigned short> m_vertexFaceCount; // the number of faces surrounding each vertex
+  std::vector<unsigned short> m_dualIndices;     // the face indices for each vertex
+};
+
+#endif // __GLMESH_H
diff --git a/samples/ExpressionApp/BackEndOpenGL/GLShaders.cpp b/samples/ExpressionApp/BackEndOpenGL/GLShaders.cpp
new file mode 100644
index 0000000..32738b6
--- /dev/null
+++ b/samples/ExpressionApp/BackEndOpenGL/GLShaders.cpp
@@ -0,0 +1,632 @@
+/*###############################################################################
+#
+# Copyright 2016-2021 NVIDIA Corporation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy of
+# this software and associated documentation files (the "Software"), to deal in
+# the Software without restriction, including without limitation the rights to
+# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+# the Software, and to permit persons to whom the Software is furnished to do so,
+# subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+###############################################################################*/
+
+#ifdef _MSC_VER
+ #include "glad/glad.h"
+#else
+  #include <GL/gl.h>   // assumed non-Windows OpenGL header
+#endif // _MSC_VER
+#include <stdio.h>
+#include <string>
+#include "GLShaders.h"
+
+
+enum {
+ myErrNone = 0,
+ myErrShader = -1,
+ myErrProgram = -2,
+ myErrTexture = -3,
+};
+
+
+#define BAIL_IF_ERR(err) do { if ((err)) { goto bail; } } while(0)
+#define _STRINGIFY_(token) #token
+#define STRINGIFY(token) _STRINGIFY_(token)
+#define MAYBE_UNUSED(token) if (token){}
+
+
+/****************************************************************************//**
+ * Print Shader Log.
+ * \param[in] id the ID of the shader.
+ * \param[in] type either GL_VERTEX_SHADER or GL_FRAGMENT_SHADER.
+ * \param[in] shader the shader source code.
+ ********************************************************************************/
+
+static void PrintShaderLog(GLuint id, GLenum type, const char *shader) {
+ GLsizei msgLength;
+ std::string errMsg;
+ glGetShaderiv(id, GL_INFO_LOG_LENGTH, &msgLength);
+ errMsg.resize(msgLength);
+ glGetShaderInfoLog(id, msgLength, &msgLength, &errMsg[0]);
+ fprintf(stderr, "\nShader Log:\n%sfor %s Shader:\n%s\n", errMsg.c_str(),
+ ((type == GL_VERTEX_SHADER) ? "Vertex" : "Fragment"), shader);
+}
+
+
+/****************************************************************************//**
+ * Print Program Log.
+ * \param[in] id the id of the program.
+ ********************************************************************************/
+
+static void PrintProgramLog(GLuint id) {
+ GLsizei msgLength;
+ std::string errMsg;
+ glGetProgramiv(id, GL_INFO_LOG_LENGTH, &msgLength);
+ errMsg.resize(msgLength);
+ glGetProgramInfoLog(id, msgLength, &msgLength, &errMsg[0]);
+ fprintf(stderr, "\nProgram Log:\n%s\n", errMsg.c_str());
+}
+
+
+/****************************************************************************//**
+ * NewShader
+ ********************************************************************************/
+
+static int NewShader(const char *shaderStr, GLenum type, GLuint *shaderID) {
+ GLuint id;
+ GLint result;
+
+ *shaderID = 0;
+ id = glCreateShader(type);
+ glShaderSource(id, 1, &shaderStr, NULL);
+ glCompileShader(id);
+ glGetShaderiv(id, GL_COMPILE_STATUS, &result);
+ if (result) {
+ *shaderID = id;
+ return myErrNone;
+ }
+ else {
+ PrintShaderLog(id, type, shaderStr);
+ glDeleteShader(id);
+ return myErrShader;
+ }
+}
+
+
+/****************************************************************************//**
+ * NewProgram
+ ********************************************************************************/
+
+static int NewProgram(GLuint vertexShader, GLuint fragmentShader, GLuint *progID) {
+ GLint result;
+ GLuint id;
+
+ *progID = 0;
+
+ id = glCreateProgram();
+ glAttachShader(id, vertexShader);
+ glAttachShader(id, fragmentShader);
+
+ glLinkProgram(id);
+ glGetProgramiv(id, GL_LINK_STATUS, &result);
+ if (result) {
+ *progID = id;
+ return myErrNone;
+ }
+ else {
+ PrintProgramLog(id);
+ glDeleteProgram(id);
+ return myErrProgram;
+ }
+}
+
+
+/****************************************************************************//**
+ * IndexTypeFromSize
+ ********************************************************************************/
+
+static GLenum IndexTypeFromSize(unsigned indexSize) {
+ return (indexSize < 2) ? GL_UNSIGNED_BYTE
+ : (indexSize == 2) ? GL_UNSIGNED_SHORT
+ : GL_UNSIGNED_INT;
+}
+
+
+/********************************************************************************
+ ********************************************************************************
+ ***** SMOOTH RENDERER *****
+ ********************************************************************************
+ ********************************************************************************/
+
+
+/********************************************************************************
+ * Shaders
+ ********************************************************************************/
+
+const char SmoothRenderer::_vertexShader[] =
+ "uniform mat4 MVP;\n"
+ "attribute vec3 vCol;\n"
+ "attribute vec3 vPos;\n"
+ "varying vec3 color;\n"
+ "void main()\n"
+ "{\n"
+ " gl_Position = MVP * vec4(vPos, 1.0);\n"
+ " color = vCol;\n"
+ "}\n";
+const char SmoothRenderer::_fragmentShader[] =
+ "varying vec3 color;\n"
+ "void main()\n"
+ "{\n"
+ " gl_FragColor = vec4(color, 1.0);\n"
+ "}\n";
+
+
+/********************************************************************************
+ * startup
+ ********************************************************************************/
+
+int SmoothRenderer::startup() {
+ int err = myErrNone;
+ GLuint vertexShader = 0, fragmentShader = 0;
+
+ _programID = 0;
+ BAIL_IF_ERR(err = NewShader(_vertexShader, GL_VERTEX_SHADER, &vertexShader));
+ BAIL_IF_ERR(err = NewShader(_fragmentShader, GL_FRAGMENT_SHADER, &fragmentShader));
+ BAIL_IF_ERR(err = NewProgram(vertexShader, fragmentShader, &_programID));
+ _maxtrixID = glGetUniformLocation(_programID, "MVP");
+ _vtxPosID = glGetAttribLocation(_programID, "vPos");
+ _vtxColID = glGetAttribLocation(_programID, "vCol");
+ err = (-1 == _maxtrixID || -1 == _vtxPosID || -1 == _vtxColID) ? myErrShader : myErrNone;
+
+bail:
+ if (myErrNone != err) shutdown();
+ if (fragmentShader) glDeleteShader(fragmentShader);
+ if (vertexShader) glDeleteShader(vertexShader);
+ return err;
+}
+
+
+/********************************************************************************
+ * use
+ ********************************************************************************/
+
+int SmoothRenderer::use() {
+ if (0 == _programID)
+ return myErrProgram;
+ glUseProgram(_programID);
+ return myErrNone;
+}
+
+
+/********************************************************************************
+ * drawElements, from user memory
+ ********************************************************************************/
+
+void SmoothRenderer::drawElements(GLsizei numVertices, const GLfloat *positions, const GLfloat *colors,
+ GLenum graphicsMode, GLsizei indexCount, GLenum indexType, const GLvoid *indices, const GLfloat *M
+) {
+ MAYBE_UNUSED(numVertices);
+ glUseProgram(_programID);
+ if (M)
+ glUniformMatrix4fv(_maxtrixID, 1, GL_FALSE, M);
+
+ glBindBuffer(GL_ARRAY_BUFFER, 0);
+ glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
+ if (colors) { /* Separate array for position and color */
+ glEnableVertexAttribArray(_vtxPosID);
+ glVertexAttribPointer(_vtxPosID, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(*positions), positions);
+ glEnableVertexAttribArray(_vtxColID);
+ glVertexAttribPointer(_vtxColID, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(*colors), colors);
+ }
+ else { /* One contiguous array for position and color */
+ glEnableVertexAttribArray(_vtxPosID);
+ glVertexAttribPointer(_vtxPosID, 3, GL_FLOAT, GL_FALSE, 6 * sizeof(*positions), positions);
+ glEnableVertexAttribArray(_vtxColID);
+ glVertexAttribPointer(_vtxColID, 3, GL_FLOAT, GL_FALSE, 6 * sizeof(*positions), positions + 3);
+ }
+ glDrawElements(graphicsMode, indexCount, indexType, indices);
+}
+
+
+/********************************************************************************
+ * drawElements, from buffer objects
+ ********************************************************************************/
+
+void SmoothRenderer::drawElements(GLuint vtxBuf, unsigned posOff, unsigned colOff,
+ GLenum graphicsMode, GLsizei numIndices, unsigned indexSize, GLuint topoBuf, const GLfloat *M
+) {
+ GLenum err;
+ glUseProgram(_programID);
+ if (M)
+ glUniformMatrix4fv(_maxtrixID, 1, GL_FALSE, M);
+ glBindBuffer(GL_ARRAY_BUFFER, vtxBuf);
+ glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, topoBuf);
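+  // A color offset of 0 or 12 bytes is interpreted as the interleaved {x,y,z,r,g,b} layout
+  // (6-float stride); any other offset selects the planar path with tightly packed 3-float arrays.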
+ if (!(colOff == 12 || colOff == 0)) { /* Separate array for position and color */
+ glEnableVertexAttribArray(_vtxPosID);
+ glVertexAttribPointer(_vtxPosID, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(float), (void*)(intptr_t)posOff);
+ glEnableVertexAttribArray(_vtxColID);
+ glVertexAttribPointer(_vtxColID, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(float), (void*)(intptr_t)colOff);
+ }
+ else { /* One contiguous array for position and color */
+ glEnableVertexAttribArray(_vtxPosID);
+ glVertexAttribPointer(_vtxPosID, 3, GL_FLOAT, GL_FALSE, 6 * sizeof(float), (void*)(intptr_t)(posOff + 0 * sizeof(float)));
+ glEnableVertexAttribArray(_vtxColID);
+ glVertexAttribPointer(_vtxColID, 3, GL_FLOAT, GL_FALSE, 6 * sizeof(float), (void*)(intptr_t)(posOff + 3 * sizeof(float)));
+ err = glGetError(); if (err) printf("glVertexAttribPointer returns %d\n", err);
+ }
+ glDrawElements(graphicsMode, numIndices, IndexTypeFromSize(indexSize), (void*)0);
+}
+
+
+
+
+/********************************************************************************
+ ********************************************************************************
+ ***** TEXTURE RENDERER *****
+ ********************************************************************************
+ ********************************************************************************/
+
+
+
+/********************************************************************************
+ * shaders
+ ********************************************************************************/
+
+const char TextureRenderer::_vertexShader[] =
+ "uniform mat4 MVP;\n" // Model, view, projection matrices, concatenated.
+ "attribute vec3 vPos;\n" // Vertex position
+ "attribute vec2 vTex;\n" // Vertex texture coordinate
+ "varying vec2 texCoord;\n" // Interpolated texture coordinate
+ "void main()\n"
+ "{\n"
+ " gl_Position = MVP * vec4(vPos, 1.0);\n"
+ " texCoord = vTex;\n"
+ "}\n";
+const char TextureRenderer::_fragmentShader[] =
+ "uniform sampler2D tex;\n"
+ "varying vec2 texCoord;\n" // Interpolated texture coordinate
+ "void main()\n"
+ "{\n"
+ " gl_FragColor = texture2D(tex, texCoord);\n"
+ "}\n";
+
+
+/********************************************************************************
+ * startup
+ ********************************************************************************/
+
+int TextureRenderer::startup() {
+ int err = myErrNone;
+ GLuint vertexShader = 0, fragmentShader = 0;
+
+ if (_programID)
+ return myErrNone;
+ _programID = 0;
+ BAIL_IF_ERR(err = NewShader(_vertexShader, GL_VERTEX_SHADER, &vertexShader));
+ BAIL_IF_ERR(err = NewShader(_fragmentShader, GL_FRAGMENT_SHADER, &fragmentShader));
+ BAIL_IF_ERR(err = NewProgram(vertexShader, fragmentShader, &_programID));
+ _maxtrixID = _vtxPosID = _vtxTexID = -1;
+ _maxtrixID = glGetUniformLocation(_programID, "MVP");
+ _vtxPosID = glGetAttribLocation(_programID, "vPos");
+ _vtxTexID = glGetAttribLocation(_programID, "vTex");
+ err = (-1 == _maxtrixID || -1 == _vtxPosID || -1 == _vtxTexID) ? myErrShader : myErrNone;
+
+bail:
+ if (myErrNone != err) shutdown();
+ if (fragmentShader) glDeleteShader(fragmentShader);
+ if (vertexShader) glDeleteShader(vertexShader);
+ return err;
+}
+
+
+/********************************************************************************
+ * use
+ ********************************************************************************/
+
+int TextureRenderer::use() {
+ if (0 == _programID)
+ return myErrProgram;
+ glUseProgram(_programID);
+ return myErrNone;
+}
+
+
+/********************************************************************************
+ * drawElements, from user buffers
+ ********************************************************************************/
+
+void TextureRenderer::drawElements(GLsizei numVertices, const GLfloat *xyz, const GLfloat *uv,
+ GLenum graphicsMode, GLsizei indexCount, GLenum indexType, const GLvoid *indices, GLuint texID ,const GLfloat *M
+) {
+ MAYBE_UNUSED(numVertices);
+ glUseProgram(_programID);
+ if (M)
+ glUniformMatrix4fv(_maxtrixID, 1, GL_FALSE, M);
+ glBindBuffer(GL_ARRAY_BUFFER, 0);
+ glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
+ glBindTexture(GL_TEXTURE_2D, texID);
+
+  if (uv) {                   /* Separate arrays for position and texture coordinates */
+ glEnableVertexAttribArray(_vtxPosID);
+ glVertexAttribPointer(_vtxPosID, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(*xyz), xyz);
+ glEnableVertexAttribArray(_vtxTexID);
+ glVertexAttribPointer(_vtxTexID, 2, GL_FLOAT, GL_FALSE, 2 * sizeof(*uv), uv);
+ }
+  else {                      /* One contiguous array for position and texture coordinates */
+ glEnableVertexAttribArray(_vtxPosID);
+ glVertexAttribPointer(_vtxPosID, 3, GL_FLOAT, GL_FALSE, 5 * sizeof(*xyz), xyz);
+ glEnableVertexAttribArray(_vtxTexID);
+ glVertexAttribPointer(_vtxTexID, 2, GL_FLOAT, GL_FALSE, 5 * sizeof(*xyz), xyz + 3);
+ }
+ glDrawElements(graphicsMode, indexCount, indexType, indices);
+}
+
+
+/********************************************************************************
+ * drawElements, from buffer objects
+ ********************************************************************************/
+
+void TextureRenderer::drawElements(
+ GLuint vtxBuf, unsigned xyzOff, unsigned uvOff, GLenum graphicsMode,
+ GLsizei numIndices, GLenum indexSize, GLuint indexBuf, GLuint texID, const GLfloat *M
+) {
+ GLenum err;
+ glUseProgram(_programID);
+ if (M)
+ glUniformMatrix4fv(_maxtrixID, 1, GL_FALSE, M);
+ glBindBuffer(GL_ARRAY_BUFFER, vtxBuf);
+ glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, indexBuf);
+ glBindTexture(GL_TEXTURE_2D, texID);
+
+  if (!(uvOff == 12 || uvOff == 0)) {   /* Separate arrays for position and texture coordinates */
+ glEnableVertexAttribArray(_vtxPosID);
+ glVertexAttribPointer(_vtxPosID, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(float), (void*)(intptr_t)xyzOff);
+ glEnableVertexAttribArray(_vtxTexID);
+ glVertexAttribPointer(_vtxTexID, 2, GL_FLOAT, GL_FALSE, 2 * sizeof(float), (void*)(intptr_t)uvOff);
+ }
+  else {                                /* One contiguous array for position and texture coordinates */
+ glEnableVertexAttribArray(_vtxPosID);
+ glVertexAttribPointer(_vtxPosID, 3, GL_FLOAT, GL_FALSE, 5 * sizeof(float), (void*)(intptr_t)(xyzOff + 0 * sizeof(float)));
+ glEnableVertexAttribArray(_vtxTexID);
+ glVertexAttribPointer(_vtxTexID, 2, GL_FLOAT, GL_FALSE, 5 * sizeof(float), (void*)(intptr_t)(xyzOff + 3 * sizeof(float)));
+ err = glGetError(); if (err) printf("glVertexAttribPointer returns %d\n", err);
+ }
+ glDrawElements(graphicsMode, numIndices, IndexTypeFromSize(indexSize), (void*)0);
+}
+
+
+/********************************************************************************
+ * drawQuad, from user buffers
+ ********************************************************************************/
+
+void TextureRenderer::drawQuad(const float xyz[4*3], const float uv[4*2], GLuint texID, const float *M) {
+ static const unsigned char indices[4] = { 0, 1, 2, 3 }; // These need to be static, because GL is asynchronous
+ drawElements(4, xyz, uv, GL_TRIANGLE_FAN, 4, GL_UNSIGNED_BYTE, indices, texID, M);
+}
+
+
+/********************************************************************************
+ * Mesh shader
+ ********************************************************************************/
+
+class MeshShader {
+ static const char _vertexShader[], _fragmentShader[];
+};
+// The ambient and diffuse coefficients are rolled into the ambCol and litCol, respectively.
+const char MeshShader::_vertexShader[] =
+ "uniform mat4 MVP;\n" // Model, view, projection matrices, concatenated.
+ "uniform mat3 N;\n" // Normal matrix.
+ "uniform vec3 litDir;\n" // Light direction
+ "uniform vec3 litCol;\n" // Light color multiplied by the diffuse coefficient
+ "uniform vec3 ambCol;\n" // Ambient color multiplied by the ambient coefficient
+ "attribute vec3 vtxPos;\n" // Vertex position
+ "attribute vec3 vtxNor;\n" // Vertex normal
+ "attribute vec2 vtxTex;\n" // Vertex texture coordinate
+ "varying vec2 texCoord;\n" // Interpolated texture coordinate
+ "varying vec3 illum;\n" // Interpolated illumination
+ "void main()\n"
+ "{\n"
+ " gl_Position = MVP * vec4(vtxPos, 1.0);\n"
+ " texCoord = vtxTex;\n"
+    "  illum = max(dot(N * vtxNor, litDir), 0.0) * litCol + ambCol;\n"
+ "}\n";
+
+
+
+/********************************************************************************
+ * UpdateTexture
+ ********************************************************************************/
+
+GLenum UpdateTexture(GLint texID, GLsizei width, GLsizei height, GLsizei rowBytes, GLenum glFormat, const GLvoid *pixels) {
+ glBindTexture(GL_TEXTURE_2D, texID);
+ glPixelStorei(GL_UNPACK_SKIP_ROWS, 0);
+ glPixelStorei(GL_UNPACK_SKIP_PIXELS, 0);
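+  // GL_UNPACK_ROW_LENGTH is specified in pixels; dividing rowBytes by 4 assumes 4-byte pixels (e.g. RGBA/BGRA).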
+ glPixelStorei(GL_UNPACK_ROW_LENGTH, rowBytes / 4);
+ glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, glFormat, GL_UNSIGNED_BYTE, pixels);
+ glPixelStorei(GL_UNPACK_ROW_LENGTH, 0); // restore to default
+ return glGetError();
+}
+
+
+/********************************************************************************
+ ********************************************************************************
+ ***** LAMBERTIAN RENDERER *****
+ ********************************************************************************
+ ********************************************************************************/
+
+
+/********************************************************************************
+ * Shaders
+ ********************************************************************************/
+
+const char LambertianRenderer::_vertexShader[] =
+ "#version 120\n"
+ "uniform mat4 M;\n"
+ "uniform mat4 VP;\n"
+ "uniform vec4 lightLoc[" STRINGIFY(LAMBERTIAN_NUM_LIGHTS) "];\n"
+ "uniform vec3 lightColor[" STRINGIFY(LAMBERTIAN_NUM_LIGHTS) "];\n"
+ "uniform vec3 Ka;\n"
+ "uniform vec3 Kd;\n"
+ "attribute vec3 vPos;\n"
+ "attribute vec3 vNrm;\n"
+ "varying vec3 color;\n"
+ "void main()\n"
+ "{\n"
+ " vec4 loc = M * vec4(vPos, 1.);\n" // Transform points into world space ...
+ " gl_Position = VP * loc;\n" // ... and screen space
+ " vec3 N = normalize(mat3(M) * vNrm);\n" // Transform normal into world space, assuming isotropic scaling
+ " color = Ka;\n" // Initialize color to ambient
+ " for (int i = 0; i < " STRINGIFY(LAMBERTIAN_NUM_LIGHTS) "; ++i)\n"
+ " {\n"
+ " vec3 L = normalize(lightLoc[i].xyz - lightLoc[i].w * loc.xyz);\n" // Compute vector to light: w must be either 1 or 0
+ " float d = dot(L, N);\n" // Lambertian lighting
+ " if (d > 0.)\n" // If the light hits the outside surface, ...
+ " color += d * lightColor[i] * Kd;\n" // ... accumulate color from the light source
+ " }\n"
+ "}\n";
+const char LambertianRenderer::_fragmentShader[] =
+ "varying vec3 color;\n"
+ "void main()\n"
+ "{\n"
+ " gl_FragColor = vec4(color, 1.);\n" // Interpolate the color
+ "}\n";
+
+
+/********************************************************************************
+ * startup
+ ********************************************************************************/
+
+int LambertianRenderer::startup() {
+ int err = myErrNone;
+ GLuint vertexShader = 0, fragmentShader = 0;
+
+ _programID = 0;
+ BAIL_IF_ERR(err = NewShader(_vertexShader, GL_VERTEX_SHADER, &vertexShader));
+ BAIL_IF_ERR(err = NewShader(_fragmentShader, GL_FRAGMENT_SHADER, &fragmentShader));
+ BAIL_IF_ERR(err = NewProgram(vertexShader, fragmentShader, &_programID));
+ _lightLoc = glGetUniformLocation(_programID, "lightLoc"); // light locations
+ _lightColor = glGetUniformLocation(_programID, "lightColor"); // light diffuse colors
+ _MmatrixID = glGetUniformLocation(_programID, "M"); // M matrix; require UL 3x3 to be orthogonal
+ _VPmatrixID = glGetUniformLocation(_programID, "VP"); // VP matrix
+ _ambientColorID = glGetUniformLocation(_programID, "Ka"); // ambient color
+ _diffuseColorID = glGetUniformLocation(_programID, "Kd"); // diffuse color
+ _vtxPosID = glGetAttribLocation(_programID, "vPos"); // vertex positions
+ _vtxNrmID = glGetAttribLocation(_programID, "vNrm"); // vertex normals
+
+ err = (-1 == _lightLoc || -1 == _lightColor || -1 == _MmatrixID || -1 == _VPmatrixID || -1 == _ambientColorID
+ || -1 == _diffuseColorID || -1 == _vtxPosID || -1 == _vtxNrmID) ? myErrShader : myErrNone;
+ BAIL_IF_ERR(err);
+
+bail:
+ if (myErrNone != err) shutdown();
+ if (fragmentShader) glDeleteShader(fragmentShader);
+ if (vertexShader) glDeleteShader(vertexShader);
+ return err;
+}
+
+
+/********************************************************************************
+ * use
+ ********************************************************************************/
+
+int LambertianRenderer::use() {
+ if (0 == _programID)
+ return myErrProgram;
+ glUseProgram(_programID);
+ return myErrNone;
+}
+
+
+/********************************************************************************
+ * set lights
+ ********************************************************************************/
+
+void LambertianRenderer::setLights(const float locXYZW[4*LAMBERTIAN_NUM_LIGHTS], const float colorRGB[3*LAMBERTIAN_NUM_LIGHTS]) {
+ glUseProgram(_programID);
+ glUniform4fv(_lightLoc, LAMBERTIAN_NUM_LIGHTS, locXYZW);
+ glUniform3fv(_lightColor, LAMBERTIAN_NUM_LIGHTS, colorRGB);
+}
+
+
+
+/********************************************************************************
+ * drawElements, from user memory
+ ********************************************************************************/
+
+void LambertianRenderer::drawElements(GLsizei numVertices, const GLfloat *positions, const GLfloat *normals,
+ GLenum graphicsMode, GLsizei indexCount, GLenum indexType, const GLvoid *indices,
+ const GLfloat M[4*4], const GLfloat VP[4*4], const float Ka[3], const float Kd[3]
+) {
+ MAYBE_UNUSED(numVertices);
+ glUseProgram(_programID);
+ if (M) glUniformMatrix4fv(_MmatrixID, 1, GL_FALSE, M);
+ if (VP) glUniformMatrix4fv(_VPmatrixID, 1, GL_FALSE, VP);
+ if (Ka) glUniform3fv(_ambientColorID, 1, Ka);
+ if (Kd) glUniform3fv(_diffuseColorID, 1, Kd);
+
+ glBindBuffer(GL_ARRAY_BUFFER, 0);
+ glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
+  if (normals) {              /* Separate arrays for position and normal */
+ glEnableVertexAttribArray(_vtxPosID);
+ glVertexAttribPointer(_vtxPosID, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(*positions), positions);
+ glEnableVertexAttribArray(_vtxNrmID);
+ glVertexAttribPointer(_vtxNrmID, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(*normals), normals);
+ }
+  else {                      /* One contiguous array for position and normal */
+ glEnableVertexAttribArray(_vtxPosID);
+ glVertexAttribPointer(_vtxPosID, 3, GL_FLOAT, GL_FALSE, 6 * sizeof(*positions), positions);
+ glEnableVertexAttribArray(_vtxNrmID);
+ glVertexAttribPointer(_vtxNrmID, 3, GL_FLOAT, GL_FALSE, 6 * sizeof(*normals), normals + 3);
+ }
+ glDrawElements(graphicsMode, indexCount, indexType, indices);
+}
+
+
+/********************************************************************************
+ * drawElements, from buffer objects
+ ********************************************************************************/
+
+void LambertianRenderer::drawElements(GLuint vtxBuf, unsigned posOff, unsigned nrmOff,
+ GLenum graphicsMode, GLsizei numIndices, unsigned indexSize, GLuint indexBuf,
+ const GLfloat M[4*4], const GLfloat VP[4*4], const float Ka[3], const float Kd[3]
+) {
+ GLenum err;
+
+ glUseProgram(_programID);
+ if (M) glUniformMatrix4fv(_MmatrixID, 1, GL_FALSE, M);
+ if (VP) glUniformMatrix4fv(_VPmatrixID, 1, GL_FALSE, VP);
+ if (Ka) glUniform3fv(_ambientColorID, 1, Ka);
+ if (Kd) glUniform3fv(_diffuseColorID, 1, Kd);
+
+ glBindBuffer(GL_ARRAY_BUFFER, vtxBuf);
+ glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, indexBuf);
+ if (!(nrmOff == 12 || nrmOff == 0)) { /* Separate array for position and normal */
+ glEnableVertexAttribArray(_vtxPosID);
+ glVertexAttribPointer(_vtxPosID, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(float), (void*)(intptr_t)posOff);
+ glEnableVertexAttribArray(_vtxNrmID);
+ glVertexAttribPointer(_vtxNrmID, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(float), (void*)(intptr_t)nrmOff);
+ }
+ else { /* One contiguous array for position and normal */
+ glEnableVertexAttribArray(_vtxPosID);
+ glVertexAttribPointer(_vtxPosID, 3, GL_FLOAT, GL_FALSE, 6 * sizeof(float), (void*)(intptr_t)(posOff + 0 * sizeof(float)));
+ glEnableVertexAttribArray(_vtxNrmID);
+ glVertexAttribPointer(_vtxNrmID, 3, GL_FLOAT, GL_FALSE, 6 * sizeof(float), (void*)(intptr_t)(posOff + 3 * sizeof(float)));
+ err = glGetError(); if (err) printf("glVertexAttribPointer returns %d\n", err);
+ }
+ glDrawElements(graphicsMode, numIndices, IndexTypeFromSize(indexSize), (void*)0);
+}
diff --git a/samples/ExpressionApp/BackEndOpenGL/GLShaders.h b/samples/ExpressionApp/BackEndOpenGL/GLShaders.h
new file mode 100644
index 0000000..8aafb8e
--- /dev/null
+++ b/samples/ExpressionApp/BackEndOpenGL/GLShaders.h
@@ -0,0 +1,366 @@
+/*###############################################################################
+#
+# Copyright 2016-2021 NVIDIA Corporation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy of
+# this software and associated documentation files (the "Software"), to deal in
+# the Software without restriction, including without limitation the rights to
+# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+# the Software, and to permit persons to whom the Software is furnished to do so,
+# subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+###############################################################################*/
+
+#ifndef __ARSHADERS_H__
+#define __ARSHADERS_H__
+
+#ifdef _MSC_VER
+ #include "glad/glad.h"
+#else
+  #include <GL/gl.h>   // assumed non-Windows OpenGL header
+#endif // _MSC_VER
+
+
+/********************************************************************************
+ ********************************************************************************
+ ***** SMOOTH RENDERER *****
+ ********************************************************************************
+ ********************************************************************************/
+
+class SmoothRenderer {
+public:
+
+ SmoothRenderer() { _programID = 0; }
+ ~SmoothRenderer() { shutdown(); }
+
+ int startup();
+ void shutdown() { if (_programID) glDeleteProgram(_programID); _programID = 0; }
+ int use();
+ int activate() { return startup(); } // DEPRECATED
+ void deactivate() { shutdown(); } // DEPRECATED
+
+ /** These take vertex and topology data in user-space buffers.
+ * @param[in] numPts The number of points in xyz or rgb.
+ * @param[in] xyz The vertex locations {x, y, z }.
+ * @param[in] rgb The vertex colors { r, g, b }, in [0, 1].
+ * @param[in] numIndices The number of indices.
+ * @param[in] indices The indices. Note that three versions are given, where indices can be 1, 2, or 4 bytes.
+ * @param[in] M the matrix.
+ */
+ void drawTriMesh(unsigned numPts, const float* xyz, const float* rgb,
+ unsigned numIndices, const unsigned char* indices, const float* M = nullptr) {
+ drawElements(numPts, xyz, rgb, GL_TRIANGLES, numIndices, GL_UNSIGNED_BYTE, indices, M);
+ }
+ void drawTriMesh(unsigned numPts, const float* xyz, const float* rgb,
+ unsigned numIndices, const unsigned short* indices, const float* M = nullptr) {
+ drawElements(numPts, xyz, rgb, GL_TRIANGLES, numIndices, GL_UNSIGNED_SHORT, indices, M);
+ }
+ void drawTriMesh(unsigned numPts, const float* xyz, const float* rgb,
+ unsigned numIndices, const unsigned int* indices, const float* M = nullptr) {
+ drawElements(numPts, xyz, rgb, GL_TRIANGLES, numIndices, GL_UNSIGNED_INT, indices, M);
+ }
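+
+  // Illustrative sketch (assumed usage, not part of the original header): drawing a single
+  // vertex-colored triangle from user memory after a successful startup().
+  //
+  //   static const float xyz[] = { 0.f,0.f,0.f,  1.f,0.f,0.f,  0.f,1.f,0.f };
+  //   static const float rgb[] = { 1.f,0.f,0.f,  0.f,1.f,0.f,  0.f,0.f,1.f };
+  //   static const unsigned short idx[] = { 0, 1, 2 };
+  //   static const float mvp[16] = { 1,0,0,0, 0,1,0,0, 0,0,1,0, 0,0,0,1 };  // identity, for example
+  //   SmoothRenderer smooth;
+  //   if (0 == smooth.startup())
+  //     smooth.drawTriMesh(3, xyz, rgb, 3, idx, mvp);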
+
+ /** These take vertex and topology data in GL buffer objects
+ * @param[in] vtxBuf the vertex buffer object identifier.
+ * @param[in] xyzOff the offset, in bytes, of the xyz positions in the vertex buffer.
+ * @param[in] rgbOff the offset, in bytes, of the rgb color in the vertex buffer.
+ * @param[in] numIndices the number of indices.
+ * @param[in] indexBuf the index buffer object identifier.
+ * @param[in] indexSize the byte size of the indices: 1, 2, or 4.
+ * @param[in] M the matrix.
+ */
+ void drawTriMesh(GLuint vtxBuf, unsigned xyzOff, unsigned rgbOff,
+ unsigned numIndices, GLuint indexBuf, GLenum indexSize, const float* M) {
+ drawElements(vtxBuf, xyzOff, rgbOff, GL_TRIANGLES, numIndices, indexSize, indexBuf, M);
+ }
+
+private:
+ /** Render geometry from user buffers, pre-shaded at vertices.
+ * @param[in] numVertices The number of 3D vertices.
+ * @param[in] positions The array of 3D positions -- one for every vertex.
+ * @param[in] colors The array of RGB colors -- one for every vertex.
+ * @param[in] graphicsMode One of { GL_TRIANGLES, GL_TRIANGLE_STRIP, GL_TRIANGLE_FAN }.
+ * GL_QUADS is not supported.
+ * @param[in] indexCount The number of 0-based vertex indices that define the geometry
+ * from the vertices and graphics mode.
+ * @param[in] indexType The type of index { GL_UNSIGNED_BYTE, GL_UNSIGNED_SHORT, GL_UNSIGNED_INT }.
+ * @param[in] indices The array of vertices.
+ * @param[in] M The modeling-viewing-projection matrix.
+ */
+ void drawElements(GLsizei numVertices, const GLfloat* positions, const GLfloat* colors,
+ GLenum graphicsMode, GLsizei indexCount, GLenum indexType, const GLvoid* indices, const GLfloat* M);
+
+ /** Render geometry from GL buffer objects, pre-shaded at vertices.
+ * @param[in] vtxBuf The ID of the GL buffer used to store the vertices.
+ * @param[in] posOff The offset of the positions in the vertex buffer. This is typically 0,
+   *                        but it is not required to be.
+ * @param[in] colOff The offset of the colors in the vertex buffer. Both planar (homogeneous, separate)
+ * and chunky (nonhomogeneous, interleaved) representations are accommodated.
+ * @param[in] graphicsMode One of { GL_TRIANGLES, GL_TRIANGLE_STRIP, GL_TRIANGLE_FAN }.
+ * GL_QUADS is not supported.
+ * @param[in] indexCount The number of 0-based vertex indices that define the geometry
+ * from the vertices and graphics mode.
+   * @param[in] indexSize The size of index { 1, 2, 4 } in bytes.
+ * @param[in] indexBuf The ID of the GL buffer used to store the indices.
+ * @param[in] M The modeling-viewing-projection matrix.
+ */
+  void drawElements(GLuint vtxBuf, unsigned posOff, unsigned colOff,
+      GLenum graphicsMode, GLsizei indexCount, unsigned indexSize, GLuint indexBuf, const GLfloat* M);
+
+ GLuint _programID;
+ GLint _maxtrixID, _vtxPosID, _vtxColID;
+ static const char _vertexShader[], _fragmentShader[];
+};
+
+
+/********************************************************************************
+ ********************************************************************************
+ ***** TEXTURE RENDERER *****
+ ********************************************************************************
+ ********************************************************************************/
+
+class TextureRenderer {
+public:
+
+ TextureRenderer() { _programID = 0; }
+ ~TextureRenderer() { shutdown(); }
+
+ int startup();
+ void shutdown() { if (_programID) glDeleteProgram(_programID); _programID = 0; }
+ int use();
+ int activate() { return startup(); } // DEPRECATED
+ void deactivate() { shutdown(); } // DEPRECATED
+
+ /** These take vertex and topology data in user-space buffers.
+ * @param[in] numPts The number of points in xyz or uv.
+ * @param[in] xyz The vertex locations {x, y, z }.
+ * @param[in] uv The vertex texture coordinates { u, v }, in [0, 1].
+ * @param[in] numIndices The number of indices.
+ * @param[in] indices The indices. Note that three versions are given, where indices can be 1, 2, or 4 bytes.
+ * @param[in] texID The texture ID.
+ * @param[in] M the matrix. NULL keeps the matrix as it was in the last invocation.
+ */
+ void drawTriMesh(unsigned numPts, const float* xyz, const float* uv,
+ unsigned numIndices, const unsigned char* indices, GLuint texID, const float* M = nullptr) {
+ drawElements(numPts, xyz, uv, GL_TRIANGLES, numIndices, GL_UNSIGNED_BYTE, indices, texID, M);
+ }
+ void drawTriMesh(unsigned numPts, const float* xyz, const float* uv,
+ unsigned numIndices, const unsigned short* indices, GLuint texID, const float* M = nullptr) {
+ drawElements(numPts, xyz, uv, GL_TRIANGLES, numIndices, GL_UNSIGNED_SHORT, indices, texID, M);
+ }
+ void drawTriMesh(unsigned numPts, const float* xyz, const float* uv,
+ unsigned numIndices, const unsigned int* indices, GLuint texID, const float* M = nullptr) {
+ drawElements(numPts, xyz, uv, GL_TRIANGLES, numIndices, GL_UNSIGNED_INT, indices, texID, M);
+ }
+
+ /** Draw a texture-mapped quadrilateral.
+ * @param[in] xyz The vertex locations {x, y, z }.
+ * @param[in] uv The vertex texture coordinates { u, v }, in [0, 1].
+ * @param[in] texID The texture ID.
+ * @param[in] M the matrix. NULL keeps the matrix as it was in the last invocation.
+ */
+ void drawQuad(const float xyz[4 * 3], const float uv[4 * 2], GLuint texID, const float* M = nullptr);
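+
+  // Illustrative sketch (assumed usage): blitting a texture onto a full-viewport quad; texID and
+  // mvp are supplied by the caller, and mvp may be NULL to keep the previously set matrix.
+  //
+  //   static const float xyz[4 * 3] = { -1,-1,0,   1,-1,0,   1,1,0,  -1,1,0 };
+  //   static const float uv [4 * 2] = {  0, 0,     1, 0,     1,1,     0,1   };
+  //   texRenderer.drawQuad(xyz, uv, texID, mvp);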
+
+ /** These take vertex and topology data in GL buffer objects
+ * @param[in] vtxBuf the vertex buffer object identifier.
+ * @param[in] xyzOff the offset, in bytes, of the xyz positions in the vertex buffer.
+ * @param[in] uvOff the offset, in bytes, of the texture coordinates in the vertex buffer.
+ * @param[in] numIndices the number of indices.
+ * @param[in] indexBuf the index buffer object identifier.
+ * @param[in] indexSize the byte size of the indices: 1, 2, or 4.
+ * @param[in] texID The texture ID.
+ * @param[in] M the matrix. NULL keeps the matrix as it was in the last invocation.
+ */
+  void drawTriMesh(GLuint vtxBuf, unsigned xyzOff, unsigned uvOff,
+      unsigned numIndices, GLuint indexBuf, GLenum indexSize, GLuint texID, const float* M) {
+    drawElements(vtxBuf, xyzOff, uvOff, GL_TRIANGLES, numIndices, indexSize, indexBuf, texID, M);
+ }
+
+private:
+ /** Render geometry from user buffers.
+ * @param[in] numVertices The number of 3D vertices.
+ * @param[in] positions The array of 3D positions -- one for every vertex.
+ * @param[in] uv The array of texture coordinates -- one for every vertex.
+ * @param[in] graphicsMode One of { GL_TRIANGLES, GL_TRIANGLE_STRIP, GL_TRIANGLE_FAN }.
+ * GL_QUADS is not supported.
+ * @param[in] indexCount The number of 0-based vertex indices that define the geometry
+ * from the vertices and graphics mode.
+ * @param[in] indexType The type of index { GL_UNSIGNED_BYTE, GL_UNSIGNED_SHORT, GL_UNSIGNED_INT }.
+ * @param[in] indices The array of vertices.
+ * @param[in] texID The ID of the texture to be used.
+ * @param[in] M The modeling-viewing-projection matrix.
+ */
+ void drawElements(GLsizei numVertices, const GLfloat* positions, const GLfloat* uv, GLenum graphicsMode,
+ GLsizei indexCount, GLenum indexType, const GLvoid* indices, GLuint texID, const GLfloat* M);
+
+ /** Render geometry from GL buffer objects.
+ * @param[in] vtxBuf The ID of the GL buffer used to store the vertices.
+ * @param[in] posOff The offset of the positions in the vertex buffer. This is typically 0,
+   *                        but it is not required to be.
+ * @param[in] uvOff The offset of the texture coordinates in the vertex buffer. Both planar
+ * (homogeneous, separate) and chunky (nonhomogeneous, interleaved)
+ * representations are accommodated.
+ * @param[in] graphicsMode One of { GL_TRIANGLES, GL_TRIANGLE_STRIP, GL_TRIANGLE_FAN }.
+ * GL_QUADS is not supported.
+ * @param[in] indexCount The number of 0-based vertex indices that define the geometry
+ * from the vertices and graphics mode.
+ * @param[in] indexSize The size of index { 1, 2, 4 } in bytes.
+ * @param[in] indexBuf The ID of the GL buffer used to store the indices.
+ * @param[in] texID The ID of the texture to be used.
+ * @param[in] M The modeling-viewing-projection matrix.
+ */
+ void drawElements(GLuint vtxBuf, unsigned posOff, unsigned uvOff, GLenum graphicsMode,
+ GLsizei indexCount, GLenum indexSize, GLuint indexBuf, GLuint texID, const GLfloat* M);
+
+ GLuint _programID;
+ GLint _maxtrixID, _vtxPosID, _vtxTexID;
+ static const char _vertexShader[], _fragmentShader[];
+};
+
+
+/** Update the specified texture.
+ * @param[in] texID The ID of the texture to be updated.
+ * @param[in] width The width of the source image.
+ * @param[in] height The height of the source image.
+ * @param[in] rowBytes The byte stride between successive rows of the source image (must be positive).
+ * @param[in] glFormat The format of the source image. One of { GL_RGBA, GL_BGRA, GL_RGB, GL_BGR, GL_RG, GL_R }.
+ * @param[in] pixels A pointer to pixel(0,0) of the source image.
+ * @return GL_NO_ERROR if the update was successful.
+ */
+GLenum UpdateTexture(GLint texID, GLsizei width, GLsizei height, GLsizei rowBytes, GLenum glFormat,
+ const GLvoid* pixels);
+
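+// Illustrative sketch (assumed usage; frameWidth, frameHeight and framePixels are caller-supplied):
+// uploading a tightly packed BGRA frame into an existing texture object.
+//
+//   GLuint texID;
+//   glGenTextures(1, &texID);
+//   if (GL_NO_ERROR != UpdateTexture(texID, frameWidth, frameHeight, frameWidth * 4, GL_BGRA, framePixels))
+//     { /* handle the GL error */ }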
+
+/********************************************************************************
+ ********************************************************************************
+ ***** LAMBERTIAN SHADER *****
+ ********************************************************************************
+ ********************************************************************************/
+
+ /** The camera is assumed to be at the origin. Lights are represented in the camera coordinate system.
+  * The model coordinates are transformed as M * pt;
+  * the normal is transformed by (M{3x3})^(-T), i.e. the inverse transpose, or simply by M{3x3} when the scaling is isotropic.
+ * We further assume that M{3x3} is orthonormal.
+ */
+class LambertianRenderer {
+public:
+#define LAMBERTIAN_NUM_LIGHTS 2
+
+ LambertianRenderer() { _programID = 0; }
+ ~LambertianRenderer() { shutdown(); }
+
+ int startup();
+ void shutdown() { if (_programID) glDeleteProgram(_programID); _programID = 0; }
+ int use();
+
+ /** Set all lights. We accommodate point lights or directional lights.
+ * These are specified in camera space, which we assume is fixed while the objects move.
+ * @param[in] locXYZW The location of the lights -- in camera space.
+ * The homogeneous coordinate W is used to choose between
+ * directional lights (W=0) and point lights (W=1).
+ * The result is undefined for other values of W.
+ * @param[in] colorRGB the emissive color of the light source, RGB in [0, 1].
+ * To turn a light off, set its emissive color to (0,0,0).
+ */
+ void setLights(const float locXYZW[4 * LAMBERTIAN_NUM_LIGHTS], const float colorRGB[3 * LAMBERTIAN_NUM_LIGHTS]);
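+
+  // Illustrative sketch (assumed usage): one directional light (W = 0) and one point light (W = 1).
+  //
+  //   static const float loc[4 * LAMBERTIAN_NUM_LIGHTS] = { 0.f, 1.f, 1.f, 0.f,      // directional
+  //                                                         0.f, 0.f, 2.f, 1.f };    // point
+  //   static const float rgb[3 * LAMBERTIAN_NUM_LIGHTS] = { 1.f, 1.f, 1.f,  0.2f, 0.2f, 0.2f };
+  //   lambertian.setLights(loc, rgb);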
+
+ /* These take vertex and topology data in user-space buffers.
+ * @param[in] numPts The number of points in xyz or normals.
+ * @param[in] xyz The vertex locations {x, y, z}.
+ * @param[in] nrm The vertex normals {nx, ny, nz}.
+ * @param[in] numIndices The number of indices.
+ * @param[in] indices The indices. Note that three versions are given, where indices can be 1, 2, or 4 bytes.
+ * @param[in] M The modeling matrix. If NULL, the previous matrix will be used.
+ * @param[in] VP The viewing+projection matrix. If NULL, the previous matrix will be used.
+ * @param[in] Ka The ambient color {r, g, b}. If NULL, the previous ambient color will be used.
+ * @param[in] Kd The diffuse color {r, g, b}. If NULL, the previous diffuse color will be used.
+ */
+ void drawTriMesh(unsigned numPts, const float* xyz, const float* nrm, unsigned numIndices, const unsigned char* indices,
+ const float* M = nullptr, const float* VP = nullptr, const float* Ka = nullptr, const float* Kd = nullptr) {
+ drawElements(numPts, xyz, nrm, GL_TRIANGLES, numIndices, GL_UNSIGNED_BYTE, indices, M, VP, Ka, Kd);
+ }
+ void drawTriMesh(unsigned numPts, const float* xyz, const float* nrm, unsigned numIndices, const unsigned short* indices,
+ const float* M = nullptr, const float* VP = nullptr, const float* Ka = nullptr, const float* Kd = nullptr) {
+ drawElements(numPts, xyz, nrm, GL_TRIANGLES, numIndices, GL_UNSIGNED_SHORT, indices, M, VP, Ka, Kd);
+ }
+ void drawTriMesh(unsigned numPts, const float* xyz, const float* nrm, unsigned numIndices, const unsigned int* indices,
+ const float* M = nullptr, const float* VP = nullptr, const float* Ka = nullptr, const float* Kd = nullptr) {
+ drawElements(numPts, xyz, nrm, GL_TRIANGLES, numIndices, GL_UNSIGNED_INT, indices, M, VP, Ka, Kd);
+ }
+
+ /* These take vertex and topology data in GL buffer objects
+ * @param[in] vtxBuf the vertex buffer object identifier.
+ * @param[in] xyzOff the offset, in bytes, of the xyz positions in the vertex buffer.
+ * @param[in] nrmOff the offset, in bytes, of the normals in the vertex buffer.
+ * @param[in] numIndices the number of indices.
+ * @param[in] indexBuf the index buffer object identifier.
+ * @param[in] indexSize the byte size of the indices: 1, 2, or 4.
+ * @param[in] M The modeling matrix. If NULL, the previous matrix will be used.
+ * @param[in] VP The viewing+projection matrix. If NULL, the previous matrix will be used.
+ * @param[in] Ka The ambient color {r, g, b}. If NULL, the previous ambient color will be used.
+ * @param[in] Kd The diffuse color {r, g, b}. If NULL, the previous diffuse color will be used.
+ */
+  void drawTriMesh(GLuint vtxBuf, unsigned xyzOff, unsigned nrmOff, unsigned numIndices, GLuint indexBuf, GLenum indexSize,
+      const float* M = nullptr, const float* VP = nullptr, const float* Ka = nullptr, const float* Kd = nullptr) {
+    drawElements(vtxBuf, xyzOff, nrmOff, GL_TRIANGLES, numIndices, indexSize, indexBuf, M, VP, Ka, Kd);
+ }
+
+private:
+ /** Render geometry from user buffers.
+ * @param[in] numVertices The number of 3D vertices.
+ * @param[in] positions The array of 3D positions -- one for every vertex.
+ * @param[in] normals The array of normals -- one for every vertex.
+ * @param[in] graphicsMode One of { GL_TRIANGLES, GL_TRIANGLE_STRIP, GL_TRIANGLE_FAN }.
+ * GL_QUADS is not supported.
+ * @param[in] indexCount The number of 0-based vertex indices that define the geometry
+ * from the vertices and graphics mode.
+ * @param[in] indexType The type of index { GL_UNSIGNED_BYTE, GL_UNSIGNED_SHORT, GL_UNSIGNED_INT }.
+ * @param[in] indices The array of vertices.
+   * @param[in] M The modeling matrix.
+   * @param[in] VP The viewing+projection matrix.
+ * @param[in] Ka The ambient color.
+ * @param[in] Kd The diffuse color.
+ */
+ void drawElements(GLsizei numVertices, const GLfloat* positions, const GLfloat* normals,
+ GLenum graphicsMode, GLsizei indexCount, GLenum indexType, const GLvoid* indices,
+ const GLfloat M[4 * 4], const GLfloat VP[4 * 4], const float Ka[3], const float Kd[3]);
+
+ /** Render geometry from GL buffer objects.
+ * @param[in] vtxBuf The ID of the GL buffer used to store the vertices.
+ * @param[in] posOff The offset of the positions in the vertex buffer. This is typically 0,
+   *                        but it is not required to be.
+ * @param[in] nrmOff The offset of the normals in the vertex buffer. Both planar (homogeneous, separate)
+ * and chunky (nonhomogeneous, interleaved) representations are accommodated.
+ * @param[in] graphicsMode One of { GL_TRIANGLES, GL_TRIANGLE_STRIP, GL_TRIANGLE_FAN }.
+ * GL_QUADS is not supported.
+ * @param[in] numIndices The number of 0-based vertex indices that define the geometry
+ * from the vertices and graphics mode.
+ * @param[in] indexSize The size of index { 1, 2, 4 } in bytes.
+ * @param[in] indexBuf The ID of the GL buffer used to store the indices.
+ * @param[in] M The modeling matrix.
+ * @param[in] VP The viewing+projection matrix.
+ * @param[in] Ka The ambient color.
+ * @param[in] Kd The diffuse color.
+ */
+ void drawElements(GLuint vtxBuf, unsigned posOff, unsigned nrmOff,
+ GLenum graphicsMode, GLsizei numIndices, unsigned indexSize, GLuint indexBuf,
+ const GLfloat M[4 * 4], const GLfloat VP[4 * 4], const float Ka[3], const float Kd[3]);
+
+ GLuint _programID;
+ GLint _MmatrixID, _VPmatrixID, _lightLoc, _lightColor, _ambientColorID, _diffuseColorID;
+ GLint _vtxPosID, _vtxNrmID;
+ static const char _vertexShader[], _fragmentShader[];
+};
+
+
+#endif /* __ARSHADERS_H__ */
diff --git a/samples/ExpressionApp/BackEndOpenGL/GLSpectrum.h b/samples/ExpressionApp/BackEndOpenGL/GLSpectrum.h
new file mode 100644
index 0000000..d44d2c1
--- /dev/null
+++ b/samples/ExpressionApp/BackEndOpenGL/GLSpectrum.h
@@ -0,0 +1,106 @@
+ /*###############################################################################
+ #
+ # Copyright 2016-2021 NVIDIA Corporation
+ #
+ # Permission is hereby granted, free of charge, to any person obtaining a copy of
+ # this software and associated documentation files (the "Software"), to deal in
+ # the Software without restriction, including without limitation the rights to
+ # use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ # the Software, and to permit persons to whom the Software is furnished to do so,
+ # subject to the following conditions:
+ #
+ # The above copyright notice and this permission notice shall be included in all
+ # copies or substantial portions of the Software.
+ #
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ # FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ # COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ # IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ #
+ ###############################################################################*/
+
+#ifndef __GLSPECTRUM_H
+#define __GLSPECTRUM_H
+
+
+////////////////////////////////////////////////////////////////////////////////
+/// The representation used for spectral parameters in surface illumination transport.
+////////////////////////////////////////////////////////////////////////////////
+
+struct GLSpectrum3f {
+ float r, g, b; ///< Red, green and blue components of the color spectrum.
+
+ /// Default constructor.
+ GLSpectrum3f() {}
+
+ /// Initialization constructor.
+ /// @param[in] R the red component.
+ /// @param[in] G the green component.
+ /// @param[in] B the blue component.
+ GLSpectrum3f(float R, float G, float B) { set(R, G, B); }
+
+ /// Access to the array of spectral components.
+ /// @return a pointer to the array of spectral components.
+ const float* data() const { return &r; }
+
+ /// Access to the array of spectral components.
+ /// @return a pointer to the array of spectral components.
+ float* data() { return &r; }
+
+ /// Set the spectral components.
+ /// @param[in] R the red component.
+ /// @param[in] G the green component.
+ /// @param[in] B the blue component.
+ void set(float R, float G, float B) { r = R; g = G; b = B; };
+
+ /// Componentwise scaling of the spectrum.
+  /// @param[in] k the scaling spectrum (RHS).
+ /// @return The LHS, scaled by the RHS.
+ GLSpectrum3f& operator*=(const GLSpectrum3f& k) { r *= k.r; g *= k.g; b *= k.b; return *this; }
+
+ /// Componentwise augmentation of the spectrum.
+  /// @param[in] k the delta spectrum (RHS).
+ /// @return The LHS, augmented by the RHS.
+ GLSpectrum3f& operator+=(const GLSpectrum3f& k) { r += k.r; g += k.g; b += k.b; return *this; }
+
+ /// Scalar scaling of the spectrum.
+  /// @param[in] s the scalar (RHS).
+ /// @return The LHS, scaled by the RHS.
+ GLSpectrum3f& operator*=(float s) { r *= s; g *= s; b *= s; return *this; }
+
+ /// Scalar scaling of the spectrum.
+  /// @param[in] s the scalar (RHS).
+ /// @return The LHS, scaled by the RHS.
+ GLSpectrum3f& operator/=(float s) { r /= s; g /= s; b /= s; return *this; }
+
+ /// Componentwise scaling of the spectrum.
+ /// @param[in] k the scaling spectrum (RHS).
+ /// @return The componentwise product of the LHS and RHS.
+ GLSpectrum3f operator*(const GLSpectrum3f& k) const { return GLSpectrum3f(r * k.r, g * k.g, b * k.b); }
+
+ /// Componentwise augmentation of the spectrum.
+  /// @param[in] k the delta spectrum (RHS).
+ /// @return The componentwise sum of the LHS and RHS.
+ GLSpectrum3f operator+(const GLSpectrum3f& k) const { return GLSpectrum3f(r + k.r, g + k.g, b + k.b); }
+
+ /// Scalar scaling of the spectrum.
+ /// @param[in] s the scalar (RHS).
+ /// @return The product of the LHS and the RHS scalar.
+ GLSpectrum3f operator*(float s) const { return GLSpectrum3f(r * s, g * s, b * s); }
+
+ /// Scalar scaling of the spectrum.
+ /// @param[in] s the scalar (RHS).
+ /// @return The product of the LHS and the RHS scalar.
+ GLSpectrum3f operator/(float s) const { return GLSpectrum3f(r / s, g / s, b / s); }
+};
+
+/// Scalar scaling of the spectrum.
+/// @param[in] s the scalar (LHS).
+/// @param[in] k the spectrum to be scaled (RHS).
+/// @return The product of the RHS and the scalar LHS.
+inline GLSpectrum3f operator*(float s, const GLSpectrum3f& k) { return GLSpectrum3f(s * k.r, s * k.g, s * k.b); }
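+
+// Illustrative sketch (assumed usage): the operators compose as expected, e.g. attenuating a light
+// color by a surface albedo and a scalar exposure factor:
+//
+//   GLSpectrum3f lightColor(1.0f, 0.95f, 0.9f), albedo(0.18f, 0.30f, 0.25f);
+//   GLSpectrum3f shaded = 0.5f * lightColor * albedo;   // scalar scaling, then componentwise product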
+
+
+#endif // __GLSPECTRUM_H
diff --git a/samples/ExpressionApp/BackEndOpenGL/OpenGLMeshRenderer.cpp b/samples/ExpressionApp/BackEndOpenGL/OpenGLMeshRenderer.cpp
new file mode 100644
index 0000000..eae139e
--- /dev/null
+++ b/samples/ExpressionApp/BackEndOpenGL/OpenGLMeshRenderer.cpp
@@ -0,0 +1,645 @@
+/*###############################################################################
+#
+# Copyright 2021 NVIDIA Corporation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy of
+# this software and associated documentation files (the "Software"), to deal in
+# the Software without restriction, including without limitation the rights to
+# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+# the Software, and to permit persons to whom the Software is furnished to do so,
+# subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+###############################################################################*/
+
+#include "OpenGLMeshRenderer.h"
+
+#include <algorithm>
+#include <cmath>
+#include <cstdio>
+#include <cstring>
+#include <string>
+#include <vector>
+
+#ifdef _MSC_VER
+#include "glad/glad.h"
+#define strcasecmp _stricmp
+#else
+#include <strings.h>
+#endif // _MSC_VER
+
+#include "FaceIO.h"
+#include "GLFW/glfw3.h"
+#include "glm/glm.hpp"
+#include "glm/gtc/matrix_transform.hpp"
+#include "glm/gtc/quaternion.hpp"
+#include "GLMaterial.h"
+#include "GLMesh.h"
+#include "GLShaders.h"
+#include "GLSpectrum.h"
+#include "nvAR_defs.h"
+#include "nvCVOpenCV.h"
+#include "opencv2/highgui/highgui.hpp"
+#include "SimpleFaceModel.h"
+
+
+////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
+///// SUPPORT MACROS AND FUNCTIONS /////
+////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
+
+#define BAIL_IF_ERR(err) do { if ((err) != 0) { goto bail; } } while(0)
+#define BAIL(err, code) do { err = code; goto bail; } while(0)
+
+#ifndef __BYTE_ORDER__ /* How bytes are packed into a 32 bit word */
+ #define __ORDER_LITTLE_ENDIAN__ 3210 /* First byte in the least significant position */
+ #define __ORDER_BIG_ENDIAN__ 0123 /* First byte in the most significant position */
+#if defined(__amd64__) || defined(__amd64) || defined(__x86_64__) || defined(__x86_64) || defined(_M_AMD64) || _MSC_VER
+ #define __BYTE_ORDER__ __ORDER_LITTLE_ENDIAN__
+ #endif /* little-endian architectures */
+#endif /* __BYTE_ORDER__ */
+
+
+/********************************************************************************
+ * glfwErrorCallback
+ ********************************************************************************/
+
+static void glfwErrorCallback(int error, const char *description) {
+ fprintf(stderr, "Error %d: %s\n", error, description);
+}
+
+
+/********************************************************************************
+ * MakeGLContext
+ ********************************************************************************/
+
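+// Create a GLFW window with an OpenGL 2.0-compatible context and make it current; on Windows the GL
+// entry points are then loaded through glad. The window handle is returned to the caller via pWindow.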
+static NvCV_Status MakeGLContext(int width, int height, const char *title, GLFWwindow **pWindow) {
+ NvCV_Status nvErr = NVCV_SUCCESS;
+ GLFWwindow *window;
+
+ /* Get a context */
+ glfwSetErrorCallback(glfwErrorCallback);
+ if (!glfwInit()) {
+ // Initialization failed
+ fprintf(stderr, "Unable to initialize glfw\n");
+ return NVCV_ERR_INITIALIZATION;
+ }
+ glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 2);
+ glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 0);
+ window = glfwCreateWindow(width, height, title, /*GLFWmonitor */NULL, /*GLFWwindow*/NULL);
+ if (!window) {
+ // Window or OpenGL context creation failed
+ fprintf(stderr, "Unable to create glfw window\n");
+ BAIL(nvErr, NVCV_ERR_INITIALIZATION);
+ }
+ int winWidth, winHeight;
+ glfwGetWindowSize(window, &winWidth, &winHeight);
+ if (winWidth != width || winHeight != height) {
+ fprintf(stderr, "getWindowSize(%u x %u) != (%u x %u)\n", winWidth, winHeight, width, height);
+ }
+ glfwMakeContextCurrent(window);
+ #ifdef _MSC_VER
+ if (!gladLoadGL()) {
+ fprintf(stderr, "Unable to load GL\n");
+ BAIL(nvErr, NVCV_ERR_INITIALIZATION);
+ }
+ fprintf(stderr, "OpenGL Version %d.%d loaded\n", GLVersion.major, GLVersion.minor);
+ #endif // _MSC_VER
+ *pWindow = window;
+
+bail:
+ return nvErr;
+}
+
+
+/********************************************************************************
+ * CloseGLContext
+ ********************************************************************************/
+
+static void CloseGLContext(GLFWwindow *window) {
+ if (window)
+ glfwDestroyWindow(window);
+ glfwTerminate();
+}
+
+
+/********************************************************************************
+ * ComputeDualTopologyFromAdjacencies
+ ********************************************************************************/
+
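+// Build the vertex-to-face ("dual") adjacency table that accelerates vertex-normal computation.
+// Each (vertex, face) incidence taken from the face model's per-edge adjacency lists is packed into a
+// single 32-bit key with the vertex index in the most significant half, so sorting groups all faces
+// incident on a vertex together and std::unique drops the duplicates contributed by shared edges. The
+// resulting per-vertex face lists and counts let vertex normals be formed by averaging adjacent face normals.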
+static NvCV_Status ComputeDualTopologyFromAdjacencies(const SimpleFaceModelAdapter *fma, GLMesh *mesh) {
+ union IVF {
+ unsigned i;
+ struct VF {
+ #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ unsigned short face, vertex; // Vertex in most significant position
+ #else // __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ unsigned short vertex, face; // Vertex in most significant position
+ #endif // __BYTE_ORDER__
+ } vf;
+ bool operator<( const IVF& other) { return i < other.i; }
+ bool operator==(const IVF& other) { return i == other.i; }
+ };
+ std::vector<IVF> topo;
+
+ topo.reserve(mesh->numVertices() * 6 * 2); // Assume valence-6, duplicated
+ const unsigned short *adjVertices = const_cast<SimpleFaceModelAdapter*>(fma)->getAdjacentVertices(0),
+ *adjFaces = const_cast<SimpleFaceModelAdapter*>(fma)->getAdjacentFaces(0);
+ unsigned n = fma->getAdjacentVerticesSize();
+ if (n != fma->getAdjacentFacesSize()) return NVCV_ERR_MISMATCH;
+ for (unsigned ej = 0; ej < n; ej += 2) { // 2 adjacencies per edge
+ for (unsigned vx = 0; vx < 2; ++vx) { // for every vertex on the edge
+ for (unsigned fc = 0; fc < 2; ++fc) { // and every face on the edge
+ IVF vf;
+ vf.vf.vertex = adjVertices[ej + vx];
+ vf.vf.face = adjFaces[ej + fc];
+ if (vf.vf.vertex && vf.vf.face) { // if a real vertex and a real face
+ --vf.vf.vertex; // convert from 1-based index ...
+ --vf.vf.face; // ... to 0-based index
+ topo.push_back(vf);
+ }
+ }
+ }
+ }
+ std::sort(topo.begin(), topo.end());
+ topo.erase(std::unique(topo.begin(), topo.end()), topo.end());
+ mesh->resizeDualIndices(unsigned(topo.size()));
+ unsigned short *dual = mesh->getDualIndices(),
+ *numFaces = mesh->getVertexFaceCounts();
+ memset(numFaces, 0, mesh->numVertices() * sizeof(*numFaces));
+ for (unsigned i = 0; i < topo.size(); ++i) {
+ numFaces[topo[i].vf.vertex]++;
+ dual[i] = topo[i].vf.face;
+ }
+ return NVCV_SUCCESS;
+}
+
+
+/********************************************************************************
+ * MakeMesh
+ ********************************************************************************/
+
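+// Build a renderable GLMesh from the face model: the mean-shape vertices and triangle list are copied
+// in, the dual (vertex-to-face) topology is attached for fast normal computation, vertex normals are
+// computed, and any named partitions and their materials are carried over from the model.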
+NvCV_Status MakeMesh(const SimpleFaceModelAdapter *fma, GLMesh *mesh) {
+ mesh->clear();
+ mesh->addVertices(fma->getShapeMeanSize() / 3, const_cast<SimpleFaceModelAdapter*>(fma)->getShapeMean(0));
+ mesh->addFaces(fma->getTriangleListSize() / 3, 3, const_cast<SimpleFaceModelAdapter*>(fma)->getTriangleList(0), 0, 0);
+ NvCV_Status err = ComputeDualTopologyFromAdjacencies(fma, mesh); // This makes vertex normal computation lightning fast
+ if (NVCV_SUCCESS != err) return err;
+ mesh->computeVertexNormals();
+ if (fma->fm.partitions.size()) {
+ std::vector<GLMesh::Partition> parts(fma->fm.partitions.size());
+ for (unsigned i = unsigned(parts.size()); i--;) {
+ const SimpleFaceModel::Partition& fr = fma->fm.partitions[i];
+ GLMesh::Partition& to = parts[fr.partitionIndex];
+ //to.partitionIndex = fr.partitionIndex; // to doesn't have a partitionIndex
+ to.faceIndex = fr.faceIndex;
+ to.numFaces = fr.numFaces;
+ to.vertexIndex = fr.vertexIndex;
+ to.numVertexIndices = fr.numVertexIndices;
+ to.name = fr.name;
+ to.materialName = fr.materialName;
+ to.smooth = fr.smoothingGroup;
+ }
+ mesh->partitionMesh(unsigned(parts.size()), parts.data());
+ }
+ return NVCV_SUCCESS;
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
+///// RENDER CONTEXT /////
+////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
+
+#define LAMBERTIAN_NUM_LIGHTS 2
+
+class RenderContext {
+public:
+
+ RenderContext() {
+ m_win = nullptr;
+ }
+
+ ~RenderContext() {
+ if (m_win) CloseGLContext(m_win);
+ m_lam.shutdown();
+ m_txr.shutdown();
+ }
+
+ NvCV_Status init() {
+ if (0 != m_lam.startup())
+ return NVCV_ERR_OPENGL;
+ if (0 != m_txr.startup())
+ return NVCV_ERR_OPENGL;
+ return NVCV_SUCCESS;
+ }
+
+ void setClearColor(float r, float g, float b, float a = 1.f) { glClearColor(r, g, b, a); }
+
+ void setClearColor(unsigned char r, unsigned char g, unsigned char b) {
+ glClearColor(r * (1.f / 255.f), g * (1.f / 255.f), b * (1.f / 255.f), 1.f);
+ }
+
+ NvCV_Status makeWindowContext(int wd, int ht, const char *title) {
+ NvCV_Status err = MakeGLContext(wd, ht, title, &m_win);
+ if (NVCV_SUCCESS == err) {
+ m_width = wd;
+ m_height = ht;
+ glfwMakeContextCurrent(m_win);
+ glViewport(0, 0, wd, ht);
+ glEnable(GL_DEPTH_TEST);
+ glClearColor(0.f, 0.f, 0.f, 1.f);
+ if (/*FLAG_orientation*/0) {
+ glEnable(GL_CULL_FACE);
+ glCullFace((/*FLAG_orientation*/0 > 0) ? GL_BACK : GL_FRONT);
+ }
+ else {
+ glDisable(GL_CULL_FACE);
+ }
+ }
+ return err;
+ }
+
+ void computeInverseViewMatrix() {
+ #if 0
+ Vinv = glm::inverse(V);
+ #else // Inversion is simple because we know that it is a rigid transform
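+ // For a rigid view matrix V = [R | t; 0 1] with orthonormal R, the inverse is [R^T | -R^T t; 0 1]:
+ // transpose the 3x3 rotation block and rebuild the translation column from the original t, which is
+ // cheaper and better conditioned than a general 4x4 inverse.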
+ m_Vinv = glm::transpose(m_V);
+ m_Vinv[3][0] = -(m_Vinv[0][0] * m_V[3][0] + m_Vinv[1][0] * m_V[3][1] + m_Vinv[2][0] * m_V[3][2]);
+ m_Vinv[3][1] = -(m_Vinv[0][1] * m_V[3][0] + m_Vinv[1][1] * m_V[3][1] + m_Vinv[2][1] * m_V[3][2]);
+ m_Vinv[3][2] = -(m_Vinv[0][2] * m_V[3][0] + m_Vinv[1][2] * m_V[3][1] + m_Vinv[2][2] * m_V[3][2]);
+ m_Vinv[0][3] = 0.f; m_Vinv[1][3] = 0.f; m_Vinv[2][3] = 0.f; m_Vinv[3][3] = 1.f;
+ #endif
+ }
+
+ void setViewMatrix(const glm::mat4x4& viewMatrix) {
+ m_V = viewMatrix;
+ computeInverseViewMatrix();
+ }
+
+ void setViewMatrix(const glm::vec3& fromPoint, const glm::vec3& toPoint, const glm::vec3& upVector) {
+ m_V = glm::lookAt(fromPoint, toPoint, upVector);
+ computeInverseViewMatrix();
+ }
+
+ void setOrthoCamera(float hither, float yon) {
+ m_P = glm::orthoLH_NO(m_width * -.5f, m_width * +.5f, m_height * -.5f, m_height * +.5f, hither, yon);
+ }
+
+ void setOrthoCamera(float wd, float ht, float hither, float yon) {
+ m_P = glm::orthoLH_NO(wd * -.5f, wd * +.5f, ht * -.5f, ht * +.5f, hither, yon);
+ }
+
+ void setLights(const glm::vec4 locs[LAMBERTIAN_NUM_LIGHTS], const GLSpectrum3f colors[LAMBERTIAN_NUM_LIGHTS]) {
+ memcpy(m_lightLoc, locs, sizeof(m_lightLoc));
+ memcpy(m_lightColor, colors, sizeof(m_lightColor));
+ }
+
+ void setViewOfBound(const GLMesh::BoundingSphere& bsph, const glm::vec3& lookAt, const glm::vec3& up, float vfov,
+ float fracFill, float yDir = 1.f) {
+ float r = bsph.radius() / fracFill,
+ aspect = (float)m_width / (float)m_height,
+ signZ = -yDir,
+ dist;
+
+ if (vfov > 0) { // Perspective
+ dist = r * .5f / tanf(vfov * .5f);
+ m_P = glm::perspective(vfov, aspect, (dist - r) * 0.2f, (dist + r) * 2.0f);
+ }
+ else { // Orthographic
+ float w = r,
+ h = r;
+ if (aspect < 1.f) h /= aspect; // Wide
+ else w *= aspect; // Tall
+ dist = r * 2.f;
+
+ m_P = glm::orthoLH_NO(-w, +w, -h, +h, (dist - r) * signZ, (dist + r) * signZ);
+ }
+ m_V = glm::lookAt(bsph.center() - glm::normalize(lookAt) * dist, bsph.center(), up);
+ computeInverseViewMatrix();
+ }
+
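+ // Frame a bounding box in the viewport: fracFill is the fraction of the view the subject should
+ // occupy, yOff shifts the look-at point vertically as a fraction of the box height, and a
+ // non-positive vfov selects an orthographic projection instead of a perspective one.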
+ void setViewOfBound(const GLMesh::BoundingBox& bbox, const glm::vec3& lookAt, const glm::vec3& up, float vfov,
+ float fracFill, float yDir = 1.f, float yOff = 0.f) {
+ glm::vec3 boxSize = bbox.max() - bbox.min();
+ glm::vec3 boxCenter = bbox.center();
+ float borderFrac = ((1.f - fracFill) / fracFill),
+ dx = boxSize.x * (1.f + borderFrac), // border on left and right
+ dy = boxSize.y * (1.f + borderFrac) * (1.f - fabsf(yOff)),
+ r = ((dx > dy) ? dx : dy), // radius of bounding sphere
+ aspectGeom = dx / dy,
+ aspectWind = (float)m_width / (float)m_height,
+ signZ = -yDir,
+ dist;
+
+ if (vfov > 0) { // Perspective
+ dist = r * .5f / tanf(vfov * .5f);
+ m_P = glm::perspective(vfov, aspectWind, dist - r, dist + r);
+ }
+ else { // Orthographic
+ if (aspectGeom > aspectWind) dy *= aspectGeom / aspectWind;
+ else dx *= aspectWind / aspectGeom;
+ dist = r * 2.f;
+ dx *= .5f;
+ dy *= .5f;
+ m_P = glm::orthoLH_NO(-dx, +dx, -dy, +dy, (dist - r) * signZ, (dist + r) * signZ);
+ }
+ boxCenter.y += boxSize.y * yOff;
+ m_V = glm::lookAt(boxCenter - glm::normalize(lookAt) * dist, boxCenter, up);
+ computeInverseViewMatrix();
+ }
+
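+ // Draw every partition of the mesh with the Lambertian shader, using the partition's material from
+ // the material library (or materialOverride, if given), and falling back to a built-in default
+ // diffuse/ambient pair when no material is found.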
+ NvCV_Status renderPolyMesh(const GLMesh& mesh, const glm::mat4x4& M, const char *materialOverride = nullptr) {
+ NvCV_Status nvErr = NVCV_SUCCESS;
+ glm::mat4x4 VP = m_P * m_V;
+ const GLSpectrum3f defaultDiffuse = { 0.77f, 0.63f, 0.55f },
+ defaultAmbient = defaultDiffuse * 0.3f;
+
+ #ifdef DEBUG_RENDERING
+ unsigned why = mesh.notRenderable(0);
+ if (why) {
+ if (FLAG_debug)
+ printf("Mesh %p is not renderable: %s: %s\n", &mesh,
+ ((why & GLMesh::NOT_TRIMESH) ? "not a TriMesh" : ""),
+ ((why & GLMesh::COMPLEX_TOPOLOGY) ? "complex topology" : "")
+ );
+ return keErrGeometry;
+ }
+ #endif // DEBUG_RENDERING
+
+ // Set lights for all shaders
+ m_lam.setLights(&m_lightLoc[0].x, m_lightColor[0].data()); // TODO: set this elsewhere
+
+ for (unsigned ix = 0, numPartitions = mesh.numPartitions(); ix < numPartitions; ++ix) {
+ GLMesh::Partition pt;
+ nvErr = mesh.getPartition(ix, pt);
+ BAIL_IF_ERR(nvErr);
+ const GLMaterial *mtl = m_mtlLib.getMaterial(materialOverride ? materialOverride : pt.materialName.c_str());
+
+ if (mesh.numNormals()) { // We should check for textures, too
+ const GLSpectrum3f *difColor, *ambColor;
+ if (mtl) {
+ difColor = &mtl->diffuseColor;
+ ambColor = &mtl->ambientColor;
+ }
+ else {
+ difColor = &defaultDiffuse;
+ ambColor = &defaultAmbient;
+ }
+ m_lam.drawTriMesh(mesh.numVertices(), &mesh.getVertices()->x, &mesh.getNormals()->x,
+ pt.numVertexIndices, mesh.getVertexIndices() + pt.vertexIndex, &M[0][0], &VP[0][0],
+ ambColor->data(), difColor->data());
+ }
+ }
+
+ bail:
+ return nvErr;
+ }
+
+ unsigned m_width, m_height; ///< The dimensions of the viewport.
+ GLFWwindow *m_win; ///< The window context.
+ GLMaterialLibrary m_mtlLib; ///< The material library.
+ glm::mat4x4 m_V, m_Vinv; ///< The viewing matrix and its inverse.
+ glm::mat4x4 m_P; ///< The projection matrix.
+ glm::vec4 m_lightLoc[LAMBERTIAN_NUM_LIGHTS]; ///< The light locations.
+ GLSpectrum3f m_lightColor[LAMBERTIAN_NUM_LIGHTS]; ///< The light colors.
+ LambertianRenderer m_lam; ///< The Lambertian renderer.
+ TextureRenderer m_txr; ///< The texture renderer.
+};
+
+
+////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
+///// OPENGL MESH RENDERER /////
+////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
+
+
+class OpenGLMeshRenderer : public MeshRenderer {
+public:
+ ~OpenGLMeshRenderer();
+
+ static NvCV_Status initDispatch(MeshRenderer::Dispatch *dispatch);
+ static NvCV_Status unload();
+
+private:
+ OpenGLMeshRenderer();
+ SimpleFaceModelAdapter _sfma;
+ RenderContext _ctx;
+ GLMesh _mesh;
+ glm::vec3 _ctrRot;
+
+ // C-style object-oriented member functions that would usually be loaded from a DLL. In this
+ // implementation, however, the OpenGLMeshRenderer is compiled directly into ExpressionApp, and the
+ // MeshRendererBroker automatically adds it to its portfolio of renderers without a separate DLL.
+ static NvCV_Status create(MeshRenderer **han);
+ static void destroy(MeshRenderer *han);
+ static NvCV_Status name(const char **str);
+ static NvCV_Status info(const char **str);
+ static NvCV_Status read(MeshRenderer *han, const char *modelFile);
+ static NvCV_Status init(MeshRenderer *han, unsigned width, unsigned height, const char *windowName);
+ static NvCV_Status setCamera(MeshRenderer *han,
+ const float locPt[3], const float lookVec[3], const float upVec[3], float vfov);
+ static NvCV_Status render(MeshRenderer *han,
+ const float exprs[53], const float qrot[4], const float tran[3], NvCVImage *result);
+ NvCV_Status setFOV(float radians);
+};
+
+NvCV_Status OpenGLMeshRenderer_InitDispatch(MeshRenderer::Dispatch *dispatch) {
+ return OpenGLMeshRenderer::initDispatch(dispatch);
+}
+
+NvCV_Status OpenGLMeshRenderer_Unload() {
+ return OpenGLMeshRenderer::unload();
+}
+
+
+/********************************************************************************
+ * DeformModel
+ ********************************************************************************/
+
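+// Evaluate the linear (blendshape) face model:
+//   vertices = shapeMean + sum_i identCoeffs[i] * shapeModes[i] + sum_j exprCoeffs[j] * blendShapes[j]
+// where the modes and blendshapes are stored as per-vertex deltas from the mean shape. Zero
+// coefficients are skipped, and vertex normals are recomputed after deformation.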
+static NvCV_Status DeformModel(const SimpleFaceModel& model, const float *identCoeffs, const float *exprCoeffs, GLMesh *mesh) {
+ unsigned size = unsigned(model.shapeMean.size()) * 3, // the number of floats in the mesh vector
+ numCoeffs, i;
+ float *const dst0 = &mesh->getVertices()->x, // begin
+ *const dst1 = dst0 + size; // end
+ float const *src;
+ float *dst, c;
+
+ memcpy(dst0, model.shapeMean.data(), size * sizeof(*dst0)); // Initialize
+ if (identCoeffs) {
+ for (i = 0, numCoeffs = unsigned(model.shapeEigenValues.size()), src = model.shapeModes.data()->vec; i < numCoeffs; ++i, ++identCoeffs) {
+ if ((c = *identCoeffs) != 0.f) {
+ for (dst = dst0; dst != dst1;)
+ *dst++ += *src++ * c;
+ }
+ else {
+ src += size;
+ }
+ }
+ }
+ for (i = 0, numCoeffs = unsigned(model.blendShapes.size()); i < numCoeffs; ++i, ++exprCoeffs) {
+ if ((c = *exprCoeffs) != 0.f) {
+ for (dst = dst0, src = model.blendShapes[i].shape.data()->vec; dst != dst1;)
+ *dst++ += *src++ * c;
+ }
+ }
+ mesh->computeVertexNormals();
+ return NVCV_SUCCESS;
+}
+
+
+OpenGLMeshRenderer::OpenGLMeshRenderer() {
+ /*NvCV_Status err =*/ (void)initDispatch(&this->m_dispatch);
+}
+
+OpenGLMeshRenderer::~OpenGLMeshRenderer() {
+}
+
+NvCV_Status OpenGLMeshRenderer::name(const char **str) {
+ static const char name[] = "OpenGL";
+ *str = name;
+ return NVCV_SUCCESS;
+}
+
+NvCV_Status OpenGLMeshRenderer::info(const char **str) {
+ static const char info[] = "OpenGL renderer using local illumination";
+ *str = info;
+ return NVCV_SUCCESS;
+}
+
+NvCV_Status OpenGLMeshRenderer::create(MeshRenderer **han) {
+ *han = new OpenGLMeshRenderer();
+ return NVCV_SUCCESS;
+}
+
+void OpenGLMeshRenderer::destroy(MeshRenderer* /*han*/) {
+}
+
+NvCV_Status OpenGLMeshRenderer::read(MeshRenderer *han, const char *modelFile) {
+ OpenGLMeshRenderer *ren = static_cast<OpenGLMeshRenderer*>(han);
+ size_t z = strlen(modelFile);
+ if (z < 5) return NVCV_ERR_FILE;
+ if (!strcasecmp(".nvf", modelFile + z - 4)) {
+ FaceIOErr ioErr = ReadNVFFaceModel(modelFile, &ren->_sfma); // TODO clear _sfma first
+ if (kIOErrNone != ioErr) {
+ printf("Error: \"%s\": %s\n", modelFile, FaceIOErrorStringFromCode(ioErr));
+ return NVCV_ERR_READ;
+ }
+ NvCV_Status nvErr;
+ nvErr = MakeMesh(&ren->_sfma, &ren->_mesh);
+ if (NVCV_SUCCESS != nvErr) return nvErr;
+
+ std::string mtlFile;
+ mtlFile.assign(modelFile, 0, strlen(modelFile) - 3);
+ mtlFile += "mtl";
+ nvErr = ren->_ctx.m_mtlLib.read(mtlFile.c_str());
+ unsigned why = ren->_mesh.notRenderable(0);
+ if (why) {
+ printf("Mesh \"%s\" is not renderable: %s: %s\n", modelFile,
+ ((why & GLMesh::NOT_TRIMESH) ? "not a TriMesh" : ""),
+ ((why & GLMesh::COMPLEX_TOPOLOGY) ? "complex topology" : "")
+ );
+ return NVCV_ERR_MISMATCH;
+ }
+ return NVCV_SUCCESS;
+ }
+ // else if (!strcasecmp(".obj", file + z - 4)) { read obj files }
+ else {
+ return NVCV_ERR_FILE;
+ }
+
+}
+
+NvCV_Status OpenGLMeshRenderer::init(MeshRenderer *han,
+ unsigned width, unsigned height, const char *windowName) {
+ OpenGLMeshRenderer *ren = static_cast<OpenGLMeshRenderer*>(han);
+ static const GLSpectrum3f lightColor[LAMBERTIAN_NUM_LIGHTS] = { { 1.f, 1.f, 1.f }, { .8f, .1f, .1f } };
+ static const glm::vec4 lightLoc[LAMBERTIAN_NUM_LIGHTS] = { { 0, 0, +1000, 0}, { 100, -200, -500, 0 } };
+ NvCV_Status nvErr;
+
+ nvErr = ren->_ctx.makeWindowContext(width, height, windowName);
+ if (NVCV_SUCCESS != nvErr) return nvErr;
+ nvErr = ren->_ctx.init();
+ if (NVCV_SUCCESS != nvErr) return nvErr;
+ ren->_ctx.setClearColor(0.2f, 0.2f, 0.2f, 1.f);
+ ren->_ctx.setLights(lightLoc, lightColor);
+ ren->setFOV(0.f); // Default orthographic
+ return NVCV_SUCCESS;
+}
+
+NvCV_Status OpenGLMeshRenderer::setFOV(float fov) {
+ if (0 == _mesh.numVertices())
+ return NVCV_ERR_MODEL;
+ GLMesh::BoundingBox bbox;
+ _mesh.getBoundingBox(&bbox);
+ _ctrRot = bbox.center();
+ _ctrRot.y = bbox.min().y; // Assume that assets are designed with Y-up.
+ float vShift = (_mesh.numVertices() > 10000) ? 0.15f : 0.0f; // Heuristic to determine whether there is a neck
+ _ctx.setViewOfBound(bbox, glm::vec3(0.f, 0.f, -1.f), glm::vec3(0.f, +1.f, 0.f), fov, .9f, +1, vShift);
+ return NVCV_SUCCESS;
+}
+
+NvCV_Status OpenGLMeshRenderer::setCamera(MeshRenderer *han,
+ const float locPt[3], const float lookVec[3], const float upVec[3], float vfov) {
+ if (locPt || lookVec || upVec) {} // We don't accommodate these yet
+ return static_cast<OpenGLMeshRenderer*>(han)->setFOV(vfov);
+}
+
+NvCV_Status OpenGLMeshRenderer::render(MeshRenderer *han,
+ const float exprs[53], const float qrot[4], const float* /*tran*/, NvCVImage *result) {
+ OpenGLMeshRenderer *ren = static_cast<OpenGLMeshRenderer*>(han);
+ NvCV_Status nvErr;
+ glm::mat4x4 M;
+ glm::quat q; // Convert quaternion from {x,y,z,w} --> GLM's {w,x,y,z}
+
+ if (NVCV_RGBA != result->pixelFormat)
+ return NVCV_ERR_PIXELFORMAT;
+
+ if (qrot) { q.x = qrot[0]; q.y = qrot[1]; q.z = qrot[2]; q.w = qrot[3]; }
+ else { q.x = 0.0f; q.y = 0.0f; q.z = 0.0f; q.w = 1.0f; }
+
+ #ifndef TRANSLATE_POSE
+ M = glm::translate(glm::mat4x4(1.f), -ren->_ctrRot);
+ M = glm::mat4_cast(q) * M;
+ M = glm::translate(M, ren->_ctrRot);
+ #else // TRANSLATE_POSE
+ M = glm::mat4_cast(q);
+ if (tran)
+ M = glm::translate(M, *((const glm::vec3*)(trans)));
+ #endif // TRANSLATE_POSE
+
+ nvErr = DeformModel(ren->_sfma.fm, nullptr, exprs, &ren->_mesh);
+ if (NVCV_SUCCESS != nvErr) return nvErr;
+ glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
+ nvErr = ren->_ctx.renderPolyMesh(ren->_mesh, M, NULL);
+ if (NVCV_SUCCESS != nvErr) return nvErr;
+ glReadPixels(0, 0, result->width, result->height, GL_RGBA, GL_UNSIGNED_BYTE, result->pixels);
+ GLenum glErr = glGetError();
+ if (glErr)
+ return NVCV_ERR_OPENGL;
+ // GL returns an image upside-down, but we can use the NvCVImage_FlipY in the caller to flip it with no overhead
+ return NVCV_SUCCESS;
+
+}
+
+NvCV_Status OpenGLMeshRenderer::initDispatch(MeshRenderer::Dispatch *dispatch) {
+ dispatch->name = &OpenGLMeshRenderer::name;
+ dispatch->info = &OpenGLMeshRenderer::info;
+ dispatch->create = &OpenGLMeshRenderer::create;
+ dispatch->destroy = &OpenGLMeshRenderer::destroy;
+ dispatch->read = &OpenGLMeshRenderer::read;
+ dispatch->init = &OpenGLMeshRenderer::init;
+ dispatch->setCamera = &OpenGLMeshRenderer::setCamera;
+ dispatch->render = &OpenGLMeshRenderer::render;
+ return NVCV_SUCCESS;
+}
+
+NvCV_Status OpenGLMeshRenderer::unload() {
+ return NVCV_SUCCESS;
+}
diff --git a/samples/ExpressionApp/BackEndOpenGL/OpenGLMeshRenderer.h b/samples/ExpressionApp/BackEndOpenGL/OpenGLMeshRenderer.h
new file mode 100644
index 0000000..58de854
--- /dev/null
+++ b/samples/ExpressionApp/BackEndOpenGL/OpenGLMeshRenderer.h
@@ -0,0 +1,41 @@
+/*###############################################################################
+#
+# Copyright 2021 NVIDIA Corporation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy of
+# this software and associated documentation files (the "Software"), to deal in
+# the Software without restriction, including without limitation the rights to
+# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+# the Software, and to permit persons to whom the Software is furnished to do so,
+# subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+###############################################################################*/
+
+#ifndef __OPENGL_MESH_RENDERER__
+#define __OPENGL_MESH_RENDERER__
+
+#include "MeshRenderer.h"
+
+
+/// Initialize the renderer dispatch table.
+/// @param[out] dispatch the dispatch table.
+/// @return NVCV_SUCCESS if successful.
+NvCV_Status OpenGLMeshRenderer_InitDispatch(MeshRenderer::Dispatch *dispatch);
+
+/// Unload the OpenGL Mesh Renderer from memory.
+/// @note Any previously initialized dispatch tables will be invalid.
+/// @return NVCV_SUCCESS if successful.
+NvCV_Status OpenGLMeshRenderer_Unload();
+
+
+#endif // __OPENGL_MESH_RENDERER__
diff --git a/samples/ExpressionApp/BackEndOpenGL/SimpleFaceModel.h b/samples/ExpressionApp/BackEndOpenGL/SimpleFaceModel.h
new file mode 100644
index 0000000..ece8a29
--- /dev/null
+++ b/samples/ExpressionApp/BackEndOpenGL/SimpleFaceModel.h
@@ -0,0 +1,245 @@
+/*###############################################################################
+#
+# Copyright 2021 NVIDIA Corporation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy of
+# this software and associated documentation files (the "Software"), to deal in
+# the Software without restriction, including without limitation the rights to
+# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+# the Software, and to permit persons to whom the Software is furnished to do so,
+# subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+###############################################################################*/
+#ifndef __SIMPLE_FACE_MODEL__
+#define __SIMPLE_FACE_MODEL__
+
+#include <stdint.h>
+
+#include <string>
+#include <vector>
+
+#include "FaceIO.h"
+#include "nvAR_defs.h"
+
+/********************************************************************************
+ * SimpleFaceModel
+ ********************************************************************************/
+
+
+struct SimpleFaceModel {
+ std::vector<NvAR_Point3f> shapeMean;
+ std::vector<NvAR_Vector3f> shapeModes; /* shapeMean.size() * numModes */
+ std::vector<float> shapeEigenValues;
+ std::vector<NvAR_Vector3u16> triangles;
+ struct BlendShape {
+ std::string name;
+ std::vector<NvAR_Vector3f> shape;
+ };
+ std::vector<BlendShape> blendShapes;
+ struct Partition {
+ unsigned partitionIndex; ///< The index of the partition.
+ unsigned faceIndex; ///< The index of the first face in the partition.
+ unsigned numFaces; ///< The number of faces in the partition.
+ unsigned vertexIndex; ///< The index of the first topological vertex in the partition.
+ unsigned numVertexIndices; ///< The number of topological vertices in the partition.
+ int smoothingGroup; ///< Smoothing group > 0; no smoothing == 0; undefined < 0.
+ std::string name; ///< The name of the partition.
+ std::string materialName; ///< The name of the material assigned to the partition.
+ void set(unsigned partIx, unsigned firstFaceIndex, unsigned lastFaceIndex,
+ unsigned firstVertexIndex, unsigned lastVertexIndex, int smooth,
+ const char* partName = nullptr, const char* mtrlName = nullptr) {
+ partitionIndex = partIx;
+ smoothingGroup = smooth;
+ faceIndex = firstFaceIndex;
+ numFaces = lastFaceIndex - firstFaceIndex + 1;
+ vertexIndex = firstVertexIndex;
+ numVertexIndices = lastVertexIndex - firstVertexIndex + 1;
+ if (partName) name = partName;
+ if (mtrlName) materialName = mtrlName;
+ }
+ Partition(unsigned partIx, unsigned firstFaceIndex, unsigned lastFaceIndex,
+ unsigned firstVertexIndex, unsigned lastVertexIndex, int smooth,
+ const char* partName, const char* mtrlName) {
+ set(partIx, firstFaceIndex, lastFaceIndex, firstVertexIndex, lastVertexIndex, smooth, partName, mtrlName);
+ }
+ Partition() { set(0, 0, 0, 0, 0, -1, nullptr, nullptr); }
+ };
+ std::vector<Partition> partitions;
+ std::vector<uint16_t> ibugLandmarkMappings; /* 68 */
+ const unsigned short ibugRightContour[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
+ const unsigned short ibugLeftContour[8] = { 10, 11, 12, 13, 14, 15, 16, 17 };
+ std::vector<uint16_t> modelRightContour;
+ std::vector<uint16_t> modelLeftContour;
+ std::vector<uint16_t> adjacentFaces;
+ std::vector<uint16_t> adjacentVertices;
+ std::vector<uint16_t> nvlmLandmarks;
+ std::vector<uint16_t> nvlmRightContour;
+ std::vector<uint16_t> nvlmLeftContour;
+
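+ // appendMode() and setBlendShape() store shapes as per-vertex deltas from shapeMean rather than as
+ // absolute positions, so deforming the model is just an accumulation of scaled deltas onto the mean.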
+ void appendMode(const NvAR_Point3f* pts) {
+ size_t n = shapeMean.size(),
+ off = shapeModes.size();
+ shapeModes.resize(off + n);
+ float *to = shapeModes[off].vec; // Delta mode vector
+ const float *fr = &pts->x; // Mode points
+ const float *mn = &shapeMean[0].x; // Mean points
+ for (n *= 3; n--;) // 3D points
+ *to++ = *fr++ - *mn++; // Delta shape
+ }
+
+ void setBlendShape(unsigned i, const std::string& name, const NvAR_Point3f* pts) {
+ size_t n = shapeMean.size();
+ blendShapes[i].name = name;
+ blendShapes[i].shape.resize(n);
+ float *to = blendShapes[i].shape.data()->vec; // Delta mode vector
+ const float *fr = &pts->x; // Blendshape points
+ const float *mn = &shapeMean[0].x; // Mean points
+ for (n *= 3; n--;) // 3D points
+ *to++ = *fr++ - *mn++; // Delta shape
+ }
+};
+
+
+/********************************************************************************
+ * SimpleFaceModelAdapter
+ ********************************************************************************/
+
+class SimpleFaceModelAdapter : public FaceIOAdapter {
+public:
+ SimpleFaceModel fm;
+
+ uint32_t getShapeMeanSize() const override { return unsigned(fm.shapeMean.size()) * 3; }
+ uint32_t getShapeModesSize() const override { return unsigned(fm.shapeModes.size()) * 3; }
+ uint32_t getShapeNumModes() const override { return unsigned(fm.shapeModes.size() / fm.shapeMean.size()); }
+ uint32_t getShapeEigenvaluesSize()const override { return unsigned(fm.shapeEigenValues.size()); }
+ float* getShapeMean(uint32_t size) override { if (size) fm.shapeMean.resize(size / 3);
+ return &fm.shapeMean.data()->x; };
+ float* getShapeModes(uint32_t modeSize, uint32_t numModes) override {
+ if (modeSize) fm.shapeModes.resize(modeSize / 3 * numModes); return fm.shapeModes.data()->vec; }
+ float* getShapeEigenvalues(uint32_t numModes) override { if (numModes) fm.shapeEigenValues.resize(numModes);
+ return fm.shapeEigenValues.data(); }
+
+ uint32_t getColorMeanSize() const override { return 0; }
+ uint32_t getColorModesSize() const override { return 0; }
+ uint32_t getColorNumModes() const override { return 0; }
+ uint32_t getColorEigenvaluesSize() const override { return 0; }
+ float* getColorMean(uint32_t /*size*/) override { return nullptr; }
+ float* getColorModes(uint32_t /*modeSize*/, uint32_t /*numModes*/) override { return nullptr; }
+ float* getColorEigenvalues(uint32_t /*numModes*/) override { return nullptr; }
+
+ void setTriangleListSize(uint32_t size) override { fm.triangles.resize(size / 3); }
+ uint32_t getTriangleListSize() const override { return unsigned(fm.triangles.size()) * 3; }
+ uint16_t* getTriangleList(uint32_t size) override { if (size) fm.triangles.resize(size / 3);
+ return fm.triangles.data()->vec; }
+
+ void setTextureCoordinatesSize(uint32_t /*size*/) override {}
+ uint32_t getTextureCoordinatesSize() const override { return 0; }
+ float* getTextureCoordinates(uint32_t /*size*/) override { return nullptr; }
+
+ void setNumBlendShapes(uint32_t n) override { fm.blendShapes.resize(n); }
+ void setBlendShapeName(uint32_t i, const char* name) override { fm.blendShapes[i].name = name; }
+ uint32_t getNumBlendShapes() const override { return unsigned(fm.blendShapes.size()); }
+ const char* getBlendShapeName(uint32_t i) const override { return fm.blendShapes[i].name.c_str(); }
+ uint32_t getBlendShapeSize(uint32_t i) const override { return unsigned((fm.blendShapes[i].shape.size()) * 3); }
+ float* getBlendShape(uint32_t i, uint32_t size) override { if (size) fm.blendShapes[i].shape.resize(size / 3);
+ return fm.blendShapes[i].shape.data()->vec; }
+
+ void setIbugLandmarkMappingsSize(uint32_t n) override { fm.ibugLandmarkMappings.resize(n); }
+ uint32_t getIbugLandmarkMappingsSize() const override { return unsigned(fm.ibugLandmarkMappings.size()); }
+ uint16_t* getIbugLandmarkMappings(uint32_t size) override { if (size) fm.ibugLandmarkMappings.resize(size);
+ return fm.ibugLandmarkMappings.data(); }
+ void appendIbugLandmarkMapping(uint16_t i) override { fm.ibugLandmarkMappings.push_back(i); }
+ void appendIbugLandmarkMapping(uint16_t i, uint16_t j) override { fm.ibugLandmarkMappings.push_back(i);
+ fm.ibugLandmarkMappings.push_back(j); }
+
+ void setIbugRightContourSize(uint32_t /*n*/) override {}
+ uint32_t getIbugRightContourSize() const override {
+ return sizeof(fm.ibugRightContour) / sizeof(fm.ibugRightContour[0]); }
+ uint16_t* getIbugRightContour(uint32_t /*size*/) override { return const_cast<uint16_t*>(fm.ibugRightContour); }
+ void appendIbugRightContour(uint16_t /*i*/) override {}
+
+ void setIbugLeftContourSize(uint32_t /*n*/) override {}
+ uint32_t getIbugLeftContourSize() const override { return sizeof(fm.ibugLeftContour)/sizeof(fm.ibugLeftContour[0]);}
+ uint16_t* getIbugLeftContour(uint32_t /*size*/) override { return const_cast<uint16_t*>(fm.ibugLeftContour); }
+ void appendIbugLeftContour(uint16_t /*i*/) override {}
+
+ void setModelRightContourSize(uint32_t n) override { fm.modelRightContour.resize(n); }
+ uint32_t getModelRightContourSize() const override { return unsigned(fm.modelRightContour.size()); }
+ uint16_t* getModelRightContour(uint32_t size) override { if (size) fm.modelRightContour.resize(size);
+ return fm.modelRightContour.data(); }
+ void appendModelRightContour(uint16_t i) override { fm.modelRightContour.push_back(i); }
+
+ void setModelLeftContourSize(uint32_t n) override { fm.modelLeftContour.resize(n); }
+ uint32_t getModelLeftContourSize() const override { return unsigned(fm.modelLeftContour.size()); }
+ uint16_t* getModelLeftContour(uint32_t size) override { if (size) fm.modelLeftContour.resize(size);
+ return fm.modelLeftContour.data(); }
+ void appendModelLeftContour(uint16_t i) override { fm.modelLeftContour.push_back(i); }
+
+ void setAdjacentFacesSize(uint32_t n) override { fm.adjacentFaces.resize(n); }
+ uint32_t getAdjacentFacesSize() const override { return unsigned(fm.adjacentFaces.size()); }
+ uint16_t* getAdjacentFaces(uint32_t size) override { if (size) fm.adjacentFaces.resize(size);
+ return fm.adjacentFaces.data(); }
+ void appendAdjacentFace(uint16_t i) override { fm.adjacentFaces.push_back(i); }
+ void appendAdjacentFaces(uint16_t i, uint16_t j) override { fm.adjacentFaces.push_back(i);
+ fm.adjacentFaces.push_back(j); }
+
+ void setAdjacentVerticesSize(uint32_t n) override { fm.adjacentVertices.resize(n); }
+ uint32_t getAdjacentVerticesSize() const override { return unsigned(fm.adjacentVertices.size()); }
+ uint16_t* getAdjacentVertices(uint32_t size) override { if (size) fm.adjacentVertices.resize(size);
+ return fm.adjacentVertices.data(); }
+ void appendAdjacentVertex(uint16_t i) override { fm.adjacentVertices.push_back(i); }
+ void appendAdjacentVertices(uint16_t i, uint16_t j) override { fm.adjacentVertices.push_back(i);
+ fm.adjacentVertices.push_back(j); }
+
+ void setNvlmLandmarksSize(uint32_t n) override { fm.nvlmLandmarks.resize(n); }
+ uint32_t getNvlmLandmarksSize() const override { return (uint32_t)fm.nvlmLandmarks.size(); }
+ uint16_t* getNvlmLandmarks(uint32_t size) override { if (size) fm.nvlmLandmarks.resize(size);
+ return fm.nvlmLandmarks.data(); }
+ void appendNvlmLandmark(uint16_t i) override { fm.nvlmLandmarks.push_back(i); }
+
+ void setNvlmRightContourSize(uint32_t n) override { fm.nvlmRightContour.resize(n); }
+ uint32_t getNvlmRightContourSize() const override { return (uint32_t)fm.nvlmRightContour.size(); }
+ uint16_t* getNvlmRightContour(uint32_t size) override { if (size) fm.nvlmRightContour.resize(size);
+ return fm.nvlmRightContour.data(); }
+ void appendNvlmRightContour(uint16_t i) override { fm.nvlmRightContour.push_back(i); }
+
+ void setNvlmLeftContourSize(uint32_t n) override { fm.nvlmLeftContour.resize(n); }
+ uint32_t getNvlmLeftContourSize() const override { return (uint32_t)fm.nvlmLeftContour.size(); }
+ uint16_t* getNvlmLeftContour(uint32_t size) override { if (size) fm.nvlmLeftContour.resize(size);
+ return fm.nvlmLeftContour.data(); }
+ void appendNvlmLeftContour(uint16_t i) override { fm.nvlmLeftContour.push_back(i); }
+
+ void setNumPartitions(uint32_t n) override { fm.partitions.resize(n); }
+ void setPartitionName(uint32_t i, const char* name) override { fm.partitions.at(i).name = name; }
+ void setPartitionMaterialName(uint32_t i, const char* name) override { fm.partitions.at(i).materialName = name;}
+ void setPartition(uint32_t i, uint32_t faceIndex, uint32_t numFaces, uint32_t vertexIndex, uint32_t numVertices,
+ int32_t smoothingGroup) override {
+ fm.partitions.at(i).set(i, faceIndex, faceIndex + numFaces - 1, vertexIndex,
+ vertexIndex + numVertices - 1, smoothingGroup);
+ }
+ uint32_t getNumPartitions() const override { return (uint32_t)fm.partitions.size(); }
+ const char* getPartitionName(uint32_t i) const override { return fm.partitions.at(i).name.c_str(); }
+ const char* getPartitionMaterialName(uint32_t i) const override { return fm.partitions.at(i).materialName.c_str(); }
+ int16_t getPartition(uint32_t i, uint32_t* faceIndex, uint32_t* numFaces, uint32_t* vertexIndex,
+ uint32_t* numVertices, int32_t* smoothingGroup) const override {
+ const SimpleFaceModel::Partition& pt = fm.partitions.at(i);
+ if (faceIndex) *faceIndex = pt.faceIndex;
+ if (numFaces) *numFaces = pt.numFaces;
+ if (vertexIndex) *vertexIndex = pt.vertexIndex;
+ if (numVertices) *numVertices = pt.numVertexIndices;
+ if (smoothingGroup) *smoothingGroup = pt.smoothingGroup;
+ return (int16_t)pt.partitionIndex;
+ }
+};
+
+#endif // __SIMPLE_FACE_MODEL__
diff --git a/samples/ExpressionApp/CMakeLists.txt b/samples/ExpressionApp/CMakeLists.txt
new file mode 100644
index 0000000..df1177b
--- /dev/null
+++ b/samples/ExpressionApp/CMakeLists.txt
@@ -0,0 +1,97 @@
+######################
+# The Expression app #
+######################
+set(APP_SRCS
+ ExpressionApp.cpp
+ MeshRenderer.cpp MeshRenderer.h
+ DirectoryIterator.cpp DirectoryIterator.h
+ BackEndOpenGL/GLMaterial.cpp BackEndOpenGL/GLMaterial.h
+ BackEndOpenGL/GLMesh.cpp BackEndOpenGL/GLMesh.h
+ BackEndOpenGL/GLShaders.cpp BackEndOpenGL/GLShaders.h
+ BackEndOpenGL/GLSpectrum.h
+ BackEndOpenGL/SimpleFaceModel.h
+ BackEndOpenGL/OpenGLMeshRenderer.cpp BackEndOpenGL/OpenGLMeshRenderer.h
+ BackEndOpenGL/FaceIO.cpp BackEndOpenGL/FaceIO.h
+)
+if(WIN32)
+ set(APP_SRCS ${APP_SRCS} nvARProxy.cpp nvCVImageProxy.cpp)
+ find_package(OpenGL REQUIRED)
+endif(WIN32)
+
+option(ENABLE_UI "Enable UI to adjust rigging" OFF) # ON still needs some more link debugging
+
+if (${ENABLE_UI})
+ set(APP_SRCS ${APP_SRCS}
+ ExpressionAppUI.h
+ ExpressionAppUI.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/../external/Imgui/imgui-docking/imgui.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/../external/Imgui/imgui-docking/imconfig.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/../external/Imgui/imgui-docking/imgui_internal.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/../external/Imgui/imgui-docking/imstb_textedit.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/../external/Imgui/imgui-docking/imstb_rectpack.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/../external/Imgui/imgui-docking/imstb_truetype.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/../external/Imgui/imgui-docking/backends/imgui_impl_glfw.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/../external/Imgui/imgui-docking/backends/imgui_impl_opengl3.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/../external/Imgui/imgui-docking/backends/imgui_impl_opengl3_loader.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/../external/Imgui/imgui-docking/misc/cpp/imgui_stdlib.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/../external/Imgui/imgui-docking/imgui_tables.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/../external/Imgui/imgui-docking/imgui_widgets.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/../external/Imgui/imgui-docking/imgui.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/../external/Imgui/imgui-docking/imgui_draw.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/../external/Imgui/imgui-docking/backends/imgui_impl_glfw.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/../external/Imgui/imgui-docking/backends/imgui_impl_opengl3.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/../external/Imgui/imgui-docking/misc/cpp/imgui_stdlib.cpp
+ )
+endif (${ENABLE_UI})
+
+add_executable(ExpressionApp ${APP_SRCS})
+
+target_include_directories(ExpressionApp PUBLIC
+ ${CMAKE_CURRENT_SOURCE_DIR}
+ ${CMAKE_CURRENT_SOURCE_DIR}/BackEndOpenGL
+ ${CMAKE_CURRENT_SOURCE_DIR}/../utils
+ ${CMAKE_CURRENT_SOURCE_DIR}/../external/glm/include
+ ${SDK_INCLUDES_PATH}
+)
+if (${ENABLE_UI})
+ add_definitions("-D_ENABLE_UI")
+ target_include_directories(ExpressionApp PUBLIC
+ ${CMAKE_CURRENT_SOURCE_DIR}/../external/Imgui/imgui-docking
+ ${CMAKE_CURRENT_SOURCE_DIR}/../external/nlohmann/json/single_include/nlohmann
+ )
+endif (${ENABLE_UI})
+
+if(WIN32)
+ target_link_libraries(ExpressionApp PUBLIC
+ opencv346
+ glfw3
+ GLAD
+ ${OPENGL_gl_LIBRARY}
+ )
+ target_link_directories(ExpressionApp PUBLIC
+ ${CMAKE_CURRENT_SOURCE_DIR}/../external/GLAD/lib
+ ${CMAKE_CURRENT_SOURCE_DIR}/../external/GLFW/lib
+ )
+ target_include_directories(ExpressionApp PUBLIC
+ ${CMAKE_CURRENT_SOURCE_DIR}/../external/GLAD/include
+ ${CMAKE_CURRENT_SOURCE_DIR}/../external/GLFW/include
+ )
+ set(OPENCV_PATH_STR ${CMAKE_CURRENT_SOURCE_DIR}/../external/opencv/bin)
+ set(PATH_STR "PATH=%PATH%" ${OPENCV_PATH_STR})
+ set_target_properties(ExpressionApp PROPERTIES
+ FOLDER SampleApps
+ VS_DEBUGGER_ENVIRONMENT "${PATH_STR}"
+ VS_DEBUGGER_COMMAND_ARGUMENTS "${CMD_ARG_STR}" )
+elseif(UNIX)
+ #find_package(PNG REQUIRED)
+ #find_package(JPEG REQUIRED)
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pthread -fpermissive")
+ target_link_libraries(ExpressionApp PUBLIC
+ nvARPose
+ NVCVImage
+ OpenCV
+ glfw
+ OpenGL
+ dl
+ )
+endif()
diff --git a/samples/ExpressionApp/DirectoryIterator.cpp b/samples/ExpressionApp/DirectoryIterator.cpp
new file mode 100644
index 0000000..d3594c2
--- /dev/null
+++ b/samples/ExpressionApp/DirectoryIterator.cpp
@@ -0,0 +1,169 @@
+/*###############################################################################
+#
+# Copyright 2021 NVIDIA Corporation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy of
+# this software and associated documentation files (the "Software"), to deal in
+# the Software without restriction, including without limitation the rights to
+# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+# the Software, and to permit persons to whom the Software is furnished to do so,
+# subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+###############################################################################*/
+
+#include "DirectoryIterator.h"
+
+
+#ifdef _WIN32
+////////////////////////////////////////////////////////////////////////////////
+///// WINDOWS /////
+////////////////////////////////////////////////////////////////////////////////
+#include <Windows.h>
+#include <string>
+
+struct DirectoryIterator::Impl {
+ HANDLE h;
+ unsigned which;
+ bool first;
+ WIN32_FIND_DATAA data;
+};
+
+DirectoryIterator::DirectoryIterator() {
+ m_impl = new DirectoryIterator::Impl;
+ m_impl->h = nullptr;
+}
+
+DirectoryIterator::DirectoryIterator(const char *path, unsigned iterateWhat) : DirectoryIterator() {
+ init(path, iterateWhat);
+}
+
+DirectoryIterator::~DirectoryIterator() {
+ if (m_impl) {
+ if (m_impl->h) FindClose(m_impl->h);
+ delete m_impl;
+ }
+}
+
+int DirectoryIterator::init(const char *path, unsigned iterateWhat) {
+ std::string pathStar = path;
+ pathStar += "\\*";
+ if (INVALID_HANDLE_VALUE == (m_impl->h = FindFirstFileA(pathStar.c_str(), &m_impl->data))) { m_impl->h = nullptr; return -99; } /* either dir or file */
+ m_impl->which = iterateWhat ? iterateWhat : kTypeAll;
+ m_impl->first = true;
+ return 0;
+}
+
+int DirectoryIterator::next(const char **pName, unsigned *type) {
+ if (!pName) return -1;
+ while (1) {
+ if (m_impl->first) {
+ m_impl->first = false;
+ }
+ else if (!FindNextFileA(m_impl->h, &m_impl->data)) {
+ *pName = nullptr;
+ if (type) *type = 0;
+ return -99;
+ }
+ *pName = m_impl->data.cFileName;
+
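+ // Classify the entry from its attribute bits and return it only if its type was requested in
+ // m_impl->which; otherwise keep scanning.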
+ if (0 != (m_impl->data.dwFileAttributes & (
+ FILE_ATTRIBUTE_NORMAL |
+ FILE_ATTRIBUTE_ARCHIVE |
+ FILE_ATTRIBUTE_COMPRESSED |
+ FILE_ATTRIBUTE_ENCRYPTED |
+ FILE_ATTRIBUTE_HIDDEN |
+ FILE_ATTRIBUTE_INTEGRITY_STREAM |
+ FILE_ATTRIBUTE_NOT_CONTENT_INDEXED |
+ FILE_ATTRIBUTE_NO_SCRUB_DATA |
+ FILE_ATTRIBUTE_READONLY |
+ FILE_ATTRIBUTE_REPARSE_POINT |
+ FILE_ATTRIBUTE_SPARSE_FILE |
+ FILE_ATTRIBUTE_TEMPORARY
+ ))) {
+ if (m_impl->which & kTypeFile) {
+ if (type) *type = kTypeFile;
+ break;
+ }
+ }
+
+ else if (0 != (m_impl->data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)) {
+ if (m_impl->which & kTypeDirectory) {
+ if (type) *type = kTypeDirectory;
+ break;
+ }
+ }
+
+ else {
+ if (m_impl->which & kTypeSpecial) {
+ if (type) *type = kTypeSpecial;
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+
+#else /* !_WIN32 == UNIX */
+////////////////////////////////////////////////////////////////////////////////
+///// UNIX /////
+////////////////////////////////////////////////////////////////////////////////
+#include <dirent.h>
+
+struct DirectoryIterator::Impl {
+ DIR *dp;
+ unsigned which;
+};
+
+DirectoryIterator::DirectoryIterator() {
+ m_impl = new DirectoryIterator::Impl;
+ m_impl->dp = nullptr;
+}
+
+DirectoryIterator::DirectoryIterator(const char* path, unsigned iterateWhat) : DirectoryIterator() {
+ init(path, iterateWhat);
+}
+
+DirectoryIterator::~DirectoryIterator() {
+ if (m_impl) {
+ if (m_impl->dp) closedir(m_impl->dp);
+ delete m_impl;
+ }
+}
+
+int DirectoryIterator::init(const char *path, unsigned iterateWhat) {
+ if (nullptr == (m_impl->dp = opendir(path))) return -1;
+ m_impl->which = iterateWhat ? iterateWhat : kTypeAll;
+ return 0;
+}
+
+int DirectoryIterator::next(const char **pName, unsigned *type) {
+ struct dirent *entry;
+
+ if (type) *type = 0;
+ if (!pName) return -1;
+ while (nullptr != (entry = readdir(m_impl->dp))) {
+ *pName = entry->d_name;
+ switch (entry->d_type) {
+ case DT_REG: if (m_impl->which & kTypeFile) { if (type) *type = kTypeFile; return 0; } break;
+ case DT_DIR: if (m_impl->which & kTypeDirectory) { if (type) *type = kTypeDirectory; return 0; } break;
+ default: if (m_impl->which & kTypeSpecial) { if (type) *type = kTypeSpecial; return 0; } break;
+ }
+ }
+ *pName = nullptr;
+ return -99;
+}
+
+
+#endif /* UNIX */
diff --git a/samples/ExpressionApp/DirectoryIterator.h b/samples/ExpressionApp/DirectoryIterator.h
new file mode 100644
index 0000000..c07c8fc
--- /dev/null
+++ b/samples/ExpressionApp/DirectoryIterator.h
@@ -0,0 +1,68 @@
+/*###############################################################################
+#
+# Copyright 2021 NVIDIA Corporation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy of
+# this software and associated documentation files (the "Software"), to deal in
+# the Software without restriction, including without limitation the rights to
+# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+# the Software, and to permit persons to whom the Software is furnished to do so,
+# subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+###############################################################################*/
+
+#ifndef __DIRECTORY_ITERATOR_H
+#define __DIRECTORY_ITERATOR_H
+
+class DirectoryIterator {
+public:
+ enum {
+ kTypeFile = 1,
+ kTypeDirectory = 2,
+ kTypeSpecial = 4,
+ kTypeAll = (kTypeFile | kTypeDirectory | kTypeSpecial)
+ };
+
+ /// Constructor
+ DirectoryIterator();
+
+ /// Constructor
+ /// @param[in] path The path of the directory to iterate.
+ /// @param[in] iterateWhat The types of files to list.
+ DirectoryIterator(const char *path, unsigned iterateWhat);
+
+ /// Destructor
+ ~DirectoryIterator();
+
+ /// Start looking in a particular directory.
+ /// @param[in] path The path of the directory to iterate.
+ /// @param[in] iterateWhat The types of files to list.
+ /// @return 0 If successful,
+ /// -1 If path was NULL,
+ /// -99 If there are no files.
+ int init(const char *path, unsigned iterateWhat);
+
+ /// Get the next file.
+ /// @param[out] pName a place to store the name of the next file.
+ /// @param[out] type a place to store the type of the next file.
+ /// @return 0 If successful,
+ /// -1 If path was NULL,
+ /// -99 If there are no more files.
+ int next(const char **pName, unsigned *type);
+
+private:
+ struct Impl;
+ Impl *m_impl;
+};
+
+#endif // __DIRECTORY_ITERATOR_H
diff --git a/samples/ExpressionApp/ExpressionApp.cpp b/samples/ExpressionApp/ExpressionApp.cpp
new file mode 100644
index 0000000..367ffde
--- /dev/null
+++ b/samples/ExpressionApp/ExpressionApp.cpp
@@ -0,0 +1,1288 @@
+/*###############################################################################
+#
+# Copyright 2021 NVIDIA Corporation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy of
+# this software and associated documentation files (the "Software"), to deal in
+# the Software without restriction, including without limitation the rights to
+# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+# the Software, and to permit persons to whom the Software is furnished to do so,
+# subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+###############################################################################*/
+
+#include <math.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <string>
+#include <vector>
+
+#include "nvAR.h"
+#include "nvAR_defs.h"
+#include "nvCVOpenCV.h"
+#include "opencv2/opencv.hpp"
+#include "MeshRenderer.h"
+
+#ifdef _WIN32
+ #ifndef WIN32_LEAN_AND_MEAN
+ #define WIN32_LEAN_AND_MEAN
+ #endif // WIN32_LEAN_AND_MEAN
+ #include <Windows.h>
+ #define strcasecmp _stricmp
+#else
+ #include <strings.h>
+#endif // _WIN32
+
+#ifdef _ENABLE_UI
+#include "ExpressionAppUI.h"
+#endif // _ENABLE_UI
+
+#if CV_MAJOR_VERSION >= 4
+ #define CV_CAP_PROP_FPS cv::CAP_PROP_FPS
+ #define CV_CAP_PROP_FRAME_COUNT cv::CAP_PROP_FRAME_COUNT
+ #define CV_CAP_PROP_FRAME_HEIGHT cv::CAP_PROP_FRAME_HEIGHT
+ #define CV_CAP_PROP_FRAME_WIDTH cv::CAP_PROP_FRAME_WIDTH
+ #define CV_CAP_PROP_POS_FRAMES cv::CAP_PROP_POS_FRAMES
+ #define CV_INTER_AREA cv::INTER_AREA
+ #define CV_INTER_LINEAR cv::INTER_LINEAR
+#endif // CV_MAJOR_VERSION
+
+#ifndef M_PI
+ #define M_PI 3.1415926535897932385
+#endif /* M_PI */
+#ifndef M_2PI
+ #define M_2PI 6.2831853071795864769
+#endif /* M_2PI */
+#ifndef M_PI_2
+ #define M_PI_2 1.5707963267948966192
+#endif /* M_PI_2 */
+#define D_RADIANS_PER_DEGREE (M_PI / 180.)
+#define F_PI ((float)M_PI)
+#define F_PI_2 ((float)M_PI_2)
+#define F_2PI ((float)M_2PI)
+#define F_RADIANS_PER_DEGREE (float)(M_PI / 180.)
+#define CTL(x) ((x) & 0x1F)
+#define HELP_REQUESTED 411
+
+#define BAIL_IF_ERR(err) do { if ((int)(err) != 0) { goto bail; } } while(0)
+#define BAIL_IF_NULL(x, err, code) do { if ((void*)(x) == NULL) { err = code; goto bail; } } while(0)
+#define BAIL_IF_CUERR(cu, err) do { if (cudaSuccess != (cu)) { err = NvFromCuErr(cu); } } while(0)
+#define BAIL(err, code) do { err = code; goto bail; } while(0)
+
+#define DEFAULT_CODEC "avc1"
+#define DEFAULT_FACE_MODEL "face_model2.nvf"
+#define DEFAULT_RENDER_MODEL "face_model2.nvf"
+
+/********************************************************************************
+ * Command-line arguments
+ ********************************************************************************/
+
+bool
+ FLAG_debug = false,
+ FLAG_loop = false,
+ FLAG_show = false,
+ FLAG_showUI = false,
+ FLAG_verbose = false;
+std::string
+ FLAG_camRes,
+ FLAG_codec = DEFAULT_CODEC,
+ FLAG_fitModel = DEFAULT_FACE_MODEL,
+ FLAG_inFile,
+ FLAG_modelDir,
+ FLAG_outDir,
+ FLAG_outFile,
+ FLAG_renderModel = DEFAULT_RENDER_MODEL;
+int
+ FLAG_filter = NVAR_TEMPORAL_FILTER_FACE_BOX
+ | NVAR_TEMPORAL_FILTER_FACIAL_LANDMARKS
+ | NVAR_TEMPORAL_FILTER_FACE_ROTATIONAL_POSE
+ | NVAR_TEMPORAL_FILTER_FACIAL_EXPRESSIONS
+ | NVAR_TEMPORAL_FILTER_FACIAL_GAZE,
+ //| NVAR_TEMPORAL_FILTER_ENHANCE_EXPRESSIONS,
+ FLAG_viewMode = 0xF, // VIEW_MESH | VIEW_IMAGE | VIEW_PLOT | VIEW_LM
+ FLAG_exprMode = 2, // 1=mesh, 2=MLP
+ FLAG_gaze = 0;
+double
+ FLAG_fov = 0.0; // Orthographic by default
+
+
+/********************************************************************************
+ * Usage
+ ********************************************************************************/
+
+static void Usage() {
+ printf(
+ "ExpressionApp [ ...]\n"
+ "where is\n"
+ " --cam_res=[WWWx]HHH specify resolution as height or width x height\n"
+ " --codec= FOURCC code for the desired codec (default avc1)\n"
+ " --debug[=(true|false)] report debugging info (default false)\n"
+ " --expr_mode= SDK feature used for generation expressions: 1=Face3DReconstruction, 2=FaceExpressions (default 2)\n"
+ " --face_model= specify the face model to be used for fitting (default " DEFAULT_FACE_MODEL ")\n"
+ " --filter= 1: face box, 2: landmarks, 4: pose, 16: expressions, 32: gaze,\n"
+ " 256: eye and mouth closure\n"
+ " (default 55: face box, landmarks, pose, expressions, gaze; no closure)\n"
+ " --gaze= specify gaze estimation mode 0=implicit, 1=explicit (default 0)\n"
+ " --fov= field of view, in degrees; 0 implies orthographic (default 0)\n"
+ " --help print this message\n"
+ " --in= specify the input file (default webcam 0)\n"
+ " --loop[=(true|false)] play the same video repeatedly\n"
+ " --model_dir= specify the directory containing the TRT models\n"
+ " --model_path= specify the directory containing the TRT models\n"
+ " --out= specify the output file\n"
+ " --render_model= specify the face model to be used for rendering (default " DEFAULT_RENDER_MODEL ")\n"
+ " --show[=(true|false)] show the results (default false, unless --out is empty)\n"
+ " --show_ui[=(true|false)] show the expression calibration UI (default false)\n"
+ " --temporal= apply temporal filter: see --filter\n"
+ " --view_mode= 1: mesh, 2: image, 4: plot, 8: landmarks (default 15: all)\n"
+ " --verbose[=(true|false)] report interesting info (default off)\n"
+ "Keyboard commands:\n"
+ " escape - quit\n"
+ " q or Q - quit\n"
+ " m - toggle mesh display\n"
+ " n - calibrate expression weights\n"
+ " i - toggle image display\n"
+ " p - toggle plot display\n"
+ " l - toggle landmark display\n"
+ " f - toggle frame rate display\n"
+ " L or ctrl-L - toggle landmark filtering\n"
+ " N or ctrl-N - un-calibrate expression weights\n"
+ " P or ctrl-P - toggle pose filtering\n"
+ " E or ctrl-E - toggle expression filtering\n"
+ " G or ctrl-G - toggle gaze filtering\n"
+ " C or ctrl-C - toggle closure enhancement\n"
+ " 1 - expressions from mesh fitting\n"
+ " 2 - expressions from DNN\n"
+ );
+}
+
+static bool GetFlagArgVal(const char *flag, const char *arg, const char **val) {
+ if (*arg != '-')
+ return false;
+ while (*++arg == '-')
+ continue;
+ const char *s = strchr(arg, '=');
+ if (s == nullptr) {
+ if (strcmp(flag, arg))
+ return false;
+ *val = nullptr;
+ return true;
+ }
+ unsigned n = (unsigned)(s - arg);
+ if ((strlen(flag) != n) || (strncmp(flag, arg, n) != 0))
+ return false;
+ *val = s + 1;
+ return true;
+}
+
+static bool GetFlagArgVal(const char *flag, const char *arg, std::string *val) {
+ const char *valStr;
+ if (!GetFlagArgVal(flag, arg, &valStr)) return false;
+ val->assign(valStr ? valStr : "");
+ return true;
+}
+
+static bool GetFlagArgVal(const char *flag, const char *arg, bool *val) {
+ const char *valStr;
+ bool success = GetFlagArgVal(flag, arg, &valStr);
+ if (success)
+ *val = (!valStr ||
+ !strcasecmp(valStr, "true") ||
+ !strcasecmp(valStr, "on") ||
+ !strcasecmp(valStr, "yes") ||
+ !strcasecmp(valStr, "1"));
+ return success;
+}
+
+bool GetFlagArgVal(const char *flag, const char *arg, long *val) {
+ const char *valStr;
+ bool success = GetFlagArgVal(flag, arg, &valStr);
+ if (success)
+    *val = valStr ? strtol(valStr, nullptr, 0) : 1;  // a bare flag counts as 1, like the bool and double overloads
+ return success;
+}
+
+static bool GetFlagArgVal(const char *flag, const char *arg, int *val) {
+ long longVal;
+ bool success = GetFlagArgVal(flag, arg, &longVal);
+ if (success)
+ *val = (int)longVal;
+ return success;
+}
+
+bool GetFlagArgVal(const char* flag, const char* arg, double* val) {
+ const char* valStr;
+ bool success = GetFlagArgVal(flag, arg, &valStr);
+ if (success)
+ *val = valStr ? strtod(valStr, nullptr) : 1.0;
+ return success;
+}
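+// Note: the GetFlagArgVal overloads above all parse arguments of the form "--flag=value"
+// or bare "--flag"; the typed overloads then convert the value string. For example
+// (hypothetical invocations): "--fov=30" sets FLAG_fov to 30.0, while a bare "--debug"
+// sets FLAG_debug to true.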
+
+
+/********************************************************************************
+ * StringToFourcc
+ ********************************************************************************/
+
+static int StringToFourcc(const std::string &str) {
+ union chint { int i; char c[4]; };
+ chint x = {0};
+ for (int n = (str.size() < 4) ? (int)str.size() : 4; n--;)
+ x.c[n] = str[n];
+ return x.i;
+}
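+// Note: StringToFourcc packs up to four characters into an int through the union, so on a
+// little-endian host "avc1" becomes 'a' | 'v'<<8 | 'c'<<16 | '1'<<24, equivalent to
+// OpenCV's CV_FOURCC('a','v','c','1') packing expected by cv::VideoWriter.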
+
+
+/********************************************************************************
+ * ParseMyArgs
+ ********************************************************************************/
+
+static int ParseMyArgs(int argc, char **argv) {
+ // query NVAR_MODEL_DIR environment variable first before checking the command line arguments
+ const char* modelPath = getenv("NVAR_MODEL_DIR");
+ if (modelPath) {
+ FLAG_modelDir = modelPath;
+ }
+
+ int errs = 0;
+ for (--argc, ++argv; argc--; ++argv) {
+ bool help;
+ const char *arg = *argv;
+ if (arg[0] != '-') {
+ continue;
+ } else if ((arg[1] == '-') && (
+ GetFlagArgVal("cam_res", arg, &FLAG_camRes) ||
+ GetFlagArgVal("codec", arg, &FLAG_codec) ||
+ GetFlagArgVal("debug", arg, &FLAG_debug) ||
+ GetFlagArgVal("expr_mode", arg, &FLAG_exprMode) ||
+ GetFlagArgVal("face_model", arg, &FLAG_fitModel) ||
+ GetFlagArgVal("filter", arg, &FLAG_filter) ||
+ GetFlagArgVal("gaze", arg, &FLAG_gaze) ||
+ GetFlagArgVal("fov", arg, &FLAG_fov) ||
+ GetFlagArgVal("in", arg, &FLAG_inFile) ||
+ GetFlagArgVal("in_file", arg, &FLAG_inFile) ||
+ GetFlagArgVal("loop", arg, &FLAG_loop) ||
+ GetFlagArgVal("model_dir", arg, &FLAG_modelDir) ||
+ GetFlagArgVal("model_path", arg, &FLAG_modelDir) ||
+ GetFlagArgVal("out", arg, &FLAG_outFile) ||
+ GetFlagArgVal("out_file", arg, &FLAG_outFile) ||
+ GetFlagArgVal("render_model", arg, &FLAG_renderModel) ||
+ GetFlagArgVal("show", arg, &FLAG_show) ||
+ GetFlagArgVal("show_ui", arg, &FLAG_showUI) ||
+ GetFlagArgVal("temporal", arg, &FLAG_filter) ||
+ GetFlagArgVal("verbose", arg, &FLAG_verbose) ||
+ GetFlagArgVal("view_mode", arg, &FLAG_viewMode)
+ )) {
+ continue;
+ } else if (GetFlagArgVal("help", arg, &help)) {
+ Usage();
+ errs = HELP_REQUESTED;
+ } else if (arg[1] != '-') {
+ for (++arg; *arg; ++arg) {
+ if (*arg == 'v') {
+ FLAG_verbose = true;
+ } else {
+ // printf("Unknown flag: \"-%c\"\n", *arg);
+ }
+ }
+ continue;
+ } else {
+ // printf("Unknown flag: \"%s\"\n", arg);
+ }
+ }
+ return errs;
+}
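+// Note: ParseMyArgs reads the NVAR_MODEL_DIR environment variable first, so a later
+// --model_dir (or --model_path) on the command line overrides it. A typical hypothetical
+// invocation: ExpressionApp --in=input.mp4 --out=output.mp4 --show --model_dir=C:\models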
+
+enum {
+ myErrNone = 0,
+ myErrShader = -1,
+ myErrProgram = -2,
+ myErrTexture = -3,
+};
+
+
+static bool FileExists(const char* fileName) {
+ #ifdef _MSC_VER
+ DWORD attributes = GetFileAttributesA(fileName);
+ return attributes != INVALID_FILE_ATTRIBUTES;
+ #else // !MSC_VER
+ struct stat statBuf;
+ return 0 == stat(fileName, &statBuf);
+ #endif // MSC_VER
+}
+inline bool FileExists(const std::string& str) { return FileExists(str.c_str()); }
+static bool SetDirIfFileExists(const std::string& testDir, const std::string& file, std::string& resultDir) {
+ std::string dirFile = (testDir + '/' + file);
+ bool exists = FileExists(dirFile);
+ if (exists)
+ resultDir = testDir;
+ return exists;
+}
+static bool SetPathIfFileExists(const std::string& testDir, const std::string& file, std::string& path) {
+ std::string dirFile = testDir.empty() ? file : (testDir + '/' + file);
+ bool exists = FileExists(dirFile);
+ if (exists)
+ path = dirFile;
+ return exists;
+}
+
+class MyTimer {
+public:
+ MyTimer() { dt = dt.zero(); } /**< Clear the duration to 0. */
+ void start() { t0 = std::chrono::high_resolution_clock::now(); } /**< Start the timer. */
+ void pause() { dt = std::chrono::high_resolution_clock::now() - t0; } /**< Pause the timer. */
+ void resume() { t0 = std::chrono::high_resolution_clock::now() - dt; } /**< Resume the timer. */
+ void stop() { pause(); } /**< Stop the timer. */
+ double elapsedTimeFloat() const {
+    return std::chrono::duration<double>(dt).count();
+ } /**< Report the elapsed time as a float. */
+private:
+ std::chrono::high_resolution_clock::time_point t0;
+ std::chrono::high_resolution_clock::duration dt;
+};
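+// Note: MyTimer is a thin wrapper around std::chrono::high_resolution_clock; getFPS()
+// below calls stop()/elapsedTimeFloat()/start() once per frame, so the reported value is
+// the wall-clock duration of the previous frame, in seconds.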
+
+inline NvCV_Status NvFromAppErr(int appErr) { return (NvCV_Status)appErr; }
+
+
+class App {
+public:
+
+ App() {}
+ ~App() { stop(); }
+
+ NvCV_Status run();
+ NvCV_Status stop();
+ NvCV_Status setInputVideo(const std::string& file); // open immediately
+ NvCV_Status setInputCamera(int index, const std::string& resStr); // open immediately
+ NvCV_Status setOutputVideo(const std::string& file);
+ NvCV_Status set(int codec, double fps, unsigned width, unsigned height); // deferred open
+ NvCV_Status init();
+ NvCV_Status resizeDst();
+ NvCV_Status openOutputVideo(int codec, double fps, unsigned width, unsigned height);
+ NvCV_Status initFaceFit();
+ NvCV_Status initMLPExpressions();
+ NvCV_Status calibrateExpressionWeights();
+ NvCV_Status unCalibrateExpressionWeights();
+ NvCV_Status normalizeExpressionsWeights();
+ NvCV_Status toggleFaceBoxFiltering();
+ NvCV_Status toggleLandmarkFiltering();
+ NvCV_Status togglePoseFiltering();
+ NvCV_Status toggleExpressionFiltering();
+ NvCV_Status toggleGazeFiltering();
+ NvCV_Status toggleClosureEnhancement();
+ NvCV_Status overlayLandmarks(const float landmarks[126 * 2], unsigned screenHeight, NvCVImage *im);
+ void getFPS();
+ void drawFPS(cv::Mat& img);
+ void barPlotExprs();
+
+ const char *getErrorStringFromCode(NvCV_Status err);
+
+ struct Pose {
+ NvAR_Quaternion rotation;
+ NvAR_Vector3f translation;
+ float* data() { return &rotation.x; }
+ const float* data() const { return &rotation.x; }
+ };
+ CUstream _stream = 0;
+ cv::Mat _ocvSrcImg, _ocvDstImg; // _ocvSrcImg is allocated, _ocvDstImg is just a wrapper
+ cv::VideoCapture _vidIn{};
+ cv::VideoWriter _vidOut{};
+ double _frameRate;
+ int _miniX, _miniY, _renderX, _renderY, _plotX, _plotY;
+ NvAR_FaceMesh _arMesh { nullptr, 0, nullptr, 0 };
+ NvAR_FeatureHandle _featureHan{};
+ NvAR_RenderingParams _renderParams;
+ NvCVImage _srcImg, _compImg, _srcGpu, _renderImg; // wrapper, alloced, alloced, alloced
+ Pose _pose;
+ std::string _inFile, _outFile;
+  std::vector<NvAR_Rect> _outputBboxData;
+  NvAR_BBoxes _outputBboxes;
+  std::vector<float> _expressions, _expressionZeroPoint, _expressionScale, _expressionExponent, _eigenvalues,
+      _landmarkConfidence;
+  std::vector<NvAR_Point2f> _landmarks;
+  std::vector<NvAR_Vector3f> _vertices;
+  std::vector<NvAR_Vector3u16> _triangles;
+ unsigned _videoWidth, _videoHeight, _miniWidth, _miniHeight, _renderWidth, _renderHeight,
+ _plotWidth, _plotHeight, _compWidth, _compHeight, _eigenCount, _exprCount, _landmarkCount,
+ _viewMode, _exprMode, _filtering;
+ MeshRendererBroker _broker;
+ MeshRenderer *_renderer = nullptr;
+ static const char _windowTitle[], *_exprAbbr[][4], *_sfmExprAbbr[][4];
+ MyTimer _timer;
+ bool _showFPS;
+ bool _performCalibration;
+ double _frameTime;
+ float _globalExpressionParam;
+#ifdef _ENABLE_UI
+ ExpressionAppUI ui_obj_;
+#endif // _ENABLE_UI
+ enum {
+ EXPR_MODE_MESH = 1,
+ EXPR_MODE_MLP = 2,
+ };
+ enum {
+ VIEW_MESH = (1 << 0),
+ VIEW_IMAGE = (1 << 1),
+ VIEW_PLOT = (1 << 2),
+ VIEW_LM = (1 << 3)
+ };
+ enum {
+ APP_ERR_GENERAL = 1,
+ APP_ERR_OPEN,
+ APP_ERR_READ,
+ APP_ERR_WRITE,
+ APP_ERR_INIT,
+ APP_ERR_RUN,
+ APP_ERR_EFFECT,
+ APP_ERR_PARAM,
+ APP_ERR_UNIMPLEMENTED,
+ APP_ERR_MISSING,
+ APP_ERR_VIDEO,
+ APP_ERR_IMAGE_SIZE,
+ APP_ERR_NOT_FOUND,
+ APP_ERR_FACE_MODEL,
+ APP_ERR_GLFW_INIT,
+ APP_ERR_GL_INIT,
+ APP_ERR_RENDER_INIT,
+ APP_ERR_GL_RESOURCE,
+ APP_ERR_GL_GENERAL,
+ APP_ERR_FACE_FIT,
+ APP_ERR_NO_FACE,
+ APP_ERR_CANCEL,
+ APP_ERR_CAMERA,
+ APP_ERR_ARG_PARSE,
+ APP_ERR_EOF
+ };
+};
+const char App::_windowTitle[] = "Expression App";
+const char *App::_exprAbbr[][4] = {
+ { "BROW", "DOWN", "LEFT", NULL }, // 0 browDown_L
+ { "BROW", "DOWN", "RIGHT",NULL }, // 1 browDown_R
+ { "BROW", "INNR", "UP", "LEFT" }, // 2 browInnerUp_L
+ { "BROW", "INNR", "UP", "RIGHT" }, // 3 browInnerUp_R
+ { "BROW", "OUTR", "UP", "LEFT" }, // 4 browOuterUp_L
+ { "BROW", "OUTR", "UP", "RIGHT" }, // 5 browOuterUp_R
+ { "CHEE", "PUFF", "LEFT", NULL }, // 6 cheekPuff_L
+ { "CHEE", "PUFF", "RIGHT",NULL }, // 7 cheekPuff_R
+ { "CHEE", "SQNT", "LEFT", NULL }, // 8 cheekSquint_L
+ { "CHEE", "SQNT", "RIGHT",NULL }, // 9 cheekSquint_R
+ { "EYE", "BLNK", "LEFT", NULL }, // 10 eyeBlink_L
+ { "EYE", "BLNK", "RIGHT",NULL }, // 11 eyeBlink_R
+ { "EYE", "LOOK", "DOWN", "LEFT" }, // 12 eyeLookDown_L
+ { "EYE", "LOOK", "DOWN", "RIGHT" }, // 13 eyeLookDown_R
+ { "EYE", "LOOK", "IN", "LEFT" }, // 14 eyeLookIn_L
+ { "EYE", "LOOK", "IN", "RIGHT" }, // 15 eyeLookIn_R
+ { "EYE", "LOOK", "OUT", "LEFT" }, // 16 eyeLookOut_L
+ { "EYE", "LOOK", "OUT", "RIGHT" }, // 17 eyeLookOut_R
+ { "EYE", "LOOK", "UP", "LEFT" }, // 18 eyeLookUp_L
+ { "EYE", "LOOK", "UP", "RIGHT" }, // 19 eyeLookUp_R
+ { "EYE", "SQNT", "LEFT", NULL }, // 20 eyeSquint_L
+ { "EYE", "SQNT", "RIGHT",NULL }, // 21 eyeSquint_R
+ { "EYE", "WIDE", "LEFT", NULL }, // 22 eyeWide_L
+ { "EYE", "WIDE", "RIGHT",NULL }, // 23 eyeWide_R
+ { "JAW", "FWD", NULL, NULL }, // 24 jawForward
+ { "JAW", "LEFT", NULL, NULL }, // 25 jawLeft
+ { "JAW", "OPEN", NULL, NULL }, // 26 jawOpen
+ { "JAW", "RIGHT",NULL ,NULL }, // 27 jawRight
+ { "MOUT", "CLOS", NULL, NULL }, // 28 mouthClose
+ { "MOUT", "DMPL", "LEFT", NULL }, // 29 mouthDimple_L
+ { "MOUT", "DMPL", "RIGHT",NULL }, // 30 mouthDimple_R
+ { "MOUT", "FRWN", "LEFT", NULL }, // 31 mouthFrown_L
+ { "MOUT", "FRWN", "RIGHT",NULL }, // 32 mouthFrown_R
+ { "MOUT", "FUNL", NULL, NULL }, // 33 mouthFunnel
+ { "MOUT", "LEFT", NULL, NULL }, // 34 mouthLeft
+ { "MOUT", "LOWR", "DOWN", "LEFT" }, // 35 mouthLowerDown_L
+ { "MOUT", "LOWR", "DOWN", "RIGHT" }, // 36 mouthLowerDown_R
+ { "MOUT", "PRES", "LEFT", NULL }, // 37 mouthPress_L
+ { "MOUT", "PRES", "RIGHT",NULL }, // 38 mouthPress_R
+ { "MOUT", "PUKR", NULL, NULL }, // 39 mouthPucker
+ { "MOUT", "RIGHT",NULL, NULL }, // 40 mouthRight
+ { "MOUT", "ROLL", "LOWR", NULL }, // 41 mouthRollLower
+ { "MOUT", "ROLL", "UPPR", NULL }, // 41 mouthRollUpper
+ { "MOUT", "SHRG", "LOWR", NULL }, // 43 mouthShrugLower
+ { "MOUT", "SHRG", "UPPR", NULL }, // 44 mouthShrugUpper
+ { "MOUT", "SMIL", "LEFT", NULL }, // 45 mouthSmile_L
+ { "MOUT", "SMIL", "RIGHT",NULL }, // 46 mouthSmile_R
+ { "MOUT", "STRH", "LEFT", NULL }, // 47 mouthStretch_L
+ { "MOUT", "STRH", "RIGHT",NULL }, // 48 mouthStretch_R
+ { "MOUT", "UPPR", "UP", "LEFT" }, // 49 mouthUpperUp_L
+ { "MOUT", "UPPR", "UP", "RIGHT" }, // 50 mouthUpperUp_R
+ { "NOSE", "SNER", "LEFT", NULL }, // 51 noseSneer_L
+ { "NOSE", "SNER", "RIGHT",NULL }, // 52 noseSneer_R
+};
+const char *App::_sfmExprAbbr[][4] = {
+ { "ANGER", NULL, NULL, NULL },
+ { "DISGUST", NULL, NULL, NULL },
+ { "FEAR", NULL, NULL, NULL },
+ { "HAPPY", NULL, NULL, NULL },
+ { "SAD", NULL, NULL, NULL },
+ { "SURPRISE", NULL, NULL, NULL },
+};
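+// Note: _exprAbbr holds abbreviated labels for the 53 expression (blendshape) coefficients
+// produced by the SDK, while _sfmExprAbbr holds the six coefficients of the older SFM face
+// model; barPlotExprs() picks the table based on how many expressions the loaded feature
+// reports.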
+
+NvCV_Status App::setInputVideo(const std::string& file) {
+ // opencv2/vidioio.hpp
+ if (!_vidIn.open(file)) return (NvCV_Status)APP_ERR_OPEN;
+ _frameRate = _vidIn.get(CV_CAP_PROP_FPS);
+ _videoWidth = (unsigned)_vidIn.get(CV_CAP_PROP_FRAME_WIDTH);
+ _videoHeight = (unsigned)_vidIn.get(CV_CAP_PROP_FRAME_HEIGHT);
+ if (FLAG_verbose)
+ printf("Video capture resolution set to %dx%d @ %4.1f fps\n", _videoWidth, _videoHeight, _frameRate);
+ _inFile = file;
+ return NVCV_SUCCESS;
+}
+
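+// Note: setInputCamera accepts --cam_res either as "WIDTHxHEIGHT" or as a bare height, in
+// which case a 4:3 width is assumed (e.g. a hypothetical "--cam_res=720" requests 960x720);
+// the capture driver may still substitute the nearest mode it actually supports.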
+NvCV_Status App::setInputCamera(int index, const std::string& resStr) {
+ if (!_vidIn.open(index))
+ return NvFromAppErr(APP_ERR_OPEN);
+ if (!resStr.empty()) {
+ int n, width, height;
+ n = sscanf(resStr.c_str(), "%d%*[xX]%d", &width, &height);
+ switch (n) {
+ case 2:
+ break; // We have read both width and height
+ case 1:
+ height = width;
+ width = (int)(height * (4. / 3.) + .5);
+ break;
+ default:
+ height = 0;
+ width = 0;
+ break;
+ }
+ if (width) _vidIn.set(CV_CAP_PROP_FRAME_WIDTH, width);
+ if (height) _vidIn.set(CV_CAP_PROP_FRAME_HEIGHT, height);
+ _inFile = "webcam";
+ _inFile += std::to_string(index);
+ }
+ _videoWidth = (unsigned)_vidIn.get(CV_CAP_PROP_FRAME_WIDTH);
+ _videoHeight = (unsigned)_vidIn.get(CV_CAP_PROP_FRAME_HEIGHT);
+ _frameRate = _vidIn.get(CV_CAP_PROP_FPS);
+ // Rounding the frame rate is required because OpenCV does not support all frame rates when writing video
+ static const int fps_precision = 1000;
+  _frameRate = static_cast<int>((_frameRate + 0.5) * fps_precision) / static_cast<double>(fps_precision);
+ if (FLAG_verbose)
+ printf("Camera capture resolution set to %dx%d @ %4.1f fps\n", _videoWidth, _videoHeight, _frameRate);
+ return NVCV_SUCCESS;
+}
+
+NvCV_Status App::setOutputVideo(const std::string& file) {
+ _outFile = file;
+ return NVCV_SUCCESS;
+}
+
+NvCV_Status App::openOutputVideo(int codec, double fps, unsigned width, unsigned height) {
+ cv::Size sz;
+ sz.width = width;
+ sz.height = height;
+ if (_outFile.empty()) {
+ if (_inFile.size() <= 4)
+ return NvFromAppErr(APP_ERR_OPEN);
+ _outFile.assign(_inFile, 0, _inFile.size() - 4);
+ _outFile += "_out.mp4";
+ }
+ _vidOut.open(_outFile, codec, fps, sz, true);
+ return _vidOut.isOpened() ? NVCV_SUCCESS : NvFromAppErr(APP_ERR_OPEN);
+}
+
+NvCV_Status App::stop() {
+#if _ENABLE_UI
+ if(FLAG_showUI)
+ ui_obj_.cleanup();
+#endif // _ENABLE_UI
+ if (_vidOut.isOpened()) _vidOut.release();
+ if (_vidIn.isOpened()) _vidIn.release();
+ if (_featureHan) NvAR_Destroy(_featureHan);
+ if (_renderer) _renderer->destroy();
+ _renderer = nullptr;
+ _featureHan = nullptr;
+ _inFile.clear();
+ _outFile.clear();
+ return NVCV_SUCCESS;
+}
+
+
+const char *App::getErrorStringFromCode(NvCV_Status err) {
+ int intErr = (int)err;
+ if (intErr > 0) {
+ struct LUTEntry { int code; const char *string; };
+ static const struct LUTEntry lut[] {
+ { APP_ERR_GENERAL, "General application error" },
+ { APP_ERR_READ, "Read error" },
+ { APP_ERR_WRITE, "Write error" },
+ { APP_ERR_INIT, "Initialization error" },
+ { APP_ERR_RUN, "Run error" },
+ { APP_ERR_EFFECT, "Error creating an effect" },
+ { APP_ERR_PARAM, "Parameter error" },
+ { APP_ERR_UNIMPLEMENTED, "Unimplemented" },
+ { APP_ERR_MISSING, "Something is missing" },
+ { APP_ERR_VIDEO, "Video error" },
+ { APP_ERR_IMAGE_SIZE, "Image size error" },
+ { APP_ERR_NOT_FOUND, "Not found" },
+ { APP_ERR_FACE_MODEL, "Face model error" },
+ { APP_ERR_GLFW_INIT, "Error initializing GLFW" },
+ { APP_ERR_GL_INIT, "Error initializing OpenGL" },
+ { APP_ERR_RENDER_INIT, "Error initializing the renderer" },
+ { APP_ERR_GL_RESOURCE, "OpenGL resource error" },
+ { APP_ERR_GL_GENERAL, "General OpenGL error" },
+ { APP_ERR_FACE_FIT, "Face fit error" },
+ { APP_ERR_NO_FACE, "No face was found" },
+ { APP_ERR_CANCEL, "The operation has been canceled" },
+ { APP_ERR_CAMERA, "Camera error" },
+ { APP_ERR_ARG_PARSE, "Argument parsing error" },
+ };
+ for (const LUTEntry *p = lut; p != &lut[sizeof(lut) / sizeof(lut[0])]; ++p)
+ if (intErr == p->code)
+ return p->string;
+ }
+ return NvCV_GetErrorStringFromCode(err);
+}
+
+char *g_nvARSDKPath = NULL;
+
+
+NvCV_Status ResizeNvCVImage(const NvCVImage *src, NvCVImage *dst) {
+ int interpolation = ((double)dst->width * dst->height / (src->width * src->height) < 1.) ?
+ CV_INTER_AREA : CV_INTER_LINEAR;
+ cv::Mat ocvSrc, ocvDst;
+ CVWrapperForNvCVImage(src, &ocvSrc);
+ CVWrapperForNvCVImage(dst, &ocvDst);
+ cv::resize(ocvSrc, ocvDst, ocvDst.size(), 0, 0, interpolation);
+ return NVCV_SUCCESS;
+}
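+// Note: ResizeNvCVImage wraps both NvCVImages as cv::Mat headers (no copy) and picks
+// CV_INTER_AREA when shrinking and CV_INTER_LINEAR when enlarging, the usual OpenCV
+// guidance for decimation vs. interpolation quality.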
+
+
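+// Note: initFaceFit() wires up the NvAR_Feature_Face3DReconstruction feature: create the
+// handle, set config parameters (model dir, landmark count, CUDA stream, temporal filter,
+// gaze mode), NvAR_Load(), then bind the input image and every output buffer (boxes,
+// landmarks, expressions, eigenvalues, pose, mesh, rendering params) before NvAR_Run()
+// is called each frame.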
+NvCV_Status App::initFaceFit() {
+ unsigned modelLandmarks = 126, n;
+ NvCV_Status err;
+
+ // Initialize AR effect
+ if (_featureHan) {
+ if (EXPR_MODE_MESH == _exprMode) return NVCV_SUCCESS;
+ NvAR_Destroy(_featureHan);
+ _featureHan = nullptr;
+ }
+ BAIL_IF_ERR(err = NvAR_Create(NvAR_Feature_Face3DReconstruction, &_featureHan));
+ if (!FLAG_modelDir.empty())
+ BAIL_IF_ERR(err = NvAR_SetString(_featureHan, NvAR_Parameter_Config(ModelDir), FLAG_modelDir.c_str()));
+ if (!FLAG_fitModel.empty()) {
+ BAIL_IF_ERR(err = NvAR_SetString(_featureHan, NvAR_Parameter_Config(ModelName), FLAG_fitModel.c_str()));
+ if (FLAG_fitModel[FLAG_fitModel.size() - 5] == '0') // face model 0 has 68 landmarks, the others have 126
+ modelLandmarks = 68;
+ }
+ BAIL_IF_ERR(err = NvAR_SetU32(_featureHan, NvAR_Parameter_Config(Landmarks_Size), modelLandmarks));
+ BAIL_IF_ERR(err = NvAR_SetCudaStream(_featureHan, NvAR_Parameter_Config(CUDAStream), _stream));
+ BAIL_IF_ERR(err = NvAR_SetU32(_featureHan, NvAR_Parameter_Config(Temporal), _filtering));
+ BAIL_IF_ERR(err = NvAR_SetU32(_featureHan, NvAR_Parameter_Config(GazeMode), FLAG_gaze));
+ BAIL_IF_ERR(err = NvAR_Load(_featureHan));
+ BAIL_IF_ERR(err = NvAR_GetU32(_featureHan, NvAR_Parameter_Config(ShapeEigenValueCount), &_eigenCount));
+ _eigenvalues.resize(_eigenCount);
+ _outputBboxData.assign(25, { 0.f, 0.f, 0.f, 0.f });
+ _outputBboxes.boxes = _outputBboxData.data();
+ _outputBboxes.max_boxes = (uint8_t)_outputBboxData.size();
+ _outputBboxes.num_boxes = 0;
+ BAIL_IF_ERR(err = NvAR_SetObject(_featureHan, NvAR_Parameter_Output(BoundingBoxes), &_outputBboxes, sizeof(NvAR_BBoxes)));
+ BAIL_IF_ERR(err = NvAR_GetU32(_featureHan, NvAR_Parameter_Config(Landmarks_Size), &_landmarkCount));
+ _landmarks.resize(_landmarkCount);
+ BAIL_IF_ERR(err = NvAR_SetObject(_featureHan, NvAR_Parameter_Output(Landmarks), _landmarks.data(), sizeof(NvAR_Point2f)));
+ _landmarkConfidence.resize(_landmarkCount);
+ BAIL_IF_ERR(err = NvAR_SetF32Array(_featureHan, NvAR_Parameter_Output(LandmarksConfidence), _landmarkConfidence.data(), _landmarkCount));
+ BAIL_IF_ERR(err = NvAR_GetU32(_featureHan, NvAR_Parameter_Config(ExpressionCount), &_exprCount));
+ _expressions.resize(_exprCount, 0.0f);
+ _expressionZeroPoint.resize(_exprCount, 0.0f);
+ _expressionScale.resize(_exprCount, 1.0f);
+ _expressionExponent.resize(_exprCount, 1.0f);
+ BAIL_IF_ERR(err = NvAR_SetF32Array(_featureHan, NvAR_Parameter_Output(ExpressionCoefficients), _expressions.data(), _exprCount));
+ BAIL_IF_ERR(err = NvAR_SetF32Array(_featureHan, NvAR_Parameter_Output(ShapeEigenValues), _eigenvalues.data(), _eigenCount));
+ BAIL_IF_ERR(err = NvAR_SetObject(_featureHan, NvAR_Parameter_Input(Image), &_srcGpu, sizeof(NvCVImage)));
+ BAIL_IF_ERR(err = NvAR_SetObject(_featureHan, NvAR_Parameter_Output(Pose), &_pose, sizeof(_pose.rotation)));
+ // The following are not used, but apparently required
+ BAIL_IF_ERR(err = NvAR_GetU32(_featureHan, NvAR_Parameter_Config(VertexCount), &n));
+ _vertices.resize(_arMesh.num_vertices = n);
+ _arMesh.vertices = &_vertices[0];
+ BAIL_IF_ERR(err = NvAR_GetU32(_featureHan, NvAR_Parameter_Config(TriangleCount), &n));
+ _triangles.resize(_arMesh.num_triangles = n);
+ _arMesh.tvi = &_triangles[0];
+ BAIL_IF_ERR(err = NvAR_SetObject(_featureHan, NvAR_Parameter_Output(FaceMesh), &_arMesh, sizeof(_arMesh)));
+ BAIL_IF_ERR(err = NvAR_SetObject(_featureHan, NvAR_Parameter_Output(RenderingParams), &_renderParams, sizeof(_renderParams)));
+ _exprMode = EXPR_MODE_MESH;
+bail:
+ return err;
+}
+
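+// Note: initMLPExpressions() instead sets up the MLP-based NvAR_Feature_FaceExpressions
+// feature; it needs no face model name, always uses 126 landmarks, and outputs only boxes,
+// landmarks, expression coefficients and a rotation-only pose (no mesh fitting).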
+NvCV_Status App::initMLPExpressions() {
+ const unsigned landmarkCount = 126;
+ NvCV_Status err;
+
+ // Initialize AR effect
+ if (_featureHan) {
+ if (EXPR_MODE_MLP == _exprMode) return NVCV_SUCCESS;
+ NvAR_Destroy(_featureHan);
+ _featureHan = nullptr;
+ }
+ BAIL_IF_ERR(err = NvAR_Create(NvAR_Feature_FaceExpressions, &_featureHan));
+ if (!FLAG_modelDir.empty())
+ BAIL_IF_ERR(err = NvAR_SetString(_featureHan, NvAR_Parameter_Config(ModelDir), FLAG_modelDir.c_str()));
+ BAIL_IF_ERR(err = NvAR_SetCudaStream(_featureHan, NvAR_Parameter_Config(CUDAStream), _stream));
+ BAIL_IF_ERR(err = NvAR_SetU32(_featureHan, NvAR_Parameter_Config(Temporal), _filtering));
+ BAIL_IF_ERR(err = NvAR_Load(_featureHan));
+ _outputBboxData.assign(25, { 0.f, 0.f, 0.f, 0.f });
+ _outputBboxes.boxes = _outputBboxData.data();
+ _outputBboxes.max_boxes = (uint8_t)_outputBboxData.size();
+ _outputBboxes.num_boxes = 0;
+ BAIL_IF_ERR(err = NvAR_SetObject(_featureHan, NvAR_Parameter_Output(BoundingBoxes), &_outputBboxes, sizeof(NvAR_BBoxes)));
+ _landmarks.resize(landmarkCount);
+ BAIL_IF_ERR(err = NvAR_SetObject(_featureHan, NvAR_Parameter_Output(Landmarks), _landmarks.data(), sizeof(NvAR_Point2f)));
+ _landmarkConfidence.resize(landmarkCount);
+ BAIL_IF_ERR(err = NvAR_SetF32Array(_featureHan, NvAR_Parameter_Output(LandmarksConfidence), _landmarkConfidence.data(), landmarkCount));
+ BAIL_IF_ERR(err = NvAR_GetU32(_featureHan, NvAR_Parameter_Config(ExpressionCount), &_exprCount));
+ _expressions.resize(_exprCount);
+ _expressionZeroPoint.resize(_exprCount, 0.0f);
+ _expressionScale.resize(_exprCount, 1.0f);
+ _expressionExponent.resize(_exprCount, 1.0f);
+ BAIL_IF_ERR(err = NvAR_SetF32Array(_featureHan, NvAR_Parameter_Output(ExpressionCoefficients), _expressions.data(), _exprCount));
+ BAIL_IF_ERR(err = NvAR_SetObject(_featureHan, NvAR_Parameter_Input(Image), &_srcGpu, sizeof(NvCVImage)));
+ BAIL_IF_ERR(err = NvAR_SetObject(_featureHan, NvAR_Parameter_Output(Pose), &_pose, sizeof(NvAR_Quaternion)));
+ _exprMode = EXPR_MODE_MLP;
+
+bail:
+ return err;
+}
+
+NvCV_Status App::calibrateExpressionWeights() {
+ assert(_expressions.size() == _exprCount);
+ assert(_expressionScale.size() == _exprCount);
+ assert(_expressionZeroPoint.size() == _exprCount);
+ _expressionZeroPoint = _expressions;
+ for (size_t i = 0; i < _exprCount; i++) {
+ _expressionScale[i] = 1.0f / (1.0f - _expressionZeroPoint[i]);
+ }
+ _performCalibration = false;
+ return NVCV_SUCCESS;
+}
+
+NvCV_Status App::unCalibrateExpressionWeights() {
+ std::fill(_expressionZeroPoint.begin(), _expressionZeroPoint.end(), 0.0f);
+ std::fill(_expressionScale.begin(), _expressionScale.end(), 1.0f);
+ std::fill(_expressionExponent.begin(), _expressionExponent.end(), 1.0f);
+ return NVCV_SUCCESS;
+}
+
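+// Note: calibration captures the coefficients of a (presumed neutral) face as the zero
+// point z[i] and sets scale s[i] = 1/(1 - z[i]); normalizeExpressionsWeights() then remaps
+// each raw coefficient e to
+//   e' = 1 - (1 - max(e - z, 0) * s)^exponent
+// and blends it with the raw value using _globalExpressionParam (1.0 = fully calibrated).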
+NvCV_Status App::normalizeExpressionsWeights() {
+ assert(_expressions.size() == _exprCount);
+ assert(_expressionScale.size() == _exprCount);
+ assert(_expressionZeroPoint.size() == _exprCount);
+ for (size_t i = 0; i < _exprCount; i++) {
+ float tempExpr = _expressions[i];
+ _expressions[i] = 1.0f - (std::pow(
+ 1.0f - (std::max(_expressions[i] - _expressionZeroPoint[i], 0.0f) * _expressionScale[i]),
+ _expressionExponent[i]));
+ _expressions[i] = _globalExpressionParam * _expressions[i] + (1.0f - _globalExpressionParam) * tempExpr;
+ }
+ return NVCV_SUCCESS;
+}
+
+NvCV_Status App::init() {
+ NvCV_Status err;
+ std::string path;
+  std::vector<std::string> rendererList;
+
+ _renderHeight = 480;
+ _renderWidth = 480;
+ _miniHeight = _renderHeight;
+ _miniWidth = (unsigned)((float)_videoWidth * _renderHeight / (_videoHeight * 2) + 0.5f) * 2;
+ _compWidth = _miniWidth + _renderWidth;
+ _plotWidth = _compWidth;
+ _plotHeight = 72;
+ _compHeight = _renderHeight + _plotHeight;
+ _miniX = 0;
+ _miniY = 0;
+ _renderX = _miniWidth;
+ _renderY = 0;
+ _plotX = 0;
+ _plotY = _renderHeight;
+ _viewMode = FLAG_viewMode;
+ _showFPS = false;
+ _performCalibration = false;
+ _frameTime = 0.f;
+ _exprMode = 0;
+ _featureHan = nullptr;
+ _filtering = FLAG_filter;
+ _globalExpressionParam = 1.0f;
+
+ err = _broker.getMeshRendererList(rendererList);
+ if (NVCV_SUCCESS != err) {
+ printf("Cannot engage renderer broker: %s\n", NvCV_GetErrorStringFromCode(err));
+ return err;
+ }
+ if (FLAG_verbose) {
+ printf("Renderer list:\n");
+ for (const std::string& str : rendererList)
+ printf(" %s\n", str.c_str());
+ }
+ if (rendererList.size() < 1) {
+ printf("No renderers available to the broker\n");
+ return NVCV_ERR_FEATURENOTFOUND;
+ }
+ err = _broker.create(rendererList[0].c_str(), &_renderer);
+ if (NVCV_SUCCESS != err) {
+ printf("Cannot create the %s renderer\n", rendererList[0].c_str());
+ return NVCV_ERR_FEATURENOTFOUND;
+ }
+
+ if (!SetPathIfFileExists("", FLAG_renderModel, path) && !SetPathIfFileExists(FLAG_modelDir, FLAG_renderModel, path)) {
+ err = NVCV_ERR_FILE;
+ printf("Cannot find %s: %s\n", FLAG_renderModel.c_str(), NvCV_GetErrorStringFromCode(err));
+ return err;
+ }
+ err = _renderer->read(path.c_str());
+ if (NVCV_SUCCESS != err) {
+ printf("{\"%s\",\"%s\"}: %s\n", FLAG_modelDir.c_str(), FLAG_renderModel.c_str(), NvCV_GetErrorStringFromCode(err));
+ return err;
+ }
+ err = _renderer->init(_renderWidth, _renderHeight, _windowTitle);
+ if (NVCV_SUCCESS != err) {
+ printf("renderer init: %s\n", NvCV_GetErrorStringFromCode(err));
+ return err;
+ }
+ err = _renderer->setCamera(nullptr, nullptr, nullptr, (float)(FLAG_fov * D_RADIANS_PER_DEGREE));
+ if (NVCV_SUCCESS != err) {
+ printf("renderer setCamera: %s\n", NvCV_GetErrorStringFromCode(err));
+ return err;
+ }
+
+ BAIL_IF_ERR(err = NvCVImage_Alloc(&_srcGpu, _videoWidth, _videoHeight, NVCV_BGR, NVCV_U8, NVCV_CHUNKY, NVCV_GPU, 1));
+ BAIL_IF_ERR(err = NvCVImage_Alloc(&_compImg, _compWidth, _compHeight, NVCV_BGR, NVCV_U8, NVCV_CHUNKY, NVCV_CPU, 0));
+ BAIL_IF_ERR(err = NvCVImage_Alloc(&_renderImg, _renderWidth, _renderHeight, NVCV_RGBA, NVCV_U8, NVCV_CHUNKY, NVCV_CPU, 0));
+ CVWrapperForNvCVImage(&_compImg, &_ocvDstImg);
+ resizeDst();
+
+ if (!FLAG_outFile.empty() || !FLAG_show) {
+ err = App::openOutputVideo(StringToFourcc(FLAG_codec), _frameRate, _compWidth, _compHeight);
+ if (NVCV_SUCCESS != err) {
+ printf("ERROR: \"%s\": %s\n", _outFile.c_str(), getErrorStringFromCode(err));
+ goto bail;
+ }
+ }
+
+ // Initialize AR effect
+ switch (FLAG_exprMode) {
+ default:
+ printf("Unknown expression mode %u; using 1=mesh instead\n", FLAG_exprMode);
+ /* fall through */
+ case EXPR_MODE_MESH:
+ BAIL_IF_ERR(err = initFaceFit());
+ break;
+ case EXPR_MODE_MLP:
+ BAIL_IF_ERR(err = initMLPExpressions());
+ break;
+ }
+
+ if (FLAG_show) cv::namedWindow(_windowTitle, 1);
+
+#if _ENABLE_UI
+ if(FLAG_showUI)
+ ui_obj_.init(_exprCount, _filtering, FLAG_exprMode, _viewMode, _showFPS);
+#endif // _ENABLE_UI
+
+bail:
+ return err;
+}
+
+
+NvCV_Status App::overlayLandmarks(const float landmarks[126 * 2], unsigned screenHeight, NvCVImage* im) {
+ cv::Mat frame;
+ CVWrapperForNvCVImage(im, &frame);
+ float scale = (float)sqrtf((float)frame.rows / screenHeight);
+ if (scale < 1.f) scale = 1.f;
+ int size = int(5 * scale), thickness = int(scale);
+ const float* p = landmarks;
+ for (unsigned i = 126; i--; p += 2)
+ cv::drawMarker(frame, { (int)lround(p[0]), (int)lround(p[1]) }, CV_RGB(0, 255, 0), cv::MARKER_CROSS, size, thickness, 8);
+ return NVCV_SUCCESS;
+}
+
+
+NvCV_Status App::run() {
+ NvCV_Status err = NVCV_SUCCESS;
+ NvCVImage tmpImg, view;
+
+ for (unsigned frameCount = 0;; ++frameCount) {
+ if (!_vidIn.read(_ocvSrcImg) || _ocvSrcImg.empty()) {
+ if (!frameCount) return NvFromAppErr(APP_ERR_VIDEO); // No frames in video
+ if (!FLAG_loop) return NvFromAppErr(APP_ERR_EOF); // Video has completed
+ _vidIn.set(CV_CAP_PROP_POS_FRAMES, 0); // Rewind, because loop mode has been selected
+ --frameCount; // Account for the wasted frame
+ continue; // Read the first frame again
+ }
+ NVWrapperForCVMat(&_ocvSrcImg, &_srcImg); // We probably don't need to do this every frame
+
+#ifdef _ENABLE_UI
+ unsigned int exprMode = 0;
+ bool uncalibrate = false;
+ bool calibrate = false;
+ unsigned int filter = 0;
+ unsigned int viewMode = 0;
+ bool killApp = false;
+ if (FLAG_showUI) {
+ ui_obj_.stateQuerybyCore(viewMode, exprMode, filter, calibrate, uncalibrate, _showFPS, _globalExpressionParam, _expressionZeroPoint, _expressionScale, _expressionExponent, killApp);
+
+ if (killApp == true) {
+ return NvFromAppErr(APP_ERR_CANCEL);
+ }
+ _performCalibration = calibrate;
+
+ if (viewMode != _viewMode) {
+ _viewMode = viewMode;
+ resizeDst();
+ }
+ if (uncalibrate) {
+ unCalibrateExpressionWeights();
+ }
+ if ((filter ^ _filtering) & NVAR_TEMPORAL_FILTER_FACE_BOX) {
+ toggleFaceBoxFiltering();
+ }
+ if ((filter ^ _filtering) & NVAR_TEMPORAL_FILTER_FACIAL_LANDMARKS) {
+ toggleLandmarkFiltering();
+ }
+ if ((filter ^ _filtering) & NVAR_TEMPORAL_FILTER_FACE_ROTATIONAL_POSE) {
+ togglePoseFiltering();
+ }
+ if ((filter ^ _filtering) & NVAR_TEMPORAL_FILTER_FACIAL_EXPRESSIONS) {
+ toggleExpressionFiltering();
+ }
+ if ((filter ^ _filtering) & NVAR_TEMPORAL_FILTER_FACIAL_GAZE) {
+ toggleGazeFiltering();
+ }
+ if ((filter ^ _filtering) & NVAR_TEMPORAL_FILTER_ENHANCE_EXPRESSIONS) {
+ toggleClosureEnhancement();
+ }
+ _filtering = filter;
+
+ if (_exprMode != exprMode) {
+ if (exprMode == EXPR_MODE_MESH) {
+ initFaceFit();
+ }
+ else if (exprMode == EXPR_MODE_MLP) {
+ initMLPExpressions();
+ }
+ else {
+ // add more modes if needed
+ }
+ }
+ }
+#endif // _ENABLE_UI
+
+ BAIL_IF_ERR(err = NvCVImage_Transfer(&_srcImg, &_srcGpu, 1.f, _stream, nullptr));
+ BAIL_IF_ERR(err = NvAR_Run(_featureHan));
+ unsigned isFaceDetected = (_outputBboxes.num_boxes > 0) ? 0xFF : 0;
+ if (_performCalibration) {
+ calibrateExpressionWeights();
+ }
+ normalizeExpressionsWeights();
+ if (_viewMode & VIEW_LM & isFaceDetected) {
+ BAIL_IF_ERR(err = overlayLandmarks(&_landmarks.data()->x, _renderHeight, &_srcImg));
+ }
+ if (_viewMode & VIEW_IMAGE) {
+ NvCVImage_InitView(&view, &_compImg, _miniX, _miniY, _miniWidth, _miniHeight);
+ err = ResizeNvCVImage(&_srcImg, &view);
+ }
+ if (_viewMode & VIEW_PLOT) {
+ if (!isFaceDetected) std::fill(_expressions.begin(), _expressions.end(), 0);
+ barPlotExprs();
+ }
+ if (_viewMode & VIEW_MESH) {
+ if (isFaceDetected) {
+ BAIL_IF_ERR(err = _renderer->render(_expressions.data(), &_pose.rotation.x, nullptr, &_renderImg)); // GL _renderImg is upside down
+ NvCVImage_InitView(&view, &_compImg, _renderX, _renderY, _renderWidth, _renderHeight);
+ NvCVImage_FlipY(&view, &view); // Since OpenGL renderImg is upside-down, we copy it to a flipped dst
+ NvCVImage_Transfer(&_renderImg, &view, 1.0f, _stream, nullptr); // VFlip RGBA --> BGR
+ }
+ else {
+ cv::Mat compImgCVMat;
+ CVWrapperForNvCVImage(&_compImg, &compImgCVMat);
+ cv::rectangle(compImgCVMat, cv::Rect(_renderX, _renderY, _renderWidth, _renderHeight), cv::Scalar(0, 0, 0), -1);
+ }
+ }
+ if (_vidOut.isOpened())
+ _vidOut.write(_ocvDstImg);
+ drawFPS(_ocvDstImg);
+ if (FLAG_show && _ocvDstImg.cols && _ocvDstImg.rows) {
+ cv::imshow(_windowTitle, _ocvDstImg);
+ }
+
+ int key = cv::waitKey(1);
+ if (key >= 0 && FLAG_debug)
+ printf("Key press '%c' (%02x)\n", ((0x20 <= key && key <= 0x7f) ? key : '#'), key);
+#ifdef _ENABLE_UI
+ if (FLAG_showUI){
+ ui_obj_.stateSetbyCore(_expressions, _expressionZeroPoint, _expressionScale, _expressionExponent, (uncalibrate || calibrate), key);
+ }
+#endif // _ENABLE_UI
+ if (!FLAG_showUI) {
+ switch (key) {
+ case 27 /*ESC*/:
+ case 'q': case 'Q': return NvFromAppErr(APP_ERR_CANCEL); // Quit
+ case 'i': _viewMode ^= VIEW_IMAGE; resizeDst(); break;
+ case 'l': _viewMode ^= VIEW_LM; resizeDst(); break;
+ case 'm': _viewMode ^= VIEW_MESH; resizeDst(); break;
+ case 'n': _performCalibration = true; break;
+ case 'p': _viewMode ^= VIEW_PLOT; resizeDst(); break;
+ case 'f': _showFPS = !_showFPS; break;
+ case '1': initFaceFit(); break;
+ case '2': initMLPExpressions(); break;
+ case 'L': case CTL('L'): toggleLandmarkFiltering(); break;
+ case 'N': case CTL('N'): unCalibrateExpressionWeights(); break;
+ case 'P': case CTL('P'): togglePoseFiltering(); break;
+ case 'E': case CTL('E'): toggleExpressionFiltering(); break;
+ case 'G': case CTL('G'): toggleGazeFiltering(); break;
+ case 'C': case CTL('C'): toggleClosureEnhancement(); break;
+ default:
+ if (key < 0) continue; // No key
+ break; // Non-mapped key
+ }
+ }
+ }
+bail:
+ return err;
+}
+
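+// Note: resizeDst() recomputes the composite layout from the current view mode: the scaled
+// camera/video image on the left, the rendered mesh to its right, and the expression bar
+// plot in a strip along the bottom, then reallocates _compImg and refreshes the cv::Mat
+// wrapper to match.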
+NvCV_Status App::resizeDst() {
+ NvCV_Status err = NVCV_SUCCESS;
+ unsigned width = 0, height = 0;
+ if (_viewMode & VIEW_IMAGE) {
+ width += _miniWidth;
+ if (height < _miniHeight)
+ height = _miniHeight;
+ _miniX = 0;
+ _miniY = 0;
+ }
+ if (_viewMode & VIEW_MESH) {
+ width += _renderWidth;
+ if (height < _renderHeight)
+ height = _renderHeight;
+ _renderY = 0;
+ _renderX = (_viewMode & VIEW_IMAGE) ? _miniWidth : 0;
+ }
+ if (_viewMode & VIEW_PLOT) {
+ _plotX = 0;
+ _plotY = height;
+ _plotWidth = width;
+ height += _plotHeight;
+ }
+ BAIL_IF_ERR(err = NvCVImage_Realloc(&_compImg, width, height, _compImg.pixelFormat, _compImg.componentType,
+ NVCV_CHUNKY, NVCV_CPU, 0));
+ memset(_compImg.deletePtr, 0, _compImg.bufferBytes);
+ CVWrapperForNvCVImage(&_compImg, &_ocvDstImg);
+ cv::resizeWindow(_windowTitle, width, height);
+
+bail:
+ return err;
+}
+
+void App::barPlotExprs() {
+ int barWidth = (int)(_plotWidth / _expressions.size()),
+ barHeight = (int)_plotHeight;
+ cv::Scalar fgColor, bgColor, txColor;
+ cv::Rect r;
+ cv::Point pt;
+ std::string str;
+ const char *(*exprAbbr)[4] = (_expressions.size() > 6) ? _exprAbbr : _sfmExprAbbr;
+
+ bgColor = { 0, 0, 0, 255 };
+ r = { _plotX, _plotY, (int)_plotWidth, (int)_plotHeight };
+ cv::rectangle(_ocvDstImg, r, bgColor, cv::FILLED, cv::LINE_4, 0);
+ bgColor = { 32, 32, 32, 255 };
+ fgColor = { 0, 255, 0, 255 };
+ txColor = CV_RGB(255, 255, 0); //{ 255, 255, 0, 255 };
+ r.width = barWidth - 1;
+ for (unsigned i = unsigned(_expressions.size()); i--;) {
+ r.x = _plotX + i * barWidth;
+ r.y = _plotY;
+ r.height = _plotHeight;
+ cv::rectangle(_ocvDstImg, r, bgColor, cv::FILLED, cv::LINE_4, 0);
+ r.height = (int)(_expressions[i] * barHeight + 0.5f);
+ r.y = _plotY + _plotHeight - r.height;
+ cv::rectangle(_ocvDstImg, r, fgColor, cv::FILLED, cv::LINE_4, 0);
+ cv::putText(_ocvDstImg, std::to_string(i), cv::Point(r.x + 1, _plotY + 10),
+ cv::FONT_HERSHEY_SIMPLEX, 0.25, txColor, 1, cv::LINE_8, false);
+ for (unsigned j = 0; j < 4; ++j)
+ if (exprAbbr[i][j])
+ cv::putText(_ocvDstImg, exprAbbr[i][j], cv::Point(r.x + 1, _plotY + 20 + 10 * j),
+ cv::FONT_HERSHEY_SIMPLEX, 0.25, txColor, 1, cv::LINE_8, false);
+ }
+}
+
+void App::getFPS() {
+ const float timeConstant = 16.f;
+ _timer.stop();
+ float t = (float)_timer.elapsedTimeFloat();
+ if (t < 100.f) {
+ if (_frameTime)
+ _frameTime += (t - _frameTime) * (1.f / timeConstant); // 1 pole IIR filter
+ else
+ _frameTime = t;
+ }
+ else { // Ludicrous time interval; reset
+ _frameTime = 0.f; // WAKE UP
+ }
+ _timer.start();
+}
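+// Note: getFPS() smooths the per-frame time with a one-pole IIR filter,
+// t_smooth += (t - t_smooth) / 16, and resets itself if an interval exceeds 100 s
+// (e.g. after the process was suspended), so the FPS overlay recovers quickly.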
+
+void App::drawFPS(cv::Mat& img) {
+ getFPS();
+ if (_frameTime && _showFPS) {
+ char buf[32];
+ snprintf(buf, sizeof(buf), "%.1f", 1. / _frameTime);
+ cv::putText(img, buf, cv::Point(img.cols - 80, 30), cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(255, 255, 255), 1);
+ }
+}
+
+NvCV_Status App::toggleFaceBoxFiltering() {
+ _filtering ^= NVAR_TEMPORAL_FILTER_FACE_BOX;
+ NvCV_Status err = NvAR_SetU32(_featureHan, NvAR_Parameter_Config(Temporal), _filtering);
+ if (NVCV_SUCCESS == err)
+ err = NvAR_Load(_featureHan);
+ if (FLAG_verbose)
+ printf("FaceBox Filtering %s\n", ((_filtering & NVAR_TEMPORAL_FILTER_FACE_BOX) ? "ON" : "OFF"));
+ return err;
+}
+
+NvCV_Status App::toggleLandmarkFiltering() {
+ _filtering ^= NVAR_TEMPORAL_FILTER_FACIAL_LANDMARKS;
+ NvCV_Status err = NvAR_SetU32(_featureHan, NvAR_Parameter_Config(Temporal), _filtering);
+ if (NVCV_SUCCESS == err)
+ err = NvAR_Load(_featureHan);
+ if (FLAG_verbose)
+ printf("Landmark Filtering %s\n", ((_filtering & NVAR_TEMPORAL_FILTER_FACIAL_LANDMARKS) ? "ON" : "OFF"));
+ return err;
+}
+
+NvCV_Status App::togglePoseFiltering() {
+ _filtering ^= NVAR_TEMPORAL_FILTER_FACE_ROTATIONAL_POSE;
+ NvCV_Status err = NvAR_SetU32(_featureHan, NvAR_Parameter_Config(Temporal), _filtering);
+ if (NVCV_SUCCESS == err)
+ err = NvAR_Load(_featureHan);
+ if (FLAG_verbose)
+ printf("Pose Filtering %s\n", ((_filtering & NVAR_TEMPORAL_FILTER_FACE_ROTATIONAL_POSE) ? "ON" : "OFF"));
+ return err;
+}
+
+NvCV_Status App::toggleExpressionFiltering() {
+ _filtering ^= NVAR_TEMPORAL_FILTER_FACIAL_EXPRESSIONS;
+ NvCV_Status err = NvAR_SetU32(_featureHan, NvAR_Parameter_Config(Temporal), _filtering);
+ if (NVCV_SUCCESS == err)
+ err = NvAR_Load(_featureHan);
+ if (FLAG_verbose)
+ printf("Expression Filtering %s\n", ((_filtering & NVAR_TEMPORAL_FILTER_FACIAL_EXPRESSIONS) ? "ON" : "OFF"));
+ return err;
+}
+
+NvCV_Status App::toggleGazeFiltering() {
+ _filtering ^= NVAR_TEMPORAL_FILTER_FACIAL_GAZE;
+ NvCV_Status err = NvAR_SetU32(_featureHan, NvAR_Parameter_Config(Temporal), _filtering);
+ if (NVCV_SUCCESS == err)
+ err = NvAR_Load(_featureHan);
+ if (FLAG_verbose)
+ printf("Gaze Filtering %s\n", ((_filtering & NVAR_TEMPORAL_FILTER_FACIAL_GAZE) ? "ON" : "OFF"));
+ return err;
+}
+
+NvCV_Status App::toggleClosureEnhancement() {
+ _filtering ^= NVAR_TEMPORAL_FILTER_ENHANCE_EXPRESSIONS;
+ NvCV_Status err = NvAR_SetU32(_featureHan, NvAR_Parameter_Config(Temporal), _filtering);
+ if (NVCV_SUCCESS == err)
+ err = NvAR_Load(_featureHan);
+ if (FLAG_verbose)
+ printf("Closure Enhancement %s\n", ((_filtering & NVAR_TEMPORAL_FILTER_ENHANCE_EXPRESSIONS) ? "ON" : "OFF"));
+ return err;
+}
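+// Note: each toggle above flips one bit in _filtering, pushes the new bitmask to the
+// feature via NvAR_SetU32(..., NvAR_Parameter_Config(Temporal), ...), and then calls
+// NvAR_Load() again, presumably because config parameters are applied at load time.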
+
+
+/********************************************************************************
+ * main
+ ********************************************************************************/
+
+int main(int argc, char **argv) {
+ NvCV_Status err = NVCV_SUCCESS;
+ App app;
+ int nErrs;
+
+ if (0 != (nErrs = ParseMyArgs(argc, argv))) {
+ if (HELP_REQUESTED == nErrs) // If it was a call for help ...
+ BAIL(err, NVCV_SUCCESS); // ... just exit quietly
+ printf("ERROR: argument syntax\n");
+ BAIL(err, NVCV_ERR_PARSE);
+ }
+
+ if (FLAG_fitModel.empty())
+ FLAG_fitModel = DEFAULT_FACE_MODEL;
+ if (FLAG_renderModel.empty())
+ FLAG_renderModel = DEFAULT_RENDER_MODEL;
+ if (FLAG_modelDir.empty()) {
+ do {
+ if (SetDirIfFileExists("../data", FLAG_renderModel, FLAG_modelDir)) break;
+ if (SetDirIfFileExists("../../data", FLAG_renderModel, FLAG_modelDir)) break;
+ if (SetDirIfFileExists("../../../data", FLAG_renderModel, FLAG_modelDir)) break;
+ if (SetDirIfFileExists("data", FLAG_renderModel, FLAG_modelDir)) break;
+ printf("Please set --model_dir=/path/to/models\n");
+ BAIL(err, NVCV_ERR_MISSINGINPUT);
+ } while (0);
+ }
+
+
+ if (!FLAG_inFile.empty()) { // Input from a video file
+ err = app.setInputVideo(FLAG_inFile);
+ if (NVCV_SUCCESS != err) {
+ printf("ERROR: \"%s\": %s\n", FLAG_inFile.c_str(), app.getErrorStringFromCode(err));
+ goto bail;
+ }
+ } else { // Input from a webcam, #0
+ err = app.setInputCamera(0, FLAG_camRes);
+ if (NVCV_SUCCESS != err) {
+ printf("ERROR: cam0: %s\n", app.getErrorStringFromCode(err));
+ goto bail;
+ }
+ }
+ if (!FLAG_outFile.empty()) { // Output to a file
+ err = app.setOutputVideo(FLAG_outFile);
+ if (NVCV_SUCCESS != err) {
+ printf("ERROR: \"%s\": %s\n", FLAG_outFile.c_str(), app.getErrorStringFromCode(err));
+ goto bail;
+ }
+ }
+  else if (!FLAG_show) { // If we neither show nor write an output file, the processing would be wasted
+ printf("WARNING: Setting --show since neither --show nor --out were supplied\n");
+ FLAG_show = true;
+ }
+ if (FLAG_verbose) printf("Enabled filters = %x\n", FLAG_filter);
+ if (FLAG_verbose) printf("Enabled cnn gaze = %x\n", FLAG_gaze);
+ err = app.init();
+ BAIL_IF_ERR(err);
+
+ err = app.run();
+ switch ((int)err) {
+ case App::APP_ERR_CANCEL: // The user stopped
+ case App::APP_ERR_EOF: // The end of the file was reached.
+ err = NVCV_SUCCESS;
+ break;
+ default:
+ break;
+ }
+ BAIL_IF_ERR(err);
+
+bail:
+ if (err)
+ printf("ERROR: %s\n", app.getErrorStringFromCode(err));
+ return (int)err;
+}
diff --git a/samples/ExpressionApp/ExpressionApp.exe b/samples/ExpressionApp/ExpressionApp.exe
new file mode 100644
index 0000000..9ab42f4
Binary files /dev/null and b/samples/ExpressionApp/ExpressionApp.exe differ
diff --git a/samples/ExpressionApp/ExpressionAppUI.cpp b/samples/ExpressionApp/ExpressionAppUI.cpp
new file mode 100644
index 0000000..50a8756
--- /dev/null
+++ b/samples/ExpressionApp/ExpressionAppUI.cpp
@@ -0,0 +1,711 @@
+/*###############################################################################
+#
+# Copyright(c) 2020 NVIDIA CORPORATION. All Rights Reserved.
+#
+# NVIDIA CORPORATION and its licensors retain all intellectual property
+# and proprietary rights in and to this software, related documentation
+# and any modifications thereto. Any use, reproduction, disclosure or
+# distribution of this software and related documentation without an express
+# license agreement from NVIDIA CORPORATION is strictly prohibited.
+#
+###############################################################################*/
+
+#if _ENABLE_UI
+
+#include