diff --git a/CHANGELOG b/CHANGELOG
new file mode 100644
index 0000000..5aea672
--- /dev/null
+++ b/CHANGELOG
@@ -0,0 +1,6 @@
+Changelog (v0.7.6.2)
+ - 3D Body Pose Keypoint Tracking
+ - Improved accuracy and stability
+ - Property NvAR_Parameter_Config(NVAR_MODE) is now NvAR_Parameter_Config(Mode)
+ - Migrated to TensorRT 8.0.1.6
+ - Migrated to CUDA 11.3u1
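
The NVAR_MODE rename above is source-breaking for existing applications. A minimal migration sketch (the handle and the mode value are illustrative; both selector spellings appear elsewhere in this patch):

#include "nvAR.h"

// Minimal sketch, assuming an already-created feature handle `h`;
// only the selector name changes in v0.7.6.2.
void setPerfMode(NvAR_FeatureHandle h) {
  // Before v0.7.6.2: NvAR_SetU32(h, NvAR_Parameter_Config(NVAR_MODE), 1);
  NvAR_SetU32(h, NvAR_Parameter_Config(Mode), 1);  // 0: High Quality, 1: High Performance
}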
diff --git a/CMakeLists.txt b/CMakeLists.txt
index b232a98..154af54 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required(VERSION 3.12.0)
+cmake_minimum_required(VERSION 3.10.0)
# Set path where samples will be installed
set(CMAKE_INSTALL_PREFIX ${CMAKE_SOURCE_DIR} CACHE PATH "Path to where the samples will be installed")
@@ -8,16 +8,13 @@ project(NvAR_SDK CXX)
set(CMAKE_CONFIGURATION_TYPES "Release")
# Require C++14 and disable non-standard extensions
+
+set(CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/cmake" ${CMAKE_MODULE_PATH})
+
set(CMAKE_CXX_STANDARD 14)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS OFF)
-add_definitions(-DNOMINMAX -DWIN32_LEAN_AND_MEAN)
-
-# Set common build path for all targets
-set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR})
-set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR})
-
set(SDK_INCLUDES_PATH ${CMAKE_CURRENT_SOURCE_DIR}/nvar/include)
# Add target for nvARPose
@@ -27,10 +24,13 @@ target_include_directories(nvARPose INTERFACE ${SDK_INCLUDES_PATH})
# Add target for NVCVImage
add_library(NVCVImage INTERFACE)
target_include_directories(NVCVImage INTERFACE ${SDK_INCLUDES_PATH})
-if(UNIX)
- target_link_libraries(NVCVImage INTERFACE ${CMAKE_CURRENT_SOURCE_DIR}/bin/libNVCVImage.so)
-endif(UNIX)
+
+add_definitions(-DNOMINMAX -DWIN32_LEAN_AND_MEAN)
+
+# Set common build path for all targets
+set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR})
+set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR})
+
set(ENABLE_SAMPLES TRUE)
add_subdirectory(samples)
-
diff --git a/README.MD b/README.MD
index 740cc0b..141c330 100644
--- a/README.MD
+++ b/README.MD
@@ -18,7 +18,7 @@ The SDK has the following features:
-
+
The SDK provides two sample applications that demonstrate the features listed above in real time by using a webcam or offline videos.
@@ -28,17 +28,19 @@ The SDK provides two sample applications that demonstrate the features listed ab
NVIDIA MAXINE AR SDK is distributed in the following parts:
- This open source repository that includes the [SDK API and proxy linking source code](https://github.com/NVIDIA/MAXINE-AR-SDK/tree/master/nvar), and [sample applications and their dependency libraries](https://github.com/NVIDIA/MAXINE-AR-SDK/tree/master/samples).
-- An installer hosted on [NVIDIA Maxine developer page](https://www.nvidia.com/broadcast-sdk-resources) that installs the SDK DLLs, the models, and the SDK dependency libraries.
+- An installer hosted on [NVIDIA Maxine End-user Redistributables page](https://www.nvidia.com/broadcast-sdk-resources) that installs the SDK DLLs, the models, and the SDK dependency libraries.
-Please refer to [SDK programming guide](https://github.com/NVIDIA/MAXINE-AR-SDK/blob/master/docs/NVIDIA%20AR%20SDK%20Programming%20Guide.pdf) for configuring and integrating the SDK, compiling and running the sample applications. Please visit the [NVIDIA MAXINE AR SDK](https://developer.nvidia.com/maxine-getting-started) webpage for more information about the SDK.
+Please refer to the [SDK System Guide](https://docs.nvidia.com/deeplearning/maxine/ar-sdk-system-guide/index.html) for configuring and integrating the SDK, and for compiling and running the sample applications. Please visit the [NVIDIA MAXINE AR SDK](https://developer.nvidia.com/maxine-getting-started) webpage for more information about the SDK.
## System requirements
The SDK is supported on NVIDIA GPUs that are based on the NVIDIA® Turing™ or Ampere™ architecture and have Tensor Cores.
-* Windows OS supported: 64-bit Windows 10
-* Microsoft Visual Studio: 2015 (MSVC14.0) or later
+* Windows OS supported: 64-bit Windows 10 or later
+* Microsoft Visual Studio: 2017 (MSVC15.0) or later
* CMake: v3.12 or later
-* NVIDIA Graphics Driver for Windows: 455.57 or later
+* NVIDIA Graphics Driver for Windows: 465.89 or later
+* NVIDIA CUDA Toolkit: 11.3.1
+* NVIDIA TensorRT: 8.0.1.6
## NVIDIA MAXINE Branding Guidelines
If you integrate an NVIDIA MAXINE SDK within your product, please follow the required branding guidelines that are available [here](
@@ -66,3 +68,15 @@ The open source repository includes the source code to build the sample applicat
* In CMake, to open Visual Studio, click Open Project.
* In Visual Studio, select Build > Build Solution.
+## Documentation
+
+Please refer to the online documentation guides -
+* [NVIDIA AR SDK Programming Guide](https://docs.nvidia.com/deeplearning/maxine/ar-sdk-programming-guide/index.html)
+* [NVIDIA AR SDK System Guide](https://docs.nvidia.com/deeplearning/maxine/ar-sdk-system-guide/index.html)
+* [NvCVImage API Guide](https://docs.nvidia.com/deeplearning/maxine/nvcvimage-api-guide/index.html)
+
+PDF versions of these guides are also available at the following locations -
+* [NVIDIA AR SDK Programming Guide](https://docs.nvidia.com/deeplearning/maxine/pdf/ar-sdk-programming-guide.pdf)
+* [NVIDIA AR SDK System Guide](https://docs.nvidia.com/deeplearning/maxine/pdf/ar-sdk-system-guide.pdf)
+* [NvCVImage API Guide](https://docs.nvidia.com/deeplearning/maxine/pdf/nvcvimage-api-guide.pdf)
+
diff --git a/docs/NVIDIA AR SDK Programming Guide.pdf b/docs/NVIDIA AR SDK Programming Guide.pdf
deleted file mode 100644
index 88073a4..0000000
Binary files a/docs/NVIDIA AR SDK Programming Guide.pdf and /dev/null differ
diff --git a/nvar/include/nvAR_defs.h b/nvar/include/nvAR_defs.h
index e7eb59f..769e9be 100644
--- a/nvar/include/nvAR_defs.h
+++ b/nvar/include/nvAR_defs.h
@@ -38,7 +38,6 @@
#define NvAR_API
#endif // OS dependencies
-// TODO: Change the representation to x,y,z instead of array
typedef struct NvAR_Vector3f
{
float vec[3];
@@ -96,7 +95,8 @@ typedef struct NvAR_RenderingParams {
// Parameters provided by client application
typedef const char* NvAR_FeatureID;
-#define NvAR_Feature_FaceDetection "FaceDetection"
+#define NvAR_Feature_FaceBoxDetection "FaceBoxDetection"
+#define NvAR_Feature_FaceDetection "FaceDetection" // deprecated in favor of NvAR_Feature_FaceBoxDetection
#define NvAR_Feature_LandmarkDetection "LandmarkDetection"
#define NvAR_Feature_Face3DReconstruction "Face3DReconstruction"
#define NvAR_Feature_BodyDetection "BodyDetection"
@@ -174,6 +174,45 @@ NvAR_Parameter_Output(Pose) - OPTIONAL
NvAR_Parameter_Output(LandmarksConfidence) - OPTIONAL
NvAR_Parameter_Output(ExpressionCoefficients) - OPTIONAL
NvAR_Parameter_Output(ShapeEigenValues) - OPTIONAL
+
+*******NvAR_Feature_BodyDetection*******
+Config:
+NvAR_Parameter_Config(FeatureDescription)
+NvAR_Parameter_Config(CUDAStream)
+NvAR_Parameter_Config(TRTModelDir)
+NvAR_Parameter_Config(Temporal)
+
+Input:
+NvAR_Parameter_Input(Image)
+
+Output:
+NvAR_Parameter_Output(BoundingBoxes)
+NvAR_Parameter_Output(BoundingBoxesConfidence) - OPTIONAL
+
+*******NvAR_Feature_BodyPoseEstimation*******
+Config:
+NvAR_Parameter_Config(FeatureDescription)
+NvAR_Parameter_Config(CUDAStream)
+NvAR_Parameter_Config(ModelDir)
+NvAR_Parameter_Config(BatchSize)
+NvAR_Parameter_Config(Mode)
+NvAR_Parameter_Config(NumKeyPoints)
+NvAR_Parameter_Config(ReferencePose)
+NvAR_Parameter_Config(Temporal)
+NvAR_Parameter_Config(UseCudaGraph)
+NvAR_Parameter_Config(FocalLength)
+
+Input:
+NvAR_Parameter_Input(Image)
+NvAR_Parameter_Input(BoundingBoxes) - OPTIONAL
+
+Output:
+NvAR_Parameter_Output(BoundingBoxes) - OPTIONAL
+NvAR_Parameter_Output(BoundingBoxesConfidence) - OPTIONAL
+NvAR_Parameter_Output(KeyPoints)
+NvAR_Parameter_Output(KeyPoints3D)
+NvAR_Parameter_Output(JointAngles)
+NvAR_Parameter_Output(KeyPointsConfidence) - OPTIONAL
*/
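
The selector lists above wire up through the generic NvAR get/set calls. A minimal sketch for NvAR_Feature_BodyPoseEstimation, assuming a BGR-u8 GPU input image and batch size 1 (error checks omitted; all calls below are used by the sample code in this patch):

#include "nvAR.h"
#include "nvAR_defs.h"
#include <vector>

// Sketch only: configure, load, bind I/O, and run body pose estimation once.
NvCV_Status RunBodyPoseOnce(const char* modelDir, NvCVImage* gpuBGRImage,
                            std::vector<NvAR_Point2f>& kp2d,
                            std::vector<NvAR_Point3f>& kp3d) {
  NvAR_FeatureHandle h{};
  NvAR_Create(NvAR_Feature_BodyPoseEstimation, &h);
  NvAR_SetString(h, NvAR_Parameter_Config(ModelDir), modelDir);
  NvAR_SetU32(h, NvAR_Parameter_Config(Mode), 1);        // 0: quality, 1: performance
  NvAR_SetU32(h, NvAR_Parameter_Config(BatchSize), 1);
  NvAR_SetU32(h, NvAR_Parameter_Config(Temporal), 1);    // enable stabilization
  NvAR_Load(h);

  unsigned numKeyPoints = 0;                             // 34 for this release's model
  NvAR_GetU32(h, NvAR_Parameter_Config(NumKeyPoints), &numKeyPoints);
  kp2d.assign(numKeyPoints, {0.f, 0.f});
  kp3d.assign(numKeyPoints, {0.f, 0.f, 0.f});

  NvAR_SetObject(h, NvAR_Parameter_Input(Image), gpuBGRImage, sizeof(NvCVImage));
  NvAR_SetObject(h, NvAR_Parameter_Output(KeyPoints), kp2d.data(), sizeof(NvAR_Point2f));
  NvAR_SetObject(h, NvAR_Parameter_Output(KeyPoints3D), kp3d.data(), sizeof(NvAR_Point3f));

  NvCV_Status err = NvAR_Run(h);
  NvAR_Destroy(h);
  return err;
}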
diff --git a/nvar/include/nvCVImage.h b/nvar/include/nvCVImage.h
index c5ed9ed..014cd94 100644
--- a/nvar/include/nvCVImage.h
+++ b/nvar/include/nvCVImage.h
@@ -204,21 +204,24 @@ NvCVImage {
//! \param[in] dstY The top coordinate of the dst rectangle.
//! \param[in] width The width of the rectangle to be copied, in pixels.
//! \param[in] height The height of the rectangle to be copied, in pixels.
+ //! \param[in] stream The CUDA stream.
//! \note NvCVImage_Transfer() can handle more cases.
//! \return NVCV_SUCCESS if successful
//! \return NVCV_ERR_MISMATCH if the formats are different
//! \return NVCV_ERR_CUDA if a CUDA error occurred
//! \return NVCV_ERR_PIXELFORMAT if the pixel format is not yet accommodated.
- inline NvCV_Status copyFrom(const NvCVImage *src, int srcX, int srcY, int dstX, int dstY, unsigned width, unsigned height);
+ inline NvCV_Status copyFrom(const NvCVImage *src, int srcX, int srcY, int dstX, int dstY,
+ unsigned width, unsigned height, struct CUstream_st* stream = 0);
//! Copy from one image to another. This works for CPU->CPU, CPU->GPU, GPU->GPU, and GPU->CPU.
//! \param[in] src The source image from which to copy.
+ //! \param[in] stream The CUDA stream.
//! \note NvCVImage_Transfer() can handle more cases.
//! \return NVCV_SUCCESS if successful
//! \return NVCV_ERR_MISMATCH if the formats are different
//! \return NVCV_ERR_CUDA if a CUDA error occurred
//! \return NVCV_ERR_PIXELFORMAT if the pixel format is not yet accommodated.
- inline NvCV_Status copyFrom(const NvCVImage *src);
+ inline NvCV_Status copyFrom(const NvCVImage *src, struct CUstream_st* stream = 0);
#endif // __cplusplus
} NvCVImage;
@@ -466,6 +469,8 @@ NvCV_Status NvCV_API NvCVImage_TransferRect(
//! \param[in] tmp a staging image.
//! \return NVCV_SUCCESS if the operation was completed successfully.
//! \note The actual transfer region may be smaller, because the rects are clipped against the images.
+//! \note This is supplied for use with YUV buffers that do not have the standard structure
+//! that is expected by NvCVImage_Transfer() and NvCVImage_TransferRect().
NvCV_Status NvCV_API NvCVImage_TransferFromYUV(
const void *y, int yPixBytes, int yPitch,
const void *u, const void *v, int uvPixBytes, int uvPitch,
@@ -492,6 +497,8 @@ NvCV_Status NvCV_API NvCVImage_TransferFromYUV(
//! \param[in] tmp a staging image.
//! \return NVCV_SUCCESS if the operation was completed successfully.
//! \note The actual transfer region may be smaller, because the rects are clipped against the images.
+//! \note This is supplied for use with YUV buffers that do not have the standard structure
+//! that is expected by NvCVImage_Transfer() and NvCVImage_TransferRect().
NvCV_Status NvCV_API NvCVImage_TransferToYUV(
const NvCVImage *src, const NvCVRect2i *srcRect,
const void *y, int yPixBytes, int yPitch,
@@ -507,7 +514,9 @@ NvCV_Status NvCV_API NvCVImage_TransferToYUV(
//! \param[in,out] im the image to be mapped.
//! \param[in] stream the stream on which the mapping is to be performed.
//! \return NVCV_SUCCESS if the operation was completed successfully.
-NvCV_Status NvCV_API NvCVImage_MapResource(NvCVImage *im, struct CUstream_st *stream);
+//! \note This is an experimental API. If you find it useful, please respond to XXX@YYY.com,
+//! otherwise we may drop support.
+/* EXPERIMENTAL */ NvCV_Status NvCV_API NvCVImage_MapResource(NvCVImage *im, struct CUstream_st *stream);
//! After transfer by CUDA, the texture resource must be unmapped in order to be used by the graphics system again.
@@ -516,7 +525,9 @@ NvCV_Status NvCV_API NvCVImage_MapResource(NvCVImage *im, struct CUstream_st *st
//! \param[in,out] im the image to be mapped.
//! \param[in] stream the CUDA stream on which the mapping is to be performed.
//! \return NVCV_SUCCESS if the operation was completed successfully.
-NvCV_Status NvCV_API NvCVImage_UnmapResource(NvCVImage *im, struct CUstream_st *stream);
+//! \note This is an experimental API. If you find it useful, please respond to XXX@YYY.com,
+//! otherwise we may drop support.
+/* EXPERIMENTAL */ NvCV_Status NvCV_API NvCVImage_UnmapResource(NvCVImage *im, struct CUstream_st *stream);
//! Composite one source image over another using the given matte.
@@ -530,6 +541,7 @@ NvCV_Status NvCV_API NvCVImage_UnmapResource(NvCVImage *im, struct CUstream_st *
//! \return NVCV_ERR_PIXELFORMAT if the pixel format is not accommodated.
//! \return NVCV_ERR_MISMATCH if either the fg & bg & dst formats do not match, or if fg & bg & dst & mat are not
//! in the same address space (CPU or GPU).
+//! \bug Though RGBA destinations are accommodated, the A channel is not updated at all.
#if RTX_CAMERA_IMAGE == 0
NvCV_Status NvCV_API NvCVImage_Composite(const NvCVImage *fg, const NvCVImage *bg, const NvCVImage *mat, NvCVImage *dst,
struct CUstream_st *stream);
@@ -537,8 +549,9 @@ NvCV_Status NvCV_API NvCVImage_Composite(const NvCVImage *fg, const NvCVImage *b
NvCV_Status NvCV_API NvCVImage_Composite(const NvCVImage *fg, const NvCVImage *bg, const NvCVImage *mat, NvCVImage *dst);
#endif // RTX_CAMERA_IMAGE == 1
+
//! Composite one source image over another using the given matte.
-//! Not all pixel format combinations are accommodated.
+//! This accommodates all RGB and RGBA formats, with u8 and f32 components.
//! \param[in] fg the foreground source image.
//! \param[in] fgOrg the upper-left corner of the fg image to be composited (NULL implies (0,0)).
//! \param[in] bg the background source image.
@@ -566,17 +579,27 @@ NvCV_Status NvCV_API NvCVImage_CompositeRect(
NvCVImage *dst, const NvCVPoint2i *dstOrg,
struct CUstream_st *stream);
-//! Composite a BGRu8 source image over a constant color field using the given matte.
-//! \param[in] src the source BGRu8 (or RGBu8) image.
-//! \param[in] mat the matte Yu8 (or Au8) image, indicating where the src should come through.
-//! \param[in] bgColor the desired flat background color, with the same component ordering as the src and dst.
-//! \param[in,out] dst the destination BGRu8 (or RGBu8) image. May be the same as src.
+
+//! Composite a source image over a constant color field using the given matte.
+//! \param[in] src the source image.
+//! \param[in] mat the matte image, indicating where the src should come through.
+//! \param[in] bgColor pointer to a location holding the desired flat background color, with the same format
+//! and component ordering as the dst. This acts as a 1x1 background pixel buffer,
+//! so should reside in the same memory space (CUDA or CPU) as the other buffers.
+//! \param[in,out] dst the destination image. May be the same as src.
//! \return NVCV_SUCCESS if the operation was successful.
//! \return NVCV_ERR_PIXELFORMAT if the pixel format is not accommodated.
-//! \bug This is only implemented for 3-component u8 src and dst, and 1-component mat,
-//! where all images are resident on the CPU.
+//! \return NVCV_ERR_MISMATCH if fg & mat & dst & bgColor are not in the same address space (CPU or GPU).
+//! \note The bgColor must remain valid until the operation completes; this is an important consideration,
+//! especially if the buffers are on the GPU and NvCVImage_CompositeOverConstant() runs asynchronously.
+//! \bug Though RGBA destinations are accommodated, the A channel is not updated at all.
NvCV_Status NvCV_API NvCVImage_CompositeOverConstant(
- const NvCVImage *src, const NvCVImage *mat, const unsigned char bgColor[3], NvCVImage *dst);
+#if RTX_CAMERA_IMAGE == 0
+ const NvCVImage *src, const NvCVImage *mat, const void *bgColor, NvCVImage *dst, struct CUstream_st *stream
+#else // RTX_CAMERA_IMAGE == 1
+ const NvCVImage *src, const NvCVImage *mat, const unsigned char bgColor[3], NvCVImage *dst
+#endif // RTX_CAMERA_IMAGE
+);
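
A sketch of the bgColor lifetime rule for the non-RTX build (the RTX_CAMERA_IMAGE == 0 prototype above); the green screen color and helper name are illustrative, and bgColor is allocated in the same (CUDA) address space as the images, as the note requires:

#include <cuda_runtime.h>
#include "nvCVImage.h"

// Sketch: composite GPU images over a constant color; bgColor acts as a
// 1x1 pixel buffer and must stay valid until the async composite finishes.
void CompositeOverGreen(const NvCVImage* srcGpu, const NvCVImage* matGpu,
                        NvCVImage* dstGpu, struct CUstream_st* stream) {
  const unsigned char green[3] = {0, 255, 0};   // same component order as dst
  void* bgColorGpu = nullptr;
  cudaMalloc(&bgColorGpu, sizeof(green));
  cudaMemcpy(bgColorGpu, green, sizeof(green), cudaMemcpyHostToDevice);
  NvCVImage_CompositeOverConstant(srcGpu, matGpu, bgColorGpu, dstGpu, stream);
  cudaStreamSynchronize(stream);                // keep bgColorGpu alive until done
  cudaFree(bgColorGpu);
}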
//! Flip the image vertically.
@@ -649,16 +672,16 @@ NvCVImage::~NvCVImage() { NvCVImage_Dealloc(this); }
********************************************************************************/
NvCV_Status NvCVImage::copyFrom(const NvCVImage *src, int srcX, int srcY, int dstX, int dstY, unsigned wd,
- unsigned ht) {
+ unsigned ht, struct CUstream_st* stream) {
#if RTX_CAMERA_IMAGE // This only works for chunky images
NvCVImage srcView, dstView;
  NvCVImage_InitView(&srcView, const_cast<NvCVImage*>(src), srcX, srcY, wd, ht);
NvCVImage_InitView(&dstView, this, dstX, dstY, wd, ht);
- return NvCVImage_Transfer(&srcView, &dstView, 1.f, 0, nullptr);
+ return NvCVImage_Transfer(&srcView, &dstView, 1.f, stream, nullptr);
#else // !RTX_CAMERA_IMAGE bug fix for non-chunky images
NvCVRect2i srcRect = { (int)srcX, (int)srcY, (int)wd, (int)ht };
NvCVPoint2i dstPt = { (int)dstX, (int)dstY };
- return NvCVImage_TransferRect(src, &srcRect, this, &dstPt, 1.f, 0, nullptr);
+ return NvCVImage_TransferRect(src, &srcRect, this, &dstPt, 1.f, stream, nullptr);
#endif // RTX_CAMERA_IMAGE
}
@@ -666,7 +689,9 @@ NvCV_Status NvCVImage::copyFrom(const NvCVImage *src, int srcX, int srcY, int ds
* copy image
********************************************************************************/
-NvCV_Status NvCVImage::copyFrom(const NvCVImage *src) { return NvCVImage_Transfer(src, this, 1.f, 0, nullptr); }
+NvCV_Status NvCVImage::copyFrom(const NvCVImage *src, struct CUstream_st* stream) {
+ return NvCVImage_Transfer(src, this, 1.f, stream, nullptr);
+}
#endif // __cplusplus
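
The new stream parameter defaults to 0, so existing callers keep the old behavior; a minimal usage sketch for an asynchronous GPU-to-GPU copy:

#include <cuda_runtime.h>
#include "nvCVImage.h"

// Sketch: enqueue a copy on a private stream, then wait before reading dst.
void AsyncCopy(const NvCVImage* srcGpu, NvCVImage* dstGpu) {
  cudaStream_t stream = nullptr;
  cudaStreamCreate(&stream);
  dstGpu->copyFrom(srcGpu, stream);   // returns once the transfer is enqueued
  cudaStreamSynchronize(stream);      // wait before reading dstGpu
  cudaStreamDestroy(stream);
}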
diff --git a/nvar/include/nvTransferD3D.h b/nvar/include/nvTransferD3D.h
index e914eb5..b560322 100644
--- a/nvar/include/nvTransferD3D.h
+++ b/nvar/include/nvTransferD3D.h
@@ -32,7 +32,8 @@ extern "C" {
//! \param[in] layout the layout.
//! \param[out] d3dFormat a place to store the corresponding D3D format.
//! \return NVCV_SUCCESS if successful.
-NvCV_Status NvCV_API NvCVImage_ToD3DFormat(NvCVImage_PixelFormat format, NvCVImage_ComponentType type, unsigned layout, DXGI_FORMAT *d3dFormat);
+//! \note This is an experimental API. If you find it useful, please respond to XXX@YYY.com, otherwise we may drop support.
+/* EXPERIMENTAL */ NvCV_Status NvCV_API NvCVImage_ToD3DFormat(NvCVImage_PixelFormat format, NvCVImage_ComponentType type, unsigned layout, DXGI_FORMAT *d3dFormat);
//! Utility to determine the NvCVImage format, component type and layout from a D3D format.
@@ -41,7 +42,8 @@ NvCV_Status NvCV_API NvCVImage_ToD3DFormat(NvCVImage_PixelFormat format, NvCVIma
//! \param[out] type a place to store the NvCVImage component type.
//! \param[out] layout a place to store the NvCVImage layout.
//! \return NVCV_SUCCESS if successful.
-NvCV_Status NvCV_API NvCVImage_FromD3DFormat(DXGI_FORMAT d3dFormat, NvCVImage_PixelFormat *format, NvCVImage_ComponentType *type, unsigned char *layout);
+//! \note This is an experimental API. If you find it useful, please respond to XXX@YYY.com, otherwise we may drop support.
+/* EXPERIMENTAL */ NvCV_Status NvCV_API NvCVImage_FromD3DFormat(DXGI_FORMAT d3dFormat, NvCVImage_PixelFormat *format, NvCVImage_ComponentType *type, unsigned char *layout);
#ifdef __dxgicommon_h__
@@ -51,7 +53,8 @@ NvCV_Status NvCV_API NvCVImage_FromD3DFormat(DXGI_FORMAT d3dFormat, NvCVImage_Pi
//! \param[out] pD3dColorSpace a place to store the resultant D3D color space.
//! \return NVCV_SUCCESS if successful.
//! \return NVCV_ERR_PIXELFORMAT if there is no equivalent color space.
-NvCV_Status NvCV_API NvCVImage_ToD3DColorSpace(unsigned char nvcvColorSpace, DXGI_COLOR_SPACE_TYPE *pD3dColorSpace);
+//! \note This is an experimental API. If you find it useful, please respond to XXX@YYY.com, otherwise we may drop support.
+/* EXPERIMENTAL */ NvCV_Status NvCV_API NvCVImage_ToD3DColorSpace(unsigned char nvcvColorSpace, DXGI_COLOR_SPACE_TYPE *pD3dColorSpace);
//! Utility to determine the NvCVImage color space from the D3D color space.
@@ -59,7 +62,8 @@ NvCV_Status NvCV_API NvCVImage_ToD3DColorSpace(unsigned char nvcvColorSpace, DXG
//! \param[out] pNvcvColorSpace a place to store the resultant NvCVImage color space.
//! \return NVCV_SUCCESS if successful.
//! \return NVCV_ERR_PIXELFORMAT if there is no equivalent color space.
-NvCV_Status NvCV_API NvCVImage_FromD3DColorSpace(DXGI_COLOR_SPACE_TYPE d3dColorSpace, unsigned char *pNvcvColorSpace);
+//! \note This is an experimental API. If you find it useful, please respond to XXX@YYY.com, otherwise we may drop support.
+/* EXPERIMENTAL */ NvCV_Status NvCV_API NvCVImage_FromD3DColorSpace(DXGI_COLOR_SPACE_TYPE d3dColorSpace, unsigned char *pNvcvColorSpace);
#endif // __dxgicommon_h__
diff --git a/nvar/include/nvTransferD3D11.h b/nvar/include/nvTransferD3D11.h
index fabf067..b827ed1 100644
--- a/nvar/include/nvTransferD3D11.h
+++ b/nvar/include/nvTransferD3D11.h
@@ -26,13 +26,14 @@ extern "C" {
//! Initialize an NvCVImage from a D3D11 texture.
//! The pixelFormat and component types will be transferred over, and a cudaGraphicsResource will be registered;
//! the NvCVImage destructor will unregister the resource.
-//! This is designed to work with NvCVImage_TransferFromArray() (and eventually NvCVImage_Transfer());
-//! however it is necessary to call NvCVImage_MapResource beforehand, and NvCVImage_UnmapResource
-//! before allowing D3D to render into it.
+//! It is necessary to call NvCVImage_MapResource() after rendering in D3D and before calling NvCVImage_Transfer(),
+//! and to call NvCVImage_UnmapResource() before rendering in D3D again.
//! \param[in,out] im the image to be initialized.
//! \param[in] tx the texture to be used for initialization.
//! \return NVCV_SUCCESS if successful.
-NvCV_Status NvCV_API NvCVImage_InitFromD3D11Texture(NvCVImage *im, struct ID3D11Texture2D *tx);
+//! \note This is an experimental API. If you find it useful, please respond to XXX@YYY.com,
+//! otherwise we may drop support.
+/* EXPERIMENTAL */ NvCV_Status NvCV_API NvCVImage_InitFromD3D11Texture(NvCVImage *im, struct ID3D11Texture2D *tx);
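
A sketch of the map/transfer/unmap ordering described in the comment above, assuming a texture D3D has finished rendering and a CUDA-resident working image (all three calls are experimental APIs per the notes in this patch):

#include "nvCVImage.h"
#include "nvTransferD3D11.h"

// Sketch: read a D3D11 texture back into a CUDA NvCVImage.
NvCV_Status ReadBackTexture(ID3D11Texture2D* tx, NvCVImage* gpuWork,
                            struct CUstream_st* stream) {
  NvCVImage texImage{};                                  // wraps the texture
  NvCV_Status err = NvCVImage_InitFromD3D11Texture(&texImage, tx);
  if (err != NVCV_SUCCESS) return err;
  NvCVImage_MapResource(&texImage, stream);              // after D3D rendering
  err = NvCVImage_Transfer(&texImage, gpuWork, 1.f, stream, nullptr);
  NvCVImage_UnmapResource(&texImage, stream);            // before D3D renders again
  return err;                                            // dtor unregisters resource
}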
diff --git a/nvar/src/nvARProxy.cpp b/nvar/src/nvARProxy.cpp
index ad73ea1..38f3638 100644
--- a/nvar/src/nvARProxy.cpp
+++ b/nvar/src/nvARProxy.cpp
@@ -63,18 +63,34 @@ inline int nvFreeLibrary(HINSTANCE handle) {
HINSTANCE getNvARLib() {
TCHAR path[MAX_PATH], fullPath[MAX_PATH];
+ bool bSDKPathSet = false;
- // There can be multiple apps on the system,
- // some might include the SDK in the app package and
- // others might expect the SDK to be installed in Program Files
- GetEnvironmentVariable(TEXT("NV_AR_SDK_PATH"), path, MAX_PATH);
- if (_tcscmp(path, TEXT("USE_APP_PATH"))) {
- // App has not set environment variable to "USE_APP_PATH"
- // So pick up the SDK dll and dependencies from Program Files
- GetEnvironmentVariable(TEXT("ProgramFiles"), path, MAX_PATH);
- size_t max_len = sizeof(fullPath)/sizeof(TCHAR);
- _stprintf_s(fullPath, max_len, TEXT("%s\\NVIDIA Corporation\\NVIDIA AR SDK\\"), path);
+ extern char* g_nvARSDKPath;
+ if (g_nvARSDKPath && g_nvARSDKPath[0]) {
+#ifndef UNICODE
+ strncpy_s(fullPath, MAX_PATH, g_nvARSDKPath, MAX_PATH);
+#else
+ size_t res = 0;
+ mbstowcs_s(&res, fullPath, MAX_PATH, g_nvARSDKPath, MAX_PATH);
+#endif
SetDllDirectory(fullPath);
+ bSDKPathSet = true;
+ }
+
+ if (!bSDKPathSet) {
+
+ // There can be multiple apps on the system,
+ // some might include the SDK in the app package and
+ // others might expect the SDK to be installed in Program Files
+ GetEnvironmentVariable(TEXT("NV_AR_SDK_PATH"), path, MAX_PATH);
+ if (_tcscmp(path, TEXT("USE_APP_PATH"))) {
+ // App has not set environment variable to "USE_APP_PATH"
+ // So pick up the SDK dll and dependencies from Program Files
+ GetEnvironmentVariable(TEXT("ProgramFiles"), path, MAX_PATH);
+ size_t max_len = sizeof(fullPath) / sizeof(TCHAR);
+ _stprintf_s(fullPath, max_len, TEXT("%s\\NVIDIA Corporation\\NVIDIA AR SDK\\"), path);
+ SetDllDirectory(fullPath);
+ }
}
static const HINSTANCE NvArLib = nvLoadLibrary("nvARPose");
return NvArLib;
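
The new g_nvARSDKPath hook consumed above can be satisfied from application code; a minimal sketch with a hypothetical install location:

// Definition consumed by getNvARLib(); the path shown is hypothetical.
// Leave it null or empty to fall back to NV_AR_SDK_PATH / Program Files.
char* g_nvARSDKPath = const_cast<char*>("C:\\MyApp\\nvar_sdk\\");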
diff --git a/nvar/src/nvCVImageProxy.cpp b/nvar/src/nvCVImageProxy.cpp
index f724d7a..4f5199a 100644
--- a/nvar/src/nvCVImageProxy.cpp
+++ b/nvar/src/nvCVImageProxy.cpp
@@ -67,7 +67,11 @@ inline int nvFreeLibrary(HINSTANCE handle) {
HINSTANCE getNvCVImageLib() {
TCHAR path[MAX_PATH], tmpPath[MAX_PATH], fullPath[MAX_PATH];
static HINSTANCE nvCVImageLib = NULL;
- static bool bSDKPathSet = false;
+ static bool bSDKPathSet = false;
+ if (!bSDKPathSet) {
+ nvCVImageLib = nvLoadLibrary("NVCVImage");
+ if (nvCVImageLib) bSDKPathSet = true;
+ }
if (!bSDKPathSet) {
// There can be multiple apps on the system,
// some might include the SDK in the app package and
@@ -234,16 +238,28 @@ NvCV_Status NvCV_API NvCVImage_CompositeRect(
return funcPtr(fg, fgOrg, bg, bgOrg, mat, mode, dst, dstOrg, stream);
}
-NvCV_Status NvCV_API NvCVImage_CompositeOverConstant(const NvCVImage* src, const NvCVImage* mat,
- const unsigned char bgColor[3], NvCVImage* dst) {
+#if RTX_CAMERA_IMAGE == 0
+NvCV_Status NvCV_API NvCVImage_CompositeOverConstant(const NvCVImage *src, const NvCVImage *mat,
+ const void *bgColor, NvCVImage *dst, struct CUstream_st *stream) {
+ static const auto funcPtr =
+ (decltype(NvCVImage_CompositeOverConstant)*)nvGetProcAddress(getNvCVImageLib(), "NvCVImage_CompositeOverConstant");
+
+ if (nullptr == funcPtr) return NVCV_ERR_LIBRARY;
+ return funcPtr(src, mat, bgColor, dst, stream);
+}
+#else // RTX_CAMERA_IMAGE == 1
+NvCV_Status NvCV_API NvCVImage_CompositeOverConstant(const NvCVImage *src, const NvCVImage *mat,
+ const unsigned char bgColor[3], NvCVImage *dst) {
static const auto funcPtr =
(decltype(NvCVImage_CompositeOverConstant)*)nvGetProcAddress(getNvCVImageLib(), "NvCVImage_CompositeOverConstant");
if (nullptr == funcPtr) return NVCV_ERR_LIBRARY;
return funcPtr(src, mat, bgColor, dst);
}
+#endif // RTX_CAMERA_IMAGE
+
-NvCV_Status NvCV_API NvCVImage_FlipY(const NvCVImage* src, NvCVImage* dst) {
+NvCV_Status NvCV_API NvCVImage_FlipY(const NvCVImage *src, NvCVImage *dst) {
static const auto funcPtr = (decltype(NvCVImage_FlipY)*)nvGetProcAddress(getNvCVImageLib(), "NvCVImage_FlipY");
if (nullptr == funcPtr) return NVCV_ERR_LIBRARY;
diff --git a/resources/ar_005.png b/resources/ar_005.png
index 062422b..06c8007 100644
Binary files a/resources/ar_005.png and b/resources/ar_005.png differ
diff --git a/samples/BodyTrack/BodyEngine.cpp b/samples/BodyTrack/BodyEngine.cpp
index 7061cbf..d54994a 100644
--- a/samples/BodyTrack/BodyEngine.cpp
+++ b/samples/BodyTrack/BodyEngine.cpp
@@ -21,7 +21,7 @@
#
###############################################################################*/
#include "BodyEngine.h"
-
+#include
bool CheckResult(NvCV_Status nvErr, unsigned line) {
if (NVCV_SUCCESS == nvErr) return true;
@@ -42,7 +42,7 @@ BodyEngine::Err BodyEngine::createFeatures(const char* modelPath, unsigned int _
if (err != Err::errNone) {
printf("ERROR: An error has occured while initializing Body Detection\n");
}
- }
+ }
else if (appMode == keyPointDetection) {
err = createKeyPointDetectionFeature(modelPath, _batchSize, stream);
if (err != Err::errNone) {
@@ -104,7 +104,7 @@ BodyEngine::Err BodyEngine::createKeyPointDetectionFeature(const char* modelPath
nvErr = NvAR_SetU32(keyPointDetectHandle, NvAR_Parameter_Config(BatchSize), batchSize);
BAIL_IF_CVERR(nvErr, err, BodyEngine::Err::errParameter);
- nvErr = NvAR_SetU32(keyPointDetectHandle, NvAR_Parameter_Config(NVAR_MODE), nvARMode);
+ nvErr = NvAR_SetU32(keyPointDetectHandle, NvAR_Parameter_Config(Mode), nvARMode);
BAIL_IF_CVERR(nvErr, err, BodyEngine::Err::errParameter);
nvErr = NvAR_SetU32(keyPointDetectHandle, NvAR_Parameter_Config(Temporal), bStabilizeBody);
@@ -136,7 +136,7 @@ BodyEngine::Err BodyEngine::initFeatureIOParams() {
if (err != Err::errNone) {
printf("ERROR: An error has occured while setting input, output parmeters for Body Detection\n");
}
- }
+ }
else if (appMode == keyPointDetection) {
err = initKeyPointDetectionIOParams(&inputImageBuffer);
if (err != Err::errNone) {
@@ -182,7 +182,7 @@ BodyEngine::Err BodyEngine::initKeyPointDetectionIOParams(NvCVImage* inBuf) {
nvErr = NvAR_GetU32(keyPointDetectHandle, NvAR_Parameter_Config(NumKeyPoints), &numKeyPoints);
BAIL_IF_CVERR(nvErr, err, BodyEngine::Err::errParameter);
-
+
keypoints.assign(batchSize * numKeyPoints, {0.f, 0.f});
keypoints3D.assign(batchSize * numKeyPoints, {0.f, 0.f, 0.f});
jointAngles.assign(batchSize * numKeyPoints, {0.f, 0.f, 0.f, 1.f});
@@ -200,7 +200,7 @@ BodyEngine::Err BodyEngine::initKeyPointDetectionIOParams(NvCVImage* inBuf) {
BAIL_IF_CVERR(nvErr, err, BodyEngine::Err::errParameter);
nvErr = NvAR_SetObject(keyPointDetectHandle, NvAR_Parameter_Output(KeyPoints3D), keypoints3D.data(),
- sizeof(NvAR_Point3f));
+ sizeof(NvAR_Point3f));
BAIL_IF_CVERR(nvErr, err, BodyEngine::Err::errParameter);
nvErr = NvAR_SetObject(keyPointDetectHandle, NvAR_Parameter_Output(JointAngles), jointAngles.data(),
@@ -373,7 +373,7 @@ NvAR_Quaternion* BodyEngine::getJointAngles() { return jointAngles.data(); }
float average_confidence = 0.0f;
float* keypoints_confidence_all = getKeyPointsConfidence();
for (int i = 0; i < batchSize; i++) {
- for (int j = 0; j < numKeyPoints; j++) {
+ for (unsigned int j = 0; j < numKeyPoints; j++) {
average_confidence += keypoints_confidence_all[i * numKeyPoints + j];
}
}
@@ -381,7 +381,7 @@ NvAR_Quaternion* BodyEngine::getJointAngles() { return jointAngles.data(); }
return average_confidence;
}
-unsigned BodyEngine::findLargestBodyBox(NvAR_Rect& bodyBox, int variant) {
+unsigned BodyEngine::findLargestBodyBox(NvAR_Rect& bodyBox, int /*variant*/) {
unsigned n;
NvAR_Rect* pBodyBox;
diff --git a/samples/BodyTrack/BodyEngine.h b/samples/BodyTrack/BodyEngine.h
index a8b7618..68aa4c3 100644
--- a/samples/BodyTrack/BodyEngine.h
+++ b/samples/BodyTrack/BodyEngine.h
@@ -84,7 +84,7 @@ bool CheckResult(NvCV_Status nvErr, unsigned line);
#define BAIL_IF_ERR(err) \
do { \
- if (0!=err) { \
+ if (0 != (err)) { \
goto bail; \
} \
} while (0)
diff --git a/samples/BodyTrack/BodyTrack.cpp b/samples/BodyTrack/BodyTrack.cpp
index 01eed8d..794bf67 100644
--- a/samples/BodyTrack/BodyTrack.cpp
+++ b/samples/BodyTrack/BodyTrack.cpp
@@ -28,6 +28,8 @@
#include
#include
#include
+#include
+#include
#include "BodyEngine.h"
#include "RenderingUtils.h"
@@ -68,7 +70,7 @@ bool FLAG_debug = false, FLAG_verbose = false, FLAG_temporal = true, FLAG_captur
FLAG_offlineMode = false, FLAG_useCudaGraph = true;
std::string FLAG_outDir, FLAG_inFile, FLAG_outFile, FLAG_modelPath, FLAG_captureCodec = "avc1",
FLAG_camRes, FLAG_bodyModel;
-unsigned int FLAG_batch = 1, FLAG_appMode = 1, FLAG_mode = 1;
+unsigned int FLAG_appMode = 1, FLAG_mode = 1, FLAG_camindex = 0;
/********************************************************************************
* Usage
@@ -91,7 +93,6 @@ static void Usage() {
" --out_file= specify the output file\n"
" --out= specify the output file\n"
" --model_path= specify the directory containing the TRT models\n"
- " --batch= 1 - 8, used for batch inferencing in keypoints detector \n"
" --mode[=0|1] Model Mode. 0: High Quality, 1: High Performance\n"
" --app_mode[=(0|1)] App mode. 0: Body detection, 1: Keypoint detection "
"(Default).\n"
@@ -191,6 +192,7 @@ static int ParseMyArgs(int argc, char **argv) {
GetFlagArgVal("model_path", arg, &FLAG_modelPath) ||
GetFlagArgVal("app_mode", arg, &FLAG_appMode) ||
GetFlagArgVal("mode", arg, &FLAG_mode) ||
+ GetFlagArgVal("camindex", arg, &FLAG_camindex) ||
GetFlagArgVal("use_cuda_graph", arg, &FLAG_useCudaGraph) ||
GetFlagArgVal("temporal", arg, &FLAG_temporal))) {
continue;
@@ -250,9 +252,12 @@ std::string getCalendarTime() {
// Convert to tm to get structure holding a calendar date and time broken down into its components.
  std::tm brokenTime = *std::localtime(&currentTime);
std::ostringstream calendarTime;
- calendarTime << std::put_time(
- &brokenTime,
- "%Y-%m-%d-%H-%M-%S"); // (YYYY-MM-DD-HH-mm-ss)-----
+ // calendarTime << std::put_time(
+ // &brokenTime,
+ // "%Y-%m-%d-%H-%M-%S"); // (YYYY-MM-DD-HH-mm-ss)-----
+ char time_string[24];
+ if (0 < strftime(time_string, sizeof(time_string), "%Y-%m-%d-%H-%M-%S] ", &brokenTime))
+ calendarTime << time_string; // (YYYY-MM-DD-HH-mm-ss)-----
// Get the time since epoch 0(Thu Jan 1 00:00:00 1970) and the remainder after division is
// our milliseconds
std::chrono::milliseconds currentMilliseconds =
@@ -589,7 +594,7 @@ void DoApp::DrawKeyPointsAndEdges(const cv::Mat& src, NvAR_Point2f* keypoints, i
int right_index_knuckle = 31;
int left_thumb_tip = 32;
int right_thumb_tip = 33;
-
+
// center body
DrawKeyPointLine(frm, keypoints, pelvis, torso, kColorGreen);
DrawKeyPointLine(frm, keypoints, torso, neck, kColorGreen);
@@ -749,7 +754,7 @@ DoApp::Err DoApp::acquireBodyBoxAndKeyPoints() {
}
DoApp::Err DoApp::initCamera(const char *camRes) {
- if (cap.open(0)) {
+ if (cap.open(FLAG_camindex)) {
if (camRes) {
int n;
n = sscanf(camRes, "%d%*[xX]%d", &inputWidth, &inputHeight);
@@ -836,7 +841,7 @@ int chooseGPU() {
// If the system has multiple supported GPUs then the application
// should use CUDA driver APIs or CUDA runtime APIs to enumerate
// the GPUs and select one based on the application's requirements
-
+
//Cuda device 0
return 0;
@@ -974,11 +979,11 @@ int main(int argc, char **argv) {
// Parse the arguments
if (0 != ParseMyArgs(argc, argv)) return -100;
- DoApp app;
+ DoApp app;
DoApp::Err doErr = DoApp::Err::errNone;
app.body_ar_engine.setAppMode(BodyEngine::mode(FLAG_appMode));
-
+
app.body_ar_engine.setMode(FLAG_mode);
if (FLAG_verbose) printf("Enable temporal optimizations in detecting body and keypoints = %d\n", FLAG_temporal);
diff --git a/samples/BodyTrack/BodyTrack.exe b/samples/BodyTrack/BodyTrack.exe
index de3f7d0..6257e1e 100644
Binary files a/samples/BodyTrack/BodyTrack.exe and b/samples/BodyTrack/BodyTrack.exe differ
diff --git a/samples/BodyTrack/CMakeLists.txt b/samples/BodyTrack/CMakeLists.txt
index 8bb2db9..a023f8c 100644
--- a/samples/BodyTrack/CMakeLists.txt
+++ b/samples/BodyTrack/CMakeLists.txt
@@ -4,14 +4,12 @@ set(SOURCE_FILES BodyEngine.cpp
)
set(HEADER_FILES BodyEngine.h)
-if(MSVC)
- set(SOURCE_FILES ${SOURCE_FILES}
- ../../nvar/src/nvARProxy.cpp
- ../../nvar/src/nvCVImageProxy.cpp)
+set(SOURCE_FILES ${SOURCE_FILES}
+ ../../nvar/src/nvARProxy.cpp
+ ../../nvar/src/nvCVImageProxy.cpp)
- set(HEADER_FILES ${HEADER_FILES}
- ../utils/RenderingUtils.h)
-endif(MSVC)
+set(HEADER_FILES ${HEADER_FILES}
+ ../utils/RenderingUtils.h)
# Set Visual Studio source filters
source_group("Source Files" FILES ${SOURCE_FILES})
@@ -23,7 +21,6 @@ target_include_directories(BodyTrack PUBLIC
${SDK_INCLUDES_PATH}
)
-if(MSVC)
target_link_libraries(BodyTrack PUBLIC
opencv346
utils_sample
@@ -32,21 +29,11 @@ target_link_libraries(BodyTrack PUBLIC
set(ARSDK_PATH_STR ${CMAKE_CURRENT_SOURCE_DIR}/../../bin)
set(OPENCV_PATH_STR ${CMAKE_CURRENT_SOURCE_DIR}/../external/opencv/bin)
set(PATH_STR "PATH=%PATH%" ${OPENCV_PATH_STR})
-set(CMD_ARG_STR "--model_path=\"${CMAKE_CURRENT_SOURCE_DIR}/../../bin/models\"")
- set_target_properties(BodyTrack PROPERTIES
- FOLDER SampleApps
- VS_DEBUGGER_ENVIRONMENT "${PATH_STR}"
- VS_DEBUGGER_COMMAND_ARGUMENTS "${CMD_ARG_STR}"
- )
-elseif(UNIX)
- find_package(PNG REQUIRED)
- find_package(JPEG REQUIRED)
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pthread")
- target_link_libraries(BodyTrack PUBLIC
- nvARPose
- NVCVImage
- OpenCV
- utils_sample
+set(CMD_ARG_STR "")
+set_target_properties(BodyTrack PROPERTIES
+ FOLDER SampleApps
+ VS_DEBUGGER_ENVIRONMENT "${PATH_STR}"
+ VS_DEBUGGER_COMMAND_ARGUMENTS "${CMD_ARG_STR}"
)
-endif(MSVC)
+
diff --git a/samples/CMakeLists.txt b/samples/CMakeLists.txt
index 3198bd1..fa5fdf2 100644
--- a/samples/CMakeLists.txt
+++ b/samples/CMakeLists.txt
@@ -1,7 +1,7 @@
# Sample apps
add_library(utils_sample INTERFACE)
-target_include_directories(utils_sample INTERFACE ${CMAKE_CURRENT_SOURCE_DIR}/utils)
+target_include_directories(utils_sample INTERFACE ${CMAKE_CURRENT_SOURCE_DIR}/utils ${OpenCV_INCLUDE_DIR})
target_link_libraries(utils_sample INTERFACE GLM)
add_subdirectory(external)
add_subdirectory(FaceTrack)
diff --git a/samples/FaceTrack/CMakeLists.txt b/samples/FaceTrack/CMakeLists.txt
index 3742de1..e6f3eb9 100644
--- a/samples/FaceTrack/CMakeLists.txt
+++ b/samples/FaceTrack/CMakeLists.txt
@@ -1,14 +1,14 @@
-set(SOURCE_FILES FaceEngine.cpp
- FaceTrack.cpp
- ../utils/RenderingUtils.cpp
- ../utils/FeatureVertexName.cpp
- ../utils/FeatureVertexName.h
+set(SOURCE_FILES FaceEngine.cpp
+ FaceTrack.cpp
+ ../utils/RenderingUtils.cpp
+ ../utils/FeatureVertexName.cpp
+ ../utils/FeatureVertexName.h
)
-if(MSVC)
- set(SOURCE_FILES ${SOURCE_FILES} ../../nvar/src/nvARProxy.cpp ../../nvar/src/nvCVImageProxy.cpp)
-endif(MSVC)
+
set(HEADER_FILES FaceEngine.h)
+set(SOURCE_FILES ${SOURCE_FILES} ../../nvar/src/nvARProxy.cpp ../../nvar/src/nvCVImageProxy.cpp)
+
# Set Visual Studio source filters
source_group("Source Files" FILES ${SOURCE_FILES})
source_group("Header Files" FILES ${HEADER_FILES})
@@ -16,23 +16,16 @@ source_group("Header Files" FILES ${HEADER_FILES})
add_executable(FaceTrack ${SOURCE_FILES} ${HEADER_FILES})
target_include_directories(FaceTrack PRIVATE ${CMAKE_CURRENT_SOURCE_DIR})
target_include_directories(FaceTrack PUBLIC
- ${SDK_INCLUDES_PATH}
-)
-target_link_libraries(FaceTrack PUBLIC
- opencv346
- utils_sample
-)
+ ${SDK_INCLUDES_PATH}
+ )
-set(ARSDK_PATH_STR ${CMAKE_CURRENT_SOURCE_DIR}/../../bin)
+target_link_libraries(FaceTrack PUBLIC
+ opencv346
+ utils_sample
+ )
set(OPENCV_PATH_STR ${CMAKE_CURRENT_SOURCE_DIR}/../external/opencv/bin)
-set(PATH_STR "PATH=%PATH%" ${ARSDK_PATH_STR} ${OPENCV_PATH_STR})
-set(CMD_ARG_STR "--model_path=\"${CMAKE_CURRENT_SOURCE_DIR}/../../bin/models\"")
-if(MSVC)
- set_target_properties(FaceTrack PROPERTIES
- FOLDER SampleApps
- VS_DEBUGGER_ENVIRONMENT "${PATH_STR}"
- VS_DEBUGGER_COMMAND_ARGUMENTS "${CMD_ARG_STR}"
- )
-endif(MSVC)
-
-
+set(PATH_STR "PATH=%PATH%" ${OPENCV_PATH_STR})
+set_target_properties(FaceTrack PROPERTIES
+ FOLDER SampleApps
+ VS_DEBUGGER_ENVIRONMENT "${PATH_STR}"
+ VS_DEBUGGER_COMMAND_ARGUMENTS "${CMD_ARG_STR}" )
\ No newline at end of file
diff --git a/samples/FaceTrack/FaceEngine.cpp b/samples/FaceTrack/FaceEngine.cpp
index 86d644f..79e0977 100644
--- a/samples/FaceTrack/FaceEngine.cpp
+++ b/samples/FaceTrack/FaceEngine.cpp
@@ -1,6 +1,6 @@
/*###############################################################################
#
-# Copyright 2020 NVIDIA Corporation
+# Copyright 2020-2021 NVIDIA Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
@@ -22,6 +22,7 @@
###############################################################################*/
#include "FaceEngine.h"
#include "RenderingUtils.h"
+#include
bool CheckResult(NvCV_Status nvErr, unsigned line) {
@@ -44,7 +45,7 @@ FaceEngine::Err FaceEngine::fitFaceModel(cv::Mat& frame) {
}
nvErr = NvAR_Run(faceFitHandle);
- BAIL_IF_CVERR(nvErr, err, FaceEngine::Err::errRun);
+ BAIL_IF_NVERR(nvErr, err, FaceEngine::Err::errRun);
if (getAverageLandmarksConfidence() < confidenceThreshold) return FaceEngine::Err::errRun;
@@ -56,6 +57,20 @@ NvAR_FaceMesh* FaceEngine::getFaceMesh() { return face_mesh; }
NvAR_RenderingParams* FaceEngine::getRenderingParams() { return rendering_params; }
+float* FaceEngine::getShapeEigenvalues() { return shapeEigenvalues.data(); }
+float* FaceEngine::getExpressionCoefficients() { return expressionCoefficients.data(); }
+int FaceEngine::getNumShapeEigenvalues() {
+ unsigned n = 0;
+ (void)NvAR_GetU32(faceFitHandle, NvAR_Parameter_Config(ShapeEigenValueCount), &n);
+ return n;
+}
+int FaceEngine::getNumExpressionCoefficients() {
+ unsigned n = 0;
+ (void)NvAR_GetU32(faceFitHandle, NvAR_Parameter_Config(ExpressionCount), &n);
+ return n;
+}
+
+
FaceEngine::Err FaceEngine::createFeatures(const char* modelPath, unsigned int _batchSize) {
FaceEngine::Err err = FaceEngine::Err::errNone;
@@ -67,19 +82,20 @@ FaceEngine::Err FaceEngine::createFeatures(const char* modelPath, unsigned int _
if (appMode == faceDetection) {
err = createFaceDetectionFeature(modelPath, stream);
if (err != Err::errNone) {
- printf("ERROR: An error has occured while initializing Face Detection\n");
+ printf("ERROR: An error has occurred while initializing Face Detection\n");
}
} else if (appMode == landmarkDetection) {
err = createLandmarkDetectionFeature(modelPath, _batchSize, stream);
if (err != Err::errNone) {
- printf("ERROR: An error has occured while initializing Landmark Detection\n");
+ printf("ERROR: An error has occurred while initializing Landmark Detection\n");
}
} else if (appMode == faceMeshGeneration) {
err = createFaceFittingFeature(modelPath, stream);
if (err != Err::errNone) {
- printf("ERROR: An error has occured while initializing Face Fitting\n");
+ printf("ERROR: An error has occurred while initializing Face Fitting\n");
}
}
+
return err;
}
@@ -88,19 +104,24 @@ FaceEngine::Err FaceEngine::createFaceDetectionFeature(const char* modelPath, CU
NvCV_Status nvErr;
nvErr = NvAR_Create(NvAR_Feature_FaceDetection, &faceDetectHandle);
- BAIL_IF_CVERR(nvErr, err, FaceEngine::Err::errEffect);
+ BAIL_IF_NVERR(nvErr, err, FaceEngine::Err::errEffect);
- nvErr = NvAR_SetString(faceDetectHandle, NvAR_Parameter_Config(ModelDir), modelPath);
- BAIL_IF_CVERR(nvErr, err, FaceEngine::Err::errParameter);
+ if (bUseOTAU && (!modelPath || !modelPath[0])) {
+ nvErr = NvAR_SetString(faceDetectHandle, NvAR_Parameter_Config(ModelDir), this->fdOTAModelPath);
+ BAIL_IF_NVERR(nvErr, err, FaceEngine::Err::errParameter);
+ } else {
+ nvErr = NvAR_SetString(faceDetectHandle, NvAR_Parameter_Config(ModelDir), modelPath);
+ BAIL_IF_NVERR(nvErr, err, FaceEngine::Err::errParameter);
+ }
nvErr = NvAR_SetCudaStream(faceDetectHandle, NvAR_Parameter_Config(CUDAStream), str);
- BAIL_IF_CVERR(nvErr, err, FaceEngine::Err::errParameter);
+ BAIL_IF_NVERR(nvErr, err, FaceEngine::Err::errParameter);
nvErr = NvAR_SetU32(faceDetectHandle, NvAR_Parameter_Config(Temporal), bStabilizeFace);
- BAIL_IF_CVERR(nvErr, err, FaceEngine::Err::errParameter);
+ BAIL_IF_NVERR(nvErr, err, FaceEngine::Err::errParameter);
nvErr = NvAR_Load(faceDetectHandle);
- BAIL_IF_CVERR(nvErr, err, FaceEngine::Err::errInitialization);
+ BAIL_IF_NVERR(nvErr, err, FaceEngine::Err::errInitialization);
bail:
return err;
@@ -113,28 +134,34 @@ FaceEngine::Err FaceEngine::createLandmarkDetectionFeature(const char* modelPath
batchSize = _batchSize;
nvErr = NvAR_Create(NvAR_Feature_LandmarkDetection, &landmarkDetectHandle);
- BAIL_IF_CVERR(nvErr, err, FaceEngine::Err::errEffect);
+ BAIL_IF_NVERR(nvErr, err, FaceEngine::Err::errEffect);
- nvErr = NvAR_SetString(landmarkDetectHandle, NvAR_Parameter_Config(ModelDir), modelPath);
- BAIL_IF_CVERR(nvErr, err, FaceEngine::Err::errParameter);
+ if (bUseOTAU && (!modelPath || !modelPath[0])) {
+ nvErr = NvAR_SetString(landmarkDetectHandle, NvAR_Parameter_Config(ModelDir), this->ldOTAModelPath);
+ BAIL_IF_NVERR(nvErr, err, FaceEngine::Err::errParameter);
+ }
+ else {
+ nvErr = NvAR_SetString(landmarkDetectHandle, NvAR_Parameter_Config(ModelDir), modelPath);
+ BAIL_IF_NVERR(nvErr, err, FaceEngine::Err::errParameter);
+ }
nvErr = NvAR_SetCudaStream(landmarkDetectHandle, NvAR_Parameter_Config(CUDAStream), str);
- BAIL_IF_CVERR(nvErr, err, FaceEngine::Err::errParameter);
+ BAIL_IF_NVERR(nvErr, err, FaceEngine::Err::errParameter);
nvErr = NvAR_SetU32(landmarkDetectHandle, NvAR_Parameter_Config(BatchSize), batchSize);
- BAIL_IF_CVERR(nvErr, err, FaceEngine::Err::errParameter);
+ BAIL_IF_NVERR(nvErr, err, FaceEngine::Err::errParameter);
nvErr = NvAR_SetU32(landmarkDetectHandle, NvAR_Parameter_Config(Temporal), bStabilizeFace);
- BAIL_IF_CVERR(nvErr, err, FaceEngine::Err::errParameter);
+ BAIL_IF_NVERR(nvErr, err, FaceEngine::Err::errParameter);
nvErr = NvAR_SetU32(landmarkDetectHandle, NvAR_Parameter_Config(Landmarks_Size), numLandmarks);
- BAIL_IF_CVERR(nvErr, err, FaceEngine::Err::errParameter);
+ BAIL_IF_NVERR(nvErr, err, FaceEngine::Err::errParameter);
nvErr = NvAR_SetU32(landmarkDetectHandle, NvAR_Parameter_Config(LandmarksConfidence_Size), numLandmarks);
- BAIL_IF_CVERR(nvErr, err, FaceEngine::Err::errParameter);
+ BAIL_IF_NVERR(nvErr, err, FaceEngine::Err::errParameter);
nvErr = NvAR_Load(landmarkDetectHandle);
- BAIL_IF_CVERR(nvErr, err, FaceEngine::Err::errInitialization);
+ BAIL_IF_NVERR(nvErr, err, FaceEngine::Err::errInitialization);
bail:
return err;
@@ -145,24 +172,24 @@ FaceEngine::Err FaceEngine::createFaceFittingFeature(const char* modelPath, CUst
NvCV_Status nvErr;
nvErr = NvAR_Create(NvAR_Feature_Face3DReconstruction, &faceFitHandle);
- BAIL_IF_CVERR(nvErr, err, FaceEngine::Err::errEffect);
+ BAIL_IF_NVERR(nvErr, err, FaceEngine::Err::errEffect);
nvErr = NvAR_SetString(faceFitHandle, NvAR_Parameter_Config(ModelDir), modelPath);
- BAIL_IF_CVERR(nvErr, err, FaceEngine::Err::errParameter);
+ BAIL_IF_NVERR(nvErr, err, FaceEngine::Err::errParameter);
nvErr = NvAR_SetCudaStream(faceFitHandle, NvAR_Parameter_Config(CUDAStream), str);
- BAIL_IF_CVERR(nvErr, err, FaceEngine::Err::errParameter);
+ BAIL_IF_NVERR(nvErr, err, FaceEngine::Err::errParameter);
nvErr = NvAR_SetU32(faceFitHandle, NvAR_Parameter_Config(Landmarks_Size), numLandmarks); // TODO: Check if nonzero??
- BAIL_IF_CVERR(nvErr, err, FaceEngine::Err::errParameter);
+ BAIL_IF_NVERR(nvErr, err, FaceEngine::Err::errParameter);
if (!face_model.empty()) {
nvErr = NvAR_SetString(faceFitHandle, NvAR_Parameter_Config(ModelName), face_model.c_str());
- BAIL_IF_CVERR(nvErr, err, FaceEngine::Err::errParameter);
+ BAIL_IF_NVERR(nvErr, err, FaceEngine::Err::errParameter);
}
nvErr = NvAR_Load(faceFitHandle);
- BAIL_IF_CVERR(nvErr, err, FaceEngine::Err::errInitialization);
+ BAIL_IF_NVERR(nvErr, err, FaceEngine::Err::errInitialization);
bail:
return err;
@@ -174,8 +201,7 @@ FaceEngine::Err FaceEngine::initFeatureIOParams() {
NvCV_Status cvErr = NvCVImage_Alloc(&inputImageBuffer, input_image_width, input_image_height, NVCV_BGR, NVCV_U8,
NVCV_CHUNKY, NVCV_GPU, 1);
- BAIL_IF_CVERR(cvErr, err, FaceEngine::Err::errInitialization);
-
+ BAIL_IF_NVERR(cvErr, err, FaceEngine::Err::errInitialization);
if (appMode == faceDetection) {
err = initFaceDetectionIOParams(&inputImageBuffer);
if (err != Err::errNone) {
@@ -203,7 +229,7 @@ FaceEngine::Err FaceEngine::initFaceDetectionIOParams(NvCVImage* inBuf) {
FaceEngine::Err err = FaceEngine::Err::errNone;
nvErr = NvAR_SetObject(faceDetectHandle, NvAR_Parameter_Input(Image), inBuf, sizeof(NvCVImage));
- BAIL_IF_CVERR(nvErr, err, FaceEngine::Err::errParameter);
+ BAIL_IF_NVERR(nvErr, err, FaceEngine::Err::errParameter);
output_bbox_data.assign(25, {0.f, 0.f, 0.f, 0.f});
output_bbox_conf_data.assign(25, 0.f);
@@ -211,11 +237,11 @@ FaceEngine::Err FaceEngine::initFaceDetectionIOParams(NvCVImage* inBuf) {
output_bboxes.max_boxes = (uint8_t)output_bbox_data.size();
output_bboxes.num_boxes = 0;
nvErr = NvAR_SetObject(faceDetectHandle, NvAR_Parameter_Output(BoundingBoxes), &output_bboxes, sizeof(NvAR_BBoxes));
- BAIL_IF_CVERR(nvErr, err, FaceEngine::Err::errParameter);
+ BAIL_IF_NVERR(nvErr, err, FaceEngine::Err::errParameter);
nvErr = NvAR_SetF32Array(faceDetectHandle, NvAR_Parameter_Output(BoundingBoxesConfidence),
output_bbox_conf_data.data(), output_bboxes.max_boxes);
- BAIL_IF_CVERR(nvErr, err, FaceEngine::Err::errParameter);
+ BAIL_IF_NVERR(nvErr, err, FaceEngine::Err::errParameter);
bail:
return err;
@@ -225,16 +251,16 @@ FaceEngine::Err FaceEngine::initLandmarkDetectionIOParams(NvCVImage* inBuf) {
NvCV_Status nvErr = NVCV_SUCCESS;
FaceEngine::Err err = FaceEngine::Err::errNone;
uint output_bbox_size;
- unsigned int OUTPUT_SIZE_KPTS, OUTPUT_SIZE_KPTS_CONF;
nvErr = NvAR_SetObject(landmarkDetectHandle, NvAR_Parameter_Input(Image), inBuf, sizeof(NvCVImage));
- BAIL_IF_CVERR(nvErr, err, FaceEngine::Err::errParameter);
+ BAIL_IF_NVERR(nvErr, err, FaceEngine::Err::errParameter);
+ unsigned int OUTPUT_SIZE_KPTS, OUTPUT_SIZE_KPTS_CONF;
nvErr = NvAR_GetU32(landmarkDetectHandle, NvAR_Parameter_Config(Landmarks_Size), &OUTPUT_SIZE_KPTS);
- BAIL_IF_CVERR(nvErr, err, FaceEngine::Err::errParameter);
+ BAIL_IF_NVERR(nvErr, err, FaceEngine::Err::errParameter);
nvErr = NvAR_GetU32(landmarkDetectHandle, NvAR_Parameter_Config(LandmarksConfidence_Size), &OUTPUT_SIZE_KPTS_CONF);
- BAIL_IF_CVERR(nvErr, err, FaceEngine::Err::errParameter);
+ BAIL_IF_NVERR(nvErr, err, FaceEngine::Err::errParameter);
facial_landmarks.assign(batchSize * OUTPUT_SIZE_KPTS, {0.f, 0.f});
facial_pose.assign(batchSize, {0.f, 0.f, 0.f, 0.f});
@@ -242,15 +268,15 @@ FaceEngine::Err FaceEngine::initLandmarkDetectionIOParams(NvCVImage* inBuf) {
nvErr = NvAR_SetObject(landmarkDetectHandle, NvAR_Parameter_Output(Landmarks), facial_landmarks.data(),
sizeof(NvAR_Point2f));
- BAIL_IF_CVERR(nvErr, err, FaceEngine::Err::errParameter);
+ BAIL_IF_NVERR(nvErr, err, FaceEngine::Err::errParameter);
nvErr =
NvAR_SetObject(landmarkDetectHandle, NvAR_Parameter_Output(Pose), facial_pose.data(), sizeof(NvAR_Quaternion));
- BAIL_IF_CVERR(nvErr, err, FaceEngine::Err::errParameter);
+ BAIL_IF_NVERR(nvErr, err, FaceEngine::Err::errParameter);
nvErr = NvAR_SetF32Array(landmarkDetectHandle, NvAR_Parameter_Output(LandmarksConfidence),
facial_landmarks_confidence.data(), batchSize * OUTPUT_SIZE_KPTS);
- BAIL_IF_CVERR(nvErr, err, FaceEngine::Err::errParameter);
+ BAIL_IF_NVERR(nvErr, err, FaceEngine::Err::errParameter);
output_bbox_size = batchSize;
if (!bStabilizeFace) output_bbox_size = 25;
@@ -260,7 +286,7 @@ FaceEngine::Err FaceEngine::initLandmarkDetectionIOParams(NvCVImage* inBuf) {
output_bboxes.num_boxes = (uint8_t)output_bbox_size;
nvErr =
NvAR_SetObject(landmarkDetectHandle, NvAR_Parameter_Output(BoundingBoxes), &output_bboxes, sizeof(NvAR_BBoxes));
- BAIL_IF_CVERR(nvErr, err, FaceEngine::Err::errParameter);
+ BAIL_IF_NVERR(nvErr, err, FaceEngine::Err::errParameter);
bail:
return err;
@@ -276,31 +302,31 @@ FaceEngine::Err FaceEngine::initFaceFittingIOParams(NvCVImage* inBuf) {
rendering_params = new NvAR_RenderingParams();
nvErr = NvAR_SetObject(faceFitHandle, NvAR_Parameter_Input(Image), inBuf, sizeof(NvCVImage));
- BAIL_IF_CVERR(nvErr, err, FaceEngine::Err::errParameter);
+ BAIL_IF_NVERR(nvErr, err, FaceEngine::Err::errParameter);
nvErr = NvAR_SetS32(faceFitHandle, NvAR_Parameter_Input(Width), input_image_width);
- BAIL_IF_CVERR(nvErr, err, FaceEngine::Err::errParameter);
+ BAIL_IF_NVERR(nvErr, err, FaceEngine::Err::errParameter);
nvErr = NvAR_SetS32(faceFitHandle, NvAR_Parameter_Input(Height), input_image_height);
- BAIL_IF_CVERR(nvErr, err, FaceEngine::Err::errParameter);
+ BAIL_IF_NVERR(nvErr, err, FaceEngine::Err::errParameter);
unsigned int OUTPUT_SIZE_KPTS;
nvErr = NvAR_GetU32(faceFitHandle, NvAR_Parameter_Config(Landmarks_Size), &OUTPUT_SIZE_KPTS);
- BAIL_IF_CVERR(nvErr, err, FaceEngine::Err::errParameter);
+ BAIL_IF_NVERR(nvErr, err, FaceEngine::Err::errParameter);
facial_landmarks.assign(batchSize * OUTPUT_SIZE_KPTS, {0.f, 0.f});
nvErr =
NvAR_SetObject(faceFitHandle, NvAR_Parameter_Output(Landmarks), facial_landmarks.data(), sizeof(NvAR_Point2f));
- BAIL_IF_CVERR(nvErr, err, FaceEngine::Err::errParameter);
+ BAIL_IF_NVERR(nvErr, err, FaceEngine::Err::errParameter);
facial_landmarks_confidence.assign(batchSize * OUTPUT_SIZE_KPTS, 0.f);
nvErr = NvAR_SetF32Array(faceFitHandle, NvAR_Parameter_Output(LandmarksConfidence),
facial_landmarks_confidence.data(), batchSize * OUTPUT_SIZE_KPTS);
- BAIL_IF_CVERR(nvErr, err, FaceEngine::Err::errParameter);
+ BAIL_IF_NVERR(nvErr, err, FaceEngine::Err::errParameter);
facial_pose.assign(batchSize, {0.f, 0.f, 0.f, 0.f});
nvErr = NvAR_SetObject(faceFitHandle, NvAR_Parameter_Output(Pose), facial_pose.data(), sizeof(NvAR_Quaternion));
- BAIL_IF_CVERR(nvErr, err, FaceEngine::Err::errParameter);
+ BAIL_IF_NVERR(nvErr, err, FaceEngine::Err::errParameter);
output_bbox_data.assign(batchSize, {0.f, 0.f, 0.f, 0.f});
output_bboxes.boxes = output_bbox_data.data();
@@ -309,11 +335,24 @@ FaceEngine::Err FaceEngine::initFaceFittingIOParams(NvCVImage* inBuf) {
nvErr = NvAR_SetObject(faceFitHandle, NvAR_Parameter_Output(BoundingBoxes), &output_bboxes, sizeof(NvAR_BBoxes));
nvErr = NvAR_SetObject(faceFitHandle, NvAR_Parameter_Output(FaceMesh), face_mesh, sizeof(NvAR_FaceMesh));
- BAIL_IF_CVERR(nvErr, err, FaceEngine::Err::errParameter);
+ BAIL_IF_NVERR(nvErr, err, FaceEngine::Err::errParameter);
nvErr = NvAR_SetObject(faceFitHandle, NvAR_Parameter_Output(RenderingParams), rendering_params,
sizeof(NvAR_RenderingParams));
- BAIL_IF_CVERR(nvErr, err, FaceEngine::Err::errParameter);
+ BAIL_IF_NVERR(nvErr, err, FaceEngine::Err::errParameter);
+
+ unsigned n;
+ nvErr = NvAR_GetU32(faceFitHandle, NvAR_Parameter_Config(ShapeEigenValueCount), &n);
+ BAIL_IF_NVERR(nvErr, err, FaceEngine::Err::errParameter);
+ shapeEigenvalues.resize(n);
+ nvErr = NvAR_SetF32Array(faceFitHandle, NvAR_Parameter_Output(ShapeEigenValues), shapeEigenvalues.data(), n);
+ BAIL_IF_NVERR(nvErr, err, FaceEngine::Err::errParameter);
+
+ nvErr = NvAR_GetU32(faceFitHandle, NvAR_Parameter_Config(ExpressionCount), &n);
+ BAIL_IF_NVERR(nvErr, err, FaceEngine::Err::errParameter);
+ expressionCoefficients.resize(n);
+ nvErr = NvAR_SetF32Array(faceFitHandle, NvAR_Parameter_Output(ExpressionCoefficients), expressionCoefficients.data(), n);
+ BAIL_IF_NVERR(nvErr, err, FaceEngine::Err::errParameter);
bail:
return err;
@@ -634,6 +673,7 @@ unsigned FaceEngine::acquireFaceBoxAndLandmarks(cv::Mat& src, NvAR_Point2f* refM
return n;
}
+
if (findLandmarks() != NVCV_SUCCESS) return 0;
faceBox = output_bboxes.boxes[0];
n = 1;
diff --git a/samples/FaceTrack/FaceEngine.h b/samples/FaceTrack/FaceEngine.h
index 9e26858..44656bc 100644
--- a/samples/FaceTrack/FaceEngine.h
+++ b/samples/FaceTrack/FaceEngine.h
@@ -1,6 +1,6 @@
/*###############################################################################
#
-# Copyright 2020 NVIDIA Corporation
+# Copyright 2020-2021 NVIDIA Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
@@ -83,12 +83,12 @@ bool CheckResult(NvCV_Status nvErr, unsigned line);
#define BAIL_IF_ERR(err) \
do { \
- if (0!=err) { \
+ if (0 != (err)) { \
goto bail; \
} \
} while (0)
-#define BAIL_IF_CVERR(nvErr, err, code) \
+#define BAIL_IF_NVERR(nvErr, err, code) \
do { \
if (!CheckResult(nvErr, __LINE__)) { \
err = code; \
@@ -154,18 +154,23 @@ class FaceEngine {
Err fitFaceModel(cv::Mat& frame);
NvAR_FaceMesh* getFaceMesh();
NvAR_RenderingParams* getRenderingParams();
+ float* getShapeEigenvalues();
+ float* getExpressionCoefficients();
void setFaceStabilization(bool);
+ int getNumShapeEigenvalues();
+ int getNumExpressionCoefficients();
Err setNumLandmarks(int);
int getNumLandmarks() { return numLandmarks; }
void DrawPose(const cv::Mat& src, const NvAR_Quaternion* pose);
- NvCVImage inputImageBuffer{}, tmpImage{};
+ NvCVImage inputImageBuffer{}, tmpImage{}, outputImageBuffer{};
NvAR_FeatureHandle faceDetectHandle{}, landmarkDetectHandle{}, faceFitHandle{};
  std::vector<NvAR_Point2f> facial_landmarks;
  std::vector<float> facial_landmarks_confidence;
  std::vector<NvAR_Quaternion> facial_pose;
NvAR_FaceMesh* face_mesh{};
NvAR_RenderingParams* rendering_params{};
+  std::vector<float> shapeEigenvalues, expressionCoefficients;
CUstream stream{};
  std::vector<NvAR_Rect> output_bbox_data;
  std::vector<float> output_bbox_conf_data;
@@ -177,6 +182,8 @@ class FaceEngine {
std::string face_model;
bool bStabilizeFace;
+ bool bUseOTAU;
+ char *fdOTAModelPath, *ldOTAModelPath;
FaceEngine() {
batchSize = 1;
@@ -187,8 +194,12 @@ class FaceEngine {
input_image_width = 640;
input_image_height = 480;
input_image_pitch = 3 * input_image_width * sizeof(unsigned char); // RGB
+ bUseOTAU = false;
+ fdOTAModelPath = NULL;
+ ldOTAModelPath = NULL;
}
- enum mode { faceDetection = 0, landmarkDetection, faceMeshGeneration } appMode;
+ enum mode { faceDetection = 0, landmarkDetection, faceMeshGeneration
+ } appMode;
void setAppMode(FaceEngine::mode _mAppMode);
};
#endif
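
The new face-fitting outputs declared above can be read back after a successful fitFaceModel(); a sketch, assuming a FaceEngine configured for face mesh generation (the helper name is illustrative):

#include <cstdio>
#include "FaceEngine.h"

// Sketch: dump the fitted expression coefficients and shape eigenvalues.
void PrintFaceFitOutputs(FaceEngine& eng) {
  int nShape = eng.getNumShapeEigenvalues();
  int nExpr  = eng.getNumExpressionCoefficients();
  float* shape = eng.getShapeEigenvalues();        // nShape values
  float* expr  = eng.getExpressionCoefficients();  // nExpr values
  for (int i = 0; i < nExpr; ++i)  printf("expr[%d]  = %.4f\n", i, expr[i]);
  for (int i = 0; i < nShape; ++i) printf("shape[%d] = %.4f\n", i, shape[i]);
}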
diff --git a/samples/FaceTrack/FaceTrack.cpp b/samples/FaceTrack/FaceTrack.cpp
index cf4bb1c..1d8bc3c 100644
--- a/samples/FaceTrack/FaceTrack.cpp
+++ b/samples/FaceTrack/FaceTrack.cpp
@@ -22,25 +22,21 @@
###############################################################################*/
#include
#include
-#include
-#include
-#include
#include
#include
#include
#include
#include
#include
+#include
#include
-#include
-#include
-#include
+#include
#include "FaceEngine.h"
+#include "RenderingUtils.h"
#include "nvAR.h"
#include "nvAR_defs.h"
#include "opencv2/opencv.hpp"
-#include "RenderingUtils.h"
#ifndef M_PI
#define M_PI 3.1415926535897932385
@@ -73,7 +69,7 @@ bool FLAG_debug = false, FLAG_verbose = false, FLAG_temporal = true, FLAG_captur
FLAG_offlineMode = false, FLAG_isNumLandmarks126 = false;
std::string FLAG_outDir, FLAG_inFile, FLAG_outFile, FLAG_modelPath, FLAG_landmarks, FLAG_proxyWireframe,
FLAG_captureCodec = "avc1", FLAG_camRes, FLAG_faceModel;
-unsigned int FLAG_batch = 1, FLAG_appMode = 2;
+unsigned int FLAG_appMode = 2;
/********************************************************************************
* Usage
@@ -81,7 +77,7 @@ unsigned int FLAG_batch = 1, FLAG_appMode = 2;
static void Usage() {
printf(
- "AboutFace [ ...]\n"
+ "FaceTrack [ ...]\n"
"where is\n"
" --verbose[=(true|false)] report interesting info\n"
" --debug[=(true|false)] report debugging info\n"
@@ -89,16 +85,13 @@ static void Usage() {
" --capture_outputs[=(true|false)] enables video/image capture and writing face detection/landmark outputs\n"
" --offline_mode[=(true|false)] disables webcam, reads video from file and writes output video results\n"
" --cam_res=[WWWx]HHH specify resolution as height or width x height\n"
- " --in_file= specify the input file\n"
" --codec= FOURCC code for the desired codec (default H264)\n"
" --in= specify the input file\n"
- " --out_file= specify the output file\n"
" --out= specify the output file\n"
" --model_path= specify the directory containing the TRT models\n"
" --landmarks_126[=(true|false)] set the number of facial landmark points to 126, otherwise default to 68\n"
" --face_model= specify the name of the face model\n"
" --wireframe_mesh= specify the path to a proxy wireframe mesh\n"
- " --batch= 1 - 8, used for batch inferencing in landmark detector\n"
" --app_mode[=(0|1|2)] App mode. 0: Face detection, 1: Landmark detection, 2: Face fitting "
"(Default)."
" --benchmarks[=] run benchmarks\n");
@@ -255,9 +248,12 @@ std::string getCalendarTime() {
// Convert to tm to get structure holding a calendar date and time broken down into its components.
std::tm brokenTime = *std::localtime(&currentTime);
std::ostringstream calendarTime;
- calendarTime << std::put_time(
- &brokenTime,
- "%Y-%m-%d-%H-%M-%S"); // (YYYY-MM-DD-HH-mm-ss)-----
+ // calendarTime << std::put_time(
+ // &brokenTime,
+ // "%Y-%m-%d-%H-%M-%S"); // (YYYY-MM-DD-HH-mm-ss)-----
+ char time_string[24];
+ if (0 < strftime(time_string, sizeof(time_string), "%Y-%m-%d-%H-%M-%S] ", &brokenTime))
+ calendarTime << time_string; // (YYYY-MM-DD-HH-mm-ss)-----
// Get the time since epoch 0(Thu Jan 1 00:00:00 1970) and the remainder after division is
// our milliseconds
std::chrono::milliseconds currentMilliseconds =
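As a standalone sketch, the strftime() idiom that replaces std::put_time above; std::put_time is missing from some older toolchains, which is presumably the motivation for the swap (an inference, not stated in the patch). The helper name is hypothetical.

    #include <ctime>
    #include <string>

    std::string TimestampNow() {
      std::time_t now = std::time(nullptr);
      std::tm broken = *std::localtime(&now);
      char buf[24];  // "YYYY-MM-DD-HH-MM-SS" is 19 chars + NUL terminator
      if (0 < std::strftime(buf, sizeof(buf), "%Y-%m-%d-%H-%M-%S", &broken))
        return std::string(buf);
      return std::string();  // formatting failed; buffer was too small
    }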
@@ -325,7 +321,7 @@ class DoApp {
static const char *errorStringFromCode(Err code);
cv::VideoCapture cap{};
- cv::Mat frame;
+ cv::Mat frame, outputFrame;
int inputWidth, inputHeight;
cv::VideoWriter faceDetectOutputVideo{}, landMarkOutputVideo{}, faceFittingOutputVideo{};
int frameIndex;
@@ -335,6 +331,7 @@ class DoApp {
MyTimer frameTimer;
cv::VideoWriter capturedVideo;
std::ofstream faceEngineVideoOutputFile;
+ FILE *exprFile, *shapeFile, *poseFile;
FaceEngine::Err nvErr;
float expr[6];
@@ -353,12 +350,12 @@ void DoApp::processKey(int key) {
face_ar_engine.destroyFeatures();
face_ar_engine.setAppMode(FaceEngine::mode::faceMeshGeneration);
nvErr = face_ar_engine.createFeatures(FLAG_modelPath.c_str());
- // If there is an error, fallback to mode '2' i.e. landmark detection
if (nvErr == FaceEngine::Err::errNone) {
face_ar_engine.initFeatureIOParams();
break;
} else if (nvErr == FaceEngine::Err::errInitialization) {
showFaceFitErrorMessage();
+ // If there is an error, fallback to mode '2' i.e. landmark detection
}
case '2':
face_ar_engine.destroyFeatures();
@@ -394,8 +391,6 @@ void DoApp::processKey(int key) {
}
DoApp::Err DoApp::initFaceEngine(const char *modelPath, bool isNumLandmarks126) {
- Err err = errNone;
-
if (!cap.isOpened()) return errVideo;
int numLandmarkPoints = isNumLandmarks126 ? 126 : 68;
@@ -468,6 +463,7 @@ void DoApp::DrawBBoxes(const cv::Mat &src, NvAR_Rect *output_bbox) {
if (FLAG_offlineMode) faceDetectOutputVideo.write(frm);
}
+
void DoApp::writeVideoAndEstResults(const cv::Mat &frm, NvAR_BBoxes output_bboxes, NvAR_Point2f *landmarks) {
if (captureVideo) {
if (!capturedVideo.isOpened()) {
@@ -668,13 +664,21 @@ DoApp::Err DoApp::acquireFaceBox() {
writeFrameAndEstResults(frame, face_ar_engine.output_bboxes);
writeVideoAndEstResults(frame, face_ar_engine.output_bboxes);
}
- if (0 == n) return errNoFace;
#ifdef VISUALIZE
-
- if (drawVisualization) {
- DrawBBoxes(frame, &output_bbox);
+ if (n > 0) { // At least one face was found
+ if (drawVisualization) {
+ DrawBBoxes(frame, &output_bbox); // This will write a frame if in offlineMode
+ }
}
+ else { // No faces found
+ if (FLAG_offlineMode) {
+ faceDetectOutputVideo.write(frame); // This will write a frame if in offlineMode
+ }
+ err = errNoFace;
+ }
+#else // !VISUALIZE
+ if (0 == n) err = errNoFace;
#endif // VISUALIZE
frameIndex++;
@@ -701,17 +705,25 @@ DoApp::Err DoApp::acquireFaceBoxAndLandmarks() {
writeFrameAndEstResults(frame, face_ar_engine.output_bboxes, facial_landmarks.data());
writeVideoAndEstResults(frame, face_ar_engine.output_bboxes, facial_landmarks.data());
}
- if (0 == n) return errNoFace;
#ifdef VISUALIZE
-
- if (drawVisualization) {
- DrawLandmarkPoints(frame, facial_landmarks.data(), numLandmarks);
+ if (n > 0) { // At least one face found
+ if (drawVisualization) {
+ DrawLandmarkPoints(frame, facial_landmarks.data(), numLandmarks); // Writes frame in offline mode
+ if (FLAG_offlineMode) {
+ DrawBBoxes(frame, &output_bbox); // Writes frame in offline mode
+ }
+ }
+ }
+ else { // No faces found
if (FLAG_offlineMode) {
- DrawBBoxes(frame, &output_bbox);
faceDetectOutputVideo.write(frame); // These two write frames if a face was not detected
+ landMarkOutputVideo.write(frame);
}
}
-#endif // VISUALIZE
+#else // !VISUALIZE
+ if (0 == n) err = errNoFace;
+#endif // VISUALIZE
frameIndex++;
return err;
@@ -758,34 +770,57 @@ DoApp::Err DoApp::initOfflineMode(const char *inputFilename, const char *outputF
return Err::errVideo;
}
- std::string fdOutputVideoName, fldOutputVideoName, ffOutputVideoName;
- std::string outputFilePrefix;
+ std::string fdOutputVideoName, fldOutputVideoName, ffOutputVideoName,
+ exprOutputFileName, shapeOutputFileName, poseOutputFileName, outputFilePrefix;
if (outputFilename && strlen(outputFilename) != 0) {
outputFilePrefix = outputFilename;
} else {
size_t lastindex = std::string(inputFilename).find_last_of(".");
outputFilePrefix = std::string(inputFilename).substr(0, lastindex);
}
- fdOutputVideoName = outputFilePrefix + "_bbox.mp4";
- fldOutputVideoName = outputFilePrefix + "_landmarks.mp4";
- ffOutputVideoName = outputFilePrefix + "_faceModel.mp4";
-
- if (!faceDetectOutputVideo.open(fdOutputVideoName, StringToFourcc(FLAG_captureCodec), cap.get(CV_CAP_PROP_FPS),
- cv::Size(inputWidth, inputHeight))) {
+ fdOutputVideoName = outputFilePrefix + "_bbox.mp4";
+ fldOutputVideoName = outputFilePrefix + "_landmarks.mp4";
+ ffOutputVideoName = outputFilePrefix + "_faceModel.mp4";
+ exprOutputFileName = outputFilePrefix + "_expr.json";
+ shapeOutputFileName = outputFilePrefix + "_shape.json";
+ poseOutputFileName = outputFilePrefix + "_pose.json";
+
+ const int codec = StringToFourcc(FLAG_captureCodec);
+ const double fps = cap.get(CV_CAP_PROP_FPS);
+ const cv::Size frameSize(inputWidth, inputHeight);
+ if (FLAG_verbose) {
+ const unsigned long long frameCount = (unsigned long long)cap.get(CV_CAP_PROP_FRAME_COUNT);
+ printf("codec='%.4s' fps=%.8g frameCount=%llu\n", (const char*)&codec, fps, frameCount);
+ }
+ if (!faceDetectOutputVideo.open(fdOutputVideoName, codec, fps, frameSize)) {
printf("ERROR: Unable to open the output video file \"%s\" \n", fdOutputVideoName.c_str());
return Err::errGeneral;
}
- if (!landMarkOutputVideo.open(fldOutputVideoName, StringToFourcc(FLAG_captureCodec), cap.get(CV_CAP_PROP_FPS),
- cv::Size(inputWidth, inputHeight))) {
+ if (!landMarkOutputVideo.open(fldOutputVideoName, codec, fps, frameSize)) {
printf("ERROR: Unable to open the output video file \"%s\" \n", fldOutputVideoName.c_str());
return Err::errGeneral;
}
- if (!faceFittingOutputVideo.open(ffOutputVideoName, StringToFourcc(FLAG_captureCodec), cap.get(CV_CAP_PROP_FPS),
- cv::Size(inputWidth, inputHeight))) {
+ if (!faceFittingOutputVideo.open(ffOutputVideoName, codec, fps, frameSize)) {
printf("ERROR: Unable to open the output video file \"%s\" \n", ffOutputVideoName.c_str());
return Err::errGeneral;
}
-
+ if (FLAG_debug) {
+ if (nullptr == (exprFile = fopen(exprOutputFileName.c_str(), "w"))) {
+ printf("ERROR: Unable to open the output file \"%s\" \n", exprOutputFileName.c_str());
+ return Err::errGeneral;
+ }
+ fprintf(exprFile, "{\n \"expression_series\":[");
+ if (nullptr == (shapeFile = fopen(shapeOutputFileName.c_str(), "w"))) {
+ printf("ERROR: Unable to open the output file \"%s\" \n", shapeOutputFileName.c_str());
+ return Err::errGeneral;
+ }
+ fprintf(shapeFile, "{\n \"shape_series\":[");
+ if (nullptr == (poseFile = fopen(poseOutputFileName.c_str(), "w"))) {
+ printf("ERROR: Unable to open the output file \"%s\" \n", poseOutputFileName.c_str());
+ return Err::errGeneral;
+ }
+ fprintf(poseFile, "{\n \"pose_series\":[");
+ }
return Err::errNone;
}
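To make the naming rule concrete: with a hypothetical invocation "FaceTrack --offline_mode --in=talk.mp4" and no --out, the prefix becomes "talk", so the annotated videos land in talk_bbox.mp4, talk_landmarks.mp4, and talk_faceModel.mp4; adding --debug also opens talk_expr.json, talk_shape.json, and talk_pose.json for the per-frame series.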
@@ -797,21 +832,48 @@ DoApp::Err DoApp::fitFaceModel() {
writeVideoAndEstResults(frame, face_ar_engine.output_bboxes, face_ar_engine.getLandmarks());
}
- if (FaceEngine::Err::errNone == nvErr) {
+ frameTimer.pause();
#ifdef VISUALIZE
- frameTimer.pause();
+ if (FaceEngine::Err::errNone == nvErr) { // If a face and its landmarks were detected
if (drawVisualization) {
- DrawFaceMesh(frame, face_ar_engine.getFaceMesh());
- if (FLAG_offlineMode) {
+ DrawFaceMesh(frame, face_ar_engine.getFaceMesh()); // This writes a frame in offline mode ...
+ if (FLAG_offlineMode) { // ... as do the following two statements
DrawLandmarkPoints(frame, face_ar_engine.getLandmarks(), face_ar_engine.getNumLandmarks());
DrawBBoxes(frame, face_ar_engine.getLargestBox());
}
}
- frameTimer.resume();
-#endif // VISUALIZE
} else {
+ if (FLAG_offlineMode) {
+ faceDetectOutputVideo.write(frame); // These three will draw unannotated frames if a face was not detected
+ landMarkOutputVideo.write(frame);
+ faceFittingOutputVideo.write(frame);
+ }
doErr = errFaceFit;
}
+#else // !VISUALIZE
+ if (FaceEngine::Err::errNone != nvErr)
+ doErr = errFaceFit;
+#endif // VISUALIZE
+ if (FLAG_offlineMode && FLAG_debug) {
+ unsigned n;
+ const float *f;
+ if (frameIndex)
+ fprintf(exprFile, ",");
+ fprintf(exprFile, "\n [");
+ for (n = face_ar_engine.getNumExpressionCoefficients(), f = face_ar_engine.getExpressionCoefficients(); n--; ++f)
+ fprintf(exprFile, "%8.6f%c", *f, (n ? ',' : ']'));
+ if (frameIndex)
+ fprintf(shapeFile, ",");
+ fprintf(shapeFile, "\n [");
+ for (n = face_ar_engine.getNumShapeEigenvalues(), f = face_ar_engine.getShapeEigenvalues(); n--; ++f)
+ fprintf(shapeFile, "%+9.6f%c", *f, (n ? ',' : ']'));
+ if (frameIndex)
+ fprintf(poseFile, ",");
+ f = (const float*)face_ar_engine.getPose();
+ fprintf(poseFile, "\n [%+9.6f,%+9.6f,%+9.6f,%+9.6f]", f[0], f[1], f[2], f[3]);
+ }
+ frameTimer.resume();
+ frameIndex++;
return doErr;
}
@@ -828,9 +890,29 @@ DoApp::DoApp() {
nvErr = FaceEngine::errNone;
scaleOffsetXY[0] = scaleOffsetXY[2] = 1.f;
scaleOffsetXY[1] = scaleOffsetXY[3] = 0.f;
+ exprFile = nullptr;
+ shapeFile = nullptr;
+ poseFile = nullptr;
}
-DoApp::~DoApp() {}
+DoApp::~DoApp() {
+ static const char termJsFile[] = { "\n ]\n}\n" };
+ if (exprFile) { fprintf(exprFile, termJsFile); fclose(exprFile); }
+ if (shapeFile) { fprintf(shapeFile, termJsFile); fclose(shapeFile); }
+ if (poseFile) { fprintf(poseFile, termJsFile); fclose(poseFile); }
+}
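Putting the three fprintf sites together (the header written in initOfflineMode, one row per frame appended in fitFaceModel, and the terminator emitted by the destructor above), the expression file comes out shaped roughly like this; two frames and four coefficients are shown purely for illustration, since the real coefficient count depends on the loaded face model. The shape and pose files follow the same pattern with "shape_series" and "pose_series" keys.

    {
     "expression_series":[
     [0.102345,0.004567,0.213456,0.000012],
     [0.098765,0.005678,0.201234,0.000034]
     ]
    }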
+
+char *g_nvARSDKPath = NULL;
+
+int chooseGPU() {
+ // If the system has multiple supported GPUs then the application
+ // should use CUDA driver APIs or CUDA runtime APIs to enumerate
+ // the GPUs and select one based on the application's requirements
+
+  // Default to CUDA device 0
+  return 0;
+}
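A hedged sketch of the enumeration the comment above recommends, using only CUDA runtime API calls (cudaGetDeviceCount, cudaGetDeviceProperties); the "most multiprocessors wins" selection policy and the function name are assumptions for illustration.

    #include <cuda_runtime.h>

    // Pick the device with the highest multiprocessor count; fall back to 0.
    int ChooseGpuBySmCount() {
      int count = 0, best = 0, bestSms = -1;
      if (cudaSuccess != cudaGetDeviceCount(&count))
        return 0;
      for (int dev = 0; dev < count; ++dev) {
        cudaDeviceProp prop;
        if (cudaSuccess != cudaGetDeviceProperties(&prop, dev)) continue;
        if (prop.multiProcessorCount > bestSms) {
          bestSms = prop.multiProcessorCount;
          best = dev;
        }
      }
      return best;
    }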
void DoApp::getFPS() {
const float timeConstant = 16.f;
@@ -893,11 +975,6 @@ DoApp::Err DoApp::run() {
} else if (face_ar_engine.appMode == FaceEngine::mode::faceMeshGeneration) {
doErr = fitFaceModel();
}
- if ((DoApp::errNoFace == doErr || DoApp::errFaceFit == doErr) && FLAG_offlineMode) {
- faceDetectOutputVideo.write(frame);
- landMarkOutputVideo.write(frame);
- faceFittingOutputVideo.write(frame);
- }
if (DoApp::errCancel == doErr || DoApp::errVideo == doErr) return doErr;
if (!frame.empty() && !FLAG_offlineMode) {
if (drawVisualization) {
@@ -907,7 +984,6 @@ DoApp::Err DoApp::run() {
}
cv::imshow(windowTitle, frame);
}
-
if (!FLAG_offlineMode) {
int n = cv::waitKey(1);
if (n >= 0) {
@@ -980,12 +1056,12 @@ const char *DoApp::errorStringFromCode(DoApp::Err code) {
********************************************************************************/
int main(int argc, char **argv) {
- DoApp app;
- DoApp::Err doErr;
-
// Parse the arguments
if (0 != ParseMyArgs(argc, argv)) return -100;
+ DoApp app;
+ DoApp::Err doErr = DoApp::Err::errNone;
+
app.face_ar_engine.setAppMode(FaceEngine::mode(FLAG_appMode));
if (FLAG_verbose) printf("Enable temporal optimizations in detecting face and landmarks = %d\n", FLAG_temporal);
diff --git a/samples/FaceTrack/FaceTrack.exe b/samples/FaceTrack/FaceTrack.exe
index 7b6dfb1..c2afdfe 100644
Binary files a/samples/FaceTrack/FaceTrack.exe and b/samples/FaceTrack/FaceTrack.exe differ
diff --git a/samples/external/CMakeLists.txt b/samples/external/CMakeLists.txt
index f036c91..c1be0b6 100644
--- a/samples/external/CMakeLists.txt
+++ b/samples/external/CMakeLists.txt
@@ -22,12 +22,40 @@ if(MSVC)
else()
message("MSVC_VERSION ${MSVC_VERSION} is not accommodated")
endif()
-endif()
-add_library(opencv346 INTERFACE)
-set(OpenCV_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/opencv/include ${CMAKE_CURRENT_SOURCE_DIR}/opencv/include/opencv2)
-target_include_directories(opencv346 INTERFACE ${OpenCV_INCLUDE_DIR})
-target_link_libraries(opencv346 INTERFACE optimized ${CMAKE_CURRENT_SOURCE_DIR}/opencv/lib/opencv_world346.lib)
+ add_library(opencv346 INTERFACE)
+ set(OpenCV_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/opencv/include ${CMAKE_CURRENT_SOURCE_DIR}/opencv/include/opencv2)
+ target_include_directories(opencv346 INTERFACE ${OpenCV_INCLUDE_DIR})
+ target_link_libraries(opencv346 INTERFACE optimized ${CMAKE_CURRENT_SOURCE_DIR}/opencv/lib/opencv_world346.lib)
+else()
+ find_package(OpenCV REQUIRED
+ PATHS /usr /usr/local
+ PATH_SUFFIXES share/OpenCV share/opencv4)
+ add_library(OpenCV INTERFACE)
+ target_include_directories(OpenCV INTERFACE ${OpenCV_INCLUDE_DIRS})
+ target_link_libraries(OpenCV INTERFACE ${OpenCV_LIBRARIES})
+
+ message("OpenCV_INCLUDE_DIRS ${OpenCV_INCLUDE_DIRS}")
+ message("OpenCV_LIBRARIES ${OpenCV_LIBRARIES}")
+ message("OpenCV_LIBS ${OpenCV_LIBS}")
+
+ find_package(CUDA 11.3 REQUIRED)
+ add_library(CUDA INTERFACE)
+ target_include_directories(CUDA INTERFACE ${CUDA_INCLUDE_DIRS})
+ target_link_libraries(CUDA INTERFACE "${CUDA_LIBRARIES};cuda")
+
+ message("CUDA_INCLUDE_DIRS ${CUDA_INCLUDE_DIRS}")
+ message("CUDA_LIBRARIES ${CUDA_LIBRARIES}")
+
+ find_package(TensorRT 8 REQUIRED)
+ add_library(TensorRT INTERFACE)
+ target_include_directories(TensorRT INTERFACE ${TensorRT_INCLUDE_DIRS})
+ target_link_libraries(TensorRT INTERFACE ${TensorRT_LIBRARIES})
+
+ message("TensorRT_INCLUDE_DIRS ${TensorRT_INCLUDE_DIRS}")
+ message("TensorRT_LIBRARIES ${TensorRT_LIBRARIES}")
+endif()
####################
# Interface to GLM #
diff --git a/samples/utils/RenderingUtils.cpp b/samples/utils/RenderingUtils.cpp
index 31e4135..177d6bc 100644
--- a/samples/utils/RenderingUtils.cpp
+++ b/samples/utils/RenderingUtils.cpp
@@ -52,9 +52,9 @@ void draw_wireframe(const cv::Mat& image, const NvAR_FaceMesh& mesh, const NvAR_
glm::project({mesh.vertices[triangle.vec[2]].vec[0], mesh.vertices[triangle.vec[2]].vec[1], mesh.vertices[triangle.vec[2]].vec[2]},
modelview, projection, viewport);
if (are_vertices_ccw_in_screen_space(glm::vec2(p1), glm::vec2(p2), glm::vec2(p3))) {
- cv::line(image, cv::Point(p1.x, p1.y), cv::Point(p2.x, p2.y), color);
- cv::line(image, cv::Point(p2.x, p2.y), cv::Point(p3.x, p3.y), color);
- cv::line(image, cv::Point(p3.x, p3.y), cv::Point(p1.x, p1.y), color);
+ cv::line(image, cv::Point2f(p1.x, p1.y), cv::Point2f(p2.x, p2.y), color);
+ cv::line(image, cv::Point2f(p2.x, p2.y), cv::Point2f(p3.x, p3.y), color);
+ cv::line(image, cv::Point2f(p3.x, p3.y), cv::Point2f(p1.x, p1.y), color);
}
}
};
diff --git a/samples/utils/nvCVOpenCV.h b/samples/utils/nvCVOpenCV.h
index ddb9274..271d59a 100644
--- a/samples/utils/nvCVOpenCV.h
+++ b/samples/utils/nvCVOpenCV.h
@@ -58,21 +58,22 @@ inline void CVWrapperForNvCVImage(const NvCVImage *nvcvIm, cv::Mat *cvIm) {
// Wrap a cv::Mat in an NvCVImage.
inline void NVWrapperForCVMat(const cv::Mat *cvIm, NvCVImage *nvcvIm) {
static const NvCVImage_PixelFormat nvFormat[] = { NVCV_FORMAT_UNKNOWN, NVCV_Y, NVCV_YA, NVCV_BGR, NVCV_BGRA };
- static const NvCVImage_ComponentType nvType[] = { NVCV_U8, NVCV_TYPE_UNKNOWN, NVCV_U16, NVCV_S16, NVCV_S32, NVCV_F32, NVCV_F64 };
+ static const NvCVImage_ComponentType nvType[] = {NVCV_U8, NVCV_TYPE_UNKNOWN, NVCV_U16, NVCV_S16,
+ NVCV_S32, NVCV_F32, NVCV_F64, NVCV_TYPE_UNKNOWN};
nvcvIm->pixels = cvIm->data;
nvcvIm->width = cvIm->cols;
nvcvIm->height = cvIm->rows;
- nvcvIm->pitch = (unsigned)cvIm->step1();
- nvcvIm->pixelFormat = nvFormat[cvIm->channels()];
- nvcvIm->componentType = nvType[cvIm->depth()];
+ nvcvIm->pitch = (int)cvIm->step[0];
+ nvcvIm->pixelFormat = nvFormat[cvIm->channels() <= 4 ? cvIm->channels() : 0];
+ nvcvIm->componentType = nvType[cvIm->depth() & 7];
nvcvIm->bufferBytes = 0;
nvcvIm->deletePtr = nullptr;
nvcvIm->deleteProc = nullptr;
- nvcvIm->pixelBytes = (unsigned char)cvIm->elemSize();
+ nvcvIm->pixelBytes = (unsigned char)cvIm->step[1];
nvcvIm->componentBytes = (unsigned char)cvIm->elemSize1();
nvcvIm->numComponents = (unsigned char)cvIm->channels();
- nvcvIm->planar = 0;
- nvcvIm->gpuMem = 0;
+ nvcvIm->planar = NVCV_CHUNKY;
+ nvcvIm->gpuMem = NVCV_CPU;
nvcvIm->reserved[0] = 0;
nvcvIm->reserved[1] = 0;
}
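A brief usage sketch of the wrapper with the corrected field mapping; no pixels are copied, the NvCVImage merely aliases the cv::Mat's buffer. The function name is hypothetical; the commented field values follow the assignments above.

    #include "nvCVOpenCV.h"
    #include "opencv2/core.hpp"

    void WrapExample() {
      cv::Mat frame(480, 640, CV_8UC3);     // 8-bit BGR, CPU memory
      NvCVImage wrapped;
      NVWrapperForCVMat(&frame, &wrapped);  // aliases pixels, no allocation
      // wrapped.pixelFormat == NVCV_BGR, wrapped.componentType == NVCV_U8,
      // wrapped.pitch == frame.step[0], wrapped.planar == NVCV_CHUNKY,
      // wrapped.gpuMem == NVCV_CPU
    }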
diff --git a/tools/ConvertSurreyFaceModel.exe b/tools/ConvertSurreyFaceModel.exe
index 7404c86..05cd13d 100644
Binary files a/tools/ConvertSurreyFaceModel.exe and b/tools/ConvertSurreyFaceModel.exe differ
diff --git a/version.h b/version.h
index a93b723..f7dafca 100644
--- a/version.h
+++ b/version.h
@@ -23,11 +23,12 @@
#define NVIDIA_AR_SDK_VERSION_MAJOR 0
#define NVIDIA_AR_SDK_VERSION_MINOR 7
-#define NVIDIA_AR_SDK_VERSION_RELEASE 5
+#define NVIDIA_AR_SDK_VERSION_RELEASE 6
+#define NVIDIA_AR_SDK_VERSION_BUILD 2
-#define NVIDIA_AR_SDK_VERSION 0,7,5,0
+#define NVIDIA_AR_SDK_VERSION 0,7,6,2
#define NVIDIA_AR_SDK_VERSION_MAJOR_MINOR 0,7
-#define NVIDIA_AR_SDK_VERSION_STRING "0.7.5.0"
-#define NVIDIA_AR_SDK_VERSION_STRING_SHORT "0.7.5"
+#define NVIDIA_AR_SDK_VERSION_STRING "0.7.6.2"
+#define NVIDIA_AR_SDK_VERSION_STRING_SHORT "0.7.6"
#define NVIDIA_AR_SDK_VERSION_STRING_MAJOR_MINOR "0.7"
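A hedged sketch of a compile-time guard an application might derive from these macros to require SDK 0.7.6 or newer; the error message is illustrative, not part of the SDK.

    #include "version.h"

    #if (NVIDIA_AR_SDK_VERSION_MAJOR == 0) && \
        ((NVIDIA_AR_SDK_VERSION_MINOR < 7) || \
         (NVIDIA_AR_SDK_VERSION_MINOR == 7 && NVIDIA_AR_SDK_VERSION_RELEASE < 6))
    #error "This application requires NVIDIA AR SDK 0.7.6 or newer"
    #endif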