From 3c0580d9fc34dff54c68cc6b7e7595c1412ceb32 Mon Sep 17 00:00:00 2001 From: Rishabh Bali Date: Sun, 24 Dec 2023 04:02:40 +0530 Subject: [PATCH] Requested changes --- docs/userDocs/source/user/UsingClad.rst | 16 ++- docs/userDocs/source/user/tutorials.rst | 132 +++++++++++------------- 2 files changed, 67 insertions(+), 81 deletions(-) diff --git a/docs/userDocs/source/user/UsingClad.rst b/docs/userDocs/source/user/UsingClad.rst index 5fd0d2a46..3c32c3dca 100644 --- a/docs/userDocs/source/user/UsingClad.rst +++ b/docs/userDocs/source/user/UsingClad.rst @@ -223,16 +223,14 @@ that needs to be differentiated even when we want to differentiate w.r.t entire .. code-block:: cpp #include "clad/Differentiator/Differentiator.h" - double fn(double x, double arr[2]) { - return x * arr[0] * arr[1]; - } + double fn(double x, double arr[2]) { return x * arr[0] * arr[1]; } int main() { - auto fn_hessian = clad::hessian(fn, "x, arr[0:1]"); - // We have 3 independent variables thus we require space of 9. - double mat_fn[9] = {0}; - clad::array_ref mat_fn_ref(mat_fn, 9); - double num[2] = {1, 2}; - fn_hessian.execute(3, num, mat_fn_ref); + auto fn_hessian = clad::hessian(fn, "x, arr[0:1]"); + // We have 3 independent variables thus we require space of 9. + double mat_fn[9] = {0}; + clad::array_ref mat_fn_ref(mat_fn, 9); + double num[2] = {1, 2}; + fn_hessian.execute(3, num, mat_fn_ref); } Jacobian Computation diff --git a/docs/userDocs/source/user/tutorials.rst b/docs/userDocs/source/user/tutorials.rst index 92f8fb44f..a64b223a8 100644 --- a/docs/userDocs/source/user/tutorials.rst +++ b/docs/userDocs/source/user/tutorials.rst @@ -12,26 +12,23 @@ API call. .. 
code-block:: cpp - #include "clad/Differentiator/Differentiator.h" #include + #include "clad/Differentiator/Differentiator.h" - - double func(int x) { - return x * x; - } + double func(int x) { return x * x; } int main() { - /*Calling clad::differentiate to get the forward mode derivative of - the given mathematical function*/ - auto d_func = clad::differentiate(func, "x"); - // execute the generated derivative function. - std::cout << d_func.execute(/*x =*/3) < + #include "clad/Differentiator/Differentiator.h" - double f(double x, double y, double z) { - return x * y * z; - } + double f(double x, double y, double z) { return x * y * z; } int main() { - auto d_f = clad::gradient(f, "x, y"); - double dx = 0, dy = 0; - d_f.execute(/*x=*/2,/*y=*/ 3,/*z=*/ 4, &dx, &dy); - std::cout <<"dx : "<< dx << "dy :"<< dy << std::endl; + auto d_f = clad::gradient(f, "x, y"); + double dx = 0, dy = 0; + d_f.execute(/*x=*/2, /*y=*/3, /*z=*/4, &dx, &dy); + std::cout << "dx : " << dx << "dy :" << dy << std::endl; } In the above example we are differentiating w.r.t `x and y` we can also @@ -65,33 +59,32 @@ of the function w.r.t to each input. **The Hessian Mode** Clad can also produce an hessian matrix through the `clad::hessian` API call. -This API call is more or less similar to the reverse mode API call and differs -in case of an array type input argument. +It returns the hessian matrix as a flattened vector in row major format. .. 
code-block:: cpp - #include "clad/Differentiator/Differentiator.h" #include + #include "clad/Differentiator/Differentiator.h" - double f(double x, double y, double z) { - return x * y * z; - } + double f(double x, double y, double z) { return x * y * z; } - // Function with array input + // Function with array input - double f_arr(double x, double y, double z[2]) { - return x * y * z[0] * z[1]; - } + double f_arr(double x, double y, double z[2]) { return x * y * z[0] * z[1]; } int main() { - // Workflow similar to clad::gradient for non-array input arguments. - auto f_hess = clad::hessian(f, "x, y"); - double matrix_f[9] = {0}; - clad::array_ref matrix_f_ref(matrix_f, 9); - f_hess.execute(3, 4, 5, matrix_f_ref); - std::cout << "[" << matrix_f_ref[0] << ", " << matrix_f_ref[1] << matrix_f_ref[2] << "\n" - << matrix_f_ref[3] << ", " << matrix_f_ref[4] << matrix_f_ref[5] << "\n" - << matrix_f_ref[6] << ", " << matrix_f_ref[7] << matrix_f_ref[8] << "]" << "\n"; + // Workflow similar to clad::gradient for non-array input arguments. + auto f_hess = clad::hessian(f, "x, y"); + double matrix_f[9] = {0}; + clad::array_ref matrix_f_ref(matrix_f, 9); + f_hess.execute(3, 4, 5, matrix_f_ref); + std::cout << "[" << matrix_f_ref[0] << ", " << matrix_f_ref[1] + << matrix_f_ref[2] << "\n" + << matrix_f_ref[3] << ", " << matrix_f_ref[4] << matrix_f_ref[5] + << "\n" + << matrix_f_ref[6] << ", " << matrix_f_ref[7] << matrix_f_ref[8] + << "]" + << "\n"; } When arrays are involved we need to specify the array index that needs to be @@ -108,26 +101,26 @@ jacobian matrix as a flattened vector with elements arranged in row-major format .. 
code-block:: cpp + #include #include "clad/Differentiator/Differentiator.h" - #include - void f(doubl x, double y, double z, double *output) { - output[0] = x*y; - output[1] = y * y * x; - output[2] = 6 * x * y * z; + void f(double x, double y, double z, double* output) { + output[0] = x * y; + output[1] = y * y * x; + output[2] = 6 * x * y * z; } int main() { - auto f_jac = clad::jacobian(f); - - double jac[9] = {0}; - double output[3] = {0}; - f_jac.execute(3, 4, 5, output, jac); - std::cout << jac[0] << " " << jac[1] << std::endl - << jac[2] << " " << jac[3] << std::endl - << jac[4] << " " << jac[5] << std::endl - << jac[6] << " " << jac[7] << std::endl - << jac[8]<< std::endl; + auto f_jac = clad::jacobian(f); + + double jac[9] = {0}; + double output[3] = {0}; + f_jac.execute(3, 4, 5, output, jac); + std::cout << jac[0] << " " << jac[1] << std::endl + << jac[2] << " " << jac[3] << std::endl + << jac[4] << " " << jac[5] << std::endl + << jac[6] << " " << jac[7] << std::endl + << jac[8] << std::endl; } The jacobian matrix size should be equal to `no. of independent variables times @@ -139,30 +132,25 @@ an array of size 3x3 = 9. Clad is capable of annotating a given function with floating point error estimation code using reverse mode AD. - .. code-block:: cpp - #include "clad/Differentiator/Differentiator.h" #include + #include "clad/Differentiator/Differentiator.h" - void func(double x, double y) { - return x * y; - } + double func(double x, double y) { return x * y; } int main() { - - auto dfunc_error = clad::estimate_error(func); - // Used to print generated code to standard output. - dfunc_error.dump(); - double x, y, d_x, d_y, final_error = 0; - // Call execute - df.execute(x, y, &d_x, &d_y, final_error); - - std::cout << final_error; + + auto dfunc_error = clad::estimate_error(func); + // Used to print generated code to standard output. 
+ dfunc_error.dump(); + double x = 0, y = 0, d_x = 0, d_y = 0, final_error = 0; + // Call execute + dfunc_error.execute(x, y, &d_x, &d_y, final_error); + + std::cout << final_error; } The function signature is similar to `clad::gradient`, except that we need to add an extra argument of type `double&`, which is used to store the total floating-point error. - -