[DO NOT MERGE - Debug] Windows segfault debug #1621

Closed · wants to merge 38 commits

Changes from 1 commit

Commits (38)
ca89f78
Initial implementation
Dec 14, 2019
de964ba
Merge branch 'develop' into feature/vec_gen_design
Dec 15, 2019
6b54c52
Add forwarding, rev & fwd versions
Dec 16, 2019
eebfd95
Merge branch 'develop' into feature/vec_gen_design
Dec 17, 2019
2b085d1
Add autodiff tests, remove arr versions
Dec 17, 2019
ac3d5e3
Nested testing
Dec 24, 2019
77f3a59
Fix tests, update doc
Dec 26, 2019
b3a1132
Tidy doc
Dec 27, 2019
27d1b1f
Merge branch 'develop' into feature/vec_gen_design
Dec 27, 2019
a4b83a7
Cpplint
Dec 27, 2019
d97c963
Tidy missing doc
Dec 29, 2019
461f0b0
log_softmax doc errors
Dec 29, 2019
cd0a362
Merge branch 'develop' into feature/vec_gen_design
Dec 29, 2019
9fa5124
[Jenkins] auto-formatting by clang-format version 5.0.0-3~16.04.1 (ta…
stan-buildbot Dec 29, 2019
cba3a30
Fix failing test
Dec 30, 2019
e5a6b28
Merge develop
Dec 30, 2019
5a934fe
Revert head replacement
Dec 31, 2019
b7b2171
Merge branch 'develop' into feature/vec_gen_design
Dec 31, 2019
8ebea40
[Jenkins] auto-formatting by clang-format version 5.0.0-3~16.04.1 (ta…
stan-buildbot Dec 31, 2019
a73be6d
Merge commit '426ad8fe5a2858b9d367aade1b25a631ac5e97e8' into merge_af…
rok-cesnovar Jan 5, 2020
8b5cc7f
Merge commit 'd7eb73884e5fad18eaf323760e4625317e1c4c91' into merge_af…
rok-cesnovar Jan 5, 2020
df34056
Merge commit '2b2f7ddff32c12e1e253a6179bf81c1845962306' into merge_af…
rok-cesnovar Jan 5, 2020
8a7017a
Merge commit '731b5f8cf6566db4f13a06851d56cc9e54029146' into merge_af…
rok-cesnovar Jan 5, 2020
8214c93
Merge branch 'develop' into merge_after_flatten
rok-cesnovar Jan 5, 2020
d776eac
merge conflicts fix
rok-cesnovar Jan 5, 2020
09c4004
[Jenkins] auto-formatting by clang-format version 5.0.0-3~16.04.1 (ta…
stan-buildbot Jan 5, 2020
a0eb3df
fix header guard
rok-cesnovar Jan 5, 2020
4e83afb
remove include
rok-cesnovar Jan 5, 2020
2e4f6b1
Merge branch 'develop' into feature/vec_gen_design
Jan 10, 2020
febffbe
Address review comments
Jan 11, 2020
67e23b8
Merge branch 'develop' into feature/vec_gen_design
Jan 11, 2020
477cf9f
[Jenkins] auto-formatting by clang-format version 5.0.0-3~16.04.1 (ta…
stan-buildbot Jan 11, 2020
02afdb9
Fix merge error
Jan 11, 2020
8d8539a
cpplint
Jan 11, 2020
1fce5ac
Merge branch 'develop' into feature/vec_gen_design
Jan 13, 2020
f3b3286
Address comments
Jan 13, 2020
ebd9051
remove steps for debug
rok-cesnovar Jan 16, 2020
95a9372
Merge remote-tracking branch 'andrjohns/feature/vec_gen_design' into …
rok-cesnovar Jan 16, 2020
Merge branch 'develop' into feature/vec_gen_design
Andrew Johnson committed Jan 10, 2020
commit 2e4f6b1533a0f627b79abeb6f16edac6ede3430b
1 change: 1 addition & 0 deletions Jenkinsfile
@@ -11,6 +11,7 @@ def runTests(String testPath) {
def runTestsWin(String testPath) {
withEnv(['PATH+TBB=./lib/tbb']) {
bat "echo $PATH"
bat "mingw32-make.exe -f make/standalone math-libs"
bat "runTests.py -j${env.PARALLEL} ${testPath} --make-only"
try { bat "runTests.py -j${env.PARALLEL} ${testPath}" }
finally { junit 'test/**/*.xml' }
2 changes: 1 addition & 1 deletion stan/math.hpp
@@ -145,6 +145,6 @@
* type.
*/

#include <stan/math/rev/mat.hpp>
#include <stan/math/rev.hpp>

#endif
3 changes: 2 additions & 1 deletion stan/math/fwd/core/fvar.hpp
@@ -20,7 +20,7 @@ namespace math {
* direction of a single independent variable.
*
* By using reverse-mode automatic derivative variables, second-order
* derivatives may be calculated. By using fvar&lt;<var&gt;
* derivatives may be calculated. By using fvar&lt;var&gt;
* instances, third-order derivatives may be calculated. These are
* called mixed-mode automatic differentiation variable in Stan.
*
@@ -262,6 +262,7 @@ struct fvar {
return os << v.val_;
}
};

} // namespace math
} // namespace stan
#endif
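
As context for the fvar documentation above (not part of this diff), a minimal sketch of first- and second-order derivatives with forward mode and forward-over-reverse mode; the function f(x) = x * exp(x) and the input value are made up for illustration:

#include <stan/math/mix.hpp>
#include <iostream>

int main() {
  using stan::math::fvar;
  using stan::math::var;

  // First order: seed the tangent d_ with 1 so y.d_ holds df/dx.
  fvar<double> x(0.5, 1.0);
  fvar<double> y = x * stan::math::exp(x);
  std::cout << "f = " << y.val_ << ", df/dx = " << y.d_ << "\n";

  // Second order via fvar<var>: the tangent is itself a reverse-mode var.
  fvar<var> z(0.5, 1.0);
  fvar<var> w = z * stan::math::exp(z);
  w.d_.grad();  // propagate adjoints of df/dx
  std::cout << "d2f/dx2 = " << z.val_.adj() << "\n";

  stan::math::recover_memory();
  return 0;
}
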
3 changes: 2 additions & 1 deletion stan/math/fwd/fun/beta.hpp
@@ -42,7 +42,7 @@ namespace math {
\end{cases}
\f]
*
* @tparam T Type of values.
* @tparam T inner type of the fvar
* @param x1 First value
* @param x2 Second value
* @return Fvar with result beta function of arguments and gradients.
@@ -69,6 +69,7 @@ inline fvar<T> beta(const fvar<T>& x1, double x2) {
return fvar<T>(beta_ab,
x1.d_ * (digamma(x1.val_) - digamma(x1.val_ + x2)) * beta_ab);
}

} // namespace math
} // namespace stan
#endif
2 changes: 1 addition & 1 deletion stan/math/fwd/fun/cbrt.hpp
@@ -12,7 +12,7 @@ namespace math {
/**
* Return cube root of specified argument.
*
* @tparam T Scalar type of autodiff variable.
* @tparam T inner type of the fvar
* @param x Argument.
* @return Cube root of argument.
*/
3 changes: 2 additions & 1 deletion stan/math/fwd/fun/digamma.hpp
@@ -14,7 +14,7 @@ namespace math {
* Return the derivative of the log gamma function at the
* specified argument.
*
* @tparam T scalar type of autodiff variable
* @tparam T inner type of the fvar
* @param[in] x argument
* @return derivative of the log gamma function at the specified
* argument
@@ -23,6 +23,7 @@ template <typename T>
inline fvar<T> digamma(const fvar<T>& x) {
return fvar<T>(digamma(x.val_), x.d_ * trigamma(x.val_));
}

} // namespace math
} // namespace stan
#endif
4 changes: 2 additions & 2 deletions stan/math/fwd/fun/falling_factorial.hpp
@@ -17,19 +17,19 @@ namespace math {
* Will throw for NaN x and for negative n, as
* implemented in primitive function.
*
* @tparam T Scalar type of autodiff variable.
* @tparam T inner type of the fvar
* @param x Argument.
* @param n Argument
* @return tangent of falling factorial at arguments.
*/

template <typename T>
inline fvar<T> falling_factorial(const fvar<T>& x, int n) {
T falling_fact(falling_factorial(x.val_, n));
return fvar<T>(
falling_fact,
falling_fact * (digamma(x.val_ + 1) - digamma(x.val_ - n + 1)) * x.d_);
}

} // namespace math
} // namespace stan
#endif
6 changes: 3 additions & 3 deletions stan/math/fwd/fun/fdim.hpp
@@ -11,7 +11,7 @@ namespace math {
/**
* Return the positive difference of the specified values (C++11).
*
* @tparam T Scalar type of autodiff variables.
* @tparam T inner type of the fvar
* @param x First argument.
* @param y Second argument.
* @return Return the differences of the arguments if it is
@@ -29,7 +29,7 @@ inline fvar<T> fdim(const fvar<T>& x, const fvar<T>& y) {
/**
* Return the positive difference of the specified values (C++11).
*
* @tparam T Scalar type of autodiff variables.
* @tparam T inner type of the fvar
* @param x First argument.
* @param y Second argument.
* @return Return the differences of the arguments if it is
@@ -47,7 +47,7 @@ inline fvar<T> fdim(const fvar<T>& x, double y) {
/**
* Return the positive difference of the specified values (C++11).
*
* @tparam T Scalar type of autodiff variables.
* @tparam T inner type of the fvar
* @param x First argument.
* @param y Second argument.
* @return Return the differences of the arguments if it is
4 changes: 4 additions & 0 deletions stan/math/fwd/fun/fmax.hpp
@@ -14,6 +14,7 @@ namespace math {
* Return the greater of the two specified arguments. If one is
* not-a-number, return the other.
*
* @tparam T inner type of the fvar
* @param x1 First argument.
* @param x2 Second argument.
* @return maximum of arguments, and if one is NaN return the other
@@ -41,6 +42,7 @@ inline fvar<T> fmax(const fvar<T>& x1, const fvar<T>& x2) {
* Return the greater of the two specified arguments. If one is
* not-a-number, return the other.
*
* @tparam T inner type of the fvar
* @param x1 First argument.
* @param x2 Second argument.
* @return maximum of arguments, and if one is NaN return the other
@@ -68,6 +70,7 @@ inline fvar<T> fmax(double x1, const fvar<T>& x2) {
* Return the greater of the two specified arguments. If one is
* not-a-number, return the other.
*
* @tparam T inner type of the fvar
* @param x1 First argument.
* @param x2 Second argument.
* @return maximum of arguments, and if one is NaN return the other
@@ -90,6 +93,7 @@ inline fvar<T> fmax(const fvar<T>& x1, double x2) {
return fvar<T>(x2, 0.0);
}
}

} // namespace math
} // namespace stan
#endif
2 changes: 1 addition & 1 deletion stan/math/fwd/fun/grad_inc_beta.hpp
@@ -23,7 +23,7 @@ namespace math {
* Uses the equivalence to a hypergeometric function. See
* http://dlmf.nist.gov/8.17#ii
*
* @tparam T type of fvar
* @tparam T inner type of the fvar
* @param[out] g1 d/da
* @param[out] g2 d/db
* @param[in] a a
18 changes: 9 additions & 9 deletions stan/math/fwd/fun/hypot.hpp
@@ -10,16 +10,16 @@ namespace stan {
namespace math {

/**
* Return the length of the hypoteneuse of a right triangle with
* Return the length of the hypotenuse of a right triangle with
* opposite and adjacent side lengths given by the specified
* arguments (C++11). In symbols, if the arguments are
* <code>1</code> and <code>x2</code>, the result is <code>sqrt(x1 *
* x1 + x2 * x2)</code>.
*
* @tparam T Scalar type of autodiff variables.
* @tparam T inner type of the fvar
* @param x1 First argument.
* @param x2 Second argument.
* @return Length of hypoteneuse of right triangle with opposite
* @return Length of hypotenuse of right triangle with opposite
* and adjacent side lengths x1 and x2.
*/
template <typename T>
@@ -30,16 +30,16 @@ inline fvar<T> hypot(const fvar<T>& x1, const fvar<T>& x2) {
}

/**
* Return the length of the hypoteneuse of a right triangle with
* Return the length of the hypotenuse of a right triangle with
* opposite and adjacent side lengths given by the specified
* arguments (C++11). In symbols, if the arguments are
* <code>1</code> and <code>x2</code>, the result is <code>sqrt(x1 *
* x1 + x2 * x2)</code>.
*
* @tparam T Scalar type of autodiff variable.
* @tparam T inner type of the fvar
* @param x1 First argument.
* @param x2 Second argument.
* @return Length of hypoteneuse of right triangle with opposite
* @return Length of hypotenuse of right triangle with opposite
* and adjacent side lengths x1 and x2.
*/
template <typename T>
@@ -50,16 +50,16 @@ inline fvar<T> hypot(const fvar<T>& x1, double x2) {
}

/**
* Return the length of the hypoteneuse of a right triangle with
* Return the length of the hypotenuse of a right triangle with
* opposite and adjacent side lengths given by the specified
* arguments (C++11). In symbols, if the arguments are
* <code>1</code> and <code>x2</code>, the result is <code>sqrt(x1 *
* x1 + x2 * x2)</code>.
*
* @tparam T Scalar type of autodiff variable.
* @tparam T inner type of the fvar
* @param x1 First argument.
* @param x2 Second argument.
* @return Length of hypoteneuse of right triangle with opposite
* @return Length of hypotenuse of right triangle with opposite
* and adjacent side lengths x1 and x2.
*/
template <typename T>
4 changes: 2 additions & 2 deletions stan/math/fwd/fun/inv_logit.hpp
@@ -12,8 +12,7 @@ namespace math {
/**
* Returns the inverse logit function applied to the argument.
*
* @tparam T scalar type of forward-mode autodiff variable
* argument.
* @tparam T inner type of the fvar
* @param x argument
* @return inverse logit of argument
*/
@@ -24,6 +23,7 @@ inline fvar<T> inv_logit(const fvar<T>& x) {
return fvar<T>(inv_logit(x.val_),
x.d_ * inv_logit(x.val_) * (1 - inv_logit(x.val_)));
}

} // namespace math
} // namespace stan
#endif
1 change: 1 addition & 0 deletions stan/math/fwd/fun/is_inf.hpp
@@ -13,6 +13,7 @@ namespace math {
*
* Delegates to <code>is_inf</code>.
*
* @tparam T inner type of the fvar
* @param x Value to test.
* @return <code>1</code> if the value is infinite and <code>0</code> otherwise.
*/
1 change: 1 addition & 0 deletions stan/math/fwd/fun/is_nan.hpp
@@ -14,6 +14,7 @@ namespace math {
*
* Delegates to <code>is_nan</code>.
*
* @tparam T inner type of the fvar
* @param x Value to test.
* @return <code>1</code> if the value is NaN and <code>0</code> otherwise.
*/
3 changes: 2 additions & 1 deletion stan/math/fwd/fun/ldexp.hpp
@@ -12,7 +12,7 @@ namespace math {
* Returns the product of a (the significand) times
* 2 to power b (the exponent).
*
* @tparam T Scalar type of significand
* @tparam T inner type of the fvar
* @param[in] a the significand
* @param[in] b an integer that is the exponent
* @return product of a times 2 to the power b
@@ -21,6 +21,7 @@ template <typename T>
inline fvar<T> ldexp(const fvar<T>& a, int b) {
return fvar<T>(ldexp(a.val_, b), ldexp(a.d_, b));
}

} // namespace math
} // namespace stan

2 changes: 1 addition & 1 deletion stan/math/fwd/fun/lgamma.hpp
@@ -13,7 +13,7 @@ namespace math {
* Return the natural logarithm of the gamma function applied to
* the specified argument.
*
* @tparam T Scalar type of autodiff variable.
* @tparam T inner type of the fvar
* @param x Argument.
* @return natural logarithm of the gamma function of argument.
*/
2 changes: 1 addition & 1 deletion stan/math/fwd/fun/log1m_exp.hpp
@@ -15,7 +15,7 @@ namespace math {
* Return the natural logarithm of one minus the
* exponentiation of the specified argument.
*
* @tparam T Scalar type of autodiff variable.
* @tparam T inner type of the fvar
* @param x Argument.
* @return log of one minus the exponentiation of the argument.
*/
3 changes: 1 addition & 2 deletions stan/math/fwd/fun/log1m_inv_logit.hpp
@@ -13,8 +13,7 @@ namespace math {
* Return the natural logarithm of one minus the inverse logit of
* the specified argument.
*
* @tparam T scalar type of forward-mode autodiff variable
* argument.
* @tparam T inner type of the fvar
* @param x argument
* @return log of one minus the inverse logit of the argument
*/
2 changes: 1 addition & 1 deletion stan/math/fwd/fun/log2.hpp
@@ -12,7 +12,7 @@ namespace math {
/**
* Return the base two logarithm of the specified argument.
*
* @tparam T scalar type
* @tparam T inner type of the fvar
* @param x argument
* @return base two logarithm of argument
*/
3 changes: 1 addition & 2 deletions stan/math/fwd/fun/log_inv_logit_diff.hpp
@@ -28,8 +28,7 @@ namespace math {
\frac{\partial }{\partial x} = -\frac{e^y}{e^x-e^y}-\frac{e^y}{e^y+1}
\f]
*
* @tparam T1 Type of x argument.
* @tparam T2 Type of y argument.
* @tparam T inner type of the fvar
* @param x Argument.
* @param y Argument.
* @return Fvar with result of log difference of inverse logits of arguments
3 changes: 1 addition & 2 deletions stan/math/fwd/fun/log_mix.hpp
@@ -87,8 +87,7 @@ inline void log_mix_partial_helper(
* {\left( \theta \exp(\lambda_1) + (1 - \theta) \exp(\lambda_2) \right)}
* \f]
*
* @tparam T scalar type.
*
* @tparam T inner type of the fvar
* @param[in] theta mixing proportion in [0, 1].
* @param[in] lambda1 first log density.
* @param[in] lambda2 second log density.
12 changes: 10 additions & 2 deletions stan/math/fwd/fun/mdivide_left_ldlt.hpp
@@ -12,12 +12,20 @@ namespace math {

/**
* Returns the solution of the system Ax=b given an LDLT_factor of A
*
* @tparam R1 number of rows in the LDLT_factor, can be Eigen::Dynamic
* @tparam C1 number of columns in the LDLT_factor, can be Eigen::Dynamic
* @tparam R2 number of rows in the right-hand side matrix, can be
* Eigen::Dynamic
* @tparam C2 number of columns in the right-hand side matrix, can be
* Eigen::Dynamic
* @tparam T2 type of elements in the right-hand side matrix or vector
*
* @param A LDLT_factor
* @param b Right hand side matrix or vector.
* @param b right-hand side matrix or vector
* @return x = b A^-1, solution of the linear system.
* @throws std::domain_error if rows of b don't match the size of A.
*/

template <int R1, int C1, int R2, int C2, typename T2>
inline Eigen::Matrix<fvar<T2>, R1, C2> mdivide_left_ldlt(
const LDLT_factor<double, R1, C1> &A,
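
The hunk above shows only the start of the fvar overload. For context, a hedged usage sketch of solving Ax = b through an LDLT_factor with a forward-mode right-hand side; the 2 x 2 system below is made up, not taken from this diff:

#include <stan/math/mix.hpp>

void mdivide_left_ldlt_example() {
  using stan::math::fvar;

  Eigen::MatrixXd A(2, 2);
  A << 4.0, 1.0,
       1.0, 3.0;
  // Factor A once; the LDLT_factor can be reused across right-hand sides.
  stan::math::LDLT_factor<double, Eigen::Dynamic, Eigen::Dynamic> A_ldlt(A);

  Eigen::Matrix<fvar<double>, Eigen::Dynamic, 1> b(2);
  b << fvar<double>(1.0, 1.0), fvar<double>(2.0, 0.0);

  // Solves A x = b; values and tangents propagate through the solve.
  Eigen::Matrix<fvar<double>, Eigen::Dynamic, 1> x
      = stan::math::mdivide_left_ldlt(A_ldlt, b);
}
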
3 changes: 3 additions & 0 deletions stan/math/fwd/fun/owens_t.hpp
@@ -16,6 +16,7 @@ namespace math {
* Return Owen's T function applied to the specified
* arguments.
*
* @tparam T inner type of the fvar
* @param x1 First argument.
* @param x2 Second argument.
* @return Owen's T function applied to the specified arguments.
@@ -37,6 +38,7 @@ inline fvar<T> owens_t(const fvar<T>& x1, const fvar<T>& x2) {
/**
* Return Owen's T function applied to the specified arguments.
*
* @tparam T inner type of the fvar
* @param x1 First argument.
* @param x2 Second argument.
* @return Owen's T function applied to the specified arguments.
@@ -55,6 +57,7 @@ inline fvar<T> owens_t(double x1, const fvar<T>& x2) {
/**
* Return Owen's T function applied to the specified arguments.
*
* @tparam T inner type of the fvar
* @param x1 First argument.
* @param x2 Second argument.
* @return Owen's T function applied to the specified arguments.
3 changes: 1 addition & 2 deletions stan/math/fwd/fun/primitive_value.hpp
@@ -13,7 +13,7 @@ namespace math {
* autodiff variable. This function applies recursively to
* higher-order autodiff types to return a primitive double value.
*
* @tparam T scalar type for autodiff variable.
* @tparam T inner type of the fvar
* @param v input variable.
* @return primitive value of input.
*/
@@ -23,7 +23,6 @@ inline double primitive_value(const fvar<T>& v) {
}

} // namespace math

} // namespace stan

#endif
3 changes: 2 additions & 1 deletion stan/math/fwd/fun/rising_factorial.hpp
@@ -16,7 +16,7 @@ namespace math {
* Will throw for NaN x and for negative n, as
* implemented in primitive function.
*
* @tparam T Scalar type of autodiff variable.
* @tparam T inner type of the fvar
* @param x Argument.
* @param n Argument
* @return tangent of rising factorial at arguments.
@@ -28,6 +28,7 @@ inline fvar<T> rising_factorial(const fvar<T>& x, int n) {
return fvar<T>(rising_fact,
rising_fact * x.d_ * (digamma(x.val_ + n) - digamma(x.val_)));
}

} // namespace math
} // namespace stan
#endif
2 changes: 1 addition & 1 deletion stan/math/fwd/fun/round.hpp
@@ -16,7 +16,7 @@ namespace math {
*
* The derivative is always zero.
*
* @tparam T Scalar type for autodiff variable.
* @tparam T inner type of the fvar
* @param x Argument.
* @return The rounded value of the argument.
*/
55 changes: 31 additions & 24 deletions stan/math/fwd/fun/squared_distance.hpp
@@ -13,9 +13,10 @@ namespace math {
* Returns the squared distance between the specified vectors
* of the same dimensions.
*
* @tparam R Rows at compile time of vector inputs
* @tparam C columns at compile time of vector inputs
* @tparam T Child scalar type of fvar vector input
* @tparam T inner type of the fvar vector
* @tparam R number of rows, can be Eigen::Dynamic
* @tparam C number of columns, can be Eigen::Dynamic
*
* @param v1 First vector.
* @param v2 Second vector.
* @return Dot product of the vectors.
@@ -36,11 +37,12 @@ inline fvar<T> squared_distance(const Eigen::Matrix<fvar<T>, R, C>& v1,
* Returns the squared distance between the specified vectors
* of the same dimensions.
*
* @tparam R1 Rows at compile time of first vector input
* @tparam C1 Columns at compile time of first vector input
* @tparam R2 Rows at compile time of second vector input
* @tparam C2 Columns at compile time of second vector input
* @tparam T Child scalar type of fvar vector input
* @tparam T inner type of the fvar vector
* @tparam R1 number of rows in the first vector, can be Eigen::Dynamic
* @tparam C1 number of columns in the first vector, can be Eigen::Dynamic
* @tparam R2 number of rows in the second vector, can be Eigen::Dynamic
* @tparam C2 number of columns in the second vector, can be Eigen::Dynamic
*
* @param v1 First vector.
* @param v2 Second vector.
* @return Dot product of the vectors.
@@ -62,9 +64,10 @@ inline fvar<T> squared_distance(const Eigen::Matrix<fvar<T>, R1, C1>& v1,
* Returns the squared distance between the specified vectors
* of the same dimensions.
*
* @tparam R Rows at compile time of vector inputs
* @tparam C columns at compile time of vector inputs
* @tparam T Child scalar type of fvar vector input
* @tparam T inner type of the fvar vector
* @tparam R number of rows, can be Eigen::Dynamic
* @tparam C number of columns, can be Eigen::Dynamic
*
* @param v1 First vector.
* @param v2 Second vector.
* @return Dot product of the vectors.
@@ -85,11 +88,12 @@ inline fvar<T> squared_distance(const Eigen::Matrix<double, R, C>& v1,
* Returns the squared distance between the specified vectors
* of the same dimensions.
*
* @tparam R1 Rows at compile time of first vector input
* @tparam C1 Columns at compile time of first vector input
* @tparam R2 Rows at compile time of second vector input
* @tparam C2 Columns at compile time of second vector input
* @tparam T Child scalar type of fvar vector input
* @tparam T inner type of the fvar vector
* @tparam R1 number of rows in the first vector, can be Eigen::Dynamic
* @tparam C1 number of columns in the first vector, can be Eigen::Dynamic
* @tparam R2 number of rows in the second vector, can be Eigen::Dynamic
* @tparam C2 number of columns in the second vector, can be Eigen::Dynamic
*
* @param v1 First vector.
* @param v2 Second vector.
* @return Dot product of the vectors.
@@ -106,13 +110,15 @@ inline fvar<T> squared_distance(const Eigen::Matrix<double, R1, C1>& v1,
Eigen::Matrix<fvar<T>, R2, C2> v3 = subtract(t_v1, v2);
return dot_self(v3);
}

/**
* Returns the squared distance between the specified vectors
* of the same dimensions.
*
* @tparam R Rows at compile time of vector inputs
* @tparam C columns at compile time of vector inputs
* @tparam T Child scalar type of fvar vector input
* @tparam T inner type of the fvar vector
* @tparam R number of rows, can be Eigen::Dynamic
* @tparam C number of columns, can be Eigen::Dynamic
*
* @param v1 First vector.
* @param v2 Second vector.
* @return Dot product of the vectors.
@@ -133,11 +139,12 @@ inline fvar<T> squared_distance(const Eigen::Matrix<fvar<T>, R, C>& v1,
* Returns the squared distance between the specified vectors
* of the same dimensions.
*
* @tparam R1 Rows at compile time of first vector input
* @tparam C1 Columns at compile time of first vector input
* @tparam R2 Rows at compile time of second vector input
* @tparam C2 Columns at compile time of second vector input
* @tparam T Child scalar type of fvar vector input
* @tparam T inner type of the fvar vector
* @tparam R1 number of rows in the first vector, can be Eigen::Dynamic
* @tparam C1 number of columns in the first vector, can be Eigen::Dynamic
* @tparam R2 number of rows in the second vector, can be Eigen::Dynamic
* @tparam C2 number of columns in the second vector, can be Eigen::Dynamic
*
* @param v1 First vector.
* @param v2 Second vector.
* @return Dot product of the vectors.
9 changes: 5 additions & 4 deletions stan/math/fwd/fun/sum.hpp
@@ -14,7 +14,7 @@ namespace math {
* Return the sum of the entries of the specified standard
* vector.
*
* @tparam T Type of vector entries.
* @tparam T type of elements in the vector
* @param m Vector.
* @return Sum of vector entries.
*/
@@ -35,9 +35,10 @@ inline fvar<T> sum(const std::vector<fvar<T> >& m) {
/**
* Return the sum of the entries of the specified matrix.
*
* @tparam T Type of matrix entries.
* @tparam R Row type of matrix.
* @tparam C Column type of matrix.
* @tparam T inner type of the fvar matrix
* @tparam R number of rows, can be Eigen::Dynamic
* @tparam C number of columns, can be Eigen::Dynamic
*
* @param m Matrix.
* @return Sum of matrix entries.
*/
3 changes: 2 additions & 1 deletion stan/math/fwd/fun/tgamma.hpp
@@ -13,7 +13,7 @@ namespace math {
* Return the result of applying the gamma function to the
* specified argument.
*
* @tparam T Scalar type of autodiff variable.
* @tparam T inner type of the fvar
* @param x Argument.
* @return Gamma function applied to argument.
*/
@@ -22,6 +22,7 @@ inline fvar<T> tgamma(const fvar<T>& x) {
T u = tgamma(x.val_);
return fvar<T>(u, x.d_ * u * digamma(x.val_));
}

} // namespace math
} // namespace stan
#endif
16 changes: 12 additions & 4 deletions stan/math/fwd/fun/to_fvar.hpp
@@ -59,7 +59,7 @@ inline std::vector<fvar<T>> to_fvar(const std::vector<T>& v,
/**
* Specialization of to_fvar for const fvar input
*
* @tparam The inner type of the fvar.
* @tparam T inner type of the fvar
* @param[in,out] v A vector of forward automatic differentiation variable.
* @return The input vector of forward automatic differentiation variable.
*/
@@ -71,7 +71,7 @@ inline const std::vector<fvar<T>>& to_fvar(const std::vector<fvar<T>>& v) {
/**
* Specialization of to_fvar for non-const fvar input
*
* @tparam The inner type of the fvar.
* @tparam T inner type of the fvar
* @param[in,out] v A vector of forward automatic differentiation variable.
* @return The input vector of forward automatic differentiation variable.
*/
@@ -83,21 +83,29 @@ inline std::vector<fvar<T>>& to_fvar(std::vector<fvar<T>>& v) {
/**
* Specialization of to_fvar for const matrices of fvars
*
* @tparam T type of elements in the matrix
* @tparam R number of rows, can be Eigen::Dynamic
* @tparam C number of columns, can be Eigen::Dynamic
*
* @param[in,out] m A matrix of forward automatic differentation variables.
* @return The input matrix of forward automatic differentiation variables.
*/
template <int R, int C, typename T>
template <typename T, int R, int C>
inline const Eigen::Matrix<T, R, C>& to_fvar(const Eigen::Matrix<T, R, C>& m) {
return m;
}

/**
* Specialization of to_fvar for non-const matrices of fvars
*
* @tparam T type of elements in the matrix
* @tparam R number of rows, can be Eigen::Dynamic
* @tparam C number of columns, can be Eigen::Dynamic
*
* @param[in,out] m A matrix of forward automatic differentation variables.
* @return The input matrix of forward automatic differentiation variables.
*/
template <int R, int C, typename T>
template <typename T, int R, int C>
inline Eigen::Matrix<T, R, C>& to_fvar(Eigen::Matrix<T, R, C>& m) {
return m;
}
1 change: 1 addition & 0 deletions stan/math/fwd/fun/trigamma.hpp
@@ -13,6 +13,7 @@ namespace math {
* argument (i.e., the second derivative of the log Gamma function
* at the specified argument).
*
* @tparam T inner type of the fvar
* @param u argument
* @return trigamma function at argument
*/
2 changes: 1 addition & 1 deletion stan/math/fwd/fun/trunc.hpp
@@ -12,7 +12,7 @@ namespace math {
* Return the nearest integral value that is not larger in
* magnitude than the specified argument.
*
* @tparam T Scalar type of autodiff variable.
* @tparam T inner type of the fvar
* @param[in] x Argument.
* @return The truncated argument.
*/
1 change: 1 addition & 0 deletions stan/math/fwd/fun/value_of.hpp
@@ -10,6 +10,7 @@ namespace math {
/**
* Return the value of the specified variable.
*
* @tparam T inner type of the fvar
* @param v Variable.
* @return Value of variable.
*/
4 changes: 1 addition & 3 deletions stan/math/fwd/fun/value_of_rec.hpp
@@ -11,9 +11,7 @@ namespace math {
/**
* Return the value of the specified variable.
*
* T must implement value_of_rec.
*
* @tparam T Scalar type
* @tparam T inner type of the fvar, must implement value_of_rec
* @param v Variable.
* @return Value of variable.
*/
1 change: 1 addition & 0 deletions stan/math/fwd/functor/gradient.hpp
@@ -29,6 +29,7 @@ namespace math {
* fully unfolded expression for the function applied to the
* argument, independently of dimension.
*
* @tparam T type of the elements in the vector
* @tparam F Type of function
* @param[in] f Function
* @param[in] x Argument to function
4 changes: 2 additions & 2 deletions stan/math/fwd/functor/hessian.hpp
@@ -29,8 +29,8 @@ namespace math {
* of functions with appropriately general namespace imports that
* eventually depend on functions defined in Stan.
*
* @tparam T Type of underlying scalar
* @tparam F Type of function
* @tparam T type of elements in the vector and matrix
* @tparam F type of function
* @param[in] f Function
* @param[in] x Argument to function
* @param[out] fx Function applied to argument
5 changes: 4 additions & 1 deletion stan/math/fwd/meta/operands_and_partials.hpp
@@ -11,6 +11,7 @@
namespace stan {
namespace math {
namespace internal {

template <typename Dx>
class ops_partials_edge<Dx, fvar<Dx>> {
public:
@@ -38,7 +39,7 @@ class ops_partials_edge<Dx, fvar<Dx>> {
* primitives, reverse mode, and forward mode variables
* seamlessly.
*
* Conceptually, this class is used when we want to manually calculate
* Conceptually, this class is used when we want to calculate manually
* the derivative of a function and store this manual result on the
* autodiff stack in a sort of "compressed" form. Think of it like an
* easy-to-use interface to rev/core/precomputed_gradients.
@@ -108,6 +109,7 @@ class operands_and_partials<Op1, Op2, Op3, Op4, Op5, fvar<Dx>> {
};

namespace internal {

// Vectorized Univariate
template <typename Dx>
class ops_partials_edge<Dx, std::vector<fvar<Dx>>> {
@@ -219,6 +221,7 @@ class ops_partials_edge<Dx, std::vector<std::vector<fvar<Dx>>>> {
return derivative;
}
};

} // namespace internal
} // namespace math
} // namespace stan
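
To make the "compressed" derivative storage described above concrete, a hedged sketch of the usual call pattern; the toy log density f(x, y) = x * y and the helper name are illustrative, not part of this diff (the sketch targets double, var, and fvar<double> operands):

#include <stan/math/mix.hpp>

template <typename T1, typename T2>
stan::return_type_t<T1, T2> toy_lp(const T1& x, const T2& y) {
  stan::math::operands_and_partials<T1, T2> ops(x, y);
  double xv = stan::math::value_of(x);
  double yv = stan::math::value_of(y);
  double lp = xv * yv;
  if (!stan::is_constant_all<T1>::value) {
    ops.edge1_.partials_[0] += yv;  // d lp / d x
  }
  if (!stan::is_constant_all<T2>::value) {
    ops.edge2_.partials_[0] += xv;  // d lp / d y
  }
  // build() stores the value plus the accumulated partials on the autodiff
  // stack (or simply returns the double when both operands are primitives).
  return ops.build(lp);
}
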
14 changes: 14 additions & 0 deletions stan/math/mix.hpp
@@ -0,0 +1,14 @@
#ifndef STAN_MATH_MIX_HPP
#define STAN_MATH_MIX_HPP

#include <stan/math/mix/meta.hpp>

#include <stan/math/prim/arr.hpp>
#include <stan/math/prim/mat.hpp>
#include <stan/math/prim/scal.hpp>
#include <stan/math/fwd.hpp>
#include <stan/math/rev.hpp>
#include <stan/math/mix/fun.hpp>
#include <stan/math/mix/functor.hpp>

#endif
10 changes: 0 additions & 10 deletions stan/math/mix/arr.hpp

This file was deleted.

6 changes: 6 additions & 0 deletions stan/math/mix/fun.hpp
@@ -0,0 +1,6 @@
#ifndef STAN_MATH_MIX_FUN_HPP
#define STAN_MATH_MIX_FUN_HPP

#include <stan/math/mix/fun/typedefs.hpp>

#endif
File renamed without changes.
15 changes: 2 additions & 13 deletions stan/math/mix/mat.hpp → stan/math/mix/functor.hpp
@@ -1,16 +1,5 @@
#ifndef STAN_MATH_MIX_MAT_HPP
#define STAN_MATH_MIX_MAT_HPP

#include <stan/math/prim/mat/fun/Eigen.hpp>

#include <stan/math/mix/mat/fun/typedefs.hpp>
#include <stan/math/mix/meta.hpp>

#include <stan/math/rev/mat.hpp>

#include <stan/math/fwd.hpp>

#include <stan/math/prim/mat.hpp>
#ifndef STAN_MATH_MIX_FUNCTOR_HPP
#define STAN_MATH_MIX_FUNCTOR_HPP

#include <stan/math/mix/functor/derivative.hpp>
#include <stan/math/mix/functor/finite_diff_grad_hessian.hpp>
10 changes: 0 additions & 10 deletions stan/math/mix/scal.hpp

This file was deleted.

1 change: 1 addition & 0 deletions stan/math/opencl/kernel_generator.hpp
@@ -15,6 +15,7 @@
#include <stan/math/opencl/kernel_generator/scalar.hpp>
#include <stan/math/opencl/kernel_generator/binary_operation.hpp>
#include <stan/math/opencl/kernel_generator/unary_function_cl.hpp>
#include <stan/math/opencl/kernel_generator/block.hpp>

#endif
#endif
240 changes: 240 additions & 0 deletions stan/math/opencl/kernel_generator/block.hpp
@@ -0,0 +1,240 @@
#ifndef STAN_MATH_OPENCL_KERNEL_GENERATOR_BLOCK_HPP
#define STAN_MATH_OPENCL_KERNEL_GENERATOR_BLOCK_HPP
#ifdef STAN_OPENCL

#include <stan/math/prim/meta.hpp>
#include <stan/math/prim/err/throw_domain_error.hpp>
#include <stan/math/opencl/matrix_cl_view.hpp>
#include <stan/math/opencl/kernel_generator/type_str.hpp>
#include <stan/math/opencl/kernel_generator/name_generator.hpp>
#include <stan/math/opencl/kernel_generator/operation_cl_lhs.hpp>
#include <stan/math/opencl/kernel_generator/as_operation_cl.hpp>
#include <stan/math/opencl/kernel_generator/is_valid_expression.hpp>
#include <set>
#include <string>
#include <type_traits>
#include <utility>

namespace stan {
namespace math {

/**
* Represents submatrix block in kernel generator expressions.
* @tparam Derived derived type
* @tparam T type of argument
*/
template <typename T>
class block_
: public operation_cl_lhs<block_<T>,
typename std::remove_reference_t<T>::Scalar, T> {
public:
using Scalar = typename std::remove_reference_t<T>::Scalar;
using base = operation_cl_lhs<block_<T>, Scalar, T>;
using base::var_name;

protected:
int start_row_, start_col_, rows_, cols_;
using base::arguments_;

public:
/**
* Constructor
* @param a expression
* @param start_row first row of block
* @param start_col first column of a block
* @param rows number of rows in block
* @param cols number of columns in block
*/
block_(T&& a, int start_row, int start_col, int rows, int cols)
: base(std::forward<T>(a)),
start_row_(start_row),
start_col_(start_col),
rows_(rows),
cols_(cols) {
if ((a.rows() != base::dynamic && (start_row + rows) > a.rows())
|| (a.cols() != base::dynamic && (start_col + cols) > a.cols())) {
throw_domain_error("block", "block of \"a\"", " is out of bounds", "");
}
}

/**
* Generates kernel code for this expression.
* @param i row index variable name
* @param j column index variable name
* @param var_name_arg name of the variable in kernel that holds argument to
* this expression
* @return part of kernel with code for this expression
*/
inline kernel_parts generate(const std::string& i, const std::string& j,
const std::string& var_name_arg) const {
kernel_parts res;
res.body
= type_str<Scalar>() + " " + var_name + " = " + var_name_arg + ";\n";
res.args = "int " + var_name + "_i, int " + var_name + "_j, ";
return res;
}

/**
* Sets offset of block to indices of the argument expression
* @param[in, out] i row index
* @param[in, out] j column index
*/
inline void modify_argument_indices(std::string& i, std::string& j) const {
i = "(" + i + " + " + var_name + "_i)";
j = "(" + j + " + " + var_name + "_j)";
}

/**
* Generates kernel code for this and nested expressions if this expression
* appears on the left hand side of an assignment.
* @param i row index variable name
* @param j column index variable name
* @param var_name_arg name of the variable in kernel that holds argument to
* this expression
* @return part of kernel with code for this expression
*/
inline kernel_parts generate_lhs(const std::string& i, const std::string& j,
const std::string& var_name_arg) const {
kernel_parts res;
res.args = "int " + var_name + "_i, int " + var_name + "_j, ";
return res;
}

/**
* Sets kernel arguments for this and nested expressions.
* @param[in,out] generated set of expressions that already set their kernel
* arguments
* @param kernel kernel to set arguments on
* @param[in,out] arg_num consecutive number of the first argument to set.
* This is incremented for each argument set by this function.
*/
inline void set_args(std::set<const operation_cl_base*>& generated,
cl::Kernel& kernel, int& arg_num) const {
if (generated.count(this) == 0) {
generated.insert(this);
std::get<0>(arguments_).set_args(generated, kernel, arg_num);
kernel.setArg(arg_num++, start_row_);
kernel.setArg(arg_num++, start_col_);
}
}

/**
* View of a matrix that would be the result of evaluating this expression.
* @return view
*/
inline matrix_cl_view view() const {
matrix_cl_view view;
if (bottom_diagonal() < 0) {
view = matrix_cl_view::Lower;
} else {
view = matrix_cl_view::Diagonal;
}
if (top_diagonal() > 0) {
view = either(view, matrix_cl_view::Upper);
}
return view;
}

/**
* Number of rows of a matrix that would be the result of evaluating this
* expression.
* @return number of rows
*/
inline int rows() const { return rows_; }

/**
* Number of columns of a matrix that would be the result of evaluating this
* expression.
* @return number of columns
*/
inline int cols() const { return cols_; }

/**
* Sets view of the underlying matrix depending on which part is written.
* @param bottom_diagonal Index of the top sub- or super- diagonal written
* with nonzero elements.
* @param top_diagonal Index of the top sub- or super- diagonal written with
* nonzero elements.
* @param bottom_zero_diagonal Index of the top sub- or super- diagonal
* written with zeros if it ie more extreme than \c bottom_diagonal. Otherwise
* it should be set to equal value as \c bottom_diagonal.
* @param top_zero_diagonal Index of the top sub- or super- diagonal written
* with zeros if it ie more extreme than \c top_diagonal. Otherwise it should
* be set to equal value as \c top_diagonal.
*/
inline void set_view(int bottom_diagonal, int top_diagonal,
int bottom_zero_diagonal, int top_zero_diagonal) const {
int change = start_col_ - start_row_;
std::get<0>(arguments_)
.set_view(bottom_diagonal + change, top_diagonal + change,
bottom_zero_diagonal + change, top_zero_diagonal + change);
}

/**
* Determine index of bottom diagonal written.
* @return number of columns
*/
inline int bottom_diagonal() const {
return std::max(
std::get<0>(arguments_).bottom_diagonal() - start_col_ + start_row_,
1 - rows_);
}

/**
* Determine index of top diagonal written.
* @return number of columns
*/
inline int top_diagonal() const {
return std::min(
std::get<0>(arguments_).top_diagonal() - start_col_ + start_row_,
cols_ - 1);
}

/**
* Evaluates an expression and assigns it to the block.
* @tparam T_expression type of expression
* @param rhs input expression
*/
template <typename T_expression,
typename
= require_all_valid_expressions_and_none_scalar_t<T_expression>>
const block_<T>& operator=(T_expression&& rhs) const {
check_size_match("block.operator=", "Rows of ", "rhs", rhs.rows(),
"rows of ", "*this", this->rows());
check_size_match("block.operator=", "Cols of ", "rhs", rhs.cols(),
"cols of ", "*this", this->cols());
auto expression = as_operation_cl(std::forward<T_expression>(rhs));
if (rows_ * cols_ == 0) {
return *this;
}
expression.evaluate_into(*this);

this->set_view(expression.bottom_diagonal(), expression.top_diagonal(),
1 - expression.rows(), expression.cols() - 1);
return *this;
}
};

/**
* Block of a kernel generator expression.
* @tparam T type of argument
* @param a input argument
* @param start_row first row of block
* @param start_col first column of a block
* @param rows number of rows in block
* @param cols number of columns in block
* @return Block of given expression
*/
template <typename T,
typename = require_all_valid_expressions_and_none_scalar_t<T>>
inline block_<as_operation_cl_t<T>> block(T&& a, int start_row, int start_col,
int rows, int cols) {
return block_<as_operation_cl_t<T>>(as_operation_cl(std::forward<T>(a)),
start_row, start_col, rows, cols);
}

} // namespace math
} // namespace stan

#endif
#endif
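
A hedged usage sketch for the new block() kernel generator operation (requires STAN_OPENCL; the matrices dst and src and the indices are placeholders, not from the diff). It mirrors how sub_block() is reimplemented later in this commit:

#ifdef STAN_OPENCL
#include <stan/math/opencl/matrix_cl.hpp>
#include <stan/math/opencl/kernel_generator.hpp>

void copy_corner(stan::math::matrix_cl<double>& dst,
                 const stan::math::matrix_cl<double>& src) {
  using stan::math::block;
  // Write the 2 x 2 top-left block of src into the 2 x 2 block of dst that
  // starts at row 1, column 1; a kernel is generated and run on the device.
  block(dst, 1, 1, 2, 2) = block(src, 0, 0, 2, 2);
}
#endif
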
45 changes: 45 additions & 0 deletions stan/math/opencl/kernel_generator/load.hpp
@@ -133,6 +133,51 @@ class load_
*/
inline matrix_cl_view view() const { return a_.view(); }

/**
* Sets view of the matrix depending on which part is written.
* @param bottom_diagonal Index of the top sub- or super- diagonal written
* with nonzero elements.
* @param top_diagonal Index of the top sub- or super- diagonal written with
* nonzero elements.
* @param bottom_zero_diagonal Index of the top sub- or super- diagonal
* written with zeros if it ie more extreme than \c bottom_diagonal. Otherwise
* it should be set to equal value as \c bottom_diagonal.
* @param top_zero_diagonal Index of the top sub- or super- diagonal written
* with zeros if it ie more extreme than \c top_diagonal. Otherwise it should
* be set to equal value as \c top_diagonal.
*/
inline void set_view(int bottom_diagonal, int top_diagonal,
int bottom_zero_diagonal, int top_zero_diagonal) const {
if (bottom_diagonal < 0) {
a_.view(either(a_.view(), matrix_cl_view::Lower));
} else if (bottom_zero_diagonal <= 1 - a_.rows()) {
a_.view(both(a_.view(), matrix_cl_view::Upper));
}
if (top_diagonal > 0) {
a_.view(either(a_.view(), matrix_cl_view::Upper));
} else if (top_zero_diagonal >= a_.cols() - 1) {
a_.view(both(a_.view(), matrix_cl_view::Lower));
}
}

/**
* Determine index of bottom diagonal written.
* @return number of columns
*/
inline int bottom_diagonal() const {
return contains_nonzero(a_.view(), matrix_cl_view::Lower) ? -a_.rows() + 1
: 0;
}

/**
* Determine index of top diagonal written.
* @return number of columns
*/
inline int top_diagonal() const {
return contains_nonzero(a_.view(), matrix_cl_view::Upper) ? a_.cols() - 1
: 0;
}

/**
* Evaluates the expression. \c load_ returns a const reference to stored
* matrix_cl.
44 changes: 39 additions & 5 deletions stan/math/opencl/kernel_generator/operation_cl.hpp
@@ -135,11 +135,15 @@ class operation_cl : public operation_cl_base {
const std::string& i, const std::string& j) const {
kernel_parts res{};
if (generated.count(this) == 0) {
this->var_name = name_gen.generate();
generated.insert(this);
std::string i_arg = i;
std::string j_arg = j;
derived().modify_argument_indices(i_arg, j_arg);
std::array<kernel_parts, N> args_parts = index_apply<N>([&](auto... Is) {
return std::array<kernel_parts, N>{
std::get<Is>(arguments_)
.get_kernel_parts(generated, name_gen, i, j)...};
.get_kernel_parts(generated, name_gen, i_arg, j_arg)...};
});
res.body
= std::accumulate(args_parts.begin(), args_parts.end(), std::string(),
@@ -151,7 +155,6 @@ class operation_cl : public operation_cl_base {
[](const std::string& a, const kernel_parts& b) {
return a + b.args;
});
this->var_name = name_gen.generate();
kernel_parts my_part = index_apply<N>([&](auto... Is) {
return this->derived().generate(i, j,
std::get<Is>(arguments_).var_name...);
@@ -162,6 +165,16 @@ class operation_cl : public operation_cl_base {
return res;
}

/**
* Does nothing. Derived classes can override this to modify how indices are
* passed to its argument expressions. On input arguments \c i and \c j are
* expressions for indices of this operation. On output they are expressions
* for indices of argument operations.
* @param[in, out] i row index
* @param[in, out] j column index
*/
inline void modify_argument_indices(std::string& i, std::string& j) const {}

/**
* Sets kernel arguments for nested expressions.
* @param[in,out] generated set of expressions that already set their kernel
@@ -206,7 +219,7 @@ class operation_cl : public operation_cl_base {
inline int rows() const {
return index_apply<N>([&](auto... Is) {
// assuming all non-dynamic sizes match
return std::max({get<Is>(arguments_).rows()...});
return std::max({std::get<Is>(arguments_).rows()...});
});
}

@@ -215,11 +228,32 @@ class operation_cl : public operation_cl_base {
* expression. Some subclasses may need to override this.
* @return number of columns
*/
template <size_t... I>
inline int cols() const {
return index_apply<N>([&](auto... Is) {
// assuming all non-dynamic sizes match
return std::max({get<Is>(arguments_).cols()...});
return std::max({std::get<Is>(arguments_).cols()...});
});
}

/**
* Determine index of bottom diagonal written. Some subclasses may need to
* override this.
* @return number of columns
*/
inline int bottom_diagonal() const {
return index_apply<N>([&](auto... Is) {
return std::min({std::get<Is>(arguments_).bottom_diagonal()...});
});
}

/**
* Determine index of top diagonal written. Some subclasses may need to
* override this.
* @return number of columns
*/
inline int top_diagonal() const {
return index_apply<N>([&](auto... Is) {
return std::max({std::get<Is>(arguments_).top_diagonal()...});
});
}
};
52 changes: 48 additions & 4 deletions stan/math/opencl/kernel_generator/operation_cl_lhs.hpp
@@ -2,6 +2,7 @@
#define STAN_MATH_OPENCL_KERNEL_GENERATOR_OPERATION_LHS_HPP
#ifdef STAN_OPENCL

#include <stan/math/prim/meta.hpp>
#include <stan/math/opencl/kernel_generator/operation_cl.hpp>
#include <string>
#include <set>
@@ -24,8 +25,11 @@ class operation_cl_lhs : public operation_cl<Derived, Scalar, Args...> {
using base = operation_cl<Derived, Scalar, Args...>;
static constexpr int N = sizeof...(Args);
using base::arguments_;
using base::derived;

public:
using base::operation_cl;

/**
* generates kernel code for this expression if it appears on the left hand
* side of an assigment.
@@ -38,32 +42,72 @@ class operation_cl_lhs : public operation_cl<Derived, Scalar, Args...> {
inline kernel_parts get_kernel_parts_lhs(
std::set<const operation_cl_base*>& generated, name_generator& name_gen,
const std::string& i, const std::string& j) const {
if (generated.count(this) == 0) {
this->var_name = name_gen.generate();
}
std::string i_arg = i;
std::string j_arg = j;
this->derived().modify_argument_indices(i_arg, j_arg);
std::array<kernel_parts, N> args_parts = index_apply<N>([&](auto... Is) {
return std::array<kernel_parts, N>{
std::get<Is>(arguments_)
.get_kernel_parts_lhs(generated, name_gen, i, j)...};
std::get<Is>(this->arguments_)
.get_kernel_parts_lhs(generated, name_gen, i_arg, j_arg)...};
});
kernel_parts res{};
res.body = std::accumulate(
args_parts.begin(), args_parts.end(), std::string(),
[](const std::string& a, const kernel_parts& b) { return a + b.body; });
if (generated.count(this) == 0) {
generated.insert(this);
this->var_name = name_gen.generate();
res.args
= std::accumulate(args_parts.begin(), args_parts.end(), std::string(),
[](const std::string& a, const kernel_parts& b) {
return a + b.args;
});
kernel_parts my_part = index_apply<N>([&](auto... Is) {
return this->derived().generate_lhs(
i, j, std::get<Is>(arguments_).var_name...);
i, j, std::get<Is>(this->arguments_).var_name...);
});
res.body += my_part.body;
res.args += my_part.args;
}
return res;
}

/**
* Sets view of the underlying matrix depending on which part is written.
* @param bottom_diagonal Index of the top sub- or super- diagonal written
* with nonzero elements.
* @param top_diagonal Index of the top sub- or super- diagonal written with
* nonzero elements.
* @param bottom_zero_diagonal Index of the top sub- or super- diagonal
* written with zeros if it ie more extreme than \c bottom_diagonal. Otherwise
* it should be set to equal value as \c bottom_diagonal.
* @param top_zero_diagonal Index of the top sub- or super- diagonal written
* with zeros if it ie more extreme than \c top_diagonal. Otherwise it should
* be set to equal value as \c top_diagonal.
*/
inline void set_view(int bottom_diagonal, int top_diagonal,
int bottom_zero_diagonal, int top_zero_diagonal) const {
index_apply<N>([&](auto... Is) {
(void)std::initializer_list<int>{
(std::get<Is>(this->arguments_)
.set_view(bottom_diagonal, top_diagonal, bottom_zero_diagonal,
top_zero_diagonal),
0)...};
});
}

/**
* Adds write event to any matrices used by nested expressions.
* @param e the event to add
*/
inline void add_write_event(cl::Event& e) const {
index_apply<N>([&](auto... Is) {
(void)std::initializer_list<int>{
(std::get<Is>(this->arguments_).add_write_event(e), 0)...};
});
}
};

} // namespace math
42 changes: 18 additions & 24 deletions stan/math/opencl/kernels/poisson_log_glm_lpmf.hpp
@@ -32,9 +32,7 @@ static const char* poisson_log_glm_kernel_code = STRINGIFY(
* it is a scalar)
* @param is_alpha_vector 0 or 1 - whether alpha is a vector (alternatively
* it is a scalar)
* @param need_logp1 interpreted as boolean - whether first part of
* logp_global needs to be computed
* @param need_logp2 interpreted as boolean - whether second part of
* @param need_logp interpreted as boolean - whether first part of
* logp_global needs to be computed
*/
__kernel void poisson_log_glm(
@@ -43,7 +41,7 @@ static const char* poisson_log_glm_kernel_code = STRINGIFY(
const __global int* y_global, const __global double* x,
const __global double* alpha, const __global double* beta, const int N,
const int M, const int is_y_vector, const int is_alpha_vector,
const int need_logp1, const int need_logp2) {
const int need_logp) {
const int gid = get_global_id(0);
const int lid = get_local_id(0);
const int lsize = get_local_size(0);
@@ -68,12 +66,10 @@ static const char* poisson_log_glm_kernel_code = STRINGIFY(
// this signals that an exception must be raised
theta_derivative = NAN;
}
if (need_logp1) {
if (need_logp) {
logp = -lgamma(y + 1);
}
if (need_logp2) {
logp += y * theta - exp_theta;
}
logp += y * theta - exp_theta;
theta_derivative_global[gid] = theta_derivative;
}
// Sum theta_derivative, calculated by different threads.
@@ -94,23 +90,21 @@ static const char* poisson_log_glm_kernel_code = STRINGIFY(
theta_derivative_sum[wg_id] = local_storage[0];
}

if (need_logp1 || need_logp2) {
// Sum logp, calculated by different threads.
barrier(CLK_LOCAL_MEM_FENCE);
local_storage[lid] = logp;
barrier(CLK_LOCAL_MEM_FENCE);
for (int step = lsize / REDUCTION_STEP_SIZE; step > 0;
step /= REDUCTION_STEP_SIZE) {
if (lid < step) {
for (int i = 1; i < REDUCTION_STEP_SIZE; i++) {
local_storage[lid] += local_storage[lid + step * i];
}
// Sum logp, calculated by different threads.
barrier(CLK_LOCAL_MEM_FENCE);
local_storage[lid] = logp;
barrier(CLK_LOCAL_MEM_FENCE);
for (int step = lsize / REDUCTION_STEP_SIZE; step > 0;
step /= REDUCTION_STEP_SIZE) {
if (lid < step) {
for (int i = 1; i < REDUCTION_STEP_SIZE; i++) {
local_storage[lid] += local_storage[lid + step * i];
}
barrier(CLK_LOCAL_MEM_FENCE);
}
if (lid == 0) {
logp_global[wg_id] = local_storage[0];
}
barrier(CLK_LOCAL_MEM_FENCE);
}
if (lid == 0) {
logp_global[wg_id] = local_storage[0];
}
}
// \cond
@@ -122,7 +116,7 @@ static const char* poisson_log_glm_kernel_code = STRINGIFY(
* poisson_log_glm_lpmf() \endlink
*/
const kernel_cl<out_buffer, out_buffer, out_buffer, in_buffer, in_buffer,
in_buffer, in_buffer, int, int, int, int, int, int>
in_buffer, in_buffer, int, int, int, int, int>
poisson_log_glm("poisson_log_glm", {poisson_log_glm_kernel_code},
{{"REDUCTION_STEP_SIZE", 4}, {"LOCAL_SIZE_", 64}});

84 changes: 0 additions & 84 deletions stan/math/opencl/kernels/sub_block.hpp

This file was deleted.

6 changes: 3 additions & 3 deletions stan/math/opencl/opencl_context.hpp
@@ -166,7 +166,7 @@ class opencl_context_base {
int cholesky_min_L11_size = 256;
int cholesky_partition = 4;
int cholesky_size_worth_transfer = 1250;
// Used in math/rev/mat/fun/cholesky_decompose
// Used in math/rev/fun/cholesky_decompose
int cholesky_rev_min_block_size = 512;
int cholesky_rev_block_partition = 8;
// used in math/opencl/multiply
@@ -175,10 +175,10 @@ class opencl_context_base {
double gp_exp_quad_cov_complex = 1'000'000;
double gp_exp_quad_cov_simple = 1'250;
// used in math/prim/mat/fun/multiply
// and math/rev/mat/fun/multiply
// and math/rev/fun/multiply
int multiply_dim_prod_worth_transfer = 2000000;
// used in math/prim/mat/fun/mdivide_left_tri
// and math/rev/mat/fun/mdivide_left_tri
// and math/rev/fun/mdivide_left_tri
int tri_inverse_size_worth_transfer = 100;
} tuning_opts_;

16 changes: 6 additions & 10 deletions stan/math/opencl/prim/poisson_log_glm_lpmf.hpp
@@ -90,27 +90,23 @@ return_type_t<T_alpha, T_beta> poisson_log_glm_lpmf(

matrix_cl<double> theta_derivative_cl(N, 1);
matrix_cl<double> theta_derivative_sum_cl(wgs, 1);
const bool need_logp1 = include_summand<propto>::value;
const bool need_logp2 = include_summand<propto, T_partials_return>::value;
matrix_cl<double> logp_cl((need_logp1 || need_logp2) ? wgs : 0, 1);
const bool need_logp = include_summand<propto>::value;
matrix_cl<double> logp_cl(wgs, 1);

try {
opencl_kernels::poisson_log_glm(
cl::NDRange(local_size * wgs), cl::NDRange(local_size),
theta_derivative_cl, theta_derivative_sum_cl, logp_cl, y_cl, x_cl,
alpha_cl, beta_cl, N, M, y_cl.size() != 1, size(alpha) != 1, need_logp1,
need_logp2);
alpha_cl, beta_cl, N, M, y_cl.size() != 1, size(alpha) != 1, need_logp);
} catch (const cl::Error& e) {
check_opencl_error(function, e);
}
Matrix<T_partials_return, Dynamic, 1> theta_derivative_partial_sum(wgs);
theta_derivative_partial_sum = from_matrix_cl(theta_derivative_sum_cl);
double theta_derivative_sum = sum(theta_derivative_partial_sum);
if (need_logp1 || need_logp2) {
Eigen::VectorXd logp_partial_sum(wgs);
logp_partial_sum = from_matrix_cl(logp_cl);
logp += sum(logp_partial_sum);
}
Eigen::VectorXd logp_partial_sum(wgs);
logp_partial_sum = from_matrix_cl(logp_cl);
logp += sum(logp_partial_sum);
if (!std::isfinite(theta_derivative_sum)) {
check_nonnegative(function, "Vector of dependent variables",
from_matrix_cl(y_cl));
62 changes: 4 additions & 58 deletions stan/math/opencl/sub_block.hpp
@@ -6,8 +6,9 @@
#include <stan/math/prim/err.hpp>
#include <stan/math/opencl/opencl_context.hpp>
#include <stan/math/opencl/matrix_cl_view.hpp>
#include <stan/math/opencl/kernels/sub_block.hpp>
#include <stan/math/opencl/matrix_cl.hpp>
#include <stan/math/opencl/kernel_generator/block.hpp>
#include <stan/math/prim/err/throw_domain_error.hpp>
#include <CL/cl2.hpp>
#include <vector>
#include <algorithm>
@@ -29,63 +30,8 @@ namespace math {
template <typename T>
inline void matrix_cl<T, require_arithmetic_t<T>>::sub_block(
const matrix_cl<T, require_arithmetic_t<T>>& A, size_t A_i, size_t A_j,
size_t this_i, size_t this_j, size_t nrows, size_t ncols) try {
if (nrows == 0 || ncols == 0) {
return;
}
if ((A_i + nrows) > A.rows() || (A_j + ncols) > A.cols()
|| (this_i + nrows) > this->rows() || (this_j + ncols) > this->cols()) {
throw_domain_error("sub_block", "submatrix in *this", " is out of bounds",
"");
}
cl::CommandQueue cmdQueue = opencl_context.queue();
if (A.view() == matrix_cl_view::Entire) {
std::array<size_t, 3> src_offset({A_i * sizeof(double), A_j, 0});
std::array<size_t, 3> dst_offset({this_i * sizeof(double), this_j, 0});
std::array<size_t, 3> size({nrows * sizeof(double), ncols, 1});
std::vector<cl::Event> kernel_events
= vec_concat(A.write_events(), this->read_write_events());
cl::Event copy_event;
cmdQueue.enqueueCopyBufferRect(A.buffer(), this->buffer(), src_offset,
dst_offset, size, A.rows() * sizeof(double),
A.rows() * A.cols() * sizeof(double),
sizeof(double) * this->rows(),
this->rows() * this->cols() * sizeof(double),
&kernel_events, &copy_event);
A.add_read_event(copy_event);
this->add_write_event(copy_event);
} else {
opencl_kernels::sub_block(cl::NDRange(nrows, ncols), A, *this, A_i, A_j,
this_i, this_j, nrows, ncols, A.rows(), A.cols(),
this->rows(), this->cols(), A.view());
}
// calculation of extreme sub- and super- diagonal written
const int diag_in_copy = A_i - A_j;
const int copy_low = contains_nonzero(A.view(), matrix_cl_view::Lower)
? 1 - nrows
: diag_in_copy;
const int copy_high = contains_nonzero(A.view(), matrix_cl_view::Upper)
? ncols - 1
: diag_in_copy;
const int start = this_j - this_i;

if (start + copy_low < 0) {
this->view_ = either(this->view_, matrix_cl_view::Lower);
} else if (this_i <= 1 && this_j == 0 && nrows + this_i >= rows_
&& ncols >= std::min(rows_, cols_) - 1
&& !contains_nonzero(A.view_, matrix_cl_view::Lower)) {
this->view_ = both(this->view_, matrix_cl_view::Upper);
}

if (start + copy_high > 0) {
this->view_ = either(this->view_, matrix_cl_view::Upper);
} else if (this_i == 0 && this_j <= 1 && ncols + this_j >= cols_
&& nrows >= std::min(rows_, cols_) - 1
&& !contains_nonzero(A.view_, matrix_cl_view::Upper)) {
this->view_ = both(this->view_, matrix_cl_view::Lower);
}
} catch (const cl::Error& e) {
check_opencl_error("copy_submatrix", e);
size_t this_i, size_t this_j, size_t nrows, size_t ncols) {
block(*this, this_i, this_j, nrows, ncols) = block(A, A_i, A_j, nrows, ncols);
}

} // namespace math
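
The rewritten sub_block above hands the bounds checks, the buffer copy, and the triangular-view bookkeeping over to the kernel generator's block() expression instead of driving enqueueCopyBufferRect by hand. A minimal sketch of the same block-assignment semantics on the host side, written against plain Eigen rather than matrix_cl (an analogue for illustration, not the OpenCL API):

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::MatrixXd A = Eigen::MatrixXd::Constant(4, 4, 1.0);
  Eigen::MatrixXd B = Eigen::MatrixXd::Zero(5, 5);
  // Copy the 2x3 block of A starting at (1, 0) into B starting at (2, 1);
  // sub_block(A, 1, 0, 2, 1, 2, 3) expresses the same copy on the device.
  B.block(2, 1, 2, 3) = A.block(1, 0, 2, 3);
  std::cout << B << "\n";
}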
1 change: 0 additions & 1 deletion stan/math/prim/arr.hpp
@@ -12,7 +12,6 @@
#include <stan/math/prim/arr/fun/inverse_softmax.hpp>
#include <stan/math/prim/arr/fun/promote_elements.hpp>
#include <stan/math/prim/arr/fun/promote_scalar.hpp>
#include <stan/math/prim/arr/fun/promote_scalar_type.hpp>
#include <stan/math/prim/arr/fun/rep_array.hpp>
#include <stan/math/prim/arr/fun/scaled_add.hpp>
#include <stan/math/prim/arr/fun/sort_asc.hpp>
1 change: 0 additions & 1 deletion stan/math/prim/arr/fun/promote_scalar.hpp
@@ -3,7 +3,6 @@

#include <stan/math/prim/meta.hpp>
#include <stan/math/prim/scal/fun/promote_scalar.hpp>
#include <stan/math/prim/scal/fun/promote_scalar_type.hpp>
#include <vector>

namespace stan {
29 changes: 0 additions & 29 deletions stan/math/prim/arr/fun/promote_scalar_type.hpp

This file was deleted.

1 change: 0 additions & 1 deletion stan/math/prim/mat.hpp
@@ -184,7 +184,6 @@
#include <stan/math/prim/mat/fun/promote_common.hpp>
#include <stan/math/prim/mat/fun/promote_elements.hpp>
#include <stan/math/prim/mat/fun/promote_scalar.hpp>
#include <stan/math/prim/mat/fun/promote_scalar_type.hpp>
#include <stan/math/prim/mat/fun/qr_Q.hpp>
#include <stan/math/prim/mat/fun/qr_R.hpp>
#include <stan/math/prim/mat/fun/qr_thin_Q.hpp>
4 changes: 3 additions & 1 deletion stan/math/prim/mat/fun/LDLT_factor.hpp
@@ -3,6 +3,8 @@

#include <stan/math/prim/err/check_square.hpp>
#include <stan/math/prim/mat/fun/Eigen.hpp>
#include <stan/math/prim/mat/fun/log.hpp>
#include <stan/math/prim/mat/fun/sum.hpp>
#include <stan/math/prim/scal/fun/is_nan.hpp>
#include <boost/shared_ptr.hpp>

@@ -96,7 +98,7 @@ class LDLT_factor {
return true;
}

inline T log_abs_det() const { return ldltP_->vectorD().array().log().sum(); }
inline T log_abs_det() const { return sum(log(ldltP_->vectorD())); }

inline void inverse(matrix_t& invA) const {
invA.setIdentity(N_);
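
The value of log_abs_det() is unchanged: for an LDLT factorization the determinant is the product of the entries of D, so summing their logs gives the log determinant, and the new sum(log(...)) form simply routes through the vectorized helpers. A small Eigen-only check of that identity (SPD input assumed, so the D entries are positive):

#include <Eigen/Dense>
#include <cmath>
#include <iostream>

int main() {
  Eigen::MatrixXd m(2, 2);
  m << 4.0, 1.0,
       1.0, 3.0;  // symmetric positive definite, det = 11
  Eigen::LDLT<Eigen::MatrixXd> ldlt(m);
  double via_d = ldlt.vectorD().array().log().sum();
  double direct = std::log(m.determinant());
  std::cout << via_d << " vs " << direct << "\n";  // both approximately 2.3979
}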
2 changes: 1 addition & 1 deletion stan/math/prim/mat/fun/Phi.hpp
@@ -29,7 +29,7 @@ struct Phi_fun {
* @return Unit normal CDF of each value in x.
*/
template <typename T>
inline typename apply_scalar_unary<Phi_fun, T>::return_t Phi(const T& x) {
inline auto Phi(const T& x) {
return apply_scalar_unary<Phi_fun, T>::apply(x);
}

3 changes: 1 addition & 2 deletions stan/math/prim/mat/fun/Phi_approx.hpp
@@ -36,8 +36,7 @@ struct Phi_approx_fun {
* @return elementwise Phi_approx of container elements
*/
template <typename T>
inline typename apply_scalar_unary<Phi_approx_fun, T>::return_t Phi_approx(
const T& x) {
inline auto Phi_approx(const T& x) {
return apply_scalar_unary<Phi_approx_fun, T>::apply(x);
}

18 changes: 16 additions & 2 deletions stan/math/prim/mat/fun/acos.hpp
@@ -1,6 +1,8 @@
#ifndef STAN_MATH_PRIM_MAT_FUN_ACOS_HPP
#define STAN_MATH_PRIM_MAT_FUN_ACOS_HPP

#include <stan/math/prim/mat/fun/Eigen.hpp>
#include <stan/math/prim/meta.hpp>
#include <stan/math/prim/vectorize/apply_scalar_unary.hpp>
#include <cmath>

@@ -29,11 +31,23 @@ struct acos_fun {
* @param x container
* @return Arc cosine of each variable in the container, in radians.
*/
template <typename T>
inline typename apply_scalar_unary<acos_fun, T>::return_t acos(const T& x) {
template <typename T, typename = require_not_eigen_vt<std::is_arithmetic, T>>
inline auto acos(const T& x) {
return apply_scalar_unary<acos_fun, T>::apply(x);
}

/**
* Version of acos() that accepts Eigen Matrix or matrix expressions.
* @tparam Derived derived type of x
* @param x Matrix or matrix expression
* @return Arc cosine of each variable in the container, in radians.
*/
template <typename Derived,
typename = require_eigen_vt<std::is_arithmetic, Derived>>
inline auto acos(const Eigen::MatrixBase<Derived>& x) {
return x.derived().array().acos().matrix();
}

} // namespace math
} // namespace stan
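
The same two-overload pattern repeats for most of the unary functions in this PR: the apply_scalar_unary path is disabled for Eigen containers of arithmetic scalars via require_not_eigen_vt, and a second overload forwards those containers straight to Eigen's vectorized array routine, returning a lazy expression. A self-contained sketch of the idea using a hypothetical my_acos (not the Stan signatures):

#include <Eigen/Dense>
#include <cmath>
#include <cstddef>
#include <iostream>
#include <vector>

// Element-wise fallback, standing in for the apply_scalar_unary path.
std::vector<double> my_acos(const std::vector<double>& x) {
  std::vector<double> out(x.size());
  for (std::size_t i = 0; i < x.size(); ++i) {
    out[i] = std::acos(x[i]);
  }
  return out;
}

// Eigen overload: mirrors x.derived().array().acos().matrix() above and
// returns an expression that is evaluated when assigned.
template <typename Derived>
auto my_acos(const Eigen::MatrixBase<Derived>& x) {
  return x.derived().array().acos().matrix();
}

int main() {
  Eigen::VectorXd v(3);
  v << 0.0, 0.5, 1.0;
  Eigen::VectorXd r = my_acos(v);  // vectorized Eigen path
  std::cout << r.transpose() << "\n";
  std::cout << my_acos(std::vector<double>{0.0, 0.5, 1.0})[1] << "\n";  // scalar loop path
}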

2 changes: 1 addition & 1 deletion stan/math/prim/mat/fun/acosh.hpp
@@ -35,7 +35,7 @@ struct acosh_fun {
* @return Elementwise acosh of members of container.
*/
template <typename T>
inline typename apply_scalar_unary<acosh_fun, T>::return_t acosh(const T& x) {
inline auto acosh(const T& x) {
return apply_scalar_unary<acosh_fun, T>::apply(x);
}

18 changes: 16 additions & 2 deletions stan/math/prim/mat/fun/asin.hpp
@@ -1,6 +1,8 @@
#ifndef STAN_MATH_PRIM_MAT_FUN_ASIN_HPP
#define STAN_MATH_PRIM_MAT_FUN_ASIN_HPP

#include <stan/math/prim/mat/fun/Eigen.hpp>
#include <stan/math/prim/meta.hpp>
#include <stan/math/prim/vectorize/apply_scalar_unary.hpp>
#include <cmath>

@@ -29,11 +31,23 @@ struct asin_fun {
* @param x container
* @return Arcsine of each variable in the container, in radians.
*/
template <typename T>
inline typename apply_scalar_unary<asin_fun, T>::return_t asin(const T& x) {
template <typename T, typename = require_not_eigen_vt<std::is_arithmetic, T>>
inline auto asin(const T& x) {
return apply_scalar_unary<asin_fun, T>::apply(x);
}

/**
* Version of asin() that accepts Eigen Matrix or matrix expressions.
* @tparam Derived derived type of x
* @param x Matrix or matrix expression
* @return Arcsine of each variable in the container, in radians.
*/
template <typename Derived,
typename = require_eigen_vt<std::is_arithmetic, Derived>>
inline auto asin(const Eigen::MatrixBase<Derived>& x) {
return x.derived().array().asin().matrix();
}

} // namespace math
} // namespace stan

2 changes: 1 addition & 1 deletion stan/math/prim/mat/fun/asinh.hpp
@@ -29,7 +29,7 @@ struct asinh_fun {
* @return Inverse hyperbolic sine of each value in the container.
*/
template <typename T>
inline typename apply_scalar_unary<asinh_fun, T>::return_t asinh(const T& x) {
inline auto asinh(const T& x) {
return apply_scalar_unary<asinh_fun, T>::apply(x);
}

16 changes: 15 additions & 1 deletion stan/math/prim/mat/fun/atan.hpp
@@ -1,6 +1,8 @@
#ifndef STAN_MATH_PRIM_MAT_FUN_ATAN_HPP
#define STAN_MATH_PRIM_MAT_FUN_ATAN_HPP

#include <stan/math/prim/mat/fun/Eigen.hpp>
#include <stan/math/prim/meta.hpp>
#include <stan/math/prim/vectorize/apply_scalar_unary.hpp>
#include <cmath>

@@ -29,11 +31,23 @@ struct atan_fun {
* @param x container
* @return Arctan of each value in x, in radians.
*/
template <typename T>
template <typename T, typename = require_not_eigen_vt<std::is_arithmetic, T>>
inline typename apply_scalar_unary<atan_fun, T>::return_t atan(const T& x) {
return apply_scalar_unary<atan_fun, T>::apply(x);
}

/**
* Version of atan() that accepts Eigen Matrix or matrix expressions.
* @tparam Derived derived type of x
* @param x Matrix or matrix expression
* @return Elementwise atan of members of container.
*/
template <typename Derived,
typename = require_eigen_vt<std::is_arithmetic, Derived>>
inline auto atan(const Eigen::MatrixBase<Derived>& x) {
return x.derived().array().atan().matrix();
}

} // namespace math
} // namespace stan

2 changes: 1 addition & 1 deletion stan/math/prim/mat/fun/atanh.hpp
@@ -35,7 +35,7 @@ struct atanh_fun {
* @return Elementwise atanh of members of container.
*/
template <typename T>
inline typename apply_scalar_unary<atanh_fun, T>::return_t atanh(const T& x) {
inline auto atanh(const T& x) {
return apply_scalar_unary<atanh_fun, T>::apply(x);
}

2 changes: 1 addition & 1 deletion stan/math/prim/mat/fun/cbrt.hpp
@@ -29,7 +29,7 @@ struct cbrt_fun {
* @return Cube root of each value in x.
*/
template <typename T>
inline typename apply_scalar_unary<cbrt_fun, T>::return_t cbrt(const T& x) {
inline auto cbrt(const T& x) {
return apply_scalar_unary<cbrt_fun, T>::apply(x);
}

18 changes: 16 additions & 2 deletions stan/math/prim/mat/fun/ceil.hpp
@@ -1,6 +1,8 @@
#ifndef STAN_MATH_PRIM_MAT_FUN_CEIL_HPP
#define STAN_MATH_PRIM_MAT_FUN_CEIL_HPP

#include <stan/math/prim/mat/fun/Eigen.hpp>
#include <stan/math/prim/meta.hpp>
#include <stan/math/prim/vectorize/apply_scalar_unary.hpp>
#include <cmath>

@@ -29,11 +31,23 @@ struct ceil_fun {
* @param x container
* @return Least integer >= each value in x.
*/
template <typename T>
inline typename apply_scalar_unary<ceil_fun, T>::return_t ceil(const T& x) {
template <typename T, typename = require_not_eigen_vt<std::is_arithmetic, T>>
inline auto ceil(const T& x) {
return apply_scalar_unary<ceil_fun, T>::apply(x);
}

/**
* Version of ceil() that accepts Eigen Matrix or matrix expressions.
* @tparam Derived derived type of x
* @param x Matrix or matrix expression
* @return Least integer >= each value in x.
*/
template <typename Derived,
typename = require_eigen_vt<std::is_arithmetic, Derived>>
inline auto ceil(const Eigen::MatrixBase<Derived>& x) {
return x.derived().array().ceil().matrix();
}

} // namespace math
} // namespace stan

18 changes: 16 additions & 2 deletions stan/math/prim/mat/fun/cos.hpp
@@ -1,6 +1,8 @@
#ifndef STAN_MATH_PRIM_MAT_FUN_COS_HPP
#define STAN_MATH_PRIM_MAT_FUN_COS_HPP

#include <stan/math/prim/mat/fun/Eigen.hpp>
#include <stan/math/prim/meta.hpp>
#include <stan/math/prim/vectorize/apply_scalar_unary.hpp>
#include <cmath>

@@ -29,11 +31,23 @@ struct cos_fun {
* @param x angles in radians
* @return Cosine of each value in x.
*/
template <typename T>
inline typename apply_scalar_unary<cos_fun, T>::return_t cos(const T& x) {
template <typename T, typename = require_not_eigen_vt<std::is_arithmetic, T>>
inline auto cos(const T& x) {
return apply_scalar_unary<cos_fun, T>::apply(x);
}

/**
* Version of cos() that accepts Eigen Matrix or matrix expressions.
* @tparam Derived derived type of x
* @param x Matrix or matrix expression
* @return Cosine of each value in x.
*/
template <typename Derived,
typename = require_eigen_vt<std::is_arithmetic, Derived>>
inline auto cos(const Eigen::MatrixBase<Derived>& x) {
return x.derived().array().cos().matrix();
}

} // namespace math
} // namespace stan

16 changes: 15 additions & 1 deletion stan/math/prim/mat/fun/cosh.hpp
@@ -1,6 +1,8 @@
#ifndef STAN_MATH_PRIM_MAT_FUN_COSH_HPP
#define STAN_MATH_PRIM_MAT_FUN_COSH_HPP

#include <stan/math/prim/mat/fun/Eigen.hpp>
#include <stan/math/prim/meta.hpp>
#include <stan/math/prim/vectorize/apply_scalar_unary.hpp>
#include <cmath>

@@ -29,11 +31,23 @@ struct cosh_fun {
* @param x angles in radians
* @return Hyperbolic cosine of x.
*/
template <typename T>
template <typename T, typename = require_not_eigen_vt<std::is_arithmetic, T>>
inline typename apply_scalar_unary<cosh_fun, T>::return_t cosh(const T& x) {
return apply_scalar_unary<cosh_fun, T>::apply(x);
}

/**
* Version of cosh() that accepts Eigen Matrix or matrix expressions.
* @tparam Derived derived type of x
* @param x Matrix or matrix expression
* @return Hyperbolic cosine of x.
*/
template <typename Derived,
typename = require_eigen_vt<std::is_arithmetic, Derived>>
inline auto cosh(const Eigen::MatrixBase<Derived>& x) {
return x.derived().array().cosh().matrix();
}

} // namespace math
} // namespace stan

3 changes: 1 addition & 2 deletions stan/math/prim/mat/fun/digamma.hpp
@@ -31,8 +31,7 @@ struct digamma_fun {
* @throw std::domain_error if any value is a negative integer or 0
*/
template <typename T>
inline typename apply_scalar_unary<digamma_fun, T>::return_t digamma(
const T& x) {
inline auto digamma(const T& x) {
return apply_scalar_unary<digamma_fun, T>::apply(x);
}

2 changes: 1 addition & 1 deletion stan/math/prim/mat/fun/erf.hpp
@@ -29,7 +29,7 @@ struct erf_fun {
* @return Error function applied to each value in x.
*/
template <typename T>
inline typename apply_scalar_unary<erf_fun, T>::return_t erf(const T& x) {
inline auto erf(const T& x) {
return apply_scalar_unary<erf_fun, T>::apply(x);
}

2 changes: 1 addition & 1 deletion stan/math/prim/mat/fun/erfc.hpp
@@ -29,7 +29,7 @@ struct erfc_fun {
* @return Complementary error function applied to each value in x.
*/
template <typename T>
inline typename apply_scalar_unary<erfc_fun, T>::return_t erfc(const T& x) {
inline auto erfc(const T& x) {
return apply_scalar_unary<erfc_fun, T>::apply(x);
}

20 changes: 17 additions & 3 deletions stan/math/prim/mat/fun/exp.hpp
@@ -1,8 +1,10 @@
#ifndef STAN_MATH_PRIM_MAT_FUN_EXP_HPP
#define STAN_MATH_PRIM_MAT_FUN_EXP_HPP

#include <stan/math/prim/vectorize/apply_scalar_unary.hpp>
#include <stan/math/prim/mat/fun/Eigen.hpp>
#include <stan/math/prim/meta.hpp>
#include <stan/math/prim/scal/fun/exp.hpp>
#include <stan/math/prim/vectorize/apply_scalar_unary.hpp>
#include <cmath>

namespace stan {
@@ -36,11 +38,23 @@ struct exp_fun {
* @param[in] x container
* @return Elementwise application of exponentiation to the argument.
*/
template <typename T>
inline typename apply_scalar_unary<exp_fun, T>::return_t exp(const T& x) {
template <typename T, typename = require_not_eigen_vt<std::is_arithmetic, T>>
inline auto exp(const T& x) {
return apply_scalar_unary<exp_fun, T>::apply(x);
}

/**
* Version of exp() that accepts Eigen Matrix or matrix expressions.
* @tparam Derived derived type of x
* @param x Matrix or matrix expression
* @return Elementwise application of exponentiation to the argument.
*/
template <typename Derived,
typename = require_eigen_vt<std::is_arithmetic, Derived>>
inline auto exp(const Eigen::MatrixBase<Derived>& x) {
return x.derived().array().exp().matrix();
}

} // namespace math
} // namespace stan

2 changes: 1 addition & 1 deletion stan/math/prim/mat/fun/exp2.hpp
@@ -36,7 +36,7 @@ struct exp2_fun {
* @return Elementwise exp2 of members of container.
*/
template <typename T, typename = require_vector_like_t<T>>
inline typename apply_scalar_unary<exp2_fun, T>::return_t exp2(const T& x) {
inline auto exp2(const T& x) {
return apply_scalar_unary<exp2_fun, T>::apply(x);
}

2 changes: 1 addition & 1 deletion stan/math/prim/mat/fun/expm1.hpp
@@ -29,7 +29,7 @@ struct expm1_fun {
* @return Natural exponential of each value in x minus one.
*/
template <typename T>
inline typename apply_scalar_unary<expm1_fun, T>::return_t expm1(const T& x) {
inline auto expm1(const T& x) {
return apply_scalar_unary<expm1_fun, T>::apply(x);
}

28 changes: 27 additions & 1 deletion stan/math/prim/mat/fun/fabs.hpp
@@ -1,7 +1,9 @@
#ifndef STAN_MATH_PRIM_MAT_FUN_FABS_HPP
#define STAN_MATH_PRIM_MAT_FUN_FABS_HPP

#include <stan/math/prim/mat/fun/Eigen.hpp>
#include <stan/math/prim/vectorize/apply_scalar_unary.hpp>
#include <stan/math/prim/meta.hpp>
#include <cmath>

namespace stan {
@@ -29,11 +31,35 @@ struct fabs_fun {
* @param x container
* @return Absolute value of each value in x.
*/
template <typename T>
template <typename T, typename = require_not_eigen_vt<std::is_arithmetic, T>>
inline typename apply_scalar_unary<fabs_fun, T>::return_t fabs(const T& x) {
return apply_scalar_unary<fabs_fun, T>::apply(x);
}

/**
* Version of fabs() that accepts Eigen Matrix or matrix expressions.
* @tparam Derived derived type of x
* @param x Matrix or matrix expression
* @return Absolute value of each value in x.
*/
template <typename Derived,
typename = require_eigen_vt<std::is_arithmetic, Derived>>
inline auto fabs(const Eigen::MatrixBase<Derived>& x) {
return x.derived().array().abs().matrix();
}

/**
* Version of fabs() that accepts Eigen Array or array expressions.
* @tparam Derived derived type of x
* @param x Array or array expression
* @return Absolute value of each value in x.
*/
template <typename Derived,
typename = require_eigen_vt<std::is_arithmetic, Derived>>
inline auto fabs(const Eigen::ArrayBase<Derived>& x) {
return x.derived().abs();
}

} // namespace math
} // namespace stan

18 changes: 16 additions & 2 deletions stan/math/prim/mat/fun/floor.hpp
@@ -1,7 +1,9 @@
#ifndef STAN_MATH_PRIM_MAT_FUN_FLOOR_HPP
#define STAN_MATH_PRIM_MAT_FUN_FLOOR_HPP

#include <stan/math/prim/mat/fun/Eigen.hpp>
#include <stan/math/prim/vectorize/apply_scalar_unary.hpp>
#include <stan/math/prim/meta.hpp>
#include <cmath>

namespace stan {
@@ -29,11 +31,23 @@ struct floor_fun {
* @param x container
* @return Greatest integer <= each value in x.
*/
template <typename T>
inline typename apply_scalar_unary<floor_fun, T>::return_t floor(const T& x) {
template <typename T, typename = require_not_eigen_vt<std::is_arithmetic, T>>
inline auto floor(const T& x) {
return apply_scalar_unary<floor_fun, T>::apply(x);
}

/**
* Version of floor() that accepts Eigen Matrix or matrix expressions.
* @tparam Derived derived type of x
* @param x Matrix or matrix expression
* @return Greatest integer <= each value in x.
*/
template <typename Derived,
typename = require_eigen_vt<std::is_arithmetic, Derived>>
inline auto floor(const Eigen::MatrixBase<Derived>& x) {
return x.derived().array().floor().matrix();
}

} // namespace math
} // namespace stan

32 changes: 29 additions & 3 deletions stan/math/prim/mat/fun/inv.hpp
@@ -1,8 +1,10 @@
#ifndef STAN_MATH_PRIM_MAT_FUN_INV_HPP
#define STAN_MATH_PRIM_MAT_FUN_INV_HPP

#include <stan/math/prim/vectorize/apply_scalar_unary.hpp>
#include <stan/math/prim/mat/fun/Eigen.hpp>
#include <stan/math/prim/meta.hpp>
#include <stan/math/prim/scal/fun/inv.hpp>
#include <stan/math/prim/vectorize/apply_scalar_unary.hpp>

namespace stan {
namespace math {
@@ -28,11 +30,35 @@ struct inv_fun {
* @param x container
* @return 1 divided by each value in x.
*/
template <typename T>
inline typename apply_scalar_unary<inv_fun, T>::return_t inv(const T& x) {
template <typename T, typename = require_not_eigen_vt<std::is_arithmetic, T>>
inline auto inv(const T& x) {
return apply_scalar_unary<inv_fun, T>::apply(x);
}

/**
* Version of inv() that accepts Eigen Matrix or matrix expressions.
* @tparam Derived derived type of x
* @param x Matrix or matrix expression
* @return 1 divided by each value in x.
*/
template <typename Derived,
typename = require_eigen_vt<std::is_arithmetic, Derived>>
inline auto inv(const Eigen::MatrixBase<Derived>& x) {
return x.derived().array().inverse().matrix();
}

/**
* Version of inv() that accepts Eigen Array or array expressions.
* @tparam Derived derived type of x
* @param x Array or array expression
* @return 1 divided by each value in x.
*/
template <typename Derived,
typename = require_eigen_vt<std::is_arithmetic, Derived>>
inline auto inv(const Eigen::ArrayBase<Derived>& x) {
return x.derived().inverse();
}

} // namespace math
} // namespace stan

3 changes: 1 addition & 2 deletions stan/math/prim/mat/fun/inv_Phi.hpp
@@ -31,8 +31,7 @@ struct inv_Phi_fun {
* @throw std::domain_error if any value is not between 0 and 1.
*/
template <typename T>
inline typename apply_scalar_unary<inv_Phi_fun, T>::return_t inv_Phi(
const T& x) {
inline auto inv_Phi(const T& x) {
return apply_scalar_unary<inv_Phi_fun, T>::apply(x);
}

32 changes: 28 additions & 4 deletions stan/math/prim/mat/fun/inv_cloglog.hpp
@@ -1,8 +1,9 @@
#ifndef STAN_MATH_PRIM_MAT_FUN_INV_CLOGLOG_HPP
#define STAN_MATH_PRIM_MAT_FUN_INV_CLOGLOG_HPP

#include <stan/math/prim/vectorize/apply_scalar_unary.hpp>
#include <stan/math/prim/mat/fun/exp.hpp>
#include <stan/math/prim/scal/fun/inv_cloglog.hpp>
#include <stan/math/prim/vectorize/apply_scalar_unary.hpp>

namespace stan {
namespace math {
@@ -28,12 +29,35 @@ struct inv_cloglog_fun {
* @param x container
* @return 1 - exp(-exp()) applied to each value in x.
*/
template <typename T>
inline typename apply_scalar_unary<inv_cloglog_fun, T>::return_t inv_cloglog(
const T& x) {
template <typename T, typename = require_not_eigen_vt<std::is_arithmetic, T>>
inline auto inv_cloglog(const T& x) {
return apply_scalar_unary<inv_cloglog_fun, T>::apply(x);
}

/**
* Version of inv_cloglog() that accepts Eigen Matrix or matrix expressions.
* @tparam Derived derived type of x
* @param x Matrix or matrix expression
* @return 1 - exp(-exp()) applied to each value in x.
*/
template <typename Derived,
typename = require_eigen_vt<std::is_arithmetic, Derived>>
inline auto inv_cloglog(const Eigen::MatrixBase<Derived>& x) {
return (1 - exp(-exp(x.derived().array()))).matrix();
}

/**
* Version of inv_cloglog() that accepts Eigen Array or array expressions.
* @tparam Derived derived type of x
* @param x Array or array expression
* @return 1 - exp(-exp()) applied to each value in x.
*/
template <typename Derived,
typename = require_eigen_vt<std::is_arithmetic, Derived>>
inline auto inv_cloglog(const Eigen::ArrayBase<Derived>& x) {
return 1 - exp(-exp(x.derived()));
}

} // namespace math
} // namespace stan

6 changes: 4 additions & 2 deletions stan/math/prim/mat/fun/inv_logit.hpp
@@ -29,11 +29,13 @@ struct inv_logit_fun {
* @return Inverse logit applied to each value in x.
*/
template <typename T>
inline typename apply_scalar_unary<inv_logit_fun, T>::return_t inv_logit(
const T& x) {
inline auto inv_logit(const T& x) {
return apply_scalar_unary<inv_logit_fun, T>::apply(x);
}

// TODO(Tadej): Eigen is introducing their implementation logistic() of this
// in 3.4. Use that once we switch to Eigen 3.4

} // namespace math
} // namespace stan

31 changes: 28 additions & 3 deletions stan/math/prim/mat/fun/inv_sqrt.hpp
@@ -1,6 +1,8 @@
#ifndef STAN_MATH_PRIM_MAT_FUN_INV_SQRT_HPP
#define STAN_MATH_PRIM_MAT_FUN_INV_SQRT_HPP

#include <stan/math/prim/mat/fun/Eigen.hpp>
#include <stan/math/prim/meta.hpp>
#include <stan/math/prim/vectorize/apply_scalar_unary.hpp>
#include <stan/math/prim/scal/fun/inv_sqrt.hpp>

@@ -28,12 +30,35 @@ struct inv_sqrt_fun {
* @param x container
* @return 1 / sqrt of each value in x.
*/
template <typename T>
inline typename apply_scalar_unary<inv_sqrt_fun, T>::return_t inv_sqrt(
const T& x) {
template <typename T, typename = require_not_eigen_vt<std::is_arithmetic, T>>
inline auto inv_sqrt(const T& x) {
return apply_scalar_unary<inv_sqrt_fun, T>::apply(x);
}

/**
* Version of inv_sqrt() that accepts Eigen Matrix or matrix expressions.
* @tparam Derived derived type of x
* @param x Matrix or matrix expression
* @return 1 / sqrt of each value in x.
*/
template <typename Derived,
typename = require_eigen_vt<std::is_arithmetic, Derived>>
inline auto inv_sqrt(const Eigen::MatrixBase<Derived>& x) {
return x.derived().array().rsqrt().matrix();
}

/**
* Version of inv_sqrt() that accepts Eigen Array or array expressions.
* @tparam Derived derived type of x
* @param x Array or array expression
* @return 1 / sqrt of each value in x.
*/
template <typename Derived,
typename = require_eigen_vt<std::is_arithmetic, Derived>>
inline auto inv_sqrt(const Eigen::ArrayBase<Derived>& x) {
return x.derived().rsqrt();
}

} // namespace math
} // namespace stan

23 changes: 5 additions & 18 deletions stan/math/prim/mat/fun/inv_square.hpp
@@ -1,26 +1,14 @@
#ifndef STAN_MATH_PRIM_MAT_FUN_INV_SQUARE_HPP
#define STAN_MATH_PRIM_MAT_FUN_INV_SQUARE_HPP

#include <stan/math/prim/vectorize/apply_scalar_unary.hpp>
#include <stan/math/prim/mat/fun/inv.hpp>
#include <stan/math/prim/mat/fun/square.hpp>
#include <stan/math/prim/scal/fun/inv_square.hpp>
#include <stan/math/prim/vectorize/apply_scalar_unary.hpp>

namespace stan {
namespace math {

/**
* Structure to wrap inv_square() so that it can be vectorized.
*
* @tparam T type of variable
* @param x variable
* @return 1 / x squared.
*/
struct inv_square_fun {
template <typename T>
static inline T fun(const T& x) {
return inv_square(x);
}
};

/**
* Vectorized version of inv_square().
*
@@ -29,9 +17,8 @@ struct inv_square_fun {
* @return 1 / the square of each value in x.
*/
template <typename T>
inline typename apply_scalar_unary<inv_square_fun, T>::return_t inv_square(
const T& x) {
return apply_scalar_unary<inv_square_fun, T>::apply(x);
inline auto inv_square(const T& x) {
return inv(square(x));
}

} // namespace math
2 changes: 1 addition & 1 deletion stan/math/prim/mat/fun/lgamma.hpp
@@ -32,7 +32,7 @@ struct lgamma_fun {
* @throw std::domain_error if any value is a negative integer or 0.
*/
template <typename T>
inline typename apply_scalar_unary<lgamma_fun, T>::return_t lgamma(const T& x) {
inline auto lgamma(const T& x) {
return apply_scalar_unary<lgamma_fun, T>::apply(x);
}

20 changes: 17 additions & 3 deletions stan/math/prim/mat/fun/log.hpp
@@ -1,8 +1,10 @@
#ifndef STAN_MATH_PRIM_MAT_FUN_LOG_HPP
#define STAN_MATH_PRIM_MAT_FUN_LOG_HPP

#include <stan/math/prim/vectorize/apply_scalar_unary.hpp>
#include <stan/math/prim/mat/fun/Eigen.hpp>
#include <stan/math/prim/meta.hpp>
#include <stan/math/prim/scal/fun/log.hpp>
#include <stan/math/prim/vectorize/apply_scalar_unary.hpp>
#include <cmath>

namespace stan {
@@ -35,11 +37,23 @@ struct log_fun {
* @param[in] x container
* @return Elementwise application of natural log to the argument.
*/
template <typename T>
inline typename apply_scalar_unary<log_fun, T>::return_t log(const T& x) {
template <typename T, typename = require_not_eigen_vt<std::is_arithmetic, T>>
inline auto log(const T& x) {
return apply_scalar_unary<log_fun, T>::apply(x);
}

/**
* Version of log() that accepts Eigen Matrix or matrix expressions.
* @tparam Derived derived type of x
* @param x Matrix or matrix expression
* @return Elementwise application of natural log to the argument.
*/
template <typename Derived,
typename = require_eigen_vt<std::is_arithmetic, Derived>>
inline auto log(const Eigen::MatrixBase<Derived>& x) {
return x.derived().array().log().matrix();
}

} // namespace math
} // namespace stan
#endif
18 changes: 16 additions & 2 deletions stan/math/prim/mat/fun/log10.hpp
@@ -1,7 +1,9 @@
#ifndef STAN_MATH_PRIM_MAT_FUN_LOG10_HPP
#define STAN_MATH_PRIM_MAT_FUN_LOG10_HPP

#include <stan/math/prim/mat/fun/Eigen.hpp>
#include <stan/math/prim/vectorize/apply_scalar_unary.hpp>
#include <stan/math/prim/meta.hpp>
#include <cmath>

namespace stan {
@@ -29,11 +31,23 @@ struct log10_fun {
* @param x container
* @return Log base-10 applied to each value in x.
*/
template <typename T>
inline typename apply_scalar_unary<log10_fun, T>::return_t log10(const T& x) {
template <typename T, typename = require_not_eigen_vt<std::is_arithmetic, T>>
inline auto log10(const T& x) {
return apply_scalar_unary<log10_fun, T>::apply(x);
}

/**
* Version of log10() that accepts Eigen Matrix or matrix expressions.
* @tparam Derived derived type of x
* @param x Matrix or matrix expression
* @return Log base-10 applied to each value in x.
*/
template <typename Derived,
typename = require_eigen_vt<std::is_arithmetic, Derived>>
inline auto log10(const Eigen::MatrixBase<Derived>& x) {
return x.derived().array().log10().matrix();
}

} // namespace math
} // namespace stan

2 changes: 1 addition & 1 deletion stan/math/prim/mat/fun/log1m.hpp
@@ -29,7 +29,7 @@ struct log1m_fun {
* @return Natural log of 1 minus each value in x.
*/
template <typename T>
inline typename apply_scalar_unary<log1m_fun, T>::return_t log1m(const T& x) {
inline auto log1m(const T& x) {
return apply_scalar_unary<log1m_fun, T>::apply(x);
}

3 changes: 1 addition & 2 deletions stan/math/prim/mat/fun/log1m_exp.hpp
@@ -29,8 +29,7 @@ struct log1m_exp_fun {
* @return Natural log of (1 - exp()) applied to each value in x.
*/
template <typename T>
inline typename apply_scalar_unary<log1m_exp_fun, T>::return_t log1m_exp(
const T& x) {
inline auto log1m_exp(const T& x) {
return apply_scalar_unary<log1m_exp_fun, T>::apply(x);
}

5 changes: 3 additions & 2 deletions stan/math/prim/mat/fun/log1p.hpp
@@ -1,8 +1,9 @@
#ifndef STAN_MATH_PRIM_MAT_FUN_LOG1P_HPP
#define STAN_MATH_PRIM_MAT_FUN_LOG1P_HPP

#include <stan/math/prim/vectorize/apply_scalar_unary.hpp>
#include <stan/math/prim/mat/fun/Eigen.hpp>
#include <stan/math/prim/scal/fun/log1p.hpp>
#include <stan/math/prim/vectorize/apply_scalar_unary.hpp>

namespace stan {
namespace math {
@@ -35,7 +36,7 @@ struct log1p_fun {
* @return Elementwise log1p of members of container.
*/
template <typename T>
inline typename apply_scalar_unary<log1p_fun, T>::return_t log1p(const T& x) {
inline auto log1p(const T& x) {
return apply_scalar_unary<log1p_fun, T>::apply(x);
}

3 changes: 1 addition & 2 deletions stan/math/prim/mat/fun/log1p_exp.hpp
@@ -29,8 +29,7 @@ struct log1p_exp_fun {
* @return Natural log of (1 + exp()) applied to each value in x.
*/
template <typename T>
inline typename apply_scalar_unary<log1p_exp_fun, T>::return_t log1p_exp(
const T& x) {
inline auto log1p_exp(const T& x) {
return apply_scalar_unary<log1p_exp_fun, T>::apply(x);
}

2 changes: 1 addition & 1 deletion stan/math/prim/mat/fun/log2.hpp
@@ -36,7 +36,7 @@ struct log2_fun {
* @return elementwise log2 of container elements
*/
template <typename T, typename = require_vector_like_t<T>>
inline typename apply_scalar_unary<log2_fun, T>::return_t log2(const T& x) {
inline auto log2(const T& x) {
return apply_scalar_unary<log2_fun, T>::apply(x);
}

4 changes: 3 additions & 1 deletion stan/math/prim/mat/fun/log_determinant_spd.hpp
@@ -3,6 +3,8 @@

#include <stan/math/prim/err.hpp>
#include <stan/math/prim/mat/fun/Eigen.hpp>
#include <stan/math/prim/mat/fun/log.hpp>
#include <stan/math/prim/mat/fun/sum.hpp>
#include <cmath>

namespace stan {
@@ -26,7 +28,7 @@ inline T log_determinant_spd(const Eigen::Matrix<T, R, C>& m) {
if (m.size() == 0)
return 0;

return m.ldlt().vectorD().array().log().sum();
return sum(log(m.ldlt().vectorD().array()));
}

} // namespace math
3 changes: 1 addition & 2 deletions stan/math/prim/mat/fun/log_inv_logit.hpp
@@ -36,8 +36,7 @@ struct log_inv_logit_fun {
* @return elementwise log_inv_logit of members of container
*/
template <typename T>
inline typename apply_scalar_unary<log_inv_logit_fun, T>::return_t
log_inv_logit(const T& x) {
inline auto log_inv_logit(const T& x) {
return apply_scalar_unary<log_inv_logit_fun, T>::apply(x);
}

2 changes: 1 addition & 1 deletion stan/math/prim/mat/fun/logit.hpp
@@ -35,7 +35,7 @@ struct logit_fun {
* @return elementwise logit of container elements
*/
template <typename T>
inline typename apply_scalar_unary<logit_fun, T>::return_t logit(const T& x) {
inline auto logit(const T& x) {
return apply_scalar_unary<logit_fun, T>::apply(x);
}

2 changes: 1 addition & 1 deletion stan/math/prim/mat/fun/minus.hpp
@@ -12,7 +12,7 @@ namespace math {
* @return Negation of subtrahend.
*/
template <typename T>
inline T minus(const T& x) {
inline auto minus(const T& x) {
return -x;
}

78 changes: 6 additions & 72 deletions stan/math/prim/mat/fun/promote_scalar.hpp
@@ -2,22 +2,22 @@
#define STAN_MATH_PRIM_MAT_FUN_PROMOTE_SCALAR_HPP

#include <stan/math/prim/scal/fun/promote_scalar.hpp>
#include <stan/math/prim/mat/fun/promote_scalar_type.hpp>
#include <stan/math/prim/mat/fun/Eigen.hpp>
#include <stan/math/prim/meta.hpp>

namespace stan {
namespace math {

/**
* Struct to hold static function for promoting underlying scalar
* types. This specialization is for Eigen matrix inputs.
* types. This specialization is for Eigen inputs.
*
* @tparam T return scalar type
* @tparam S input matrix scalar type for static nested function, which must
* have a scalar type assignable to T
* @tparam S input matrix or vector or row vector type for static nested
* function, which must have a scalar type assignable to T
*/
template <typename T, typename S>
struct promote_scalar_struct<T, Eigen::Matrix<S, -1, -1> > {
struct promote_scalar_struct<T, S, require_eigen_t<S>> {
/**
* Return the matrix consisting of the recursive promotion of
* the elements of the input matrix to the scalar type specified
@@ -26,73 +26,7 @@ struct promote_scalar_struct<T, Eigen::Matrix<S, -1, -1> > {
* @param x input matrix.
* @return matrix with values promoted from input vector.
*/
static Eigen::Matrix<typename promote_scalar_type<T, S>::type, -1, -1> apply(
const Eigen::Matrix<S, -1, -1>& x) {
Eigen::Matrix<typename promote_scalar_type<T, S>::type, -1, -1> y(x.rows(),
x.cols());
for (int i = 0; i < x.size(); ++i) {
y(i) = promote_scalar_struct<T, S>::apply(x(i));
}
return y;
}
};

/**
* Struct to hold static function for promoting underlying scalar
* types. This specialization is for Eigen column vector inputs.
*
* @tparam T return scalar type
* @tparam S input matrix scalar type for static nested function, which must
* have a scalar type assignable to T
*/
template <typename T, typename S>
struct promote_scalar_struct<T, Eigen::Matrix<S, 1, -1> > {
/**
* Return the column vector consisting of the recursive promotion of
* the elements of the input column vector to the scalar type specified
* by the return template parameter.
*
* @param x input column vector.
* @return column vector with values promoted from input vector.
*/
static Eigen::Matrix<typename promote_scalar_type<T, S>::type, 1, -1> apply(
const Eigen::Matrix<S, 1, -1>& x) {
Eigen::Matrix<typename promote_scalar_type<T, S>::type, 1, -1> y(x.rows(),
x.cols());
for (int i = 0; i < x.size(); ++i) {
y(i) = promote_scalar_struct<T, S>::apply(x(i));
}
return y;
}
};

/**
* Struct to hold static function for promoting underlying scalar
* types. This specialization is for Eigen row vector inputs.
*
* @tparam T return scalar type
* @tparam S input matrix scalar type for static nested function, which must
* have a scalar type assignable to T
*/
template <typename T, typename S>
struct promote_scalar_struct<T, Eigen::Matrix<S, -1, 1> > {
/**
* Return the row vector consisting of the recursive promotion of
* the elements of the input row vector to the scalar type specified
* by the return template parameter.
*
* @param x input row vector.
* @return row vector with values promoted from input vector.
*/
static Eigen::Matrix<typename promote_scalar_type<T, S>::type, -1, 1> apply(
const Eigen::Matrix<S, -1, 1>& x) {
Eigen::Matrix<typename promote_scalar_type<T, S>::type, -1, 1> y(x.rows(),
x.cols());
for (int i = 0; i < x.size(); ++i) {
y(i) = promote_scalar_struct<T, S>::apply(x(i));
}
return y;
}
static auto apply(const S& x) { return x.template cast<T>(); }
};

} // namespace math
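
The three shape-specific specializations above (matrix, column vector, row vector) collapse into one require_eigen_t specialization because Eigen's cast<T>() already performs the element-wise promotion for any shape or expression. What cast() does, in isolation (types chosen only for illustration):

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Matrix<int, 2, 3> xi;
  xi << 1, 2, 3,
        4, 5, 6;
  // Element-wise promotion from int to double; the same call works for
  // vectors, row vectors, and expressions.
  Eigen::Matrix<double, 2, 3> xd = xi.cast<double>();
  std::cout << xd << "\n";
}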
69 changes: 0 additions & 69 deletions stan/math/prim/mat/fun/promote_scalar_type.hpp

This file was deleted.

4 changes: 3 additions & 1 deletion stan/math/prim/mat/fun/read_cov_L.hpp
@@ -2,6 +2,8 @@
#define STAN_MATH_PRIM_MAT_FUN_READ_COV_L_HPP

#include <stan/math/prim/mat/fun/Eigen.hpp>
#include <stan/math/prim/mat/fun/log.hpp>
#include <stan/math/prim/mat/fun/sum.hpp>
#include <stan/math/prim/scal/fun/constants.hpp>

namespace stan {
@@ -24,7 +26,7 @@ Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> read_cov_L(
const Eigen::Array<T, Eigen::Dynamic, 1>& sds, T& log_prob) {
size_t K = sds.rows();
// adjust due to transformation from correlations to covariances
log_prob += (sds.log().sum() + LOG_TWO) * K;
log_prob += (sum(log(sds)) + LOG_TWO) * K;
return sds.matrix().asDiagonal() * read_corr_L(CPCs, K, log_prob);
}

20 changes: 17 additions & 3 deletions stan/math/prim/mat/fun/round.hpp
@@ -1,8 +1,10 @@
#ifndef STAN_MATH_PRIM_MAT_FUN_ROUND_HPP
#define STAN_MATH_PRIM_MAT_FUN_ROUND_HPP

#include <stan/math/prim/vectorize/apply_scalar_unary.hpp>
#include <stan/math/prim/mat/fun/Eigen.hpp>
#include <stan/math/prim/meta.hpp>
#include <stan/math/prim/scal/fun/round.hpp>
#include <stan/math/prim/vectorize/apply_scalar_unary.hpp>

namespace stan {
namespace math {
@@ -28,11 +30,23 @@ struct round_fun {
* @param x container
* @return Rounded value of each value in x.
*/
template <typename T>
inline typename apply_scalar_unary<round_fun, T>::return_t round(const T& x) {
template <typename T, typename = require_not_eigen_vt<std::is_arithmetic, T>>
inline auto round(const T& x) {
return apply_scalar_unary<round_fun, T>::apply(x);
}

/**
* Version of round() that accepts Eigen Matrix or matrix expressions.
* @tparam Derived derived type of x
* @param x Matrix or matrix expression
* @return Rounded value of each value in x.
*/
template <typename Derived,
typename = require_eigen_vt<std::is_arithmetic, Derived>>
inline auto round(const Eigen::MatrixBase<Derived>& x) {
return x.derived().array().round().matrix();
}

} // namespace math
} // namespace stan

18 changes: 16 additions & 2 deletions stan/math/prim/mat/fun/sin.hpp
@@ -1,6 +1,8 @@
#ifndef STAN_MATH_PRIM_MAT_FUN_SIN_HPP
#define STAN_MATH_PRIM_MAT_FUN_SIN_HPP

#include <stan/math/prim/mat/fun/Eigen.hpp>
#include <stan/math/prim/meta.hpp>
#include <stan/math/prim/vectorize/apply_scalar_unary.hpp>
#include <cmath>

@@ -29,11 +31,23 @@ struct sin_fun {
* @param x angles in radians
* @return Sine of each value in x.
*/
template <typename T>
inline typename apply_scalar_unary<sin_fun, T>::return_t sin(const T& x) {
template <typename T, typename = require_not_eigen_vt<std::is_arithmetic, T>>
inline auto sin(const T& x) {
return apply_scalar_unary<sin_fun, T>::apply(x);
}

/**
* Version of sin() that accepts Eigen Matrix or matrix expressions.
* @tparam Derived derived type of x
* @param x Matrix or matrix expression
* @return Sine of each value in x.
*/
template <typename Derived,
typename = require_eigen_vt<std::is_arithmetic, Derived>>
inline auto sin(const Eigen::MatrixBase<Derived>& x) {
return x.derived().array().sin().matrix();
}

} // namespace math
} // namespace stan

5 changes: 3 additions & 2 deletions stan/math/prim/mat/fun/singular_values.hpp
@@ -19,8 +19,9 @@ namespace math {
template <typename T>
Eigen::Matrix<T, Eigen::Dynamic, 1> singular_values(
const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>& m) {
if (m.size() == 0)
return Eigen::Matrix<T, 0, 1>();
if (m.size() == 0) {
return {};
}

return Eigen::JacobiSVD<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> >(m)
.singularValues();
30 changes: 28 additions & 2 deletions stan/math/prim/mat/fun/sinh.hpp
@@ -1,6 +1,8 @@
#ifndef STAN_MATH_PRIM_MAT_FUN_SINH_HPP
#define STAN_MATH_PRIM_MAT_FUN_SINH_HPP

#include <stan/math/prim/mat/fun/Eigen.hpp>
#include <stan/math/prim/meta.hpp>
#include <stan/math/prim/vectorize/apply_scalar_unary.hpp>
#include <cmath>

@@ -29,11 +31,35 @@ struct sinh_fun {
* @param x container
* @return Hyperbolic sine of each variable in x.
*/
template <typename T>
inline typename apply_scalar_unary<sinh_fun, T>::return_t sinh(const T& x) {
template <typename T, typename = require_not_eigen_vt<std::is_arithmetic, T>>
inline auto sinh(const T& x) {
return apply_scalar_unary<sinh_fun, T>::apply(x);
}

/**
* Version of sinh() that accepts Eigen Matrix or matrix expressions.
* @tparam Derived derived type of x
* @param x Matrix or matrix expression
* @return Hyperbolic sine of each variable in x.
*/
template <typename Derived,
typename = require_eigen_vt<std::is_arithmetic, Derived>>
inline auto sinh(const Eigen::MatrixBase<Derived>& x) {
return x.derived().array().sinh().matrix();
}

/**
* Version of sinh() that accepts Eigen Array or array expressions.
* @tparam Derived derived type of x
* @param x Array or array expression
* @return Hyperbolic sine of each variable in x.
*/
template <typename Derived,
typename = require_eigen_vt<std::is_arithmetic, Derived>>
inline auto sinh(const Eigen::ArrayBase<Derived>& x) {
return x.derived().sinh();
}

} // namespace math
} // namespace stan

18 changes: 16 additions & 2 deletions stan/math/prim/mat/fun/sqrt.hpp
@@ -1,6 +1,8 @@
#ifndef STAN_MATH_PRIM_MAT_FUN_SQRT_HPP
#define STAN_MATH_PRIM_MAT_FUN_SQRT_HPP

#include <stan/math/prim/mat/fun/Eigen.hpp>
#include <stan/math/prim/meta.hpp>
#include <stan/math/prim/vectorize/apply_scalar_unary.hpp>
#include <cmath>

@@ -29,11 +31,23 @@ struct sqrt_fun {
* @param x container
* @return Square root of each value in x.
*/
template <typename T>
inline typename apply_scalar_unary<sqrt_fun, T>::return_t sqrt(const T& x) {
template <typename T, typename = require_not_eigen_vt<std::is_arithmetic, T>>
inline auto sqrt(const T& x) {
return apply_scalar_unary<sqrt_fun, T>::apply(x);
}

/**
* Version of sqrt() that accepts Eigen Matrix or matrix expressions.
* @tparam Derived derived type of x
* @param x Matrix or matrix expression
* @return Square root of each value in x.
*/
template <typename Derived,
typename = require_eigen_vt<std::is_arithmetic, Derived>>
inline auto sqrt(const Eigen::MatrixBase<Derived>& x) {
return x.derived().array().sqrt().matrix();
}

} // namespace math
} // namespace stan

18 changes: 16 additions & 2 deletions stan/math/prim/mat/fun/square.hpp
@@ -1,8 +1,10 @@
#ifndef STAN_MATH_PRIM_MAT_FUN_SQUARE_HPP
#define STAN_MATH_PRIM_MAT_FUN_SQUARE_HPP

#include <stan/math/prim/vectorize/apply_scalar_unary.hpp>
#include <stan/math/prim/mat/fun/Eigen.hpp>
#include <stan/math/prim/meta.hpp>
#include <stan/math/prim/scal/fun/square.hpp>
#include <stan/math/prim/vectorize/apply_scalar_unary.hpp>

namespace stan {
namespace math {
@@ -29,10 +31,22 @@ struct square_fun {
* @return Each value in x squared.
*/
template <typename T>
inline typename apply_scalar_unary<square_fun, T>::return_t square(const T& x) {
inline auto square(const T& x) {
return apply_scalar_unary<square_fun, T>::apply(x);
}

/**
* Version of square() that accepts Eigen Matrix or matrix expressions.
* @tparam Derived derived type of x
* @param x Matrix or matrix expression
* @return Each value in x squared.
*/
template <typename Derived,
typename = require_eigen_vt<std::is_arithmetic, Derived>>
inline auto square(const Eigen::MatrixBase<Derived>& x) {
return x.derived().array().square().matrix();
}

} // namespace math
} // namespace stan

18 changes: 16 additions & 2 deletions stan/math/prim/mat/fun/tan.hpp
@@ -1,6 +1,8 @@
#ifndef STAN_MATH_PRIM_MAT_FUN_TAN_HPP
#define STAN_MATH_PRIM_MAT_FUN_TAN_HPP

#include <stan/math/prim/mat/fun/Eigen.hpp>
#include <stan/math/prim/meta.hpp>
#include <stan/math/prim/vectorize/apply_scalar_unary.hpp>
#include <cmath>

@@ -29,11 +31,23 @@ struct tan_fun {
* @param x angles in radians
* @return Tangent of each value in x.
*/
template <typename T>
inline typename apply_scalar_unary<tan_fun, T>::return_t tan(const T& x) {
template <typename T, typename = require_not_eigen_vt<std::is_arithmetic, T>>
inline auto tan(const T& x) {
return apply_scalar_unary<tan_fun, T>::apply(x);
}

/**
* Version of tan() that accepts Eigen Matrix or matrix expressions.
* @tparam Derived derived type of x
* @param x Matrix or matrix expression
* @return Tangent of each value in x.
*/
template <typename Derived,
typename = require_eigen_vt<std::is_arithmetic, Derived>>
inline auto tan(const Eigen::MatrixBase<Derived>& x) {
return x.derived().array().tan().matrix();
}

} // namespace math
} // namespace stan

18 changes: 16 additions & 2 deletions stan/math/prim/mat/fun/tanh.hpp
@@ -1,6 +1,8 @@
#ifndef STAN_MATH_PRIM_MAT_FUN_TANH_HPP
#define STAN_MATH_PRIM_MAT_FUN_TANH_HPP

#include <stan/math/prim/mat/fun/Eigen.hpp>
#include <stan/math/prim/meta.hpp>
#include <stan/math/prim/vectorize/apply_scalar_unary.hpp>
#include <cmath>

@@ -29,11 +31,23 @@ struct tanh_fun {
* @param x angles in radians
* @return Hyperbolic tangent of each value in x.
*/
template <typename T>
inline typename apply_scalar_unary<tanh_fun, T>::return_t tanh(const T& x) {
template <typename T, typename = require_not_eigen_vt<std::is_arithmetic, T>>
inline auto tanh(const T& x) {
return apply_scalar_unary<tanh_fun, T>::apply(x);
}

/**
* Version of tanh() that accepts Eigen Matrix or matrix expressions.
* @tparam Derived derived type of x
* @param x Matrix or matrix expression
* @return Hyperbolic tangent of each value in x.
*/
template <typename Derived,
typename = require_eigen_vt<std::is_arithmetic, Derived>>
inline auto tanh(const Eigen::MatrixBase<Derived>& x) {
return x.derived().array().tanh().matrix();
}

} // namespace math
} // namespace stan

2 changes: 1 addition & 1 deletion stan/math/prim/mat/fun/tgamma.hpp
@@ -31,7 +31,7 @@ struct tgamma_fun {
* @throw std::domain_error if any value is 0 or a negative integer
*/
template <typename T>
inline typename apply_scalar_unary<tgamma_fun, T>::return_t tgamma(const T& x) {
inline auto tgamma(const T& x) {
return apply_scalar_unary<tgamma_fun, T>::apply(x);
}

2 changes: 1 addition & 1 deletion stan/math/prim/mat/fun/trace_gen_inv_quad_form_ldlt.hpp
@@ -37,7 +37,7 @@ namespace math {
* be multiplied by D.
*/
template <typename T1, typename T2, typename T3, int R1, int C1, int R2, int C2,
int R3, int C3, typename = require_any_not_var_t<T1, T2, T3>>
int R3, int C3, typename = require_all_not_var_t<T1, T2, T3>>
inline return_type_t<T1, T2, T3> trace_gen_inv_quad_form_ldlt(
const Eigen::Matrix<T1, R1, C1> &D, const LDLT_factor<T2, R2, C2> &A,
const Eigen::Matrix<T3, R3, C3> &B) {
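
This file and the two below move from require_any_not_var_t to require_all_not_var_t, so the prim overload is now enabled only when none of the scalar types is var, rather than whenever at least one of them is not. A sketch of that distinction with standard traits (var here is a stand-in type, not stan::math::var, and the helper names are illustrative):

#include <type_traits>

struct var {};  // stand-in for an autodiff scalar

template <typename... Ts>
using all_not_var = std::conjunction<std::negation<std::is_same<var, Ts>>...>;

template <typename... Ts>
using any_not_var = std::disjunction<std::negation<std::is_same<var, Ts>>...>;

static_assert(all_not_var<double, int>::value, "no var present: prim overload enabled");
static_assert(!all_not_var<double, var>::value, "a single var now disables the prim overload");
static_assert(any_not_var<double, var>::value, "the old check still passed in this mixed case");

int main() {}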
2 changes: 1 addition & 1 deletion stan/math/prim/mat/fun/trace_gen_quad_form.hpp
@@ -35,7 +35,7 @@ namespace math {
* be multiplied by D.
*/
template <typename TD, int RD, int CD, typename TA, int RA, int CA, typename TB,
int RB, int CB, typename = require_any_not_var_t<TD, TA, TB>>
int RB, int CB, typename = require_all_not_var_t<TD, TA, TB>>
inline return_type_t<TD, TA, TB> trace_gen_quad_form(
const Eigen::Matrix<TD, RD, CD> &D, const Eigen::Matrix<TA, RA, CA> &A,
const Eigen::Matrix<TB, RB, CB> &B) {
2 changes: 1 addition & 1 deletion stan/math/prim/mat/fun/trace_inv_quad_form_ldlt.hpp
@@ -29,7 +29,7 @@ namespace math {
*
*/
template <typename T1, typename T2, int R2, int C2, int R3, int C3,
typename = require_any_not_var_t<T1, T2>>
typename = require_all_not_var_t<T1, T2>>
inline return_type_t<T1, T2> trace_inv_quad_form_ldlt(
const LDLT_factor<T1, R2, C2> &A, const Eigen::Matrix<T2, R3, C3> &B) {
if (A.rows() == 0 && B.rows() == 0) {
7 changes: 3 additions & 4 deletions stan/math/prim/mat/fun/trigamma.hpp
@@ -12,12 +12,12 @@ namespace math {
*/
struct trigamma_fun {
/**
* Return the approximate value of the Phi() function applied to
* Return the trigamma() function applied to
* the argument.
*
* @tparam T type of argument
* @param x argument
* @return approximate value of Phi applied to argument
* @return trigamma applied to argument.
*/
template <typename T>
static inline T fun(const T& x) {
@@ -36,8 +36,7 @@ struct trigamma_fun {
* @return elementwise trigamma of container elements
*/
template <typename T>
inline typename apply_scalar_unary<trigamma_fun, T>::return_t trigamma(
const T& x) {
inline auto trigamma(const T& x) {
return apply_scalar_unary<trigamma_fun, T>::apply(x);
}

2 changes: 1 addition & 1 deletion stan/math/prim/mat/fun/trunc.hpp
@@ -36,7 +36,7 @@ struct trunc_fun {
* @return elementwise trunc of container elements
*/
template <typename T>
inline typename apply_scalar_unary<trunc_fun, T>::return_t trunc(const T& x) {
inline auto trunc(const T& x) {
return apply_scalar_unary<trunc_fun, T>::apply(x);
}

29 changes: 9 additions & 20 deletions stan/math/prim/mat/fun/value_of_rec.hpp
@@ -13,23 +13,14 @@ namespace math {
* T must implement value_of_rec. See
* test/unit/math/fwd/fun/value_of_test.cpp for fvar and var usage.
*
* @tparam T type of elements in the matrix
* @tparam R number of rows in the matrix, can be Eigen::Dynamic
* @tparam C number of columns in the matrix, can be Eigen::Dynamic
*
* @tparam T Type of matrix
* @param[in] M Matrix to be converted
* @return Matrix of values
**/
template <typename T, int R, int C>
inline Eigen::Matrix<double, R, C> value_of_rec(
const Eigen::Matrix<T, R, C>& M) {
Eigen::Matrix<double, R, C> Md(M.rows(), M.cols());
for (int j = 0; j < M.cols(); j++) {
for (int i = 0; i < M.rows(); i++) {
Md(i, j) = value_of_rec(M(i, j));
}
}
return Md;
template <typename T, typename = require_not_same_st<T, double>,
typename = require_eigen_t<T>>
inline auto value_of_rec(const T& M) {
return M.unaryExpr([](auto x) { return value_of_rec(x); });
}

/**
@@ -40,15 +31,13 @@ inline Eigen::Matrix<double, R, C> value_of_rec(
*
* <p>This inline pass-through no-op should be compiled away.
*
* @tparam R number of rows in the matrix, can be Eigen::Dynamic
* @tparam C number of columns in the matrix, can be Eigen::Dynamic
*
* @tparam T Type of matrix.
* @param x Specified matrix.
* @return Specified matrix.
*/
template <int R, int C>
inline const Eigen::Matrix<double, R, C>& value_of_rec(
const Eigen::Matrix<double, R, C>& x) {
template <typename T, typename = require_same_st<T, double>,
typename = require_eigen_t<T>>
inline const T& value_of_rec(const T& x) {
return x;
}
} // namespace math
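
value_of_rec for Eigen containers of autodiff scalars now returns a unaryExpr expression rather than filling a fresh matrix with nested loops, so the conversion stays lazy and can fuse with whatever consumes it. The Eigen mechanism on its own, with a plain double lambda just to show the shape of the call:

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::MatrixXd m(2, 2);
  m << 1.0, 2.0,
       3.0, 4.0;
  // unaryExpr applies the functor element-wise and returns a lazy expression;
  // assignment to a MatrixXd is what triggers evaluation.
  Eigen::MatrixXd doubled = m.unaryExpr([](double x) { return 2.0 * x; });
  std::cout << doubled << "\n";
}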
2 changes: 2 additions & 0 deletions stan/math/prim/meta.hpp
@@ -31,7 +31,9 @@
#include <stan/math/prim/meta/operands_and_partials.hpp>
#include <stan/math/prim/meta/partials_return_type.hpp>
#include <stan/math/prim/meta/partials_type.hpp>
#include <stan/math/prim/meta/plain_type.hpp>
#include <stan/math/prim/meta/promote_args.hpp>
#include <stan/math/prim/meta/promote_scalar_type.hpp>
#include <stan/math/prim/meta/require_generics.hpp>
#include <stan/math/prim/meta/return_type.hpp>
#include <stan/math/prim/meta/scalar_seq_view.hpp>
16 changes: 6 additions & 10 deletions stan/math/prim/meta/as_column_vector_or_scalar.hpp
@@ -16,7 +16,7 @@ namespace math {
* @param a Specified scalar.
* @return 1x1 matrix that contains the value of scalar.
*/
template <typename T>
template <typename T, typename = require_stan_scalar_t<T>>
inline const T& as_column_vector_or_scalar(const T& a) {
return a;
}
@@ -29,9 +29,8 @@ inline const T& as_column_vector_or_scalar(const T& a) {
* @param a Specified vector.
* @return Same vector.
*/
template <typename T>
inline const Eigen::Matrix<T, Eigen::Dynamic, 1>& as_column_vector_or_scalar(
const Eigen::Matrix<T, Eigen::Dynamic, 1>& a) {
template <typename T, typename = require_t<is_eigen_col_vector<T>>>
inline const auto& as_column_vector_or_scalar(const T& a) {
return a;
}

@@ -43,12 +42,9 @@ inline const Eigen::Matrix<T, Eigen::Dynamic, 1>& as_column_vector_or_scalar(
* @param a Specified vector.
* @return Transposed vector.
*/
template <typename T>
inline Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, 1>>
as_column_vector_or_scalar(const Eigen::Matrix<T, 1, Eigen::Dynamic>& a) {
return Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, 1>>(
a.data(), a.size()); // uses Eigen::Map instead of .transpose() so that
// there are less possible output types
template <typename T, typename = require_t<is_eigen_row_vector<T>>>
inline auto as_column_vector_or_scalar(const T& a) {
return a.transpose();
}

/** \ingroup type_trait
22 changes: 14 additions & 8 deletions stan/math/prim/meta/broadcast_array.hpp
@@ -1,12 +1,15 @@
#ifndef STAN_MATH_PRIM_META_BROADCAST_ARRAY_HPP
#define STAN_MATH_PRIM_META_BROADCAST_ARRAY_HPP

#include <stan/math/prim/meta/require_generics.hpp>
#include <stan/math/prim/meta/promote_scalar_type.hpp>
#include <stan/math/prim/mat/fun/Eigen.hpp>
#include <stdexcept>

namespace stan {
namespace math {
namespace internal {

template <typename T>
class broadcast_array {
private:
@@ -29,7 +32,7 @@ class broadcast_array {
}
};

template <typename T, typename S>
template <typename T, typename S, typename Enable = void>
class empty_broadcast_array {
public:
empty_broadcast_array() {}
@@ -45,8 +48,11 @@ class empty_broadcast_array {
void operator=(const Y& /*A*/);
};

template <typename ViewElt, typename OpElt, int R, int C>
class empty_broadcast_array<ViewElt, Eigen::Matrix<OpElt, R, C> > {
template <typename ViewElt, typename T>
class empty_broadcast_array<ViewElt, T, require_eigen_t<T>> {
enum { R = T::RowsAtCompileTime, C = T::ColsAtCompileTime };
using T_arg = promote_scalar_t<ViewElt, T>;

public:
empty_broadcast_array() {}
/** \ingroup type_trait
@@ -60,23 +66,23 @@ class empty_broadcast_array<ViewElt, Eigen::Matrix<OpElt, R, C> > {
/** \ingroup type_trait
* Not implemented so cannot be called.
*/
void operator=(const Eigen::Matrix<ViewElt, R, C>& /*A*/);
void operator=(const T_arg& /*A*/);
/** \ingroup type_trait
* Not implemented so cannot be called.
*/
void operator+=(Eigen::Matrix<ViewElt, R, C> /*A*/);
void operator+=(T_arg /*A*/);
/** \ingroup type_trait
* Not implemented so cannot be called.
*/
void operator-=(Eigen::Matrix<ViewElt, R, C> /*A*/);
void operator-=(T_arg /*A*/);
/** \ingroup type_trait
* Not implemented so cannot be called.
*/
Eigen::Matrix<ViewElt, 1, C>& row(int /*i*/);
T& row(int /*i*/);
/** \ingroup type_trait
* Not implemented so cannot be called.
*/
Eigen::Matrix<ViewElt, R, 1>& col(int /*i*/);
T& col(int /*i*/);
};
} // namespace internal
} // namespace math
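
The broadcast_array changes hinge on the extra typename Enable = void parameter: the primary template keeps handling everything, while a partial specialization constrained by require_eigen_t<T> now picks up every Eigen type, not just Eigen::Matrix<OpElt, R, C>. The underlying idiom, reduced to standard traits (std::is_floating_point stands in for require_eigen_t):

#include <type_traits>

// Primary template with a trailing Enable hook, as added in the diff.
template <typename T, typename Enable = void>
struct which_edge {
  static constexpr bool specialized = false;
};

// Partial specialization selected whenever the trait holds.
template <typename T>
struct which_edge<T, std::enable_if_t<std::is_floating_point<T>::value>> {
  static constexpr bool specialized = true;
};

static_assert(!which_edge<int>::specialized, "falls through to the primary template");
static_assert(which_edge<double>::specialized, "trait-gated specialization selected");

int main() {}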
3 changes: 2 additions & 1 deletion stan/math/prim/meta/get.hpp
@@ -2,6 +2,7 @@
#define STAN_MATH_PRIM_META_GET_HPP

#include <stan/math/prim/meta/require_generics.hpp>
#include <stan/math/prim/meta/scalar_type.hpp>
#include <stan/math/prim/mat/fun/Eigen.hpp>
#include <cmath>
#include <cstddef>
@@ -44,7 +45,7 @@ inline T get(const std::vector<T>& x, size_t n) {
* @return n-th element of the \c Eigen \c Matrix or expression
*/
template <typename T, typename = require_eigen_t<T>>
inline auto get(const T& m, size_t n) {
inline scalar_type_t<T> get(const T& m, size_t n) {
return m(static_cast<int>(n));
}
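
Pinning the return type to scalar_type_t<T> instead of auto guarantees that get() hands back a plain scalar value even when m is an unevaluated Eigen expression. A self-contained illustration of the same idea, with typename T::Scalar standing in for the Stan trait (get_demo is a made-up name, not part of the PR):

#include <Eigen/Dense>
#include <cstddef>
#include <iostream>

// Returning the scalar type by value forces coefficient evaluation, so the
// caller never holds a proxy or a reference into a temporary expression.
template <typename T>
inline typename T::Scalar get_demo(const T& m, std::size_t n) {
  return m(static_cast<int>(n));
}

int main() {
  Eigen::VectorXd v(3);
  v << 1, 2, 3;
  // Works for plain vectors and for expressions such as v + v alike.
  std::cout << get_demo(v, 2) << " " << get_demo(v + v, 2) << "\n";  // 3 6
  return 0;
}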

4 changes: 2 additions & 2 deletions stan/math/prim/meta/is_vector.hpp
@@ -21,7 +21,7 @@ namespace internal {
*/
template <typename T, bool = is_eigen<T>::value>
struct is_eigen_col_vector_impl
: bool_constant<std::decay_t<T>::RowsAtCompileTime == 1> {};
: bool_constant<std::decay_t<T>::ColsAtCompileTime == 1> {};

/** \ingroup type_trait
* Specialization for when type is not an eigen vector.
@@ -34,7 +34,7 @@ struct is_eigen_col_vector_impl<T, false> : std::false_type {};
*/
template <typename T, bool = is_eigen<T>::value>
struct is_eigen_row_vector_impl
: std::integral_constant<bool, std::decay_t<T>::ColsAtCompileTime == 1> {};
: std::integral_constant<bool, std::decay_t<T>::RowsAtCompileTime == 1> {};

/** \ingroup type_trait
* Specialization for when type is not an eigen vector.
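
After this change is_eigen_col_vector_impl tests ColsAtCompileTime == 1 and is_eigen_row_vector_impl tests RowsAtCompileTime == 1, which matches Eigen's own conventions. The constants the corrected traits rely on can be spot-checked directly (sketch, not part of the diff):

#include <Eigen/Dense>

static_assert(Eigen::VectorXd::ColsAtCompileTime == 1,
              "a column vector has exactly one column");
static_assert(Eigen::RowVectorXd::RowsAtCompileTime == 1,
              "a row vector has exactly one row");
static_assert(Eigen::MatrixXd::RowsAtCompileTime == Eigen::Dynamic
                  && Eigen::MatrixXd::ColsAtCompileTime == Eigen::Dynamic,
              "a general dynamic matrix is neither");

int main() { return 0; }
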
14 changes: 8 additions & 6 deletions stan/math/prim/meta/operands_and_partials.hpp
@@ -2,9 +2,11 @@
#define STAN_MATH_PRIM_META_OPERANDS_AND_PARTIALS_HPP

#include <stan/math/prim/mat/fun/Eigen.hpp>
#include <stan/math/prim/meta/require_generics.hpp>
#include <stan/math/prim/meta/broadcast_array.hpp>
#include <stan/math/prim/meta/return_type.hpp>
#include <vector>
#include <type_traits>

namespace stan {
namespace math {
@@ -34,7 +36,7 @@ namespace internal {
* @tparam ViewElt the type we expect to be at partials_[i]
* @tparam Op the type of the operand
*/
template <typename ViewElt, typename Op>
template <typename ViewElt, typename Op, typename Enable = void>
class ops_partials_edge {
public:
empty_broadcast_array<ViewElt, Op> partials_;
@@ -132,14 +134,14 @@ namespace internal {
* This class will be used for both multivariate (nested container)
* operands_and_partials edges as well as for the univariate case.
*/
template <typename Op, typename ViewElt, int R, int C>
class ops_partials_edge<ViewElt, Eigen::Matrix<Op, R, C>> {
template <typename Op, typename ViewElt>
class ops_partials_edge<ViewElt, Op, require_eigen_st<std::is_arithmetic, Op>> {
public:
using partials_t = empty_broadcast_array<ViewElt, Eigen::Matrix<Op, R, C>>;
using partials_t = empty_broadcast_array<ViewElt, Op>;
partials_t partials_;
empty_broadcast_array<partials_t, Eigen::Matrix<Op, R, C>> partials_vec_;
empty_broadcast_array<partials_t, Op> partials_vec_;
ops_partials_edge() {}
explicit ops_partials_edge(const Eigen::Matrix<Op, R, C>& /* ops */) {}
explicit ops_partials_edge(const Op& /* ops */) {}

private:
template <typename, typename, typename, typename, typename, typename>
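
This specialization is the no-op (prim) edge: when an Eigen operand has an arithmetic scalar type there is nothing to differentiate, so its partials are the empty broadcast arrays shown earlier. A hedged usage sketch follows; operands_and_partials and build() are the existing API, but the include path and the toy function are assumptions:

// Include path is an assumption; adjust to however the prim headers are
// aggregated in your tree.
#include <stan/math/prim/mat.hpp>

// Toy log density: with a double-only operand every edge is an
// empty_broadcast_array, so no partials storage is allocated and build()
// simply returns the value.
double toy_lp(const Eigen::VectorXd& y) {
  stan::math::operands_and_partials<Eigen::VectorXd> ops(y);
  return ops.build(-0.5 * y.squaredNorm());
}

int main() {
  Eigen::VectorXd y(2);
  y << 1.0, 2.0;
  return toy_lp(y) < 0 ? 0 : 1;
}
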
55 changes: 55 additions & 0 deletions stan/math/prim/meta/plain_type.hpp
@@ -0,0 +1,55 @@
#ifndef STAN_MATH_PRIM_META_PLAIN_TYPE_HPP
#define STAN_MATH_PRIM_META_PLAIN_TYPE_HPP

#include <stan/math/prim/meta/plain_type.hpp>
#include <stan/math/prim/meta/require_generics.hpp>
#include <type_traits>

namespace stan {

/**
 * Determines the plain (non-expression) type associated with \c T. For non-Eigen
 * types it is the decayed input type.
* @tparam T type to determine plain type of
*/
template <typename T, typename Enable = void>
struct plain_type {
using type = std::decay_t<T>;
};

template <typename T>
using plain_type_t = typename plain_type<T>::type;

/**
 * Determines the return type of calling \c .eval() on an Eigen expression.
*
* If input type \c T is a plain type (\c plain_type_t<T> equals \c
 * std::decay_t<T>), then member \c type is defined as <code> const
* plain_type_t<T>& </code>. Otherwise member \c type is defined as \c
* plain_type_t<T>.
*
* @tparam T type to determine eval return type of
*/
template <typename T>
struct eval_return_type {
using T1 = plain_type_t<T>;
using type = std::conditional_t<std::is_same<std::decay_t<T>, T1>::value,
const T1&, T1>;
};

template <typename T>
using eval_return_type_t = typename eval_return_type<T>::type;

/**
 * Determines the plain (non-expression) type associated with \c T. For an \c Eigen
 * expression it is the type the expression can be evaluated into.
* @tparam T type to determine plain type of
*/
template <typename T>
struct plain_type<T, require_eigen_t<T>> {
using type = typename std::decay_t<T>::PlainObject;
};

} // namespace stan

#endif // STAN_MATH_PRIM_META_PLAIN_TYPE_HPP
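
A few compile-time spot checks for the new traits (sketch only, assuming the header compiles standalone as written in the diff):

#include <stan/math/prim/meta/plain_type.hpp>
#include <Eigen/Dense>
#include <type_traits>
#include <utility>

using mat = Eigen::MatrixXd;
// Type of an unevaluated sum expression.
using expr = std::decay_t<decltype(std::declval<mat>() + std::declval<mat>())>;

// An expression's plain type is the dense matrix it evaluates into.
static_assert(std::is_same<stan::plain_type_t<expr>, mat>::value, "");
// A plain type maps to itself,
static_assert(std::is_same<stan::plain_type_t<mat>, mat>::value, "");
// so eval_return_type_t can hand back a const reference without copying,
static_assert(std::is_same<stan::eval_return_type_t<mat>, const mat&>::value, "");
// while a genuine expression has to be materialized into a new matrix.
static_assert(std::is_same<stan::eval_return_type_t<expr>, mat>::value, "");

int main() { return 0; }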