
Commit ef4177d

update cuda clip

tianleiwu committed Sep 25, 2024
1 parent 87ba638

Showing 1 changed file with 2 additions and 24 deletions.

onnxruntime/core/providers/cuda/math/clip.cc: 26 changes (2 additions, 24 deletions)
@@ -59,33 +59,11 @@ Status Clip_6<T>::ComputeInternal(OpKernelContext* ctx) const {
   return Status::OK();
 }
 
-namespace clip_internal {
-template <typename T>
-struct LowMax {
-  constexpr static T low() {
-    return std::numeric_limits<T>::lowest();
-  }
-  constexpr static T max() {
-    return std::numeric_limits<T>::max();
-  }
-};
-
-template <>
-struct LowMax<MLFloat16> {
-  static MLFloat16 low() {
-    return MLFloat16::FromBits(math::floatToHalf(std::numeric_limits<float>::lowest()));
-  }
-  static MLFloat16 max() {
-    return MLFloat16::FromBits(math::floatToHalf(std::numeric_limits<float>::max()));
-  }
-};
-}  // namespace clip_internal
-
 template <typename T>
 struct Clip::ComputeImpl {
   void operator()(cudaStream_t stream, const Tensor* X, const Tensor* min, const Tensor* max, Tensor* Y) const {
-    auto min_default = clip_internal::LowMax<T>::low();
-    auto max_default = clip_internal::LowMax<T>::max();
+    auto min_default = std::numeric_limits<T>::lowest();
+    auto max_default = std::numeric_limits<T>::max();
 
Warning on line 66 of onnxruntime/core/providers/cuda/math/clip.cc (GitHub Actions / Optional Lint C++): [cpplint] reported by reviewdog 🐶 Add #include <limits> for numeric_limits<> [build/include_what_you_use] [4]

     const T* min_data = nullptr;
     const T* max_data = nullptr;
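
The simplification only makes sense if std::numeric_limits<T> also works for the half-precision type, which is presumably why the MLFloat16 specialization of LowMax could be dropped. Below is a minimal, self-contained sketch of the pattern the new code uses: default clip bounds taken straight from std::numeric_limits. The ClipDefaults helper and the plain std::vector are illustrative stand-ins, not part of this commit, which operates on CUDA tensors inside Clip::ComputeImpl.

```cpp
// Sketch only: default clip bounds from std::numeric_limits, as in the diff above.
#include <algorithm>
#include <iostream>
#include <limits>   // needed for std::numeric_limits (the cpplint warning above)
#include <vector>

template <typename T>
void ClipDefaults(std::vector<T>& values, const T* min = nullptr, const T* max = nullptr) {
  // When no explicit bound is supplied, fall back to the type's full range,
  // mirroring min_default / max_default in the kernel.
  T min_default = std::numeric_limits<T>::lowest();
  T max_default = std::numeric_limits<T>::max();
  const T lo = min ? *min : min_default;
  const T hi = max ? *max : max_default;
  for (T& v : values) v = std::min(std::max(v, lo), hi);
}

int main() {
  std::vector<float> x = {-3.f, 0.5f, 7.f};
  float hi = 1.f;
  ClipDefaults<float>(x, nullptr, &hi);    // no min given -> lowest(), max clipped at 1
  for (float v : x) std::cout << v << ' ';  // prints: -3 0.5 1
  std::cout << '\n';
}
```

For a type like MLFloat16 this pattern requires a std::numeric_limits specialization to exist for it; ONNX Runtime provides one in its float16 header, which is the assumption behind removing the hand-rolled LowMax workaround.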
