CUDAScalar.cu (forked from pytorch/pytorch)
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch_v2.h>
#include <ATen/EmptyTensor.h>

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/_local_scalar_dense_native.h>
#endif

#include <ATen/cuda/CUDAContext.h>

namespace at::native {

Scalar _local_scalar_dense_cuda(const Tensor& self) {
  Scalar r;
  AT_DISPATCH_V2(
      self.scalar_type(), "_local_scalar_dense_cuda", AT_WRAP([&] {
        // Create pinned memory for the scalar value to avoid implicit
        // locking/sync in cuda library due to pageable memory
        auto value = at::detail::empty_cpu(
            {1}, /* size */
            c10::CppTypeToScalarType<scalar_t>(), /* dtype */
            c10::nullopt, /* layout */
            c10::nullopt, /* device */
            true, /* pin_memory */
            c10::nullopt /* memory format */
        );
        cudaStream_t stream = at::cuda::getCurrentCUDAStream();
        at::cuda::memcpy_and_sync(
            (void*)value.const_data_ptr<scalar_t>(),
            self.const_data_ptr<scalar_t>(),
            sizeof(scalar_t),
            cudaMemcpyDeviceToHost,
            stream);
        r = Scalar(*value.const_data_ptr<scalar_t>());
      }),
      AT_EXPAND(AT_ALL_TYPES_AND_COMPLEX),
      kComplexHalf,
      kHalf,
      kBool,
      kBFloat16,
      AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES));
  return r;
}

} // namespace at::native
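
// Usage sketch (a minimal illustration, assuming the standard ATen dispatch
// path): _local_scalar_dense_cuda is the CUDA backend of
// aten::_local_scalar_dense, which Tensor::item() uses to pull a single
// element back to the host. A call like the following would route here,
// performing one device-to-pinned-host copy plus a stream sync:
//
//   at::Tensor t = at::ones({}, at::device(at::kCUDA).dtype(at::kFloat));
//   float v = t.item<float>();  // dispatches to _local_scalar_dense_cuda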