// thrust.cu
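// Overview: for every pair of images (i, j) this program computes the sum of squared
// per-pixel differences ("divergence"), fills an elems x elems matrix with the results
// on the GPU, and then scans that matrix on the host for the most divergent pair.
// Timings for the GPU pass and the host scan are reported separately.
// data.h is assumed to provide the image set `data`, the per-image dimension `side`,
// the `mnisttype` mdspan-style wrapper, and the `stdex` namespace alias used below.
// Build sketch (an assumption, not the project's actual command):
//   nvcc -std=c++17 -rdc=true thrust.cu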
#include "data.h"
#include <vector>
#include <chrono>
#include <iostream>
#include <functional>
#include <thrust/for_each.h>
#include <thrust/inner_product.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/execution_policy.h>
#include <assert.h>
// Device-side CUDA error check, in the style of the usual gpuErrchk helper.
#define cdpErrchk(ans) { cdpAssert((ans), __FILE__, __LINE__); }
__device__ void cdpAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
    if (code != cudaSuccess)
    {
        printf("GPU kernel assert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) assert(0);
    }
}
namespace chrono = std::chrono;

// Squared difference between the k-th pixels of two images i and j.
struct squarediff
{
    uint8_t const* i;
    uint8_t const* j;
    __device__ int operator()(int k) const
    {
        int diff = i[k] - j[k];
        return diff * diff;
    }
};
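// Note on the nested Thrust calls below: thrust::device used from inside a kernel
// historically dispatched through CUDA Dynamic Parallelism (requiring relocatable
// device code, -rdc=true); recent Thrust releases may instead execute device-side
// algorithm calls sequentially in the calling thread. Either way the code compiles,
// but the effective parallelism of dowork depends on the Thrust version in use.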
// Fills the elems x elems divergence matrix: entry (i, j) is the sum of squared
// per-pixel differences between images i and j. The final pointer argument is unused;
// results are written through indexeddivergence.
template<class T1, class T2> __global__ void dowork(T1 indexeddivergence, int elems, T2 data, int* c)
{
    thrust::counting_iterator<int32_t> zeroit(0);
    thrust::for_each_n(thrust::device, zeroit, elems, [indexeddivergence, elems, data] (int i)
    {
        auto rowit = thrust::make_counting_iterator(0);
        thrust::transform(thrust::device, rowit, rowit + elems, &indexeddivergence(i, 0), [i, data] (int j)
        {
            squarediff differ;
            differ.i = &data(i, 0, 0);
            differ.j = &data(j, 0, 0);
            thrust::counting_iterator<int32_t> pixelit(0);
            return thrust::transform_reduce(thrust::seq, pixelit, pixelit + side * side, differ, 0, thrust::plus<int>());
        });
    });
}
int main()
{
    printf("Thrust.\n");
    int elems = data.extent(0);

    // Managed allocations: an elems x elems result matrix and a device-visible copy of the image data.
    int* divergence;
    cudaMallocManaged(&divergence, sizeof(int) * elems * elems);
    uint8_t* dataptr;
    cudaMallocManaged(&dataptr, sizeof(uint8_t) * elems * side * side);
    printf("Processing %d * %d elements\n", elems, elems);
    auto indexeddivergence = stdex::basic_mdspan<int, stdex::extents<stdex::dynamic_extent, stdex::dynamic_extent>>(divergence, elems, elems);

    auto computestart = chrono::steady_clock::now();
    mnisttype localdata = mnisttype(dataptr, elems);
    cudaError_t e;
    e = cudaMemcpy(dataptr, data.data(), sizeof(uint8_t) * elems * side * side, cudaMemcpyHostToDevice);
    if (e)
    {
        printf("Cuda error %d reported: %s\n", e, cudaGetErrorString(e));
    }
    e = cudaMemAdvise(dataptr, sizeof(uint8_t) * elems * side * side, cudaMemAdviseSetReadMostly, 0);
    if (e)
    {
        printf("Cuda error %d reported: %s\n", e, cudaGetErrorString(e));
    }

    // Single launch thread; the parallelism comes from the Thrust calls inside the kernel.
    dowork<<<1, 1>>>(indexeddivergence, elems, localdata, divergence);
    e = cudaPeekAtLastError();
    if (e)
    {
        printf("Cuda error %d reported: %s\n", e, cudaGetErrorString(e));
    }
    e = cudaDeviceSynchronize();
    if (e)
    {
        printf("Cuda error %d reported: %s\n", e, cudaGetErrorString(e));
    }
    auto firstend = chrono::steady_clock::now();

    // Second pass on the host: find the most divergent pair.
    int maxi = 0;
    int maxj = 0;
    for (int i = 0; i < elems; i++)
    {
        for (int j = 0; j < elems; j++)
        {
            if (indexeddivergence(i, j) >= indexeddivergence(maxi, maxj))
            {
                maxi = i;
                maxj = j;
            }
        }
    }
    auto secondend = chrono::steady_clock::now();

    std::cout << "First pass, in microseconds : " << chrono::duration_cast<chrono::microseconds>(firstend - computestart).count() << std::endl;
    std::cout << "Second pass, in microseconds : " << chrono::duration_cast<chrono::microseconds>(secondend - firstend).count() << std::endl;
    printf("Maximum divergence at %d against %d with value %d\n", maxi, maxj, indexeddivergence(maxi, maxj));

    cudaFree(dataptr);
    cudaFree(divergence);
}