mp1.cc
// MP 1
#include <wb.h>

__global__ void vecAdd(float *in1, float *in2, float *out, int len) {
    //@@ Insert code to implement vector addition here
    // Each thread adds one element; the bounds check covers the last, partially filled block.
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < len) out[idx] = in1[idx] + in2[idx];
}

int main(int argc, char **argv) {
    wbArg_t args;
    int inputLength;
    float *hostInput1;
    float *hostInput2;
    float *hostOutput;
    float *deviceInput1;
    float *deviceInput2;
    float *deviceOutput;

    args = wbArg_read(argc, argv);

    wbTime_start(Generic, "Importing data and creating memory on host");
    hostInput1 = (float *) wbImport(wbArg_getInputFile(args, 0), &inputLength);
    hostInput2 = (float *) wbImport(wbArg_getInputFile(args, 1), &inputLength);
    hostOutput = (float *) malloc(inputLength * sizeof(float));
    wbTime_stop(Generic, "Importing data and creating memory on host");

    wbLog(TRACE, "The input length is ", inputLength, " elements");
wbTime_start(GPU, "Allocating GPU memory.");
//@@ Allocate GPU memory here
int byteSize =sizeof(float) * inputLength;
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
//@@ Copy memory to the GPU here
cudaMalloc((void **) &deviceInput1, byteSize);
cudaMalloc((void **) &deviceInput2, byteSize);
cudaMalloc((void **) &deviceOutput, byteSize);
wbTime_stop(GPU, "Copying input memory to the GPU.");
//@@ Initialize the grid and block dimensions here
cudaMemcpy(deviceInput1, hostInput1, byteSize,cudaMemcpyHostToDevice);
cudaMemcpy(deviceInput2, hostInput1, byteSize,cudaMemcpyHostToDevice);
wbTime_start(Compute, "Performing CUDA computation");
//@@ Launch the GPU Kernel here
int block_size = 16;
int n_blocks = inputLength /block_size + (inputLength%block_size == 0 ? 0:1);
#ifndef CUDA_EMU
vecAdd<<< n_blocks, block_size>>>(deviceInput1, deviceInput2, deviceOutput, inputLength);
#else
setupCudaSim (n_blocks, block_size, boost::bind(vecAdd,deviceInput1,deviceInput2,deviceOutput, inputLength));
#endif
cudaThreadSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");

    wbTime_start(Copy, "Copying output memory to the CPU");
    //@@ Copy the GPU memory back to the CPU here
    cudaMemcpy(hostOutput, deviceOutput, byteSize, cudaMemcpyDeviceToHost);
    wbTime_stop(Copy, "Copying output memory to the CPU");

wbTime_start(GPU, "Freeing GPU Memory");
//@@ Free the GPU memory here
wbTime_stop(GPU, "Freeing GPU Memory");

    wbSolution(args, hostOutput, inputLength);

    free(hostInput1);
    free(hostInput2);
    free(hostOutput);

    return 0;
}