0_Simple__matrixMul + 0_Simple__matrixMul_nvrtc
Matrix multiplication using thread blocks and shared-memory tiling, shown both as statically compiled code and via runtime compilation (NVRTC).
▶ Source code: static compilation
#include <stdio.h>
#include <assert.h>
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
#include <helper_functions.h>
#include <helper_cuda.h>

template <int BLOCK_SIZE> __global__ void matrixMulCUDA(float *C, float *A, float *B, int wA, int wB)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;

int aBegin = wA * BLOCK_SIZE * by; // start of the strip of A this block walks over
int aEnd = aBegin + wA - 1; // end of that strip
int aStep = BLOCK_SIZE; // stride through A (each block covers a strip of width BLOCK_SIZE; each thread handles one element)
int bBegin = BLOCK_SIZE * bx; // start of the strip of B this block walks over
int bStep = BLOCK_SIZE * wB; // stride through B (each block covers a strip of height BLOCK_SIZE; each thread handles one element)
float Csub = 0;

for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep)
{
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
__syncthreads();

#pragma unroll // unrolled into BLOCK_SIZE statements for efficiency
for (int k = 0; k < BLOCK_SIZE; ++k)
Csub += As[ty][k] * Bs[k][tx];
__syncthreads();
}

int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}

void constantInit(float *data, int size, float val)
{
for (int i = 0; i < size; ++i)
data[i] = val;
}

int matrixMultiply(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB)
{
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(float) * size_A;
float *h_A = (float *)malloc(mem_size_A);
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(float) * size_B;
float *h_B = (float *)malloc(mem_size_B);
const float valB = 0.01f;
constantInit(h_A, size_A, 1.0f);
constantInit(h_B, size_B, valB);
dim3 dimsC(dimsB.x, dimsA.y, 1);
unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
float *h_C = (float *) malloc(mem_size_C);
float *d_A, *d_B, *d_C;
cudaMalloc((void **) &d_A, mem_size_A);
cudaMalloc((void **) &d_B, mem_size_B);
cudaMalloc((void **) &d_C, mem_size_C);
cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice);

// warm-up launch
dim3 threads(block_size, block_size);
dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
if (block_size == 16)
matrixMulCUDA<16><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
else
matrixMulCUDA<32><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
printf("done\n");
cudaDeviceSynchronize();

printf("Computing result using CUDA Kernel...\n");
cudaEvent_t start;
cudaEventCreate(&start);
cudaEvent_t stop;
cudaEventCreate(&stop);
cudaEventRecord(start, NULL);

int nIter = 300;
for (int j = 0; j < nIter; j++)
{
if (block_size == 16)
matrixMulCUDA<16><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
else
matrixMulCUDA<32><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
}
cudaEventRecord(stop, NULL);
cudaEventSynchronize(stop);

float msecTotal = 0.0f;
cudaEventElapsedTime(&msecTotal, start, stop);
float msecPerMatrixMul = msecTotal / nIter;
double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x;
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
printf("Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n",
gigaFlops, msecPerMatrixMul, flopsPerMatrixMul, threads.x * threads.y);
cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost);

// check the result; require relative error |<x,y>_cpu - <x,y>_gpu| / <|x|,|y|> < eps
printf("Checking computed result for correctness: ");
bool correct = true;
double eps = 1.e-6; // machine zero
for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++)
{
double abs_err = fabs(h_C[i] - (dimsA.x * valB));
double dot_length = dimsA.x;
double abs_val = fabs(h_C[i]);
double rel_err = abs_err / abs_val / dot_length;
if (rel_err > eps)
{
printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, h_C[i], dimsA.x*valB, eps);
correct = false;
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL"); free(h_A);
free(h_B);
free(h_C);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
printf("\nNOTE: The CUDA Samples are not meant for performance measurements. Results may vary when GPU Boost is enabled.\n");
if (correct)
return EXIT_SUCCESS;
else
return EXIT_FAILURE;
}

int main(int argc, char **argv)
{
printf("[Matrix Multiply Using CUDA] - Starting...\n"); if (checkCmdLineFlag(argc, (const char **)argv, "help") || checkCmdLineFlag(argc, (const char **)argv, "?"))
{
printf("Usage -device=n (n >= 0 for deviceID)\n");
printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n");
printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n");
printf(" Note: Outer matrix dimensions of A & B matrices must be equal.\n");
exit(EXIT_SUCCESS);
}

int devID = 0; // select a device; device 0 by default
if (checkCmdLineFlag(argc, (const char **)argv, "device"))
{
devID = getCmdLineArgumentInt(argc, (const char **)argv, "device");
cudaSetDevice(devID);
}
cudaDeviceProp deviceProp;
cudaGetDevice(&devID);
cudaGetDeviceProperties(&deviceProp, devID);

if (deviceProp.computeMode == cudaComputeModeProhibited)
{
fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n");
exit(EXIT_SUCCESS);
}

int block_size = (deviceProp.major < 2) ? 16 : 32;

dim3 dimsA(5 * 2 * block_size, 5 * 2 * block_size, 1);
dim3 dimsB(5 * 4 * block_size, 5 * 2 * block_size, 1);

// A and B dimensions can be overridden from the command line
if (checkCmdLineFlag(argc, (const char **)argv, "wA"))
dimsA.x = getCmdLineArgumentInt(argc, (const char **)argv, "wA");
if (checkCmdLineFlag(argc, (const char **)argv, "hA"))
dimsA.y = getCmdLineArgumentInt(argc, (const char **)argv, "hA");
if (checkCmdLineFlag(argc, (const char **)argv, "wB"))
dimsB.x = getCmdLineArgumentInt(argc, (const char **)argv, "wB");
if (checkCmdLineFlag(argc, (const char **)argv, "hB"))
dimsB.y = getCmdLineArgumentInt(argc, (const char **)argv, "hB");
if (dimsA.x != dimsB.y)
{
printf("Error: outer matrix dimensions must be equal. (%d != %d)\n",
dimsA.x, dimsB.y);
exit(EXIT_FAILURE);
}
printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y); int matrix_result = matrixMultiply(argc, argv, block_size, dimsA, dimsB); getchar();
exit(matrix_result);
}
▶ Source code: runtime compilation (NVRTC)
/*matrixMul_kernel.cu*/
template <int BLOCK_SIZE> __device__ void matrixMulCUDA(float *C, float *A, float *B, int wA, int wB)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int aBegin = wA * BLOCK_SIZE * by;
int aEnd = aBegin + wA - 1;
int aStep = BLOCK_SIZE;
int bBegin = BLOCK_SIZE * bx;
int bStep = BLOCK_SIZE * wB;
float Csub = 0;
for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep)
{
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
__syncthreads();
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k)
Csub += As[ty][k] * Bs[k][tx];
__syncthreads();
}
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
} extern "C" __global__ void matrixMulCUDA_block16(float *C, float *A, float *B, int wA, int wB)
{
matrixMulCUDA<16>(C, A, B, wA, wB);
}

extern "C" __global__ void matrixMulCUDA_block32(float *C, float *A, float *B, int wA, int wB)
{
matrixMulCUDA<32>(C, A, B, wA, wB);
}
/*matrixMul.cpp*/
#include <stdio.h>
#include <assert.h>
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
#include "nvrtc_helper.h"
#include <helper_functions.h>

void constantInit(float *data, int size, float val)
{
for (int i = 0; i < size; ++i)
data[i] = val;
}

int matrixMultiply(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB)
{
// Allocate host memory for matrices A and B
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(float) * size_A;
float *h_A = (float *)malloc(mem_size_A);
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(float) * size_B;
float *h_B = (float *)malloc(mem_size_B);
const float valB = 0.01f;
constantInit(h_A, size_A, 1.0f);
constantInit(h_B, size_B, valB);
CUdeviceptr d_A, d_B, d_C;

char *ptx, *kernel_file;
size_t ptxSize;
kernel_file = sdkFindFilePath("matrixMul_kernel.cu", argv[0]);
compileFileToPTX(kernel_file, 0, NULL, &ptx, &ptxSize);
CUmodule module = loadPTX(ptx, argc, argv);

dim3 dimsC(dimsB.x, dimsA.y, 1);
unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
float *h_C = (float *) malloc(mem_size_C);
cuMemAlloc(&d_A, mem_size_A);
cuMemAlloc(&d_B, mem_size_B);
cuMemAlloc(&d_C, mem_size_C);
cuMemcpyHtoD(d_A, h_A, mem_size_A);
cuMemcpyHtoD(d_B, h_B, mem_size_B);

dim3 threads(block_size, block_size);
dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);

printf("Computing result using CUDA Kernel...\n");

CUfunction kernel_addr;
if (block_size == 16)
cuModuleGetFunction(&kernel_addr, module, "matrixMulCUDA_block16");
else
cuModuleGetFunction(&kernel_addr, module, "matrixMulCUDA_block32"); void *arr[] = { (void *)&d_C, (void *)&d_A, (void *)&d_B, (void *)&dimsA.x, (void *)&dimsB.x }; // Execute the kernel
int nIter = ; for (int j = ; j < nIter; j++)
{
cuLaunchKernel(kernel_addr,
grid.x, grid.y, grid.z,
threads.x, threads.y, threads.z,
0, 0, &arr[0], 0);
cuCtxSynchronize();
}
cuMemcpyDtoH(h_C, d_C, mem_size_C);

printf("Checking computed result for correctness: ");
bool correct = true;
double eps = 1.e-6; // machine zero
for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++)
{
double abs_err = fabs(h_C[i] - (dimsA.x * valB));
double dot_length = dimsA.x;
double abs_val = fabs(h_C[i]);
double rel_err = abs_err / abs_val / dot_length;
if (rel_err > eps)
{
printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, h_C[i], dimsA.x*valB, eps);
correct = false;
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL"); printf("\nNOTE: The CUDA Samples are not meant for performance measurements. Results may vary when GPU Boost is enabled.\n");
free(h_A);
free(h_B);
free(h_C);
cuMemFree(d_A);
cuMemFree(d_B);
cuMemFree(d_C);
if (correct)
return EXIT_SUCCESS;
else
return EXIT_FAILURE;
}

int main(int argc, char **argv)
{
printf("[Matrix Multiply Using CUDA] - Starting...\n"); if (checkCmdLineFlag(argc, (const char **)argv, "help") || checkCmdLineFlag(argc, (const char **)argv, "?"))
{
printf("Usage -device=n (n >= 0 for deviceID)\n");
printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n");
printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n");
printf(" Note: Outer matrix dimensions of A & B matrices must be equal.\n");
exit(EXIT_SUCCESS);
}

int block_size = 32;
dim3 dimsA(5 * 2 * block_size, 5 * 2 * block_size, 1);
dim3 dimsB(5 * 4 * block_size, 5 * 2 * block_size, 1);

if (checkCmdLineFlag(argc, (const char **)argv, "wA"))
dimsA.x = getCmdLineArgumentInt(argc, (const char **)argv, "wA");
if (checkCmdLineFlag(argc, (const char **)argv, "hA"))
dimsA.y = getCmdLineArgumentInt(argc, (const char **)argv, "hA");
if (checkCmdLineFlag(argc, (const char **)argv, "wB"))
dimsB.x = getCmdLineArgumentInt(argc, (const char **)argv, "wB");
if (checkCmdLineFlag(argc, (const char **)argv, "hB"))
dimsB.y = getCmdLineArgumentInt(argc, (const char **)argv, "hB");
if (dimsA.x != dimsB.y)
{
printf("Error: outer matrix dimensions must be equal. (%d != %d)\n", dimsA.x, dimsB.y);
exit(EXIT_FAILURE);
}

printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y);

int matrix_result = matrixMultiply(argc, argv, block_size, dimsA, dimsB);

getchar();
exit(matrix_result);
}
▶ Output:
[Matrix Multiply Using CUDA] - Starting...
GPU Device 0: "GeForce GTX 1070" with compute capability 6.1

MatrixA(320,320), MatrixB(640,320)
Computing result using CUDA Kernel...
done
Performance= 22.95 GFlop/s, Time= 5.712 msec, Size= 131072000 Ops, WorkgroupSize= 1024 threads/block
Checking computed result for correctness: Result = PASS

NOTE: The CUDA Samples are not meant for performance measurements. Results may vary when GPU Boost is enabled.
▶ Takeaways:
● The sample code is untidy: declarations and initialization are mixed together throughout.
● A function that returns a human-readable description for a given CUDA error code:
extern __host__ __cudart_builtin__ const char* CUDARTAPI cudaGetErrorString(cudaError_t error);
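A minimal error-checking sketch built around this function; the CHECK macro is my own convenience wrapper, not part of the sample or the CUDA runtime:

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Hypothetical helper: wrap a runtime-API call and, on failure, print a
// readable message via cudaGetErrorString() and abort.
#define CHECK(call)                                                     \
    do {                                                                \
        cudaError_t err = (call);                                       \
        if (err != cudaSuccess) {                                       \
            fprintf(stderr, "CUDA error %s:%d: %s\n",                   \
                    __FILE__, __LINE__, cudaGetErrorString(err));       \
            exit(EXIT_FAILURE);                                         \
        }                                                               \
    } while (0)

int main()
{
    float *d_buf = NULL;
    CHECK(cudaMalloc((void **)&d_buf, 1024 * sizeof(float)));
    CHECK(cudaFree(d_buf));
    return 0;
}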
● Loop unrolling via a compiler directive:
#pragma unroll
for (i = 0; i < m; i++)
c[i] = a[i] + b[i];
is equivalent to
c[0] = a[0] + b[0];
c[1] = a[1] + b[1];
c[2] = a[2] + b[2];
...
c[m-1] = a[m-1] + b[m-1];
#pragma unroll may be followed by a number that limits how many iterations are unrolled, e.g. #pragma unroll 4.
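For instance, a small sketch (kernel and names are illustrative, not from the sample) where each thread sums a fixed-length row and the inner loop is unrolled four iterations at a time:

// Each thread reduces M consecutive elements; the compiler unrolls the loop
// in groups of 4 iterations because of the "#pragma unroll 4" hint.
#define M 16
__global__ void rowSum(float *out, const float *in)
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    float s = 0.0f;
#pragma unroll 4
    for (int k = 0; k < M; ++k)
        s += in[row * M + k];
    out[row] = s;
}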
● Kernel templates: a compile-time constant can be passed to the kernel as a template argument, which is an indirect way to size shared-memory arrays without dynamic allocation.
template <int BLOCK_SIZE> __global__ void functionName(void)
{
__shared__ int shareArray[BLOCK_SIZE];
...
}

functionName<16><<< blocksize, threadsize >>>();
● Warm-up: run the computation once before the timed iterations. It primes the caches and noticeably reduces the variance of the measured runtime.
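The pattern distilled from the static-compilation code above, wrapped in a hypothetical helper (timeKernel is my name; the kernel, device pointers and dims are the ones set up in matrixMultiply()):

// One untimed warm-up launch, then time nIter launches with CUDA events
// and report the average per-launch time.
void timeKernel(dim3 grid, dim3 threads, float *d_C, float *d_A, float *d_B, dim3 dimsA, dim3 dimsB, int nIter)
{
    matrixMulCUDA<32><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x); // warm-up, not timed
    cudaDeviceSynchronize();

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, NULL);
    for (int j = 0; j < nIter; j++)
        matrixMulCUDA<32><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x); // timed launches
    cudaEventRecord(stop, NULL);
    cudaEventSynchronize(stop);

    float msecTotal = 0.0f;
    cudaEventElapsedTime(&msecTotal, start, stop);
    printf("average kernel time: %.3f ms\n", msecTotal / nIter);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}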