0_Simple__matrixMul + 0_Simple__matrixMul_nvrtc
Tiled matrix multiplication using two-dimensional (square) thread blocks and shared memory, shown both as statically compiled code and as runtime-compiled (NVRTC) code.
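As a worked example of the tiling arithmetic, with the default sizes used below (A is 320×320, B is 640×320, and BLOCK_SIZE is 32 on a compute capability 6.1 device), the launch grid is (640/32) × (320/32) = 20 × 10 blocks of 32 × 32 threads; each block walks 320/32 = 10 tile pairs of A and B through shared memory and accumulates one 32 × 32 tile of C.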
▶ Source code: statically compiled version
#include <stdio.h>
#include <assert.h>
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
#include <helper_functions.h>
#include <helper_cuda.h>

template <int BLOCK_SIZE> __global__ void matrixMulCUDA(float *C, float *A, float *B, int wA, int wB)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;

int aBegin = wA * BLOCK_SIZE * by;   // first index of A visited by this block
int aEnd = aBegin + wA - 1;          // last index of A visited by this block
int aStep = BLOCK_SIZE;              // stride through A (each iteration the block covers a strip BLOCK_SIZE wide; each thread maps to one element of it)
int bBegin = BLOCK_SIZE * bx;        // first index of B visited by this block
int bStep = BLOCK_SIZE * wB;         // stride through B (each iteration the block covers a strip BLOCK_SIZE tall; each thread maps to one element of it)

float Csub = 0;
for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep)
{
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
__syncthreads();

#pragma unroll   // unrolled into BLOCK_SIZE statements for efficiency
for (int k = 0; k < BLOCK_SIZE; ++k)
Csub += As[ty][k] * Bs[k][tx];
__syncthreads();
}

int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}

void constantInit(float *data, int size, float val)
{
for (int i = 0; i < size; ++i)
data[i] = val;
}

int matrixMultiply(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB)
{
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(float) * size_A;
float *h_A = (float *)malloc(mem_size_A);
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(float) * size_B;
float *h_B = (float *)malloc(mem_size_B);
const float valB = 0.01f;          // reference value, used later by the correctness check
constantInit(h_A, size_A, 1.0f);
constantInit(h_B, size_B, valB);
dim3 dimsC(dimsB.x, dimsA.y, 1);
unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
float *h_C = (float *) malloc(mem_size_C);
float *d_A, *d_B, *d_C;
cudaMalloc((void **) &d_A, mem_size_A);
cudaMalloc((void **) &d_B, mem_size_B);
cudaMalloc((void **) &d_C, mem_size_C);
cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice);

// warm-up launch
dim3 threads(block_size, block_size);
dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
if (block_size == 16)
matrixMulCUDA<16><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
else
matrixMulCUDA<32><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
printf("done\n");
cudaDeviceSynchronize();

printf("Computing result using CUDA Kernel...\n");
cudaEvent_t start;
cudaEventCreate(&start);
cudaEvent_t stop;
cudaEventCreate(&stop);
cudaEventRecord(start, NULL);

int nIter = 300;
for (int j = 0; j < nIter; j++)
{
if (block_size == 16)
matrixMulCUDA<16><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
else
matrixMulCUDA<32><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
}
cudaEventRecord(stop, NULL);
cudaEventSynchronize(stop);

float msecTotal = 0.0f;
cudaEventElapsedTime(&msecTotal, start, stop);
float msecPerMatrixMul = msecTotal / nIter;
double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x;
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
printf("Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n",
gigaFlops, msecPerMatrixMul, flopsPerMatrixMul, threads.x * threads.y);
cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost);

// check the result; require relative error |<x,y>_cpu - <x,y>_gpu| / <|x|, |y|> < eps
printf("Checking computed result for correctness: ");
bool correct = true;
double eps = 1.e-6;   // machine zero
for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++)
{
double abs_err = fabs(h_C[i] - (dimsA.x * valB));
double dot_length = dimsA.x;
double abs_val = fabs(h_C[i]);
double rel_err = abs_err / abs_val / dot_length;
if (rel_err > eps)
{
printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, h_C[i], dimsA.x*valB, eps);
correct = false;
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL"); free(h_A);
free(h_B);
free(h_C);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
printf("\nNOTE: The CUDA Samples are not meant for performance measurements. Results may vary when GPU Boost is enabled.\n");
if (correct)
return EXIT_SUCCESS;
else
return EXIT_FAILURE;
}

int main(int argc, char **argv)
{
printf("[Matrix Multiply Using CUDA] - Starting...\n"); if (checkCmdLineFlag(argc, (const char **)argv, "help") || checkCmdLineFlag(argc, (const char **)argv, "?"))
{
printf("Usage -device=n (n >= 0 for deviceID)\n");
printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n");
printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n");
printf(" Note: Outer matrix dimensions of A & B matrices must be equal.\n");
exit(EXIT_SUCCESS);
}

int devID = 0;   // device to use; defaults to device 0
if (checkCmdLineFlag(argc, (const char **)argv, "device"))
{
devID = getCmdLineArgumentInt(argc, (const char **)argv, "device");
cudaSetDevice(devID);
}
cudaDeviceProp deviceProp;
cudaGetDevice(&devID);
cudaGetDeviceProperties(&deviceProp, devID);

if (deviceProp.computeMode == cudaComputeModeProhibited)
{
fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n");
exit(EXIT_SUCCESS);
}

int block_size = (deviceProp.major < 2) ? 16 : 32;

dim3 dimsA(5 * 2 * block_size, 5 * 2 * block_size, 1);
dim3 dimsB(5 * 4 * block_size, 5 * 2 * block_size, 1);

// A and B dimensions can be overridden from the command line
if (checkCmdLineFlag(argc, (const char **)argv, "wA"))
dimsA.x = getCmdLineArgumentInt(argc, (const char **)argv, "wA");
if (checkCmdLineFlag(argc, (const char **)argv, "hA"))
dimsA.y = getCmdLineArgumentInt(argc, (const char **)argv, "hA");
if (checkCmdLineFlag(argc, (const char **)argv, "wB"))
dimsB.x = getCmdLineArgumentInt(argc, (const char **)argv, "wB");
if (checkCmdLineFlag(argc, (const char **)argv, "hB"))
dimsB.y = getCmdLineArgumentInt(argc, (const char **)argv, "hB");
if (dimsA.x != dimsB.y)
{
printf("Error: outer matrix dimensions must be equal. (%d != %d)\n",
dimsA.x, dimsB.y);
exit(EXIT_FAILURE);
}
printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y); int matrix_result = matrixMultiply(argc, argv, block_size, dimsA, dimsB); getchar();
exit(matrix_result);
}
▶ Source code: runtime compilation (NVRTC)
/*matrixMul_kernel.cu*/
template <int BLOCK_SIZE> __device__ void matrixMulCUDA(float *C, float *A, float *B, int wA, int wB)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int aBegin = wA * BLOCK_SIZE * by;
int aEnd = aBegin + wA - 1;
int aStep = BLOCK_SIZE;
int bBegin = BLOCK_SIZE * bx;
int bStep = BLOCK_SIZE * wB;
float Csub = 0;
for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep)
{
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
__syncthreads();
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k)
Csub += As[ty][k] * Bs[k][tx];
__syncthreads();
}
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
} extern "C" __global__ void matrixMulCUDA_block16(float *C, float *A, float *B, int wA, int wB)
{
matrixMulCUDA<16>(C,A,B,wA,wB);
} extern "C" __global__ void matrixMulCUDA_block32(float *C, float *A, float *B, int wA, int wB)
{
matrixMulCUDA<32>(C,A,B,wA,wB);
}
/*matrixMul.cpp*/
#include <stdio.h>
#include <assert.h>
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
#include "nvrtc_helper.h"
#include <helper_functions.h>

void constantInit(float *data, int size, float val)
{
for (int i = 0; i < size; ++i)
data[i] = val;
}

int matrixMultiply(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB)
{
// Allocate host memory for matrices A and B
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(float) * size_A;
float *h_A = (float *)malloc(mem_size_A);
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(float) * size_B;
float *h_B = (float *)malloc(mem_size_B);
const float valB = 0.01f;
constantInit(h_A, size_A, 1.0f);
constantInit(h_B, size_B, valB);
CUdeviceptr d_A, d_B, d_C;

char *ptx, *kernel_file;
size_t ptxSize;
kernel_file = sdkFindFilePath("matrixMul_kernel.cu", argv[0]);
compileFileToPTX(kernel_file, 0, NULL, &ptx, &ptxSize);
CUmodule module = loadPTX(ptx, argc, argv);

dim3 dimsC(dimsB.x, dimsA.y, 1);
unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
float *h_C = (float *) malloc(mem_size_C);
cuMemAlloc(&d_A, mem_size_A);
cuMemAlloc(&d_B, mem_size_B);
cuMemAlloc(&d_C, mem_size_C);
cuMemcpyHtoD(d_A, h_A, mem_size_A);
cuMemcpyHtoD(d_B, h_B, mem_size_B);

dim3 threads(block_size, block_size);
dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);

printf("Computing result using CUDA Kernel...\n");

CUfunction kernel_addr;
if (block_size == 16)
cuModuleGetFunction(&kernel_addr, module, "matrixMulCUDA_block16");
else
cuModuleGetFunction(&kernel_addr, module, "matrixMulCUDA_block32");

void *arr[] = { (void *)&d_C, (void *)&d_A, (void *)&d_B, (void *)&dimsA.x, (void *)&dimsB.x };

// Execute the kernel
int nIter = 300;
for (int j = 0; j < nIter; j++)
{
cuLaunchKernel(kernel_addr,
grid.x, grid.y, grid.z,
threads.x, threads.y, threads.z,
0, 0, &arr[0], 0);
cuCtxSynchronize();
}
cuMemcpyDtoH(h_C, d_C, mem_size_C);

printf("Checking computed result for correctness: ");
bool correct = true;
double eps = 1.e-6;   // machine zero
for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++)
{
double abs_err = fabs(h_C[i] - (dimsA.x * valB));
double dot_length = dimsA.x;
double abs_val = fabs(h_C[i]);
double rel_err = abs_err / abs_val / dot_length;
if (rel_err > eps)
{
printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, h_C[i], dimsA.x*valB, eps);
correct = false;
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL"); printf("\nNOTE: The CUDA Samples are not meant for performance measurements. Results may vary when GPU Boost is enabled.\n");
free(h_A);
free(h_B);
free(h_C);
cuMemFree(d_A);
cuMemFree(d_B);
cuMemFree(d_C);
if (correct)
return EXIT_SUCCESS;
else
return EXIT_FAILURE;
}

int main(int argc, char **argv)
{
printf("[Matrix Multiply Using CUDA] - Starting...\n"); if (checkCmdLineFlag(argc, (const char **)argv, "help") || checkCmdLineFlag(argc, (const char **)argv, "?"))
{
printf("Usage -device=n (n >= 0 for deviceID)\n");
printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n");
printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n");
printf(" Note: Outer matrix dimensions of A & B matrices must be equal.\n");
exit(EXIT_SUCCESS);
}

int block_size = 32;

dim3 dimsA(5 * 2 * block_size, 5 * 2 * block_size, 1);
dim3 dimsB(5 * 4 * block_size, 5 * 2 * block_size, 1);

if (checkCmdLineFlag(argc, (const char **)argv, "wA"))
dimsA.x = getCmdLineArgumentInt(argc, (const char **)argv, "wA");
if (checkCmdLineFlag(argc, (const char **)argv, "hA"))
dimsA.y = getCmdLineArgumentInt(argc, (const char **)argv, "hA");
if (checkCmdLineFlag(argc, (const char **)argv, "wB"))
dimsB.x = getCmdLineArgumentInt(argc, (const char **)argv, "wB");
if (checkCmdLineFlag(argc, (const char **)argv, "hB"))
dimsB.y = getCmdLineArgumentInt(argc, (const char **)argv, "hB");
if (dimsA.x != dimsB.y)
{
printf("Error: outer matrix dimensions must be equal. (%d != %d)\n", dimsA.x, dimsB.y);
} exit(EXIT_FAILURE);
printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y); int matrix_result = matrixMultiply(argc, argv, block_size, dimsA, dimsB); getchar();
exit(matrix_result);
}
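A note on the helpers: compileFileToPTX and loadPTX come from nvrtc_helper.h. Roughly speaking they wrap the NVRTC and driver-API sequence sketched below; this is a minimal sketch of that flow (no error checking, no compile options), not the helpers' exact implementation, and compileAndLoad / src are names of my own.

#include <cuda.h>
#include <nvrtc.h>
#include <stdlib.h>

// Compile CUDA source text to PTX with NVRTC, then load it through the driver API.
// Assumes a CUDA context has already been created on the target device.
CUmodule compileAndLoad(const char *src)
{
    nvrtcProgram prog;
    nvrtcCreateProgram(&prog, src, "matrixMul_kernel.cu", 0, NULL, NULL);
    nvrtcCompileProgram(prog, 0, NULL);        // pass -arch / -I options here if needed

    size_t ptxSize;
    nvrtcGetPTXSize(prog, &ptxSize);
    char *ptx = (char *)malloc(ptxSize);
    nvrtcGetPTX(prog, ptx);
    nvrtcDestroyProgram(&prog);

    CUmodule module;
    cuModuleLoadDataEx(&module, ptx, 0, NULL, NULL);   // PTX image is copied, so the buffer can be freed
    free(ptx);
    return module;
}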
▶ Output:
[Matrix Multiply Using CUDA] - Starting...
GPU Device 0: "GeForce GTX 1070" with compute capability 6.1

MatrixA(320,320), MatrixB(640,320)
Computing result using CUDA Kernel...
done
Performance= 22.95 GFlop/s, Time= 5.712 msec, Size= 131072000 Ops, WorkgroupSize= 1024 threads/block
Checking computed result for correctness: Result = PASS

NOTE: The CUDA Samples are not meant for performance measurements. Results may vary when GPU Boost is enabled.
▶ Takeaways:
● The sample code is written rather sloppily; declarations and initialization are jumbled together.
● A function that returns a description string for a given CUDA error code:
extern __host__ __cudart_builtin__ const char* CUDARTAPI cudaGetErrorString(cudaError_t error);
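A common way to use it (a sketch of my own, not part of the sample) is a macro that aborts with the readable message whenever a runtime call fails:

#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

// Abort with file/line and the human-readable message if a runtime call fails.
#define CHECK_CUDA(call)                                                   \
    do {                                                                   \
        cudaError_t err_ = (call);                                         \
        if (err_ != cudaSuccess) {                                         \
            fprintf(stderr, "CUDA error \"%s\" at %s:%d\n",                \
                    cudaGetErrorString(err_), __FILE__, __LINE__);         \
            exit(EXIT_FAILURE);                                            \
        }                                                                  \
    } while (0)

// Example: CHECK_CUDA(cudaMalloc((void **)&d_A, mem_size_A));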
● Unrolling a loop with a preprocessor directive:
#pragma unroll
for (i = 0; i < m; i++)
c[i] = a[i] + b[i];
is equivalent to
c[0] = a[0] + b[0];
c[1] = a[1] + b[1];
c[2] = a[2] + b[2];
...
c[m-1] = a[m-1] + b[m-1];
#pragma unroll can also be followed by an integer giving the unroll factor, e.g. #pragma unroll 4.
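For example, a hedged sketch of a factor-4 unroll (every thread runs the whole loop here, purely to show the pragma; the compiler handles any remainder iterations):

__global__ void addUnrolled(float *c, const float *a, const float *b, int m)
{
    // Unroll by a factor of 4 instead of fully unrolling.
    #pragma unroll 4
    for (int i = 0; i < m; i++)
        c[i] = a[i] + b[i];
}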
● Templated ("generic") kernels: a compile-time constant can be passed as a template argument when the kernel is instantiated, which is a workaround for sizing shared-memory arrays and the like without true dynamic arrays.
template <int BLOCK_SIZE> __global__ void functionName(void)
{
__shared__ int shareArray[BLOCK_SIZE];
...
}

functionName<16><<< blocksize, threadsize >>>();
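Because the template argument must be known at compile time, a runtime block size is usually mapped onto a small fixed set of instantiations; this is exactly what the sample above does with 16 and 32. A minimal sketch (launchMatrixMul is a hypothetical helper name; matrixMulCUDA is the templated kernel from the sample):

// Dispatch a runtime block size to one of the pre-instantiated kernels.
void launchMatrixMul(dim3 grid, dim3 threads, float *C, float *A, float *B, int wA, int wB, int block_size)
{
    if (block_size == 16)
        matrixMulCUDA<16><<< grid, threads >>>(C, A, B, wA, wB);
    else
        matrixMulCUDA<32><<< grid, threads >>>(C, A, B, wA, wB);
}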
● Warm-up: run the computation once before the repeated timing runs. This helps the caches and noticeably reduces the variance of the measured execution time.
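A compact version of the warm-up-plus-timing pattern used in the static sample (a sketch; dummyKernel and timeKernel are hypothetical names, and d_x is assumed to point to at least 256 device floats):

#include <stdio.h>
#include <cuda_runtime.h>

__global__ void dummyKernel(float *x) { x[threadIdx.x] += 1.0f; }

void timeKernel(float *d_x, int nIter)
{
    // One untimed warm-up launch primes caches and lazy initialization.
    dummyKernel<<< 1, 256 >>>(d_x);
    cudaDeviceSynchronize();

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, NULL);
    for (int j = 0; j < nIter; j++)
        dummyKernel<<< 1, 256 >>>(d_x);
    cudaEventRecord(stop, NULL);
    cudaEventSynchronize(stop);

    float msecTotal = 0.0f;
    cudaEventElapsedTime(&msecTotal, start, stop);
    printf("average launch time: %.3f msec\n", msecTotal / nIter);
}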