cuda并行计算的几种模式
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <time.h>
#include <stdlib.h> #define MAX 120
#define MIN 0 cudaError_t addWithCudaStream(int *c, const int *a, const int *b, size_t size,
float* etime);
cudaError_t addWithCuda(int *c, const int *a, const int *b, size_t size,
float* etime, int type);
// Block-parallel element-wise add: block i computes c[i] = a[i] + b[i].
// Intended to be launched with one block per element (blockDim == 1).
__global__ void addKernel(int *c, const int *a, const int *b) {
    c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
}

// Thread-parallel element-wise add: thread i computes c[i] = a[i] + b[i].
// Intended to be launched as a single block with one thread per element.
__global__ void addKernelThread(int *c, const int *a, const int *b) {
    c[threadIdx.x] = a[threadIdx.x] + b[threadIdx.x];
}
// Prints the three five-element vectors as "{a} + {b} = {c}".
static void printVectors(const int *a, const int *b, const int *c) {
    printf("{%d,%d,%d,%d,%d} + {%d,%d,%d,%d,%d} = {%d,%d,%d,%d,%d}\n",
            a[0], a[1], a[2], a[3], a[4],
            b[0], b[1], b[2], b[3], b[4],
            c[0], c[1], c[2], c[3], c[4]);
}

// Runs the same five-element vector addition with stream-parallel,
// block-parallel and thread-parallel kernels (the stream variant twice)
// and prints the elapsed time and result of each run.
// Returns 0 on success, 1 on any CUDA failure.
int main() {
    const int arraySize = 5;

    // Fill the inputs with random values in [MIN, MAX].
    srand((unsigned) time(NULL));
    int a[arraySize] = { 0 };
    int b[arraySize] = { 0 };
    for (int i = 0; i < arraySize; i++) {
        a[i] = rand() % (MAX + 1 - MIN) + MIN;
        b[i] = rand() % (MAX + 1 - MIN) + MIN;
    }
    int c[arraySize] = { 0 };

    cudaError_t cudaStatus;

    // Enumerate the CUDA devices present. The properties are queried but
    // not otherwise used here.
    int num = 0;
    cudaDeviceProp prop;
    cudaStatus = cudaGetDeviceCount(&num);
    for (int i = 0; i < num; i++) {
        cudaGetDeviceProperties(&prop, i);
    }

    float time;

    // 1) Stream-parallel addition.
    cudaStatus = addWithCudaStream(c, a, b, arraySize, &time);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addWithCudaStream failed!");
        return 1;
    }
    printf("Elasped time of stream is : %f \n", time);
    printVectors(a, b, c);

    // 2) Block-parallel addition (type == 0 -> addKernel<<<size, 1>>>).
    cudaStatus = addWithCuda(c, a, b, arraySize, &time, 0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addWithCuda (block) failed!");
        return 1;
    }
    printf("Elasped time of Block is : %f \n", time);
    printVectors(a, b, c);

    // 3) Thread-parallel addition (type == 1 -> addKernelThread<<<1, size>>>).
    cudaStatus = addWithCuda(c, a, b, arraySize, &time, 1);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addWithCuda (thread) failed!");
        return 1;
    }
    printf("Elasped time of thread is : %f \n", time);
    printVectors(a, b, c);

    // 4) Stream-parallel addition again, to compare with the first run.
    cudaStatus = addWithCudaStream(c, a, b, arraySize, &time);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addWithCudaStream failed!");
        return 1;
    }
    printf("Elasped time of stream is : %f \n", time);
    printVectors(a, b, c);

    // cudaDeviceReset must be called before exiting in order for profiling
    // and tracing tools such as Nsight and Visual Profiler to show complete
    // traces. (Replaces the deprecated cudaThreadExit.)
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }
    return 0;
}
// Helper function for using CUDA to add vectors in parallel.
// Helper function for using CUDA to add vectors in parallel via streams:
// each element i gets its own <<<1,1>>> addKernel launch, issued round-robin
// onto one of 5 CUDA streams.
// *etime receives the host-measured time (seconds) spent in
// cudaDeviceSynchronize AFTER all launches were issued — it does not include
// the (asynchronous) launch cost itself.
// Returns cudaSuccess, or the first failing CUDA status.
cudaError_t addWithCudaStream(int *c, const int *a, const int *b, size_t size,
        float* etime) {
    int *dev_a = NULL;
    int *dev_b = NULL;
    int *dev_c = NULL;
    clock_t start, stop;
    float time;
    const int streamCount = 5;
    // Zero-initialize so the cleanup path can tell which streams were
    // actually created — a goto Error may be taken before creation.
    cudaStream_t stream[5] = { NULL };
    cudaError_t cudaStatus;
    // Choose which GPU to run on; change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr,
                "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
        goto Error;
    }
    // Allocate GPU buffers for three vectors (two input, one output).
    cudaStatus = cudaMalloc((void**) &dev_c, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**) &dev_a, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**) &dev_b, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    // Copy input vectors from host memory to GPU buffers.
    cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int),
            cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int),
            cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    // Create the streams.
    for (int i = 0; i < streamCount; i++) {
        cudaStatus = cudaStreamCreate(&stream[i]);
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaStreamCreate failed!");
            goto Error;
        }
    }
    // Launch one <<<1,1>>> kernel per element, distributed round-robin over
    // the streams so any size (not just streamCount elements) is covered.
    for (size_t i = 0; i < size; i++) {
        addKernel<<<1, 1, 0, stream[i % streamCount]>>>(dev_c + i, dev_a + i,
                dev_b + i);
    }
    // Catch launch-configuration errors immediately.
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n",
                cudaGetErrorString(cudaStatus));
        goto Error;
    }
    // Time the wait for all streams to drain. cudaDeviceSynchronize also
    // returns any errors encountered during kernel execution.
    // (cudaThreadSynchronize is deprecated; cudaDeviceSynchronize is its
    // documented replacement.)
    start = clock();
    cudaStatus = cudaDeviceSynchronize();
    stop = clock();
    time = (float) (stop - start) / CLOCKS_PER_SEC;
    *etime = time;
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr,
                "cudaDeviceSynchronize returned error code %d after launching addKernel!\n",
                cudaStatus);
        goto Error;
    }
    // Copy output vector from GPU buffer to host memory.
    cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int),
            cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
Error:
    // Destroy only the streams that were actually created.
    for (int i = 0; i < streamCount; i++) {
        if (stream[i] != NULL) {
            cudaStreamDestroy(stream[i]);
        }
    }
    cudaFree(dev_c);
    cudaFree(dev_a);
    cudaFree(dev_b);
    return cudaStatus;
}
// Helper function for using CUDA to add vectors in parallel in a single
// launch. type == 0 selects block-parallel execution (addKernel, one block
// per element); any other value selects thread-parallel execution
// (addKernelThread, one thread in a single block per element — size must not
// exceed the device's max threads per block, typically 1024; TODO confirm
// for the target device).
// *etime receives the host-measured kernel time in seconds.
// Returns cudaSuccess, or the first failing CUDA status.
cudaError_t addWithCuda(int *c, const int *a, const int *b, size_t size,
        float * etime, int type) {
    int *dev_a = NULL;
    int *dev_b = NULL;
    int *dev_c = NULL;
    clock_t start, stop;
    float time;
    cudaError_t cudaStatus;
    // Choose which GPU to run on; change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr,
                "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
        goto Error;
    }
    // Allocate GPU buffers for three vectors (two input, one output).
    cudaStatus = cudaMalloc((void**) &dev_c, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**) &dev_a, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**) &dev_b, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    // Copy input vectors from host memory to GPU buffers.
    cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int),
            cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int),
            cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    if (type == 0) {
        // Block-parallel: one element per block.
        start = clock();
        addKernel<<<size, 1>>>(dev_c, dev_a, dev_b);
    } else {
        // Thread-parallel: one element per thread in a single block.
        start = clock();
        addKernelThread<<<1, size>>>(dev_c, dev_a, dev_b);
    }
    // Kernel launches are asynchronous: synchronize BEFORE stopping the
    // clock, otherwise only the launch overhead is measured, not the kernel.
    // cudaDeviceSynchronize also returns any errors encountered during the
    // launch/execution. (cudaThreadSynchronize is deprecated; this is its
    // documented replacement.)
    cudaStatus = cudaDeviceSynchronize();
    stop = clock();
    time = (float) (stop - start) / CLOCKS_PER_SEC;
    *etime = time;
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr,
                "cudaDeviceSynchronize returned error code %d after launching addKernel!\n",
                cudaStatus);
        goto Error;
    }
    // Copy output vector from GPU buffer to host memory.
    cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int),
            cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
Error:
    cudaFree(dev_c);
    cudaFree(dev_a);
    cudaFree(dev_b);
    return cudaStatus;
}
如上文的实现程序，使用了 thread 并行、block 并行、stream 并行三种模式，并用这三种方法共进行了五次计算。发现 stream 模式第一次计算时结果会出错，而后续调用同一个子程序却都正确，原因尚未搞清楚。
Elasped time of stream is : 0.000006
{47,86,67,35,16} + {114,39,110,20,101} = {158,123,92,107,127}
Elasped time of Block is : 0.000006
{47,86,67,35,16} + {114,39,110,20,101} = {161,125,177,55,117}
Elasped time of stream is : 0.000008
{47,86,67,35,16} + {114,39,110,20,101} = {161,125,177,55,117}
Elasped time of thread is : 0.000004
{47,86,67,35,16} + {114,39,110,20,101} = {161,125,177,55,117}
Elasped time of stream is : 0.000007
{47,86,67,35,16} + {114,39,110,20,101} = {161,125,177,55,117}
cuda并行计算的几种模式的更多相关文章
- 对称加密和分组加密中的四种模式(ECB、CBC、CFB、OFB)
一. AES对称加密: AES加密 分组 二. 分组密码的填充 分组密码的填充 e.g.: PKCS#5填充方式 三. 流密码: 四. 分组密码加密中的四种模式: 3.1 ECB模式 优点: 1. ...
- win7 64位下自行编译OpenCV2.4.10+CUDA toolkit 5.5的整个过程以及需要注意的问题(opencv+cuda并行计算元素的使用)
首先说明的是,这个帖子是成功的编译了dll,但是这个dll使用的时候还是很容易出现各种问题的. 发现错误可能是由于系统安装了太多版本的opencv,环境变量的设置混乱,造成dll版本加载 ...
- Spark On Yarn的两种模式yarn-cluster和yarn-client深度剖析
Spark On Yarn的优势 每个Spark executor作为一个YARN容器(container)运行.Spark可以使得多个Tasks在同一个容器(container)里面运行 1. Sp ...
- AES加密的四种模式详解
对称加密和分组加密中的四种模式(ECB.CBC.CFB.OFB) 一. AES对称加密: A ...
- Hadoop hadoop的介绍和几种模式
Hadoop简介 Hadoop软件库是一个开源框架,允许使用简单的编程模型跨计算机集群分布式处理大型数据集.它旨在从单个服务器扩展到数千台计算机,每台计算机都提供本地计算和存储.库本身不是依靠硬件来提 ...
- hadoop(1)---hadoop的介绍和几种模式。
一.什么是hadoop? Hadoop软件库是一个开源框架,允许使用简单的编程模型跨计算机集群分布式处理大型数据集.它旨在从单个服务器扩展到数千台计算机,每台计算机都提供本地计算和存储.库本身不是依靠 ...
- javascript 创建对象的7种模式
使用字面量方式创建一个 student 对象: var student = function (){ name : "redjoy", age : 21, sex: women, ...
- javascript面向对象系列第二篇——创建对象的5种模式
× 目录 [1]字面量 [2]工厂模式 [3]构造函数[4]原型模式[5]组合模式 前面的话 如何创建对象,或者说如何更优雅的创建对象,一直是一个津津乐道的话题.本文将从最简单的创建对象的方式入手,逐 ...
- javascript创建对象的几种模式
在js中有几种模式可以创建对象,通过对象操作所包含的属性与方法. 一般来说,构造函数名称的第一个字母为大写字母,非构造函数名称的第一个字母为小写字母,当然,构造函数与一般函数唯一的区别只是调用的方式不 ...
随机推荐
- [js开源组件开发]html5标签audio的样式更改
html5标签audio的样式更改 由于html5的流行,现在移动端大多数的需求都可以使用audio来播放音频,但您可能只是需要很简单的播放/停止效果,但不同的浏览器上的audio样式却不尽人意,所以 ...
- 原生JS:Object对象详细参考
Object对象详细参考 本文参考MDN做的详细整理,方便大家参考MDN JavaScript原生提供一个Object对象(注意起首的O是大写),所有其他对象都继承自这个对象. 构造函数: Objec ...
- Sublime Text3 支持Less
1.安装Sublime 插件 (1)安装LESS插件:因为Sublime不支持Less语法高亮,所以,先安装这个插件,方法: ctrl+shift+p>install Package>输入 ...
- CSS基础知识之文本属性二三事
line-height 可以给某个元素指定一个不带单位的缩放因子,这样它的后代元素就会继承这个缩放因子,再根据自身的字号大小来计算自己的行高(line-height)值, body { font-si ...
- SharePoint 2013 VSS 编写器
Windows Server 包含的 VSS 是提供内置卷影复制功能的基础结构.VSS 创建的卷影副本扩展了存储管理员的磁带备份存档解决方案,提供可轻松.有效创建和还原的高保真时间点副本,从而帮助简化 ...
- Failed to connect to JobMonApp on port 13491
今天为了解决别的问题,把/etc/hosts文件里的 127.0.0.1 localhost改成了 127.0.0.1 DSETL ,结果运行作业的时候就报这个错:Failed to connect ...
- SQLMAP使用笔记
-u #注入点-f #指纹判别数据库类型-b #获取数据库版本信息-p #指定可测试的参数(?page=1&id=2 -p “page,id”)-D “” #指定数据库名-T “” #指定表名 ...
- mac安装Aws cli失败
OS X EI 10.11 报错信息如下: Found existing installation: six 1.4.1 DEPRECATION: Uninstalling a distutils i ...
- JavaScript小例子:复选框全选
JavaScript小例子:复选框全选 这只是一个小例子,很简单,但是这个功能还是很常用的: 实现后效果如图: JavaScript代码: <script type="text/jav ...
- Android项目实战(二十三):仿QQ设置App全局字体大小
一.项目需求: 因为产品对象用于中老年人,所以产品设计添加了APP全局字体调整大小功能. 这里仿做QQ设置字体大小的功能. QQ实现的效果是,滚动下面的seekbar,当只有seekbar到达某一个刻 ...