Reference:

http://blog.csdn.net/neoxmu/article/details/8866928

I installed CUDA 5.5. The code is as follows:

//#include "stdafx.h"
#include "CL\cl.h"
#include <stdlib.h>
#include <stdio.h>

#pragma comment(lib,"OpenCL.lib")

#define CL_VERBOSE

void openclRetTackle(cl_int retValue, const char* processInfo)
{
    if (retValue != CL_SUCCESS) {
#if (defined CL_DEBUG) || (defined CL_VERBOSE)
        printf("%s Error!\n", processInfo);
#endif
        exit(-1);
    } else {
#ifdef CL_VERBOSE
        printf("%s Success!\n", processInfo);
#endif
    }
}

cl_platform_id   cpPlatform;
cl_device_id     cdDevice;
cl_context       cxGPUContext;
cl_command_queue cqCommandQueue;

int openclInit()
{
    cl_int ret;
    // get the platform ID
    openclRetTackle(clGetPlatformIDs(1, &cpPlatform, NULL), "clGetPlatformIDs");
    // get the GPU device ID
    openclRetTackle(clGetDeviceIDs(cpPlatform, CL_DEVICE_TYPE_GPU, 1, &cdDevice, NULL), "clGetDeviceIDs");
    // create a context for the GPU device
    cxGPUContext = clCreateContext(0, 1, &cdDevice, NULL, NULL, &ret);
    openclRetTackle(ret, "clCreateContext");
    // create the command queue
    cqCommandQueue = clCreateCommandQueue(cxGPUContext, cdDevice, 0, &ret);
    openclRetTackle(ret, "clCreateCommandQueue");
    return CL_SUCCESS;
}

int run()
{
    openclInit();
    system("pause");
    return 0;
}

int main()
{
    return run();
}
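Once openclInit() succeeds, it can be handy to confirm which platform and device were actually selected. The helper below is a minimal sketch, not part of the original post; it reuses the cpPlatform and cdDevice globals from the listing above and prints their names via clGetPlatformInfo and clGetDeviceInfo (the function name printSelectedDevice is mine):

// Sketch (addition): print the names of the platform and device picked by openclInit().
// Assumes the globals cpPlatform and cdDevice have already been filled in.
void printSelectedDevice()
{
    char platformName[256];
    char deviceName[256];
    openclRetTackle(clGetPlatformInfo(cpPlatform, CL_PLATFORM_NAME,
                                      sizeof(platformName), platformName, NULL),
                    "clGetPlatformInfo");
    openclRetTackle(clGetDeviceInfo(cdDevice, CL_DEVICE_NAME,
                                    sizeof(deviceName), deviceName, NULL),
                    "clGetDeviceInfo");
    printf("Platform: %s\nDevice: %s\n", platformName, deviceName);
}

The second, complete listing below goes further: it builds a program from shader.txt, runs a vector-addition kernel over roughly one million floats (DATA_SIZE = 1048576), and verifies the result on the host: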
//#include "stdafx.h"
#include <stdio.h>
#include <cstdlib>
#include <vector>
#include <CL/cl.h>
#include <iostream>
#include <fstream>
#include <string>

#pragma comment(lib,"OpenCL.lib")

int print_device()
{
    cl_int err;
    cl_uint num;
    err = clGetPlatformIDs(0, 0, &num);
    if (err != CL_SUCCESS)
    {
        std::cerr << "Unable to get platforms\n";
        return 0;
    }
    std::vector<cl_platform_id> platforms(num);
    err = clGetPlatformIDs(num, &platforms[0], &num);
    if (err != CL_SUCCESS)
    {
        std::cerr << "Unable to get platform ID\n";
        return 0;
    }

    cl_context_properties prop[] = { CL_CONTEXT_PLATFORM, reinterpret_cast<cl_context_properties>(platforms[0]), 0 };
    cl_context context = clCreateContextFromType(prop, CL_DEVICE_TYPE_DEFAULT, NULL, NULL, NULL);
    if (context == 0)
    {
        std::cerr << "Can't create OpenCL context\n";
        return 0;
    }

    size_t cb;
    clGetContextInfo(context, CL_CONTEXT_DEVICES, 0, NULL, &cb);
    std::vector<cl_device_id> devices(cb / sizeof(cl_device_id));
    clGetContextInfo(context, CL_CONTEXT_DEVICES, cb, &devices[0], 0);

    clGetDeviceInfo(devices[0], CL_DEVICE_NAME, 0, NULL, &cb);
    std::string devname;
    devname.resize(cb);
    clGetDeviceInfo(devices[0], CL_DEVICE_NAME, cb, &devname[0], 0);
    std::cout << "Device: " << devname.c_str() << "\n";

    clReleaseContext(context);
    return 0;
}

cl_program load_program(cl_context context, const char* filename)
{
    std::ifstream in(filename, std::ios_base::binary);
    if (!in.good())
    {
        return 0;
    }

    // get file length
    in.seekg(0, std::ios_base::end);
    size_t length = in.tellg();
    in.seekg(0, std::ios_base::beg);

    // read program source
    std::vector<char> data(length + 1);
    in.read(&data[0], length);
    data[length] = 0;

    // create and build program
    const char* source = &data[0];
    cl_program program = clCreateProgramWithSource(context, 1, &source, 0, 0);
    if (program == 0)
    {
        return 0;
    }
    if (clBuildProgram(program, 0, 0, 0, 0, 0) != CL_SUCCESS)
    {
        return 0;
    }
    return program;
}
int main()
{
    print_device();

    cl_int err;
    cl_uint num;
    err = clGetPlatformIDs(0, 0, &num);
    if (err != CL_SUCCESS)
    {
        std::cerr << "Unable to get platforms\n";
        return 0;
    }
    std::vector<cl_platform_id> platforms(num);
    err = clGetPlatformIDs(num, &platforms[0], &num);
    if (err != CL_SUCCESS)
    {
        std::cerr << "Unable to get platform ID\n";
        return 0;
    }

    // create a context on the default device of the first platform
    cl_context_properties prop[] = { CL_CONTEXT_PLATFORM, reinterpret_cast<cl_context_properties>(platforms[0]), 0 };
    cl_context context = clCreateContextFromType(prop, CL_DEVICE_TYPE_DEFAULT, NULL, NULL, NULL);
    if (context == 0)
    {
        std::cerr << "Can't create OpenCL context\n";
        return 0;
    }

    // query the devices attached to the context and print the first one's name
    size_t cb;
    clGetContextInfo(context, CL_CONTEXT_DEVICES, 0, NULL, &cb);
    std::vector<cl_device_id> devices(cb / sizeof(cl_device_id));
    clGetContextInfo(context, CL_CONTEXT_DEVICES, cb, &devices[0], 0);

    clGetDeviceInfo(devices[0], CL_DEVICE_NAME, 0, NULL, &cb);
    std::string devname;
    devname.resize(cb);
    clGetDeviceInfo(devices[0], CL_DEVICE_NAME, cb, &devname[0], 0);
    std::cout << "Device: " << devname.c_str() << "\n";

    // command queue on the first device
    cl_command_queue queue = clCreateCommandQueue(context, devices[0], 0, 0);
    if (queue == 0)
    {
        std::cerr << "Can't create command queue\n";
        clReleaseContext(context);
        return 0;
    }

    // host data: two random input vectors and one result vector
    const int DATA_SIZE = 1048576;
    std::vector<float> a(DATA_SIZE), b(DATA_SIZE), res(DATA_SIZE);
    for (int i = 0; i < DATA_SIZE; i++)
    {
        a[i] = std::rand();
        b[i] = std::rand();
    }

    // device buffers: inputs are copied from host memory, the output is write-only
    cl_mem cl_a = clCreateBuffer(context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, sizeof(cl_float) * DATA_SIZE, &a[0], NULL);
    cl_mem cl_b = clCreateBuffer(context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, sizeof(cl_float) * DATA_SIZE, &b[0], NULL);
    cl_mem cl_res = clCreateBuffer(context, CL_MEM_WRITE_ONLY, sizeof(cl_float) * DATA_SIZE, NULL, NULL);
    if (cl_a == 0 || cl_b == 0 || cl_res == 0)
    {
        std::cerr << "Can't create OpenCL buffer\n";
        clReleaseMemObject(cl_a);
        clReleaseMemObject(cl_b);
        clReleaseMemObject(cl_res);
        clReleaseCommandQueue(queue);
        clReleaseContext(context);
        return 0;
    }

    // build the kernel source from shader.txt
    cl_program program = load_program(context, "..\\shader.txt");
    if (program == 0)
    {
        std::cerr << "Can't load or build program\n";
        clReleaseMemObject(cl_a);
        clReleaseMemObject(cl_b);
        clReleaseMemObject(cl_res);
        clReleaseCommandQueue(queue);
        clReleaseContext(context);
        return 0;
    }

    cl_kernel adder = clCreateKernel(program, "adder", 0);
    if (adder == 0)
    {
        std::cerr << "Can't load kernel\n";
        clReleaseProgram(program);
        clReleaseMemObject(cl_a);
        clReleaseMemObject(cl_b);
        clReleaseMemObject(cl_res);
        clReleaseCommandQueue(queue);
        clReleaseContext(context);
        return 0;
    }

    // set kernel arguments and launch one work-item per element
    clSetKernelArg(adder, 0, sizeof(cl_mem), &cl_a);
    clSetKernelArg(adder, 1, sizeof(cl_mem), &cl_b);
    clSetKernelArg(adder, 2, sizeof(cl_mem), &cl_res);
    size_t work_size = DATA_SIZE;
    err = clEnqueueNDRangeKernel(queue, adder, 1, 0, &work_size, 0, 0, 0, 0);

    // read the result back (blocking) and verify against a host-side add
    if (err == CL_SUCCESS)
    {
        err = clEnqueueReadBuffer(queue, cl_res, CL_TRUE, 0, sizeof(float) * DATA_SIZE, &res[0], 0, 0, 0);
    }
    if (err == CL_SUCCESS)
    {
        bool correct = true;
        for (int i = 0; i < DATA_SIZE; i++)
        {
            if (a[i] + b[i] != res[i])
            {
                correct = false;
                break;
            }
        }
        if (correct)
        {
            std::cout << "Data is correct\n";
        }
        else
        {
            std::cout << "Data is incorrect\n";
        }
    }
    else
    {
        std::cerr << "Can't run kernel or read back data\n";
    }

    clReleaseKernel(adder);
    clReleaseProgram(program);
    clReleaseMemObject(cl_a);
    clReleaseMemObject(cl_b);
    clReleaseMemObject(cl_res);
    clReleaseCommandQueue(queue);
    clReleaseContext(context);
    return 0;
}
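One weakness of load_program() is that it returns 0 when clBuildProgram fails without saying why. The sketch below is an addition, not part of the original code; it queries the build log through clGetProgramBuildInfo so compile errors in shader.txt become visible, and the name print_build_log is mine:

#include <stdio.h>
#include <stdlib.h>
#include <CL/cl.h>

/* Sketch (addition): dump the OpenCL compiler output for a program whose
 * clBuildProgram call failed. "device" is the cl_device_id the program
 * was built for, e.g. devices[0] in the listing above. */
void print_build_log(cl_program program, cl_device_id device)
{
    size_t log_size = 0;
    clGetProgramBuildInfo(program, device, CL_PROGRAM_BUILD_LOG, 0, NULL, &log_size);
    char* log = (char*)malloc(log_size + 1);
    if (log == NULL)
        return;
    clGetProgramBuildInfo(program, device, CL_PROGRAM_BUILD_LOG, log_size, log, NULL);
    log[log_size] = '\0';
    fprintf(stderr, "Build log:\n%s\n", log);
    free(log);
}

A natural place to call it is the clBuildProgram failure branch inside load_program, using a device obtained from the context in the same way print_device() does.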

Data file required:

shader.txt

__kernel void adder(__global const float* a, __global const float* b, __global float* result)
{
    int idx = get_global_id(0);
    result[idx] = a[idx] + b[idx];
}
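The host code enqueues exactly DATA_SIZE work-items with a NULL local work size, so every value of get_global_id(0) is a valid index into the buffers. If you instead round the global size up to a multiple of an explicit work-group size, the kernel needs a bounds check; the variant below is only a sketch of that pattern (the name adder_checked and the extra n argument are additions, not part of shader.txt):

// Sketch (addition): bounds-checked variant for a rounded-up global work size.
// The host would pass the element count via clSetKernelArg(kernel, 3, sizeof(int), &n)
// and use global_size = ((n + local_size - 1) / local_size) * local_size.
__kernel void adder_checked(__global const float* a,
                            __global const float* b,
                            __global float* result,
                            int n)
{
    int idx = get_global_id(0);
    if (idx < n)
        result[idx] = a[idx] + b[idx];
}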
