Source: http://blog.csdn.net/u012234115/article/details/34860273
While integrating a project I needed C++ and CUDA files to work together, so I worked out two ways to compile .cpp and .cu files in the same project.
Environment used in this article: Windows 7 64-bit, Visual Studio 2010, CUDA 5.5, NVIDIA Tesla C1060 GPU.
No extra configuration is normally needed, but to be safe you can optionally add the following environment variables:

CUDA_BIN_PATH = %CUDA_PATH%/bin
CUDA_LIB_PATH = %CUDA_PATH%/lib/Win32
CUDA_SDK_BIN  = %CUDA_SDK_PATH%/bin/Win32
CUDA_SDK_LIB  = %CUDA_SDK_PATH%/common/lib/Win32
CUDA_SDK_PATH = C:/cuda/cudasdk/common

At this point, open one of the samples that ship with CUDA and run it. Only continue with the rest of this article, getting cpp and CUDA to work together, once a sample runs successfully.
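If you want an extra sanity check of your own besides the shipped samples, a tiny query program along the following lines should build in any CUDA-enabled project and print the GPUs it can see. This is a minimal sketch, not part of the original projects:

#include "cuda_runtime.h"
#include <stdio.h>

int main()
{
    int count = 0;
    // If the driver or toolkit is not set up correctly, this call fails or reports zero devices.
    if (cudaGetDeviceCount(&count) != cudaSuccess || count == 0) {
        printf("No CUDA-capable device found.\n");
        return 1;
    }

    for (int i = 0; i < count; ++i) {
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, i);
        // e.g. "Device 0: Tesla C1060, compute capability 1.3"
        printf("Device %d: %s, compute capability %d.%d\n", i, prop.name, prop.major, prop.minor);
    }
    return 0;
}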
Method one: create a CUDA project first, then add a cpp file

1. Open VS2010 and create a new CUDA project named CudaCpp.
2. The project generated by the CUDA template looks as follows; it implements parallel addition of two one-dimensional vectors. The kernel function, the host wrapper function, and main are all written in a single .cu file.
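In outline, the generated kernel.cu contains three parts (skeleton only; the complete listing appears under kernel.cu further down):

// Skeleton of the generated kernel.cu
__global__ void addKernel(int *c, const int *a, const int *b);                  // device kernel: c[i] = a[i] + b[i]
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); // host wrapper: allocate, copy, launch, copy back
int main();                                                                     // fills two 5-element arrays and calls addWithCuda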
3. Next, add an empty .cpp file to the project and cut the body of main from the original .cu file into the .cpp file's main.
To let the .cpp file call the function defined in the .cu file, put the extern "C" keyword in front of the addWithCuda definition (note the capital C). Why doesn't addKernel need it? Because the .cpp code only calls addWithCuda directly; addKernel is launched from inside the .cu file.
4. The .cpp file also needs the complete forward declaration of addWithCuda. The figure below shows the complete structure of the project.
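Put together, the two sides of the extern "C" pattern look roughly like this (a minimal sketch using this project's names; the full code follows later):

// kernel.cu -- the definition, compiled by nvcc, exported with C linkage
#include "cuda_runtime.h"
extern "C" cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
    // ... cudaMalloc / cudaMemcpy / addKernel<<<1, size>>> launch / copy back, as in the full listing below ...
    return cudaSuccess;
}

// main.cpp -- the matching declaration, so the C++ compiler refers to the same unmangled symbol
#include "cuda_runtime.h"
extern "C" cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);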
5. You can add getchar() before the return in the .cpp main so the console window does not flash and close as soon as the program finishes; system("pause") or simply running with Ctrl+F5 works as well.
Run result:
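Given the arrays in main.cpp, the expected console output is:

{1,2,3,4,5} + {10,20,30,40,50} = {11,22,33,44,55}
Calling the cpp file from the CUDA project succeeded!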
The code of the CudaCpp project is listed below.
kernel.cu
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>

__global__ void addKernel(int *c, const int *a, const int *b)
{
    int i = threadIdx.x;
    c[i] = a[i] + b[i];
}

// Helper function for using CUDA to add vectors in parallel.
extern "C"
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
    int *dev_a = 0;
    int *dev_b = 0;
    int *dev_c = 0;
    cudaError_t cudaStatus;

    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
        goto Error;
    }

    // Allocate GPU buffers for three vectors (two input, one output).
    cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }

    cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }

    cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }

    // Copy input vectors from host memory to GPU buffers.
    cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }

    cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }

    // Launch a kernel on the GPU with one thread for each element.
    addKernel<<<1, size>>>(dev_c, dev_a, dev_b);

    // Check for any errors launching the kernel.
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }

    // cudaDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }

    // Copy output vector from GPU buffer to host memory.
    cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }

Error:
    cudaFree(dev_c);
    cudaFree(dev_a);
    cudaFree(dev_b);

    return cudaStatus;
}

main.cpp

#include <stdio.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

extern "C" cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);

int main()
{
    const int arraySize = 5;
    const int a[arraySize] = { 1, 2, 3, 4, 5 };
    const int b[arraySize] = { 10, 20, 30, 40, 50 };
    int c[arraySize] = { 0 };

    // Add vectors in parallel.
    cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addWithCuda failed!");
        return 1;
    }

    printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
        c[0], c[1], c[2], c[3], c[4]);
    printf("Calling the cpp file from the CUDA project succeeded!\n");

    // cudaDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }

    getchar();  // hold the console open for a while
    return 0;
}
Method two: create a cpp project first, then add the cu file
Because the CUDA project in method one is generated automatically, that approach is fairly simple and needs little extra configuration. Adding a .cu file to an existing cpp project takes more work. To keep things simple, a console program is used for the walkthrough; MFC or Direct3D programs work the same way.

1. Create an empty Win32 console project named CppCuda.
2. Then right-click the project -> Add, and add a .cu file.
3. Copy the code of the .cu and .cpp files from method one into this project (with a few small changes; do not forget the extern "C" keyword and the necessary headers). The project structure is shown in the figure:
At this point the build will not succeed yet; some configuration is still needed.
4. The key step: right-click the project -> Build Customizations, and tick the checkbox in front of CUDA 5.5 in the dialog.
If you now open Project -> Properties, you will see that a new "CUDA Linker" entry has appeared.
5. The other key step: right-click kernel.cu -> Properties, and under General -> Item Type select CUDA C/C++ (the .cu file is compiled by nvcc, so its compile and link settings have to be changed here).
6. Project -> Properties -> Linker -> Additional Dependencies: add cudart.lib. (An alternative way to do the same thing from source code is sketched after step 8.)
7. Tools -> Options -> Text Editor -> File Extension: add the two file extensions cu and cuh.
8. The configuration is now complete. Run it:
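Incidentally, as an alternative to editing the project properties in step 6, the same library dependency can be requested from source code with an MSVC-specific pragma. A minimal sketch, assuming the CUDA library directory is already on the linker's search path (the CUDA 5.5 build customization normally takes care of that):

// Somewhere in main.cpp (MSVC only): ask the linker to pull in the CUDA runtime library,
// equivalent to adding cudart.lib under Linker -> Additional Dependencies.
#pragma comment(lib, "cudart.lib")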
9. To be more certain that the CUDA function really is being called, a breakpoint was placed in main.cpp at the call into the CUDA code.
Then single-step through it.
You can see execution jump into the .cu file, which confirms that calling the CUDA function from cpp works.
The code of the CppCuda project is listed below (it is essentially the same as in method one, with only minor changes).
kernel.cu
Identical to the kernel.cu listed under method one, apart from a commented-out forward declaration of addWithCuda left near the top; it is not repeated here.

main.cpp

#include <iostream>
#include <cstdio>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

using namespace std;

extern "C" cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);

int main(int argc, char **argv)
{
    const int arraySize = 5;
    const int a[arraySize] = { 1, 2, 3, 4, 5 };
    const int b[arraySize] = { 10, 20, 30, 40, 50 };
    int c[arraySize] = { 0 };

    // Add vectors in parallel.
    cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addWithCuda failed!");
        return 1;
    }

    cout << "{1,2,3,4,5} + {10,20,30,40,50} = {"
         << c[0] << ',' << c[1] << ',' << c[2] << ',' << c[3] << ',' << c[4] << '}' << endl;
    printf("Calling the cu file from the cpp project succeeded!\n");

    // cudaDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }

    system("pause");  // hold the console open for a while
    return 0;
}
Note: if the build sometimes fails, removing the "device_launch_parameters.h" header can fix it (after removing it you can no longer use the functions or variables it declares from host code); why this happens is still not entirely clear to me.
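For what it is worth, when a .cu file is compiled by nvcc the built-in variables such as threadIdx, blockIdx and blockDim are available anyway; device_launch_parameters.h mainly lets the host compiler and IntelliSense see their declarations. Below is a minimal sketch of a .cu file that builds without that header, assuming it is compiled as a CUDA C/C++ item by nvcc; the file and kernel names (minimal.cu, scaleKernel) are made up for illustration:

// minimal.cu -- compiles under nvcc without including device_launch_parameters.h
#include "cuda_runtime.h"
#include <stdio.h>

__global__ void scaleKernel(float *data, float factor)
{
    // threadIdx / blockIdx / blockDim are nvcc built-ins, so no extra header is needed here.
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    data[i] = data[i] * factor;
}

int main()
{
    const int n = 8;
    float host[n] = { 1, 2, 3, 4, 5, 6, 7, 8 };
    float *dev = 0;

    cudaMalloc((void**)&dev, n * sizeof(float));
    cudaMemcpy(dev, host, n * sizeof(float), cudaMemcpyHostToDevice);
    scaleKernel<<<1, n>>>(dev, 2.0f);
    cudaMemcpy(host, dev, n * sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(dev);

    printf("first element after scaling: %f\n", host[0]);
    return 0;
}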