Cuda 學習筆記 (一)

Cuda 學習筆記 (一)

1.Hello World

#include<stdio.h>

// Kernel: runs on the GPU. Each launched thread prints one line.
__global__ void helloFromGPU(void){
  printf("Hello World from GPU!\n");
}

int main(void){
  printf("Hello World from CPU!\n");
  // <<<1,10>>>: launch 1 block of 10 threads -> 10 lines of output.
  helloFromGPU<<<1,10>>>();
  // Kernel launches are asynchronous. Synchronize explicitly so the
  // device-side printf buffer is flushed before the program exits,
  // instead of relying on cudaDeviceReset() to do it implicitly.
  cudaDeviceSynchronize();
  cudaDeviceReset();
  return 0;
}

其中__global__表示後面的函數交由GPU處理,通常GPU編程包含以下五步

  • (1)分配GPU內存
  • (2)將CPU的內容拷貝給GPU內存
  • (3)調用CUDA的內核函數進行處理
  • (4)將GPU處理完的數據拷貝給CPU
  • (5)釋放GPU的內存

上述代碼中 __global__ 修飾的函數爲內核函數(kernel)。<<<...>>> 是從主機端(host)調用設備端(device)代碼的執行配置語法:第一個參數 1 表示啓動 1 個線程塊(block),第二個參數 10 表示每個線程塊包含 10 個線程,因此共啓動 10 個線程。
打開終端,進入cu文件所在的文件夾輸入
編譯

nvcc -o helloworld helloworld.cu

執行

./helloworld

結果

Hello World from CPU!
Hello World from GPU!
Hello World from GPU!
Hello World from GPU!
Hello World from GPU!
Hello World from GPU!
Hello World from GPU!
Hello World from GPU!
Hello World from GPU!
Hello World from GPU!
Hello World from GPU!

2.Python 中安裝 pycuda

下載代碼

git clone https://github.com/inducer/pycuda.git

編譯

cd pycuda
python setup.py install

測試

python
import pycuda

無報錯則安裝成功


3.GPU信息的輸出

#include<iostream>
using namespace std;

// Query and print the key properties of GPU device 0.
int main(void){
    int dev = 0;
    cudaDeviceProp devProp;
    // Check the API result: on a machine without a usable CUDA device
    // devProp would otherwise be read uninitialized.
    cudaError_t err = cudaGetDeviceProperties(&devProp, dev);
    if (err != cudaSuccess) {
        cerr << "cudaGetDeviceProperties failed: " << cudaGetErrorString(err) << endl;
        return 1;
    }
    cout << "使用GPU device " << dev << ": " << devProp.name << endl;
    cout << "SM的數量:" << devProp.multiProcessorCount << endl;
    cout << "每個線程塊的共享內存大小:" << devProp.sharedMemPerBlock / 1024.0 << " KB" << endl;
    cout << "每個線程塊的最大線程數:" << devProp.maxThreadsPerBlock << endl;
    // "SM" (streaming multiprocessor) — the original text said "EM", a typo.
    cout << "每個SM的最大線程數:" << devProp.maxThreadsPerMultiProcessor << endl;
    // A warp is 32 threads, hence the division by 32.
    cout << "每個SM的最大線程束數:" << devProp.maxThreadsPerMultiProcessor / 32 << endl;

    return 0;
}

執行結果

使用GPU device 0: GeForce GTX 1080
SM的數量:20
每個線程塊的共享內存大小:48 KB
每個線程塊的最大線程數:1024
每個SM的最大線程數:2048
每個SM的最大線程束數:64

4.遍歷多個GPU並輸出信息

#include "common/book.h"

// Enumerate every CUDA device visible to the process and print its
// general, memory and multiprocessor properties.
// HANDLE_ERROR comes from the book's common/book.h and aborts with a
// message if a CUDA call fails.
int main(void){
    cudaDeviceProp  prop;
    int count;
    HANDLE_ERROR( cudaGetDeviceCount( &count ) );
    for (int i=0; i< count; i++) {
        HANDLE_ERROR( cudaGetDeviceProperties( &prop, i ) );
        printf( "   --- General Information for device %d ---\n", i );
        printf( "Name:  %s\n", prop.name );
        printf( "Compute capability:  %d.%d\n", prop.major, prop.minor );
        printf( "Clock rate:  %d\n", prop.clockRate );  // in kHz
        printf( "Device copy overlap:  " );
        if (prop.deviceOverlap)
            printf( "Enabled\n" );
        else
            printf( "Disabled\n");
        printf( "Kernel execution timeout :  " );
        if (prop.kernelExecTimeoutEnabled)
            printf( "Enabled\n" );
        else
            printf( "Disabled\n" );

        printf( "   --- Memory Information for device %d ---\n", i );
        // These fields are size_t; %zu is the correct, portable format
        // specifier (%ld only happens to work on LP64 platforms).
        printf( "Total global mem:  %zu\n", prop.totalGlobalMem );
        printf( "Total constant Mem:  %zu\n", prop.totalConstMem );
        printf( "Max mem pitch:  %zu\n", prop.memPitch );
        printf( "Texture Alignment:  %zu\n", prop.textureAlignment );

        printf( "   --- MP Information for device %d ---\n", i );
        printf( "Multiprocessor count:  %d\n",
                    prop.multiProcessorCount );
        printf( "Shared mem per mp:  %zu\n", prop.sharedMemPerBlock );
        printf( "Registers per mp:  %d\n", prop.regsPerBlock );
        printf( "Threads in warp:  %d\n", prop.warpSize );
        printf( "Max threads per block:  %d\n",
                    prop.maxThreadsPerBlock );
        printf( "Max thread dimensions:  (%d, %d, %d)\n",
                    prop.maxThreadsDim[0], prop.maxThreadsDim[1],
                    prop.maxThreadsDim[2] );
        printf( "Max grid dimensions:  (%d, %d, %d)\n",
                    prop.maxGridSize[0], prop.maxGridSize[1],
                    prop.maxGridSize[2] );
        printf( "\n" );
    }
    return 0;
}

執行結果

   --- General Information for device 0 ---
Name:  GeForce GTX 1080
Compute capability:  6.1
Clock rate:  1733500
Device copy overlap:  Enabled
Kernel execution timeout :  Disabled
   --- Memory Information for device 0 ---
Total global mem:  8513978368
Total constant Mem:  65536
Max mem pitch:  2147483647
Texture Alignment:  512
   --- MP Information for device 0 ---
Multiprocessor count:  20
Shared mem per mp:  49152
Registers per mp:  65536
Threads in warp:  32
Max threads per block:  1024
Max thread dimensions:  (1024, 1024, 64)
Max grid dimensions:  (2147483647, 65535, 65535)
   --- General Information for device 1 ---
Name:  GeForce GTX 1080
Compute capability:  6.1
Clock rate:  1733500
Device copy overlap:  Enabled
Kernel execution timeout :  Disabled
   --- Memory Information for device 1 ---
Total global mem:  8513978368
Total constant Mem:  65536
Max mem pitch:  2147483647
Texture Alignment:  512
   --- MP Information for device 1 ---
Multiprocessor count:  20
Shared mem per mp:  49152
Registers per mp:  65536
Threads in warp:  32
Max threads per block:  1024
Max thread dimensions:  (1024, 1024, 64)
Max grid dimensions:  (2147483647, 65535, 65535)


5.GPU與CPU性能對比

#include "common/book.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define N   600000

// GPU vector add: one thread per element.
// Flat global index with a bounds guard for the (partial) tail block.
__global__ void addGPU( int *a, int *b, int *c ) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < N)
        c[tid] = a[tid] + b[tid];
}

// CPU reference implementation of the same element-wise add.
void addCPU( int *a, int *b, int *c ){
    for (int i=0; i<N; i++) {
        c[i] = a[i] + b[i];
    }
}

int main( void ) {
    // Heap-allocate the host arrays: three int[600000] locals (~7 MB
    // total) would risk overflowing the typical 8 MB process stack.
    int *a = (int*)malloc( N * sizeof(int) );
    int *b = (int*)malloc( N * sizeof(int) );
    int *c = (int*)malloc( N * sizeof(int) );
    if (!a || !b || !c) {
        fprintf( stderr, "host allocation failed\n" );
        return 1;
    }
    int *dev_a, *dev_b, *dev_c;

    // allocate the memory on the GPU
    HANDLE_ERROR( cudaMalloc( (void**)&dev_a, N * sizeof(int) ) );
    HANDLE_ERROR( cudaMalloc( (void**)&dev_b, N * sizeof(int) ) );
    HANDLE_ERROR( cudaMalloc( (void**)&dev_c, N * sizeof(int) ) );

    // fill the arrays 'a' and 'b' on the CPU
    for (int i=0; i<N; i++) {
        a[i] = -i;
        b[i] = i * i;
    }

    // copy the arrays 'a' and 'b' to the GPU
    HANDLE_ERROR( cudaMemcpy( dev_a, a, N * sizeof(int),
                              cudaMemcpyHostToDevice ) );
    HANDLE_ERROR( cudaMemcpy( dev_b, b, N * sizeof(int),
                              cudaMemcpyHostToDevice ) );

    // Launch 256-thread blocks instead of N single-thread blocks:
    // a one-thread block leaves 31 of the 32 warp lanes idle.
    int threadsPerBlock = 256;
    int blocks = (N + threadsPerBlock - 1) / threadsPerBlock;  // ceil-div

    clock_t beginGPU = clock();
    addGPU<<<blocks, threadsPerBlock>>>( dev_a, dev_b, dev_c );
    // Kernel launches are asynchronous: without this synchronization,
    // the timer only measures launch overhead, not the kernel itself.
    HANDLE_ERROR( cudaDeviceSynchronize() );
    printf("GPU time -- %f sec\n", (clock() - beginGPU)*1.0/CLOCKS_PER_SEC);

    clock_t beginCPU = clock();
    addCPU(a, b, c );
    printf("CPU time -- %f sec\n", (clock() - beginCPU)*1.0/CLOCKS_PER_SEC);

    // copy the array 'c' back from the GPU to the CPU
    // (as in the original, this overwrites the CPU result computed above)
    HANDLE_ERROR( cudaMemcpy( c, dev_c, N * sizeof(int),
                              cudaMemcpyDeviceToHost ) );

    // free the memory allocated on the GPU and on the host
    HANDLE_ERROR( cudaFree( dev_a ) );
    HANDLE_ERROR( cudaFree( dev_b ) );
    HANDLE_ERROR( cudaFree( dev_c ) );
    free(a);
    free(b);
    free(c);

    return 0;
}

執行結果
N分別爲 10000,100000,600000

bowen@user-Super-Server:~/cuda/p1$ ./l3
GPU time -- 0.000019 sec
CPU time -- 0.000040 sec
bowen@user-Super-Server:~/cuda/p1$ nvcc -o l3 l3.cu
bowen@user-Super-Server:~/cuda/p1$ ./l3
GPU time -- 0.000022 sec
CPU time -- 0.000406 sec
bowen@user-Super-Server:~/cuda/p1$ ./l3
GPU time -- 0.000020 sec
CPU time -- 0.002561 sec

6.常用API小結

  • kernel(內核函數)
// Minimal kernel: __global__ marks a function that runs on the GPU and
// is callable from the host; <<<blocks, threads>>> is the launch config.
// ("kernel" — the original spelled it "kernal".)
__global__ void kernel(void){
}
kernel<<<1,1>>>();
  • cudaMalloc
    分配內存
// Allocate sizeof(int) bytes of GPU (device) memory. dev_c receives a
// device pointer — it must not be dereferenced on the host.
int *dev_c;
cudaMalloc((void**)&dev_c,sizeof(int));
  • cudaMemcpy
    host指針與device指針的互相訪問
    此段代碼含義,將device的數據給host,相當於 c = dev_c;
    最後一個參數 cudaMemcpyDeviceToHost,cudaMemcpyDeviceToDevice,
    cudaMemcpyHostToDevice
int c;
int *dev_c;
cudaMalloc((void**)&dev_c,sizeof(int));
// Copy device -> host (roughly: c = *dev_c). The original snippet was
// missing the trailing semicolon.
cudaMemcpy(&c,dev_c,sizeof(int),cudaMemcpyDeviceToHost);
  • cudaGetDeviceCount & cudaDeviceProp & cudaGetDeviceProperties & cudaGetDevice

可以獲取GPU的個數並查詢GPU相關信息,返回一個cudaDeviceProp類型的結構

// Query how many CUDA-capable GPUs are visible to this process.
int count;
cudaGetDeviceCount(&count);

(原文此處附示意圖:cudaDeviceProp 結構體的字段一覽)

// Iterate over all visible GPUs and fetch each one's properties.
cudaDeviceProp prop;
int count;
cudaGetDeviceCount(&count);
for(int i=0; i < count; i++){
    cudaGetDeviceProperties(&prop,i);
    // then read fields such as prop.name to get the GPU's name
    ...
}

使用for循環進行迭代查找GPU有點麻煩,cuda提供了一個迭代方法。
例如查找一個主版本號爲6,次版本號爲大於等於1的GPU,

    // Pick a device matching given criteria (here: compute capability 6.1)
    // instead of looping over all devices manually.
    int dev;
    cudaDeviceProp prop;
    cudaGetDevice(&dev);// get the id of the currently active GPU
    memset(&prop,0,sizeof(cudaDeviceProp));
    prop.major = 6;
    prop.minor = 1;
    cudaChooseDevice(&dev,&prop);// store the id of the best-matching device in dev
    printf("%d\n",dev);
    cudaSetDevice(dev);// run all subsequent CUDA operations on this GPU

例子1

dim3 grid(2, 2, 1), block(2, 2, 1);

(原文此處附示意圖:2x2 網格、2x2 線程塊的線程索引佈局)

    // 二:線程執行代碼
    // Element-wise vector add for a 2D grid of 2D blocks.
    // Flat global thread id:
    //   (linear block index) * (threads per block) + (linear index in block)
    // NOTE: the in-block term is row * row-width + column, i.e.
    // threadIdx.y * blockDim.x + threadIdx.x. The original used blockDim.y,
    // which only coincidentally works when blockDim.x == blockDim.y.
    __global__ void vector_add(float* vec1, float* vec2, float* vecres, int length) {
        int tid = (blockIdx.y * gridDim.x + blockIdx.x) * (blockDim.x * blockDim.y)
                + threadIdx.y * blockDim.x + threadIdx.x;
        if (tid < length) {   // guard: grid may cover more threads than elements
            vecres[tid] = vec1[tid] + vec2[tid];
        }
    }

例子2

    dim3 grid(8, 4, 1), block(8, 2, 1);

(原文此處附示意圖:8x4 網格、8x2 線程塊的線程索引佈局)

    // Element-wise vector add (same kernel as example 1, launched with an
    // 8x4 grid of 8x2 blocks).
    // BUG FIX: with block (8,2,1) the original in-block term
    // threadIdx.y * blockDim.y (= y*2) made distinct threads collide on the
    // same tid (e.g. (y=1,x=0) and (y=0,x=2)). The correct linearization is
    // threadIdx.y * blockDim.x + threadIdx.x (row * row-width + column).
    __global__ void vector_add(float* vec1, float* vec2, float* vecres, int length) {
        int tid = (blockIdx.y * gridDim.x + blockIdx.x) * (blockDim.x * blockDim.y)
                + threadIdx.y * blockDim.x + threadIdx.x;
        if (tid < length) {   // guard against the tail of the grid
            vecres[tid] = vec1[tid] + vec2[tid];
        }
    }

二維

    // 2D element-wise add of two square (width x width) matrices.
    // NOTE(review): float** implies each matrix is a device-side table of
    // row pointers — the caller must build that table in device memory;
    // not shown here, confirm against the launch code.
    __global__ void vector_add(float** mat1, float** mat2, float** matres, int width) {
        int col = blockIdx.x * blockDim.x + threadIdx.x;
        int row = blockIdx.y * blockDim.y + threadIdx.y;
        if (col >= width || row >= width)
            return;  // outside the matrix: nothing to do
        matres[col][row] = mat1[col][row] + mat2[col][row];
    }

【參考內容】[GPU高性能編程CUDA實戰].(桑德斯).聶雪軍等
代碼中頭文件是本書的源碼,該書掃描版與源代碼可以在CSDN下載中搜索下載

發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章