// ==== cudaimprocess.h ====
// Matrix dimensions: M rows (height) x N columns (width).
#define M 16
#define N 16
// Fills a pitched M x N device matrix: writes 7 on the diagonal, 0 elsewhere.
// d_transMat must come from cudaMallocPitch; pitch is the row pitch in bytes.
__global__ void build_TransMat(float* d_transMat,size_t pitch);
// Host-side demo: allocates a pitched device matrix, fills it with
// build_TransMat, copies it back and prints it together with the pitch.
void D2DimTest();
// ==== cudaimprocess.cu ====
// Kernel: fills an M x N matrix held in a pitched allocation with 7.0f on the
// diagonal (i == j) and 0.0f everywhere else.
//
// d_transMat: device base pointer obtained from cudaMallocPitch.
// pitch:      row pitch in BYTES (as returned by cudaMallocPitch).
//
// Uses a 2D grid-stride loop, so any grid/block configuration is valid
// (including a single block), and no out-of-range element is ever touched.
__global__ void build_TransMat(float * d_transMat, size_t pitch)
{
	int tid_y = blockIdx.y * blockDim.y + threadIdx.y;   // row index (height, M)
	int offset_y = blockDim.y * gridDim.y;               // grid stride in y
	int tid_x = blockIdx.x * blockDim.x + threadIdx.x;   // column index (width, N)
	int offset_x = blockDim.x * gridDim.x;               // grid stride in x
	for (int j = tid_y; j < M; j += offset_y)
	{
		// Rows are `pitch` bytes apart, not N*sizeof(float): step through the
		// allocation in bytes, then reinterpret the row start as float*.
		float * row_d_transMat = (float*)((char*)d_transMat + j * pitch);
		for (int i = tid_x; i < N; i += offset_x)
		{
			row_d_transMat[i] = (i == j) ? 7.0f : 0.0f;
		}
	}
	// No __syncthreads() needed: every thread writes a disjoint set of
	// elements and there are no cross-thread reads in this kernel.
}
// Host test for 2D pitched memory: allocates an M x N device matrix with
// cudaMallocPitch, fills it via build_TransMat (7 on the diagonal, 0
// elsewhere), copies it back with cudaMemcpy2D and prints it along with the
// driver-chosen pitch.
void D2DimTest()
{
	float * d_transMat = nullptr;
	float * transMat;
	size_t pitch = 0;
	transMat = new float[M * N];

	// Pitched allocation: each of the M rows is N floats wide, but rows are
	// padded out to `pitch` bytes so every row start is aligned for
	// coalesced access.
	cudaMallocPitch(&d_transMat, &pitch, sizeof(float) * N, M);
	cudaMemset2D(d_transMat, pitch, 0, sizeof(float) * N, M);

	// x covers the width (N columns), y covers the height (M rows).
	// Ceil-divide so partial tiles are still covered; the original
	// `M+threadsPerBlock.x-1/128` was a precedence bug (1/128 == 0) that
	// silently launched a 48x48 grid for a 16x16 matrix.
	dim3 threadsPerBlock(32, 32);
	dim3 blocksPergrid((N + threadsPerBlock.x - 1) / threadsPerBlock.x,
	                   (M + threadsPerBlock.y - 1) / threadsPerBlock.y);
	build_TransMat << <blocksPergrid, threadsPerBlock >> > (d_transMat, pitch);

	// Kernel launches do not return errors directly; fetch launch errors here.
	cudaError_t launchErr = cudaGetLastError();
	if (launchErr != cudaSuccess)
	{
		std::cout << "build_TransMat launch failed: "
		          << cudaGetErrorString(launchErr) << std::endl;
	}

	// cudaMemcpy2D strips the padding: device rows are `pitch` bytes apart,
	// host rows are packed at sizeof(float)*N. It is also a blocking call,
	// so it synchronizes with the kernel above.
	cudaMemcpy2D(transMat, sizeof(float) * N, d_transMat, pitch,
	             sizeof(float) * N, M,
	             cudaMemcpyKind::cudaMemcpyDeviceToHost);

	std::cout << "二維測試案例: pitch: " << pitch << std::endl;
	for (int j = 0, k = 0; j < M; j++)
	{
		for (int i = 0; i < N; i++, k++)
		{
			std::cout << transMat[k] << " ";
		}
		std::cout << std::endl;
	}

	cudaFree(d_transMat);
	delete[] transMat;
}
// Observed output: the printed pitch is 512 bytes — a multiple of 128 —
// chosen automatically by the CUDA driver for this allocation.
// With cudaMallocPitch/cudaMemcpy2D, 2D arrays can be handled directly
// without flattening to 1D. This trades a little efficiency for much better
// maintainability and readability of subsequent code.