cudaimprocess.h
// Matrix dimensions for the pitched-memory demo.
#define M 16
#define N 16
// Kernel: fills a pitched M x N device matrix (7 on the diagonal, 0 elsewhere).
//   d_transMat - device pointer obtained from cudaMallocPitch
//   pitch      - row pitch in BYTES, as returned by cudaMallocPitch
__global__ void build_TransMat(float* d_transMat,size_t pitch);
// Host demo: allocate pitched device matrix, fill it, copy back, print.
void D2DimTest();
cudaimprocess.cu
// Fills the M x N matrix d_transMat with 7 on the diagonal and 0 elsewhere.
// The matrix must come from cudaMallocPitch; rows are `pitch` bytes apart.
// A 2D grid-stride loop is used, so any grid/block configuration covers the
// whole matrix (including a single-block debug launch).
//   d_transMat - device pointer from cudaMallocPitch
//   pitch      - row pitch in bytes returned by cudaMallocPitch
__global__ void build_TransMat(float * d_transMat, size_t pitch)
{
    int tid_y    = blockIdx.y * blockDim.y + threadIdx.y;  // starting row (height direction)
    int offset_y = blockDim.y * gridDim.y;                 // grid stride in y
    int tid_x    = blockIdx.x * blockDim.x + threadIdx.x;  // starting column (width direction)
    int offset_x = blockDim.x * gridDim.x;                 // grid stride in x
    for (int j = tid_y; j < M; j += offset_y)
    {
        // Rows may be padded, so compute the row start as a byte offset
        // (j * pitch) rather than j * N elements.
        float * row_d_transMat = (float*)((char*)d_transMat + j * pitch);
        for (int i = tid_x; i < N; i += offset_x)
        {
            if (i == j)
            {
                row_d_transMat[i] = 7.0f;  // float literal, avoid int->float conversion
                printf("%d %d - \n",i,j);  // debug trace of diagonal writes
            }
            else
            {
                row_d_transMat[i] = 0.0f;
            }
        }
    }
    // Removed: an unused `count` local that was incremented every iteration,
    // and a trailing __syncthreads() — no shared memory is used, so no
    // barrier is needed before the kernel exits.
}
// Host-side demo of 2D pitched device memory: allocates an M x N float
// matrix with cudaMallocPitch, fills it with build_TransMat, copies it back
// with cudaMemcpy2D, and prints it row by row.
void D2DimTest()
{
    float * d_transMat = nullptr;
    size_t pitch = 0;
    float * transMat = new float[M*N];

    // Pitched allocation: each row starts on an alignment boundary, so
    // pitch (in bytes) may be larger than sizeof(float) * N.
    cudaError_t err = cudaMallocPitch(&d_transMat, &pitch, sizeof(float)*N, M);
    if (err != cudaSuccess)
    {
        std::cout << "cudaMallocPitch failed: " << cudaGetErrorString(err) << std::endl;
        delete []transMat;
        return;
    }
    cudaMemset2D(d_transMat, pitch, 0, sizeof(float)*N, M);

    dim3 threadsPerBlock(32, 32);
    // BUG FIX: the original computed `M + threadsPerBlock.x - 1/128`, where
    // `1/128 == 0` (precedence error), and also paired M with the x (width)
    // dimension. Use ceil-division: x covers the N columns, y the M rows.
    // (The kernel's grid-stride loop keeps any grid correct regardless.)
    dim3 blocksPerGrid((N + threadsPerBlock.x - 1) / threadsPerBlock.x,
                       (M + threadsPerBlock.y - 1) / threadsPerBlock.y);
    build_TransMat << <blocksPerGrid, threadsPerBlock >> > (d_transMat, pitch);
    err = cudaGetLastError();  // launch-configuration errors surface here
    if (err != cudaSuccess)
    {
        std::cout << "kernel launch failed: " << cudaGetErrorString(err) << std::endl;
    }

    // Blocking copy to a tightly-packed host buffer (host pitch is exactly
    // sizeof(float) * N); this also synchronizes with the kernel above.
    cudaMemcpy2D(transMat, sizeof(float)*N, d_transMat, pitch, sizeof(float)*N, M
        , cudaMemcpyKind::cudaMemcpyDeviceToHost);

    std::cout << "二维测试案例: pitch: " << pitch << std::endl;
    for (int j = 0, k = 0; j < M; j++)
    {
        for (int i = 0; i < N; i++, k++)
        {
            std::cout << transMat[k] << " ";
        }
        std::cout << std::endl;
    }

    cudaFree(d_transMat);
    delete []transMat;
}
这是运行输出的结果：打印 pitch，发现是 512 字节（相当于 128 个 float），是 128 的倍数——这是 CUDA 运行时为保证每行地址对齐而自动选择的。
这样一来，以后写二维数组就简单多了：直接用 cudaMallocPitch / cudaMemcpy2D 这种方式处理，不必手动把二维下标折算成一维。虽然行末的填充会多占一点显存，但后续代码的可维护性和可读性强了很多。