Getting Started with Kernel Launch Invocation of Ascend C Custom Operators

This article is shared from the Huawei Cloud community post "Getting Started with Kernel Launch Invocation of Ascend C Custom Operators" by jackwangcumt.

1 Kernel Launch Overview

According to the official documentation, Ascend C exposes a basic kernel-function invocation (Kernel Launch) mechanism to simplify the development workflow of Ascend C custom operators and to provide easier-to-use debugging and tuning capabilities. Once the operator's kernel function and tiling implementation are complete, the operator can be invoked through the AscendCL runtime interfaces to build your own inference application. A simple kernel development project is also provided: you only need to supply the kernel-side implementation, and the project framework lets you realize a Kernel Launch quickly. This article assumes that the operator development project from the post 《Ascend C 自定義PRelu算子》 (https://bbs.huaweicloud.com/blogs/425244) has already been completed. Please note:

  • In the current 8.0.RC1.alpha002 version, Kernel Launch open programming is a trial feature and must not be used in commercial products.
  • The current 8.0.RC1.alpha002 version does not yet support obtaining the user workspace.

2 Kernel Launch Invocation

The ACLRT_LAUNCH_KERNEL invocation method is a functionally enhanced version of the kernel invocation operator (<<<...>>>). The kernel function call is asynchronous, and the interface is used as follows (a minimal usage sketch follows the parameter descriptions below):

ACLRT_LAUNCH_KERNEL(kernel_name)(blockDim, stream, argument list);
  • kernel_name: the name of the operator's kernel function.
  • blockDim: specifies how many cores the kernel function will run on. Each core executing the kernel is assigned a logical ID, block_idx, which can be obtained inside the kernel implementation by calling GetBlockIdx.
  • stream: of type aclrtStream; a stream maintains the execution order of asynchronous operations, ensuring they execute on the Device in the order they are issued in the application code.
  • argument list: the argument list, which must match the parameter list of the kernel function.
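
As a reference, here is a minimal launch sketch. It assumes a vector-add kernel named add_custom(x, y, z) like the one in the official sample referenced below, and device buffers xDevice, yDevice and zDevice that have already been allocated with aclrtMalloc and filled via aclrtMemcpy; the full host-side setup follows the same AscendCL flow as the main.cpp shown later in this article.

// Minimal sketch (assumptions: a kernel add_custom(GM_ADDR x, GM_ADDR y, GM_ADDR z) exists,
// and xDevice/yDevice/zDevice are device buffers prepared with aclrtMalloc/aclrtMemcpy).
uint32_t blockDim = 8;
aclrtStream stream = nullptr;
CHECK_ACL(aclrtCreateStream(&stream));

// The launch is asynchronous: the macro queues the kernel on the stream and returns immediately.
ACLRT_LAUNCH_KERNEL(add_custom)(blockDim, stream, xDevice, yDevice, zDevice);

// Block until the kernel has finished before copying results back to the host.
CHECK_ACL(aclrtSynchronizeStream(stream));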

To help developers quickly debug an operator's Kernel Launch, an official simplified operator project is provided; we can develop operators based on the sample code and project framework it contains. The operator project supports the following:

  • Debugging features such as PRINTF and DumpTensor (see the short sketch after this list).
  • The application built from the project can collect and parse profiling data via the msprof command line.
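
For example, the p_relu_custom kernel later in this article calls PRINTF to print the number of running cores. A minimal sketch of in-kernel debug printing looks like the following (my_kernel is a hypothetical name; DumpTensor can likewise dump the contents of a LocalTensor, see the official documentation for its exact arguments):

extern "C" __global__ __aicore__ void my_kernel(GM_ADDR x, GM_ADDR y)
{
    // Print the total core count and this core's logical block index.
    PRINTF("[npu debug] >>> block num %d, block idx %d", GetBlockNum(), GetBlockIdx());
    // ... kernel implementation ...
}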

A sample project is available at https://gitee.com/ascend/samples/blob/master/operator/AddCustomSample/KernelLaunch/AddKernelInvocationTilingNeo ; its directory structure is shown below:

AddKernelInvocationNeo
|-- cmake                                                 // CMake build files
|-- scripts
|  ├── gen_data.py                                     // Script that generates the input data and the golden (reference) data
|  ├── verify_result.py                                // Script that verifies the output data against the golden data
|-- CMakeLists.txt                                        // CMake build configuration file
|-- add_custom.cpp                                     // Kernel implementation of the vector operator
|-- data_utils.h                                          // Data read/write helper functions
|-- main.cpp                                              // Main function: the application invoking the operator, with both CPU-side and NPU-side calls
|-- run.sh                                                // Script that builds and runs the operator

Based on this operator project, the steps for developing an operator are as follows:

  • Implement the kernel side of the operator.
  • Write main.cpp, the application that invokes the operator.
  • Write the CMake build configuration file CMakeLists.txt.
  • Modify gen_data.py (generates the input data and the golden data) and verify_result.py (verifies the output data against the golden data) as needed.
  • Modify the build-and-run script run.sh as needed and execute it to build, run, and verify the operator.

3 Kernel Launch Implementation

Create a new directory named KernelLaunch under the PReluSample directory to hold the project code for the Kernel Launch invocation. I used the official sample project https://gitee.com/ascend/samples/tree/master/operator/LeakyReluCustomSample/KernelLaunch/LeakyReluKernelInvocation as a reference and modified the relevant parameters. The code of p_relu_custom.cpp is as follows:

#include "kernel_operator.h"
using namespace AscendC;

constexpr int32_t BUFFER_NUM = 2; 
constexpr int32_t TOTAL_LENGTH = 8 * 200 * 1024;    
constexpr int32_t TILE_NUM = 32;                           
constexpr float alpha = 0.002;

class KernelPRelu {
public:
    __aicore__ inline KernelPRelu() {}
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR y, uint32_t totalLength, uint32_t tileNum, float alpha)
    {
        PRINTF("[npu debug] >>> GetBlockNum() %d", GetBlockNum());
        ASSERT(GetBlockNum() != 0 && "block dim can not be zero!");
        this->blockLength = totalLength / GetBlockNum();
        this->tileNum = tileNum;
        this->alpha = static_cast<float>(alpha);
        ASSERT(tileNum != 0 && "tile num can not be zero!");
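        // split each core's data into tileNum tiles, then halve each tile again for double buffering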
        this->tileLength = this->blockLength / tileNum / BUFFER_NUM;

        // get start index for current core, core parallel
        xGm.SetGlobalBuffer((__gm__ float*)x + this->blockLength * GetBlockIdx(), this->blockLength);
        yGm.SetGlobalBuffer((__gm__ float*)y + this->blockLength * GetBlockIdx(), this->blockLength);
        // pipe alloc memory to queue, the unit is Bytes
        pipe.InitBuffer(inQueueX, BUFFER_NUM, this->tileLength * sizeof(float));
        pipe.InitBuffer(outQueueY, BUFFER_NUM, this->tileLength * sizeof(float));
        pipe.InitBuffer(tmpBuffer1, this->tileLength * sizeof(float));
        //pipe.InitBuffer(tmpBuffer2, this->tileLength * sizeof(float));
    }
    __aicore__ inline void Process()
    {
        // loop count need to be doubled, due to double buffer
        int32_t loopCount = this->tileNum * BUFFER_NUM;
        // tiling strategy, pipeline parallel
        for (int32_t i = 0; i < loopCount; i++) {
            CopyIn(i);
            Compute(i);
            CopyOut(i);
        }
    }

private:
    __aicore__ inline void CopyIn(int32_t progress)
    {
        // alloc tensor from queue memory
        LocalTensor<float> xLocal = inQueueX.AllocTensor<float>();
        // copy progress_th tile from global tensor to local tensor
        DataCopy(xLocal, xGm[progress * this->tileLength], this->tileLength);
        // enque input tensors to VECIN queue
        inQueueX.EnQue(xLocal);
    }
    __aicore__ inline void Compute(int32_t progress)
    {
        // deque input tensors from VECIN queue
        LocalTensor<float> xLocal = inQueueX.DeQue<float>();
        LocalTensor<float> yLocal = outQueueY.AllocTensor<float>();
        LocalTensor<float> tmpTensor1 = tmpBuffer1.Get<float>();
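        // PRelu(x) = max(x, 0) + alpha * min(x, 0): compute the two branches separately, then sum them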
        float inputVal = 0.0;
        Maxs(tmpTensor1, xLocal, inputVal, this->tileLength); // x >= 0  --> x
        // x < 0 
        Mins(xLocal, xLocal, inputVal, this->tileLength);
        Muls(xLocal, xLocal, this->alpha, this->tileLength);
        Add(yLocal, xLocal, tmpTensor1, this->tileLength);
        outQueueY.EnQue<float>(yLocal);
        // free input tensors for reuse
        inQueueX.FreeTensor(xLocal);
    }
    __aicore__ inline void CopyOut(int32_t progress)
    {
        // deque output tensor from VECOUT queue
        LocalTensor<float> yLocal = outQueueY.DeQue<float>();
        // copy progress_th tile from local tensor to global tensor
        DataCopy(yGm[progress * this->tileLength], yLocal, this->tileLength);
        // free output tensor for reuse
        outQueueY.FreeTensor(yLocal);
    }

private:
    TPipe pipe;
    TBuf<QuePosition::VECCALC> tmpBuffer1;
    //TBuf<QuePosition::VECCALC> tmpBuffer1, tmpBuffer2;
    // create queues for input, in this case depth is equal to buffer num
    TQue<QuePosition::VECIN, BUFFER_NUM> inQueueX;
    // create queue for output, in this case depth is equal to buffer num
    TQue<QuePosition::VECOUT, BUFFER_NUM> outQueueY;
    GlobalTensor<float> xGm, yGm;
    uint32_t blockLength;
    uint32_t tileNum;
    uint32_t tileLength;
    float alpha;
};
extern "C" __global__ __aicore__ void p_relu_custom(GM_ADDR x, GM_ADDR y) {
    //GET_TILING_DATA(tiling_data, tiling);
    // TODO: user kernel impl
    KernelPRelu op;
    op.Init(x, y, TOTAL_LENGTH, TILE_NUM, alpha);
    op.Process();
}

#ifndef __CCE_KT_TEST__
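// __CCE_KT_TEST__ is defined for the CPU-debug build, so this <<<>>> launcher is compiled only for the NPU build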
// call of kernel function
void p_relu_custom_do(uint32_t blockDim, void* l2ctrl, void* stream, uint8_t* x, uint8_t* y)
{
    p_relu_custom<<<blockDim, l2ctrl, stream>>>(x, y);
}
#endif

The code of main.cpp is as follows:

/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2022-2023. All rights reserved.
 * This file contains the CPU-debug code and the NPU code. We read data from a bin file
 * and write the result to a file.
 */
#include "data_utils.h"
#ifndef __CCE_KT_TEST__
#include "acl/acl.h"
extern void p_relu_custom_do(uint32_t coreDim, void* l2ctrl, void* stream, uint8_t* x, uint8_t* y);
#else
#include "tikicpulib.h"
extern "C" __global__ __aicore__ void p_relu_custom(GM_ADDR x, GM_ADDR y);
#endif

int32_t main(int32_t argc, char* argv[])
{
    uint32_t blockDim = 8;
    size_t inputByteSize = 8 * 200 * 1024 * sizeof(float);
    size_t outputByteSize = 8 * 200 * 1024 * sizeof(float);

#ifdef __CCE_KT_TEST__
    // CPU
    uint8_t* x = (uint8_t*)AscendC::GmAlloc(inputByteSize);
    uint8_t* y = (uint8_t*)AscendC::GmAlloc(outputByteSize);
    printf("[cpu debug]>>> inputByteSize: %d\n", inputByteSize); 

    ReadFile("./input/input_x.bin", inputByteSize, x, inputByteSize);
    AscendC::SetKernelMode(KernelMode::AIV_MODE);
    ICPU_RUN_KF(p_relu_custom, blockDim, x, y); // use this macro for cpu debug
    WriteFile("./output/output_y.bin", y, outputByteSize);
    AscendC::GmFree((void *)x);
    AscendC::GmFree((void *)y);
    
#else
   // NPU 
    //CHECK_ACL(aclInit(nullptr));
    CHECK_ACL(aclInit("./acl.json"));
    aclrtContext context;
    int32_t deviceId = 0;
    CHECK_ACL(aclrtSetDevice(deviceId));
    CHECK_ACL(aclrtCreateContext(&context, deviceId));
    aclrtStream stream = nullptr;
    CHECK_ACL(aclrtCreateStream(&stream));

    uint8_t *xHost, *yHost;
    uint8_t *xDevice, *yDevice;
    CHECK_ACL(aclrtMallocHost((void**)(&xHost), inputByteSize));
    CHECK_ACL(aclrtMallocHost((void**)(&yHost), outputByteSize));
    CHECK_ACL(aclrtMalloc((void**)&xDevice, inputByteSize, ACL_MEM_MALLOC_HUGE_FIRST));
    CHECK_ACL(aclrtMalloc((void**)&yDevice, outputByteSize, ACL_MEM_MALLOC_HUGE_FIRST));

    ReadFile("./input/input_x.bin", inputByteSize, xHost, inputByteSize);
    CHECK_ACL(aclrtMemcpy(xDevice, inputByteSize, xHost, inputByteSize, ACL_MEMCPY_HOST_TO_DEVICE));

    p_relu_custom_do(blockDim, nullptr, stream, xDevice, yDevice);
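    // the launch above is asynchronous; wait for the stream to finish before copying the result back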
    CHECK_ACL(aclrtSynchronizeStream(stream));

    CHECK_ACL(aclrtMemcpy(yHost, outputByteSize, yDevice, outputByteSize, ACL_MEMCPY_DEVICE_TO_HOST));
    WriteFile("./output/output_y.bin", yHost, outputByteSize);

    CHECK_ACL(aclrtFree(xDevice));
    CHECK_ACL(aclrtFree(yDevice));
    CHECK_ACL(aclrtFreeHost(xHost));
    CHECK_ACL(aclrtFreeHost(yHost));

    CHECK_ACL(aclrtDestroyStream(stream));
    CHECK_ACL(aclrtDestroyContext(context));
    CHECK_ACL(aclrtResetDevice(deviceId));
    CHECK_ACL(aclFinalize());
#endif
    return 0;
}

Run the following commands for on-board NPU debugging and for CPU debugging:

# npu
bash run.sh Ascend310P1 npu_onboard
# cpu
bash run.sh Ascend310P1 cpu

[Screenshots: output of the NPU on-board run and the CPU debug run]

Follow us to be the first to learn about Huawei Cloud's latest technologies.

 
