linux 核間通訊rpmsg架構分析【轉】

轉自:https://blog.csdn.net/wind0419/article/details/123277545

以imx8爲例

在最底層硬件上,A核和M核通訊是靠硬件來進行的,稱爲MU,如圖

 

 

Linux RPMsg 是在virtio framework上實現的一個消息傳遞機制
VirtIO 是一個用來實現“虛擬IO”的通用框架,典型的如虛擬pci設備、虛擬網卡、虛擬磁盤等,kvm等虛擬化技術都使用了這個框架

與virtio對應的還有一個virtio-ring,其實現了 virtio 的具體通信機制和數據流程。

virtio 層屬於控制層,負責前後端之間的通知機制(kick,notify)和控制流程,而 virtio-ring 則負責具體數據流轉發

從整體架構上看,關係如下:

 

最底層有platform_bus,負責從dts獲取配置來初始化相關對象,如virtio_device,初始化其config操作函數列表以及devID等,同時註冊到virtio_bus

dts相關配置:
&rpmsg{
/*
* 64K for one rpmsg instance:
*/
vdev-nums = <2>;
reg = <0x0 0x90000000 0x0 0x20000>;
status = "okay";
};
主要初始化過程在imx_rpmsg_probe中,關鍵操作有:
註冊MU相關的硬件中斷

ret = request_irq(irq, imx_mu_rpmsg_isr, IRQF_EARLY_RESUME | IRQF_SHARED,

"imx-mu-rpmsg", rpdev);

初始化MU硬件

ret = imx_rpmsg_mu_init(rpdev);

創建工作隊列用於處理MU中斷數據

INIT_DELAYED_WORK(&(rpdev->rpmsg_work), rpmsg_work_handler);

創建通知鏈用於對接virtio queue

BLOCKING_INIT_NOTIFIER_HEAD(&(rpdev->notifier));

初始化virtio_device並註冊

for (j = 0; j < rpdev->vdev_nums; j++) {
pr_debug("%s rpdev%d vdev%d: vring0 0x%x, vring1 0x%x\n",
__func__, rpdev->core_id, rpdev->vdev_nums,
rpdev->ivdev[j].vring[0],
rpdev->ivdev[j].vring[1]);
rpdev->ivdev[j].vdev.id.device = VIRTIO_ID_RPMSG;
rpdev->ivdev[j].vdev.config = &imx_rpmsg_config_ops;
rpdev->ivdev[j].vdev.dev.parent = &pdev->dev;
rpdev->ivdev[j].vdev.dev.release = imx_rpmsg_vproc_release;
rpdev->ivdev[j].base_vq_id = j * 2;

ret = register_virtio_device(&rpdev->ivdev[j].vdev);
if (ret) {
pr_err("%s failed to register rpdev: %d\n",
__func__, ret);
return ret;
}

}
值得注意的是virtio_device的config結構 rpdev->ivdev[j].vdev.config = &imx_rpmsg_config_ops;

/*
 * virtio_config_ops table for the i.MX rpmsg transport.
 * Installed on each vdev in imx_rpmsg_probe (vdev.config = &imx_rpmsg_config_ops)
 * so the generic virtio core drives the MU-backed queues through these hooks.
 */
static struct virtio_config_ops imx_rpmsg_config_ops = {

.get_features = imx_rpmsg_get_features,      /* report transport feature bits */

.finalize_features = imx_rpmsg_finalize_features,

.find_vqs = imx_rpmsg_find_vqs,              /* create vrings; registers imx_rpmsg_notify */

.del_vqs = imx_rpmsg_del_vqs,

.reset = imx_rpmsg_reset,

.set_status = imx_rpmsg_set_status,

.get_status = imx_rpmsg_get_status,

};

imx_rpmsg_find_vqs

--> rp_find_vq

-->ioremap_nocache

-->vring_new_virtqueue(...imx_rpmsg_notify, callback...)

需要注意的是callback的註冊過程,在rpmsg_bus中
rpmsg_probe

-->vq_callback_t *vq_cbs[] = { rpmsg_recv_done, rpmsg_xmit_done };

-->virtio_find_vqs(vdev, 2, vqs, vq_cbs, names, NULL);

在此處註冊的imx_rpmsg_notify 和 callback 將被virtio_bus框架所調用

中間virtio_bus承上啓下,並負責提供統一標準的virtio queue操作接口,如virtqueue_add,virtqueue_kick等

針對struct virtqueue,對外只有一個callback函數,用於表示queue的數據變化

/*
 * Public face of a virtio queue. Upper layers only see this struct;
 * the single callback signals that the queue's contents changed.
 */
struct virtqueue {
struct list_head list;                     /* link in the vdev's queue list */
void (*callback)(struct virtqueue *vq);    /* invoked from vring_interrupt */
const char *name;
struct virtio_device *vdev;                /* owning virtio device */
unsigned int index;
unsigned int num_free;                     /* free descriptors remaining */
void *priv;                                /* transport private data */
};
其實virtqueue只是提供一層標準queue的操作接口,其具體實現依靠vring_virtqueue

struct vring_virtqueue {
struct virtqueue vq;

/* Actual memory layout for this queue */
struct vring vring
{
//queue的具體實現
unsigned int num;

struct vring_desc *desc;

struct vring_avail *avail;

struct vring_used *used;
};


/* How to notify other side. FIXME: commonalize hcalls! */
bool (*notify)(struct virtqueue *vq);

...

/* Per-descriptor state. */
struct vring_desc_state desc_state[];
};
其觸發過程在vring_interrupt

irqreturn_t vring_interrupt(int irq, void *_vq)
{
##對外只有virtqueue,找到其包裝vring_virtqueue
struct vring_virtqueue *vq = to_vvq(_vq);

if (!more_used(vq)) {
pr_debug("virtqueue interrupt with no work for %p\n", vq);
return IRQ_NONE;
}

if (unlikely(vq->broken))
return IRQ_HANDLED;

pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
##調用virtqueue的callback
if (vq->vq.callback)
vq->vq.callback(&vq->vq);

return IRQ_HANDLED;
}
結合中斷,整體流程如下:
imx_mu_rpmsg_isr

-->rpmsg_work_handler

-->vring_interrupt

-->virtqueue.callback

關於vring_virtqueue,包含一個notify,用於通知queue有變化

virtqueue_add 和 virtqueue_kick 以及 virtqueue_notify 都能夠觸發notify

最終notify的實現在imx_rpmsg_notify,其內容爲設置MU寄存器,發送數據

/* kick the remote processor, and let it know which virtqueue to poke at */
/* kick the remote processor, and let it know which virtqueue to poke at */
static bool imx_rpmsg_notify(struct virtqueue *vq)
{
unsigned int mu_rpmsg = 0;
struct imx_rpmsg_vq_info *rpvq = vq->priv;

/* The triggered queue's index rides in the upper 16 bits of the MU payload. */
mu_rpmsg = rpvq->vq_id << 16;
/* Serialize MU register access across queues sharing this rpdev. */
mutex_lock(&rpvq->rpdev->lock);
/*
 * Send the index of the triggered virtqueue as the mu payload.
 * Use the timeout MU send message here.
 * Since that M4 core may not be loaded, and the first MSG may
 * not be handled by M4 when multi-vdev is enabled.
 * To make sure that the message wouldn't be discarded when M4
 * is running normally or in the suspend mode. Only use
 * the timeout mechanism by the first notify when the vdev is
 * registered.
 */
if (unlikely(rpvq->rpdev->first_notify > 0)) {
rpvq->rpdev->first_notify--;
MU_SendMessageTimeout(rpvq->rpdev->mu_base, 1, mu_rpmsg, 200);
} else {
MU_SendMessage(rpvq->rpdev->mu_base, 1, mu_rpmsg);
}
mutex_unlock(&rpvq->rpdev->lock);
return true;
}
最上面可看成基於rpmsg的應用,掛載到rpmsg_bus總線,針對rpmsg也有對應的標準操作接口,如rpmsg_send,rpmsg_sendto,rpmsg_poll等等

在rpmsg_bus這一層,還有一個rpmsg_endpoint概念,其對應有一個rpmsg_endpoint_ops,包含send,sendto等接口,目前還未對其深入研究

/*
 * rpmsg endpoint operations backed by the virtio transport.
 * rpmsg_send() and friends dispatch through this table
 * (rpmsg_endpoint.ops->send -> virtio_rpmsg_send).
 */
static const struct rpmsg_endpoint_ops virtio_endpoint_ops = {

.destroy_ept = virtio_rpmsg_destroy_ept,

.send = virtio_rpmsg_send,                  /* blocking send on default dst */

.sendto = virtio_rpmsg_sendto,              /* blocking send to explicit dst */

.send_offchannel = virtio_rpmsg_send_offchannel,

.trysend = virtio_rpmsg_trysend,            /* non-blocking variants below */

.trysendto = virtio_rpmsg_trysendto,

.trysend_offchannel = virtio_rpmsg_trysend_offchannel,

};

發送流程爲
rpmsg_send

-->rpmsg_endpoint.ops->send

-->virtio_rpmsg_send

-->virtqueue_add_outbuf 往queue填充數據

-->virtqueue_kick 通知對端

-->virtqueue_notify

-->imx_rpmsg_notify

-->MU_REG_WRITE

rpmsg_bus總線默認提供兩個回調rpmsg_recv_done和rpmsg_xmit_done以便通知給上層rpmsg應用,分別表示收到數據及發送完成

接收處理流程:
imx_mu_rpmsg_isr

-->rpmsg_work_handler

-->vring_interrupt

-->virtqueue.callback

-->rpmsg_recv_done or rpmsg_xmit_done

/* called when an rx buffer is used, and it's time to digest a message */
static void rpmsg_recv_done(struct virtqueue *rvq)
{
struct virtproc_info *vrp = rvq->vdev->priv;
struct device *dev = &rvq->vdev->dev;
struct rpmsg_hdr *msg;
unsigned int len, msgs_received = 0;
int err;

msg = virtqueue_get_buf(rvq, &len);
if (!msg) {
dev_err(dev, "uhm, incoming signal, but no used buffer ?\n");
return;
}

while (msg) {
err = rpmsg_recv_single(vrp, dev, msg, len);
if (err)
break;

msgs_received++;

msg = virtqueue_get_buf(rvq, &len);
}

dev_dbg(dev, "Received %u messages\n", msgs_received);

/* tell the remote processor we added another available rx buffer */
if (msgs_received)
## 通知接收queue
virtqueue_kick(vrp->rvq);
}
/* tx-queue callback: a tx buffer was returned by the remote side */
static void rpmsg_xmit_done(struct virtqueue *svq)
{
struct virtproc_info *vrp = svq->vdev->priv;

dev_dbg(&svq->vdev->dev, "%s\n", __func__);

/* wake up potential senders that are waiting for a tx buffer */
wake_up_interruptible(&vrp->sendq);
}
整體過程如下:

 


————————————————
版權聲明:本文爲CSDN博主「WindLOR」的原創文章,遵循CC 4.0 BY-SA版權協議,轉載請附上原文出處鏈接及本聲明。
原文鏈接:https://blog.csdn.net/wind0419/article/details/123277545

發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章