Binder native-layer source code analysis (5): how servicemanager receives and handles data

We start from servicemanager's main function.

main and binder_loop

// frameworks/native/cmds/servicemanager/service_manager.c
int main()
{
    struct binder_state *bs;

    bs = binder_open(128*1024);
    if (!bs) {
        ALOGE("failed to open binder driver\n");
        return -1;
    }

    if (binder_become_context_manager(bs)) {
        ALOGE("cannot become context manager (%s)\n", strerror(errno));
        return -1;
    }
//...... selinux-related code, skipped
    binder_loop(bs, svcmgr_handler);

    return 0;
}

In servicemanager's main function, it first opens the Binder driver, then makes itself the context manager of the whole system, and finally enters binder_loop. Note that the svcmgr_handler function is passed into binder_loop as a parameter.
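For reference, both helpers live in frameworks/native/cmds/servicemanager/binder.c. A simplified sketch (error handling and the BINDER_VERSION check omitted): binder_open opens /dev/binder and mmaps a small receive buffer, and binder_become_context_manager is essentially a single ioctl.

// frameworks/native/cmds/servicemanager/binder.c (simplified sketch)
struct binder_state *binder_open(size_t mapsize)
{
    struct binder_state *bs = malloc(sizeof(*bs));

    bs->fd = open("/dev/binder", O_RDWR | O_CLOEXEC);  // open the Binder driver
    bs->mapsize = mapsize;                              // 128*1024 for sm
    bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
    return bs;                                          // error paths omitted
}

int binder_become_context_manager(struct binder_state *bs)
{
    // tell the driver this process is the context manager (handle 0);
    // newer versions try BINDER_SET_CONTEXT_MGR_EXT first and fall back to this
    return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
}

Now, binder_loop itself: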

void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    uint32_t readbuf[32];

    bwr.write_size = 0;// write_size is 0, so nothing will be written
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;
// binder_thread_write's handling of this command only sets the thread's looper state; not important here, skipped
    readbuf[0] = BC_ENTER_LOOPER;
    binder_write(bs, readbuf, sizeof(uint32_t));
// enter the loop
    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;

        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);// read incoming data

        if (res < 0) {
            ALOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
            break;
        }
        // hand the data to binder_parse
        res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
        if (res == 0) {
            ALOGE("binder_loop: unexpected reply?!\n");
            break;
        }
        if (res < 0) {
            ALOGE("binder_loop: io error %d %s\n", res, strerror(errno));
            break;
        }
    }
}

In binder_loop, bwr.write_size is 0 and read_size > 0, so the function keeps reading requests in a loop and hands each batch to binder_parse.
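The BC_ENTER_LOOPER command at the top of binder_loop is the mirror image of this: binder_write performs a write-only BINDER_WRITE_READ, with read_size set to 0. Its code in the same binder.c is roughly:

int binder_write(struct binder_state *bs, void *data, size_t len)
{
    struct binder_write_read bwr;
    int res;

    bwr.write_size = len;                  // write only ...
    bwr.write_consumed = 0;
    bwr.write_buffer = (uintptr_t) data;   // data holds the BC_ command
    bwr.read_size = 0;                     // ... no read
    bwr.read_consumed = 0;
    bwr.read_buffer = 0;
    res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
    if (res < 0) {
        fprintf(stderr, "binder_write: ioctl failed (%s)\n", strerror(errno));
    }
    return res;
}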

In the driver's ioctl handler, since read_size > 0 and write_size == 0, execution goes into binder_thread_read.
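The dispatch inside the driver's BINDER_WRITE_READ handling looks roughly like this (a sketch of binder_ioctl_write_read with error handling trimmed): write first, then read, then copy bwr back to user space so read_consumed becomes visible to binder_loop.

// drivers/android/binder.c, binder_ioctl_write_read (simplified sketch)
if (copy_from_user(&bwr, ubuf, sizeof(bwr)))
	return -EFAULT;
if (bwr.write_size > 0)                    // 0 for sm's loop, so skipped
	ret = binder_thread_write(proc, thread, bwr.write_buffer,
				  bwr.write_size, &bwr.write_consumed);
if (bwr.read_size > 0)                     // > 0, so binder_thread_read runs
	ret = binder_thread_read(proc, thread, bwr.read_buffer,
				 bwr.read_size, &bwr.read_consumed,
				 filp->f_flags & O_NONBLOCK);
if (copy_to_user(ubuf, &bwr, sizeof(bwr))) // hand read_consumed back to user space
	return -EFAULT;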

binder_thread_read

static int binder_thread_read(struct binder_proc *proc,
			      struct binder_thread *thread,
			      binder_uintptr_t binder_buffer, size_t size,
			      binder_size_t *consumed, int non_block)
{
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;// i.e. readbuf from binder_loop
	void __user *end = buffer + size;

	int ret = 0;
	int wait_for_proc_work;

	if (*consumed == 0) {
		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}
retry:
	binder_inner_proc_lock(proc);
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
	binder_inner_proc_unlock(proc);

	thread->looper |= BINDER_LOOPER_STATE_WAITING;
// wait_for_proc_work is true when transaction_stack is NULL and the thread's todo list is empty
	trace_binder_wait_for_work(wait_for_proc_work,
				   !!thread->transaction_stack,
				   !binder_worklist_empty(proc, &thread->todo));
// a transaction has already been queued on the todo list, so wait_for_proc_work is false and this if is not entered
	if (wait_for_proc_work) {
		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
					BINDER_LOOPER_STATE_ENTERED))) {
			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
				proc->pid, thread->pid, thread->looper);
		// (this wait only triggers on user errors; the normal sleep happens in binder_wait_for_work below)
			wait_event_interruptible(binder_user_error_wait,
						 binder_stop_on_user_error < 2);
		}
		binder_restore_priority(current, proc->default_priority);
	}
	if (non_block) {
		if (!binder_has_work(thread, wait_for_proc_work))
			ret = -EAGAIN;
	} else {
		ret = binder_wait_for_work(thread, wait_for_proc_work);
	}
	// clear the WAITING bit of the thread's looper state
	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

	if (ret)
		return ret;

From the previous post we know the driver has already placed a transaction on the todo list, so the thread will not go to sleep here. Let's continue.

	while(1) {
		uint32_t cmd;
		struct binder_transaction_data tr;
		struct binder_work *w = NULL;
		struct list_head *list = NULL;
		struct binder_transaction *t = NULL;
		struct binder_thread *t_from;

		binder_inner_proc_lock(proc);
		// pick the todo list to process
		if (!binder_worklist_empty_ilocked(&thread->todo))
			list = &thread->todo;
		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
			   wait_for_proc_work)
			list = &proc->todo;
		else {
			binder_inner_proc_unlock(proc);

			/* no data added */
			if (ptr - buffer == 4 && !thread->looper_need_return)
				goto retry;
			break;
		}
		w = binder_dequeue_work_head_ilocked(list);
		if (binder_worklist_empty_ilocked(&thread->todo))
			thread->process_todo = false;
// dispatch on the type of the binder_work. Here we only care about how the
// BINDER_WORK_TRANSACTION_COMPLETE work (tcomplete, queued to the sending thread)
// and the BINDER_WORK_TRANSACTION work (queued to the target thread) are handled
		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			binder_inner_proc_unlock(proc);
// binder_thread_write queued the address of t->work; here t is recovered from that address
			t = container_of(w, struct binder_transaction, work);
		} break;
// because ioctl writes first and then reads, once the sending process finishes
// binder_thread_write it falls into this branch of binder_thread_read and handles
// the tcomplete work queued during the write, without waiting for another ioctl call
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_inner_proc_unlock(proc);
			cmd = BR_TRANSACTION_COMPLETE;
			if (put_user(cmd, (uint32_t __user *)ptr))// write BR_TRANSACTION_COMPLETE into ptr (the sender's mIn) to signal the send succeeded
				return -EFAULT;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
				     "%d:%d BR_TRANSACTION_COMPLETE\n",
				     proc->pid, thread->pid);
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
//...

In the switch-case, handling BINDER_WORK_TRANSACTION merely recovers t; the real processing of t comes afterwards:

		if (!t)
			continue;

		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {
		// get the binder_node of the target, i.e. sm
			struct binder_node *target_node = t->buffer->target_node;
			struct binder_priority node_prio;
		// per the analysis in the previous post, ptr and cookie point to the service object
			tr.target.ptr = target_node->ptr;
			tr.cookie = target_node->cookie;// note that cookie gets assigned here
			node_prio.sched_policy = target_node->sched_policy;
			node_prio.prio = target_node->min_priority;
			binder_transaction_priority(current, t, node_prio,
						    target_node->inherit_rt);
			cmd = BR_TRANSACTION;// note: BR_TRANSACTION here, not BC_TRANSACTION
		} else {
			tr.target.ptr = 0;
			tr.cookie = 0;
			cmd = BR_REPLY;
		}
		tr.code = t->code;
		tr.flags = t->flags;
		tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);

		t_from = binder_get_txn_from(t);
		if (t_from) {
			struct task_struct *sender = t_from->proc->tsk;

			tr.sender_pid = task_tgid_nr_ns(sender,
							task_active_pid_ns(current));
		} else {
			tr.sender_pid = 0;
		}

		tr.data_size = t->buffer->data_size;
		tr.offsets_size = t->buffer->offsets_size;
		// convert the kernel address into the receiver's user-space address
		tr.data.ptr.buffer = (binder_uintptr_t)
			((uintptr_t)t->buffer->data +
			binder_alloc_get_user_buffer_offset(&proc->alloc));
		tr.data.ptr.offsets = tr.data.ptr.buffer +
					ALIGN(t->buffer->data_size,
					    sizeof(void *));
	// write the BR_TRANSACTION command into read_buf
		if (put_user(cmd, (uint32_t __user *)ptr)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "put_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += sizeof(uint32_t);
		// write the binder_transaction_data tr into read_buf
		if (copy_to_user(ptr, &tr, sizeof(tr))) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "copy_to_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += sizeof(tr);
		//...

		if (t_from)
			binder_thread_dec_tmpref(t_from);
		t->buffer->allow_user_free = 1;
		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
			binder_inner_proc_lock(thread->proc);
			// save the previous transaction_stack
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;// this thread's transaction_stack now points to t
			binder_inner_proc_unlock(thread->proc);
		} else {
			binder_free_transaction(t);
		}
		break;
	}

The code above is straightforward: it reconstructs a binder_transaction_data tr from t and writes it into the readbuf we saw earlier in binder_loop. Note that the payload was already copied, during binder_thread_write, into the communication buffer allocated to this process, so here tr.data.ptr.buffer is simply set to the address of that buffer (converted to a user-space address). This is what completes the cross-process transfer of the data.
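The conversion works because, in kernels of this era, the receiver's binder buffer is mapped into kernel space and into the receiver's user space at a constant offset recorded when the receiver mmap()ed the driver. Conceptually:

// drivers/android/binder_alloc.h (conceptual sketch)
// recorded in binder_alloc_mmap_handler():
//   alloc->user_buffer_offset = vma->vm_start - (uintptr_t)alloc->buffer;
static inline uintptr_t
binder_alloc_get_user_buffer_offset(struct binder_alloc *alloc)
{
	return alloc->user_buffer_offset;   // user address = kernel address + this offset
}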

Also note the assignment tr.cookie = target_node->cookie: the reference to the service behind the target node is stored into tr.

The remaining code just updates bwr.read_consumed, after which binder_thread_read returns.
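Roughly, the tail of binder_thread_read looks like this (heavily trimmed): it reports the number of bytes produced through *consumed, and may additionally ask the receiver to spawn another looper thread.

done:
	*consumed = ptr - buffer;  // becomes bwr.read_consumed back in user space
	// ... if the process has no spare looper threads and is still below its
	// max_threads limit, the driver also queues a BR_SPAWN_LOOPER command here
	return 0;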

binder_parse

After the ioctl returns, binder_loop calls binder_parse.

//res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
int binder_parse(struct binder_state *bs, struct binder_io *bio,
                 uintptr_t ptr, size_t size, binder_handler func)
{
    int r = 1;
    uintptr_t end = ptr + (uintptr_t) size;

    while (ptr < end) {
// read the command word first
        uint32_t cmd = *(uint32_t *) ptr;// i.e. BR_TRANSACTION here
        ptr += sizeof(uint32_t);
        switch(cmd) {
//...
        case BR_TRANSACTION: {
// just as waitForResponse does for the BR_TRANSACTION/BR_REPLY commands, the data
// that follows is cast to binder_transaction_data; the variable is simply named
// txn here, whereas waitForResponse names it tr
            struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
            if ((end - ptr) < sizeof(*txn)) {
                ALOGE("parse: txn too small!\n");
                return -1;
            }
            binder_dump_txn(txn);
            if (func) {// func is svcmgr_handler
                unsigned rdata[256/4];
                struct binder_io msg;
                struct binder_io reply;// note that reply is a binder_io, not a Parcel
                int res;

                bio_init(&reply, rdata, sizeof(rdata), 4);// initialize reply
                bio_init_from_txn(&msg, txn);// see below
                res = func(bs, txn, &msg, &reply);// hand over to svcmgr_handler
                if (txn->flags & TF_ONE_WAY) {// flags is 0, so this branch is not taken
                    binder_free_buffer(bs, txn->data.ptr.buffer);
                } else {// this branch is taken instead
                // send the reply back to the requester
                    binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
                }
            }
            ptr += sizeof(*txn);
            break;
        }
//...
}

The binder_io type and bio_init_from_txn

When binder_parse handles the BR_TRANSACTION command, it first initializes a binder_io msg from the binder_transaction_data txn it just read out. Let's look at the initialization function and the binder_io structure.

struct binder_io
{
    char *data;            /* pointer to read/write from */
    binder_size_t *offs;   /* array of offsets */
    size_t data_avail;     /* bytes available in data buffer */
    size_t offs_avail;     /* entries available in offsets array */

    char *data0;           /* start of data buffer */
    binder_size_t *offs0;  /* start of offsets buffer */
    uint32_t flags;
    uint32_t unused;
};

A binder_io is split into two regions, the data region and the offs region. data0/offs0 are the start addresses of the two regions, while data/offs are the current positions. The offs region records where the embedded objects (obj) sit inside the data.
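All of the bio_get_* readers we will meet below are built on one primitive, bio_get, which hands out the next size bytes of the data region and advances the cursor. Its code in binder.c is roughly:

static void *bio_get(struct binder_io *bio, size_t size)
{
    size = (size + 3) & (~3);          // keep the cursor 4-byte aligned

    if (bio->data_avail < size) {
        bio->data_avail = 0;
        bio->flags |= BIO_F_OVERFLOW;  // ran past the end of the data region
        return NULL;
    } else {
        void *ptr = bio->data;
        bio->data += size;             // advance the current position
        bio->data_avail -= size;
        return ptr;
    }
}

Back to bio_init_from_txn: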

void bio_init_from_txn(struct binder_io *bio, struct binder_transaction_data *txn)
{
    bio->data = bio->data0 = (char *)(intptr_t)txn->data.ptr.buffer;
    bio->offs = bio->offs0 = (binder_size_t *)(intptr_t)txn->data.ptr.offsets;
    bio->data_avail = txn->data_size;
    bio->offs_avail = txn->offsets_size / sizeof(size_t);
    bio->flags = BIO_F_SHARED;
}

The code is simple: it extracts the data portion of txn and discards the parts of txn that only describe the transfer itself, such as pid, uid and code.
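For comparison, the reply binder_io back in binder_parse is set up with bio_init, which carves the caller-supplied rdata buffer into an offs region (at the front, maxoffs entries) followed by a data region. Roughly:

void bio_init(struct binder_io *bio, void *data,
              size_t maxdata, size_t maxoffs)
{
    size_t n = maxoffs * sizeof(size_t);   // bytes reserved for the offs region

    if (n > maxdata) {
        bio->flags = BIO_F_OVERFLOW;
        bio->data_avail = 0;
        bio->offs_avail = 0;
        return;
    }

    bio->data = bio->data0 = (char *) data + n;  // data region starts after offs
    bio->offs = bio->offs0 = data;
    bio->data_avail = maxdata - n;
    bio->offs_avail = maxoffs;
    bio->flags = 0;
}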

svcmgr_handler

With the two binder_io structures ready, binder_parse enters the function that actually handles client requests: sm's svcmgr_handler.

//res = func(bs, txn, &msg, &reply);
int svcmgr_handler(struct binder_state *bs,
                   struct binder_transaction_data *txn,
                   struct binder_io *msg,
                   struct binder_io *reply)
{
    struct svcinfo *si;
    uint16_t *s;
    size_t len;
    uint32_t handle;
    uint32_t strict_policy;
    int allow_isolated;

    //ALOGI("target=%p code=%d pid=%d uid=%d\n",
    //      (void*) txn->target.ptr, txn->code, txn->sender_pid, txn->sender_euid);
// check that the target is correct
    if (txn->target.ptr != BINDER_SERVICE_MANAGER)
        return -1;

    if (txn->code == PING_TRANSACTION)
        return 0;

    // Equivalent to Parcel::enforceInterface(), reading the RPC
    // header with the strict mode policy mask and the interface name.
    // Note that we ignore the strict_policy and don't propagate it
    // further (since we do no outbound RPCs anyway).
    strict_policy = bio_get_uint32(msg);
// check that the interface name is android.os.IServiceManager; this is the
// InterfaceToken written into the Parcel first, at the end of part one of this series
    s = bio_get_string16(msg, &len);
    if (s == NULL) {
        return -1;
    }

    if ((len != (sizeof(svcmgr_id) / 2)) ||
        memcmp(svcmgr_id, s, sizeof(svcmgr_id))) {
        fprintf(stderr,"invalid id %s\n", str8(s, len));
        return -1;
    }

    if (sehandle && selinux_status_updated() > 0) {
        struct selabel_handle *tmp_sehandle = selinux_android_service_context_handle();
        if (tmp_sehandle) {
            selabel_close(sehandle);
            sehandle = tmp_sehandle;
        }
    }
// now start handling the request proper
    switch(txn->code) {
    case SVC_MGR_GET_SERVICE:
    case SVC_MGR_CHECK_SERVICE:
//...
    case SVC_MGR_ADD_SERVICE:
//...
    case SVC_MGR_LIST_SERVICES: {
//...
    default:
        ALOGE("unknown code %d\n", txn->code);
        return -1;
    }

    bio_put_uint32(reply, 0);
    return 0;
}

Four kinds of requests are handled. From the end of part one we know the code passed in is ADD_SERVICE_TRANSACTION, which by name corresponds to SVC_MGR_ADD_SERVICE above.
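Before following the ADD_SERVICE path, it helps to know the bookkeeping structure sm uses: registered services are kept in svclist, a simple singly linked list of svcinfo entries keyed by name. A sketch from service_manager.c (field order may differ slightly between versions):

struct svcinfo
{
    struct svcinfo *next;
    uint32_t handle;             // key of the binder_ref in sm's binder_proc
    struct binder_death death;   // death-notification callback and cookie
    int allow_isolated;
    size_t len;                  // length of name, in uint16_t units
    uint16_t name[0];            // UTF-16 service name, e.g. "media.player"
};

struct svcinfo *svclist = NULL;

struct svcinfo *find_svc(const uint16_t *s16, size_t len)
{
    struct svcinfo *si;

    for (si = svclist; si; si = si->next) {
        if ((len == si->len) &&
            !memcmp(s16, si->name, len * sizeof(uint16_t))) {
            return si;
        }
    }
    return NULL;
}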

Handling the SVC_MGR_ADD_SERVICE request

    case SVC_MGR_ADD_SERVICE:
    // read the string; in part one, the second item written into the Parcel was
    // String16("media.player"), i.e. the service name
        s = bio_get_string16(msg, &len);
        if (s == NULL) {
            return -1;
        }
        handle = bio_get_ref(msg);
        allow_isolated = bio_get_uint32(msg) ? 1 : 0;
        if (do_add_service(bs, s, len, handle, txn->sender_euid,
            allow_isolated, txn->sender_pid))
            return -1;
        break;

Having obtained the service name and the handle value, it calls do_add_service. Let's first see how the handle is obtained.

bio_get_ref and _bio_get_obj

uint32_t bio_get_ref(struct binder_io *bio)
{
    struct flat_binder_object *obj;

    obj = _bio_get_obj(bio);// read the obj
    if (!obj)
        return 0;
	// from binder_translate_binder in part four we know the obj's type
	// has been rewritten to BINDER_TYPE_HANDLE
    if (obj->type == BINDER_TYPE_HANDLE)
        return obj->handle;

    return 0;// otherwise return 0
}

static struct flat_binder_object *_bio_get_obj(struct binder_io *bio)
{
    size_t n;
    size_t off = bio->data - bio->data0;

    /* TODO: be smarter about this? */
    // if some offset recorded in the offs region equals the current position,
    // read a flat_binder_object from the current position
    for (n = 0; n < bio->offs_avail; n++) {
        if (bio->offs[n] == off)
            return bio_get(bio, sizeof(struct flat_binder_object));
    }

    bio->data_avail = 0;
    bio->flags |= BIO_F_OVERFLOW;
    return NULL;
}

In essence, this recovers a flat_binder_object from the offs region (which corresponds to binder_transaction_data.data.ptr.offsets and Parcel.mObjects) and returns its handle value.
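For reference, flat_binder_object is the on-the-wire form of a Binder object inside the data region (older uapi layout shown; newer kernels wrap the type in a binder_object_header):

struct flat_binder_object {
    __u32 type;               // BINDER_TYPE_BINDER in the sender, rewritten to
                              // BINDER_TYPE_HANDLE by binder_translate_binder
    __u32 flags;
    union {
        binder_uintptr_t binder;  // local object: pointer used by the owning process
        __u32 handle;             // remote object: key of the receiver's binder_ref
    };
    binder_uintptr_t cookie;
};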

From the previous post we know that this handle value is the key of the binder_ref in sm's binder_proc red-black tree. With this handle we can find the corresponding binder_ref in sm's binder_proc, and through it MediaPlayerService's binder_node, as shown below:

(figure: handle → binder_ref in sm's binder_proc → MediaPlayerService's binder_node)

do_add_service

With the handle obtained, it is handed to do_add_service.

//do_add_service(bs, s, len, handle, txn->sender_euid,allow_isolated, txn->sender_pid)
int do_add_service(struct binder_state *bs,
                   const uint16_t *s, size_t len,
                   uint32_t handle, uid_t uid, int allow_isolated,
                   pid_t spid)
{
    struct svcinfo *si;

    //ALOGI("add_service('%s',%x,%s) uid=%d\n", str8(s, len), handle,
    //        allow_isolated ? "allow_isolated" : "!allow_isolated", uid);

    if (!handle || (len == 0) || (len > 127))
        return -1;

    if (!svc_can_register(s, len, spid, uid)) {
        ALOGE("add_service('%s',%x) uid=%d - PERMISSION DENIED\n",
             str8(s, len), handle, uid);
        return -1;
    }
// look up whether the service already exists in svclist
    si = find_svc(s, len);
    if (si) {// the service already exists in svclist
    // if it already has a handle, i.e. sm's binder_proc holds an old binder_ref
    // to this service, the old reference is discarded
        if (si->handle) {
            ALOGE("add_service('%s',%x) uid=%d - ALREADY REGISTERED, OVERRIDE\n",
                 str8(s, len), handle, uid);
            svcinfo_death(bs, si);// drop the reference to the service behind the old handle
        }
        si->handle = handle;// update the handle
    } else {// the service is not yet in svclist
    	// allocate a new svcinfo and insert it into svclist
        si = malloc(sizeof(*si) + (len + 1) * sizeof(uint16_t));
        if (!si) {
            ALOGE("add_service('%s',%x) uid=%d - OUT OF MEMORY\n",
                 str8(s, len), handle, uid);
            return -1;
        }
        si->handle = handle;// record the binder_ref key (the handle)
        si->len = len;
        memcpy(si->name, s, (len + 1) * sizeof(uint16_t));
        si->name[len] = '\0';
        si->death.func = (void*) svcinfo_death;
        si->death.ptr = si;
        si->allow_isolated = allow_isolated;
        si->next = svclist;// insert at the head of svclist
        svclist = si;
    }

    binder_acquire(bs, handle);// increase the reference on the service behind this handle
    binder_link_to_death(bs, handle, &si->death);
    return 0;
}

This code involves a few protocol commands; to understand it fully you need to read it together with how binder_thread_write and executeCommand handle those commands.

When the service being added already exists, svcinfo_death() is called, which sends the driver a BC_RELEASE command carrying the old handle value.

At the end of the function, binder_acquire() is called, which sends the driver a BC_ACQUIRE command carrying the new handle value.
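Both helpers are thin wrappers in binder.c that push a two-word command through binder_write; svcinfo_death() ends up calling binder_release() with the stale handle. Roughly:

void binder_acquire(struct binder_state *bs, uint32_t target)
{
    uint32_t cmd[2];
    cmd[0] = BC_ACQUIRE;      // command word
    cmd[1] = target;          // handle value
    binder_write(bs, cmd, sizeof(cmd));
}

void binder_release(struct binder_state *bs, uint32_t target)
{
    uint32_t cmd[2];
    cmd[0] = BC_RELEASE;
    cmd[1] = target;
    binder_write(bs, cmd, sizeof(cmd));
}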

Let's see how the Binder driver handles these commands.

binder_thread_write's handling of BC_RELEASE and BC_ACQUIRE

		switch (cmd) {
		case BC_INCREFS:
		case BC_ACQUIRE:
		case BC_RELEASE:
		case BC_DECREFS: {
			uint32_t target;
			const char *debug_string;
			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
			struct binder_ref_data rdata;
		// read out the handle value
			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(uint32_t);
			ret = -1;
			if (increment && !target) {// target (the handle) is non-zero, so this branch is not taken
				struct binder_node *ctx_mgr_node;
				mutex_lock(&context->context_mgr_node_lock);
				ctx_mgr_node = context->binder_context_mgr_node;
				if (ctx_mgr_node)
					ret = binder_inc_ref_for_node(
							proc, ctx_mgr_node,
							strong, NULL, &rdata);
				mutex_unlock(&context->context_mgr_node_lock);
			}
			if (ret)// ret is still non-zero (-1), so this branch is taken
				ret = binder_update_ref_for_handle(
						proc, target, increment, strong,
						&rdata);
			if (!ret && rdata.desc != target) {
				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
					proc->pid, thread->pid,
					target, rdata.desc);
			}

The handle value is read out and passed to binder_update_ref_for_handle. Note that proc here is servicemanager's binder_proc.

binder_update_ref_for_handle and the binder_ref type

static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{// desc is the handle value
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	// find the corresponding binder_ref in proc's binder_ref red-black tree by handle
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);// increase the reference count
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);// decrease the reference count

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}

It finds the binder_ref corresponding to the handle in servicemanager's binder_proc, then increases or decreases its reference count according to the arguments.
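The lookup itself is an ordinary red-black tree search on proc->refs_by_desc, keyed by the handle (desc). Roughly:

static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			// a weak reference cannot be handed out as a strong one
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}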

We already saw the code of binder_inc_ref_olocked in the previous post. In short, it increments binder_ref->data.strong, and when that count is 0 it also increments the corresponding binder_node->internal_strong_refs.

Symmetrically, binder_dec_ref_olocked decrements binder_ref->data.strong, and when it drops to 0 it also decrements binder_node->internal_strong_refs.

Summary

At this point service manager has completed the registration of MediaPlayerService. In essence: a binder_node is created in the service's process, a binder_ref pointing to that binder_node is created in sm's process, and the handle is the key of that binder_ref, used to locate it in sm's binder_ref red-black tree.

Recall from the previous post that when the service process talks to sm, it obtains sm's binder_node directly from the binder context (the context manager node). Once the binder_ref has been created, sm can use the handle to find the service's binder_ref, and from it the service's binder_node, which gives sm the ability to send data to the service process.

In the next post we will analyze how, when a client queries sm for a service, sm hands the service's information to the client so the client can communicate with the server. Once that is clear, the low-level implementation of Binder is essentially covered.
