Related articles:
1. Android Framework - Getting Started
2. Android Binder Driver - How the Media Service Is Added
3. Android Binder Driver - Starting the ServiceManager Process
4. Android Binder Driver - Kernel Driver Layer Source Code Analysis
5. Android Binder Driver - Analyzing Service Addition from the Driver Layer
6. Android Binder Driver - Tracing Service Lookup from the Java Layer
Relevant source files:
// app process
/frameworks/base/core/java/android/app/Activity.java
/frameworks/base/core/java/android/app/Instrumentation.java
/frameworks/base/core/java/android/app/ActivityManagerNative.java
/frameworks/base/core/java/android/os/Binder.java
/frameworks/native/libs/binder/BpBinder.cpp
/frameworks/native/libs/binder/ProcessState.cpp
/frameworks/native/libs/binder/IPCThreadState.cpp
/frameworks/base/core/jni/android_os_Parcel.cpp
// ServiceManager process
/frameworks/native/cmds/servicemanager/service_manager.c
/frameworks/native/cmds/servicemanager/binder.c
// binder driver layer
/drivers/android/binder.c
/drivers/staging/android/binder.c
The Binder driver is ubiquitous in Android: even the most common operations, such as startActivity and startService, go through multiple rounds of inter-process communication. Building on the Activity launch flow, this article traces from the Java layer how a service is looked up and returned:
public void startActivityForResult(Intent intent, int requestCode,
@Nullable Bundle options) {
if (mParent == null) {
Instrumentation.ActivityResult ar = mInstrumentation.execStartActivity(this,
mMainThread.getApplicationThread(), mToken, this, intent, requestCode, options);
...
} else {
...
}
}
public ActivityResult execStartActivity(Context who, IBinder contextThread,
IBinder token, Activity target, Intent intent, int requestCode, Bundle options) {
...
try {
int result = ActivityManagerNative.getDefault()
.startActivity(whoThread, who.getBasePackageName(), intent,
intent.resolveTypeIfNeeded(who.getContentResolver()), token,
target != null ? target.mEmbeddedID : null, requestCode, 0, null, options);
checkStartActivityResult(result, intent);
} catch (RemoteException e) {
throw new RuntimeException("Failure from system", e);
}
return null;
}
static public IActivityManager getDefault() {
return gDefault.get();
}
private static final Singleton<IActivityManager> gDefault = new Singleton<IActivityManager>() {
protected IActivityManager create() {
IBinder b = ServiceManager.getService("activity");
if (false) {
Log.v("ActivityManager", "default service binder = " + b);
}
IActivityManager am = asInterface(b);
if (false) {
Log.v("ActivityManager", "default service = " + am);
}
return am;
}
};
The code above involves two cross-process communications: one where the app process asks the ServiceManager process to look up the IBinder object of ActivityManagerService, and one where the app process sends the ActivityManagerService process the request to start the Activity. This article focuses on tracing how the app process obtains a system service; how it then communicates with the ActivityManagerService process will be analyzed in later articles.
public static IBinder getService(String name) {
try {
IBinder service = sCache.get(name);
if (service != null) {
return service;
} else {
return getIServiceManager().getService(name);
}
} catch (RemoteException e) {
Log.e(TAG, "error in getService", e);
}
return null;
}
private static IServiceManager getIServiceManager() {
if (sServiceManager != null) {
return sServiceManager;
}
// Find the service manager
sServiceManager = ServiceManagerNative.asInterface(BinderInternal.getContextObject());
return sServiceManager;
}
// from here we follow the call into the native source
public static final native IBinder getContextObject();
static jobject android_os_BinderInternal_getContextObject(JNIEnv* env, jobject clazz)
{
sp<IBinder> b = ProcessState::self()->getContextObject(NULL);
return javaObjectForIBinder(env, b);
}
jobject javaObjectForIBinder(JNIEnv* env, const sp<IBinder>& val)
{
AutoMutex _l(mProxyLock);
// mClass = android/os/BinderProxy; this effectively news up a Java BinderProxy
jobject object = env->NewObject(gBinderProxyOffsets.mClass, gBinderProxyOffsets.mConstructor);
if (object != NULL) {
// store the address of BpBinder(0) in the BinderProxy's mObject field
env->SetLongField(object, gBinderProxyOffsets.mObject, (jlong)val.get());
}
return object;
}
ProcessState::self()->getContextObject(NULL) simply returns getStrongProxyForHandle(0), so what getIServiceManager ends up with is in effect new BpBinder(0); from the Java layer it looks like an IBinder (a BinderProxy), and ServiceManagerNative.asInterface merely wraps that BinderProxy in a ServiceManagerProxy. Let's keep following the concrete implementation of getService().
public IBinder getService(String name) throws RemoteException {
// obtain a Parcel for the outgoing data and one for the reply
Parcel data = Parcel.obtain();
Parcel reply = Parcel.obtain();
// write the request data into data
data.writeInterfaceToken(IServiceManager.descriptor);
data.writeString(name);
mRemote.transact(GET_SERVICE_TRANSACTION, data, reply, 0);
IBinder binder = reply.readStrongBinder();
reply.recycle();
data.recycle();
return binder;
}
final class BinderProxy implements IBinder {
...
public native boolean transactNative(int code, Parcel data, Parcel reply,
int flags) throws RemoteException;
public boolean transact(int code, Parcel data, Parcel reply, int flags) throws RemoteException {
return transactNative(code, data, reply, flags);
}
// mObject = new BpBinder(0)
private long mObject;
...
}
static jboolean android_os_BinderProxy_transact(JNIEnv* env, jobject obj,
jint code, jobject dataObj, jobject replyObj, jint flags) {
// fetch the native Parcel objects
Parcel* data = parcelForJavaObject(env, dataObj);
Parcel* reply = parcelForJavaObject(env, replyObj);
// fetch BpBinder(0) from the mObject field
IBinder* target = (IBinder*) env->GetLongField(obj, gBinderProxyOffsets.mObject);
// call BpBinder::transact
status_t err = target->transact(code, *data, reply, flags);
// (abridged: the real code converts err into a boolean return or a thrown exception)
return JNI_FALSE;
}
status_t BpBinder::transact(uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags){
if (mAlive) {
status_t status = IPCThreadState::self()->transact(mHandle, code, data, reply, flags);
if (status == DEAD_OBJECT) mAlive = 0;
return status;
}
return DEAD_OBJECT;
}
// analyzed in detail in "Android Binder Driver - How the Media Service Is Added"
status_t IPCThreadState::transact(int32_t handle,uint32_t code,
const Parcel& data,Parcel* reply, uint32_t flags){
...
}
The full source of IPCThreadState::transact was analyzed in "Android Binder Driver - How the Media Service Is Added", so we won't repeat it here. Let's focus instead on how the ServiceManager process replies to us.
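As background, the binder_parse below runs inside ServiceManager's main loop, binder_loop, which blocks in ioctl waiting for work from the driver. A condensed sketch based on /frameworks/native/cmds/servicemanager/binder.c (return-value checks omitted; details vary across versions):
// condensed from binder.c
void binder_loop(struct binder_state *bs, binder_handler func)
{
    struct binder_write_read bwr;
    uint32_t readbuf[32];

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    readbuf[0] = BC_ENTER_LOOPER;   // tell the driver this thread enters the loop
    binder_write(bs, readbuf, sizeof(uint32_t));

    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;
        // blocks in the driver until there is work, e.g. our BR_TRANSACTION
        ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
        // dispatch the BR_* commands; this is where binder_parse runs
        binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
    }
}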
int binder_parse(struct binder_state *bs, struct binder_io *bio,
uintptr_t ptr, size_t size, binder_handler func)
{
int r = 1;
uintptr_t end = ptr + (uintptr_t) size;
while (ptr < end) {
uint32_t cmd = *(uint32_t *) ptr;
ptr += sizeof(uint32_t);
switch(cmd) {
case BR_TRANSACTION: {
struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
if ((end - ptr) < sizeof(*txn)) {
ALOGE("parse: txn too small!\n");
return -1;
}
binder_dump_txn(txn);
if (func) {
unsigned rdata[256/4];
struct binder_io msg;
struct binder_io reply;
int res;
bio_init(&reply, rdata, sizeof(rdata), 4);
bio_init_from_txn(&msg, txn);
// first run the handler callback (svcmgr_handler)
res = func(bs, txn, &msg, &reply);
// then write the result back to the binder driver
binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
}
}
}
return r;
}
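A quick note on the binder_io bookkeeping used above: bio_init lays out an offsets area followed by a data area inside rdata, while bio_init_from_txn points msg straight at the received transaction buffer. The data0/offs0 fields remember the start positions, which binder_send_reply relies on below. Condensed from the same binder.c:
void bio_init(struct binder_io *bio, void *data, size_t maxdata, size_t maxoffs)
{
    size_t n = maxoffs * sizeof(size_t);
    if (n > maxdata) {
        bio->flags = BIO_F_OVERFLOW;
        bio->data_avail = 0;
        bio->offs_avail = 0;
        return;
    }
    bio->data = bio->data0 = (char *) data + n;   // data area starts after the offsets
    bio->offs = bio->offs0 = data;                // offsets area at the front
    bio->data_avail = maxdata - n;
    bio->offs_avail = maxoffs;
    bio->flags = 0;
}
void bio_init_from_txn(struct binder_io *bio, struct binder_transaction_data *txn)
{
    bio->data = bio->data0 = (char *)(intptr_t)txn->data.ptr.buffer;
    bio->offs = bio->offs0 = (binder_size_t *)(intptr_t)txn->data.ptr.offsets;
    bio->data_avail = txn->data_size;
    bio->offs_avail = txn->offsets_size / sizeof(size_t);
    bio->flags = BIO_F_SHARED;   // buffer owned by the driver, freed via BC_FREE_BUFFER
}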
int svcmgr_handler(struct binder_state *bs,
struct binder_transaction_data *txn,
struct binder_io *msg,
struct binder_io *reply)
{
uint16_t *s;
size_t len;
uint32_t handle;
switch(txn->code) {
case SVC_MGR_GET_SERVICE:
case SVC_MGR_CHECK_SERVICE:
s = bio_get_string16(msg, &len);
if (s == NULL) {
return -1;
}
handle = do_find_service(bs, s, len, txn->sender_euid, txn->sender_pid);
if (!handle)
break;
bio_put_ref(reply, handle);
return 0;
...
}
bio_put_uint32(reply, 0);
return 0;
}
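The handle itself comes from do_find_service, which essentially walks the list of services registered via SVC_MGR_ADD_SERVICE. A condensed sketch based on service_manager.c (in the real code the lookup is split across do_find_service and find_svc, svcinfo also carries death-notification fields, and permission/isolated-uid checks are omitted here):
struct svcinfo {
    struct svcinfo *next;
    uint32_t handle;   // driver-assigned reference to the service's binder node
    size_t len;
    uint16_t name[0];
};

static struct svcinfo *svclist;

uint32_t do_find_service(struct binder_state *bs, const uint16_t *s, size_t len,
                         uid_t uid, pid_t spid)
{
    struct svcinfo *si;
    for (si = svclist; si; si = si->next)   // linear scan of registered services
        if (si->len == len && !memcmp(si->name, s, len * sizeof(uint16_t)))
            return si->handle;              // e.g. the handle "activity" registered under
    return 0;   // not found: svcmgr_handler breaks out and replies with 0
}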
void bio_put_ref(struct binder_io *bio, uint32_t handle)
{
struct flat_binder_object *obj;
if (handle)
obj = bio_alloc_obj(bio);
else
obj = bio_alloc(bio, sizeof(*obj));
if (!obj)
return;
// write a flat_binder_object into reply
obj->flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
obj->type = BINDER_TYPE_HANDLE;
obj->handle = handle;
obj->cookie = 0;
}
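For reference, this is roughly the definition of the flat_binder_object being filled in, from the binder UAPI header (annotations added):
struct flat_binder_object {
    __u32 type;    // BINDER_TYPE_BINDER, BINDER_TYPE_HANDLE, BINDER_TYPE_FD, ...
    __u32 flags;   // e.g. FLAT_BINDER_FLAG_ACCEPTS_FDS
    union {
        binder_uintptr_t binder;   // local object: pointer valid in the sending process
        __u32 handle;              // remote object: per-process reference number
    };
    binder_uintptr_t cookie;
};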
void binder_send_reply(struct binder_state *bs,
struct binder_io *reply,
binder_uintptr_t buffer_to_free,
int status)
{
struct {
uint32_t cmd_free;
binder_uintptr_t buffer;
uint32_t cmd_reply;
struct binder_transaction_data txn;
} __attribute__((packed)) data;
data.cmd_free = BC_FREE_BUFFER;
data.buffer = buffer_to_free;
// the reply command
data.cmd_reply = BC_REPLY;
data.txn.target.ptr = 0;
data.txn.cookie = 0;
data.txn.code = 0;
if (status) {
...
} else {
data.txn.flags = 0;
data.txn.data_size = reply->data - reply->data0;
data.txn.offsets_size = ((char*) reply->offs) - ((char*) reply->offs0);
data.txn.data.ptr.buffer = (uintptr_t)reply->data0;
data.txn.data.ptr.offsets = (uintptr_t)reply->offs0;
}
binder_write(bs, &data, sizeof(data));
}
int binder_write(struct binder_state *bs, void *data, size_t len)
{
struct binder_write_read bwr;
int res;
bwr.write_size = len;
bwr.write_consumed = 0;
bwr.write_buffer = (uintptr_t) data;
bwr.read_size = 0;
bwr.read_consumed = 0;
bwr.read_buffer = 0;
// write the data to the driver layer; read_size = 0, so this call does not read
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
if (res < 0) {
fprintf(stderr,"binder_write: ioctl failed (%s)\n",
strerror(errno));
}
return res;
}
From the source above we can see that the ServiceManager process ultimately writes a flat_binder_object back to the driver layer, and the crucial step is assigning the handle. But how exactly does the binder driver notify our app process?
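Inside the driver, binder_thread_write is what consumes the BC_REPLY command written above: it copies the binder_transaction_data from user space and hands it to binder_transaction with reply == 1, which is exactly the function we look at next. A condensed fragment:
// condensed from binder_thread_write() in the driver's binder.c
case BC_TRANSACTION:
case BC_REPLY: {
    struct binder_transaction_data tr;

    if (copy_from_user(&tr, ptr, sizeof(tr)))
        return -EFAULT;
    ptr += sizeof(tr);
    // cmd == BC_REPLY selects the reply branch of binder_transaction
    binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
    break;
}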
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply)
{
struct binder_transaction *t;
struct binder_work *tcomplete;
binder_size_t *offp, *off_end;
struct binder_proc *target_proc;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
struct list_head *target_list;
wait_queue_head_t *target_wait;
struct binder_transaction *in_reply_to = NULL;
struct binder_transaction_log_entry *e;
uint32_t return_error;
e = binder_transaction_log_add(&binder_transaction_log);
e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
e->from_proc = proc->pid;
e->from_thread = thread->pid;
e->target_handle = tr->target.handle;
e->data_size = tr->data_size;
e->offsets_size = tr->offsets_size;
if (reply) {
// reply path: find the thread that is waiting for this reply
in_reply_to = thread->transaction_stack;
target_thread = in_reply_to->from;
target_proc = target_thread->proc;
} else {
...
}
if (target_thread) {
e->to_thread = target_thread->pid;
target_list = &target_thread->todo;
target_wait = &target_thread->wait;
} else {
...
}
e->to_proc = target_proc->pid;
/* TODO: reuse incoming transaction for reply */
t = kzalloc(sizeof(*t), GFP_KERNEL);
if (t == NULL) {
return_error = BR_FAILED_REPLY;
goto err_alloc_t_failed;
}
binder_stats_created(BINDER_STAT_TRANSACTION);
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
if (tcomplete == NULL) {
return_error = BR_FAILED_REPLY;
goto err_alloc_tcomplete_failed;
}
if (!reply && !(tr->flags & TF_ONE_WAY))
t->from = thread;
else
t->from = NULL;
t->sender_euid = task_euid(proc->tsk);
t->to_proc = target_proc;
t->to_thread = target_thread;
t->code = tr->code;
t->flags = tr->flags;
t->priority = task_nice(current);
t->buffer = binder_alloc_buf(target_proc, tr->data_size,
tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
if (t->buffer == NULL) {
return_error = BR_FAILED_REPLY;
goto err_binder_alloc_buf_failed;
}
t->buffer->allow_user_free = 0;
t->buffer->debug_id = t->debug_id;
t->buffer->transaction = t;
t->buffer->target_node = target_node;
trace_binder_transaction_alloc_buf(t->buffer);
offp = (binder_size_t *)(t->buffer->data +
ALIGN(tr->data_size, sizeof(void *)));
if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
tr->data.ptr.buffer, tr->data_size)) {
...
}
if (copy_from_user(offp, (const void __user *)(uintptr_t)
tr->data.ptr.offsets, tr->offsets_size)) {
...
}
off_end = (void *)offp + tr->offsets_size;
for (; offp < off_end; offp++) {
struct flat_binder_object *fp;
fp = (struct flat_binder_object *)(t->buffer->data + *offp);
switch (fp->type) {
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
struct binder_ref *ref = binder_get_ref(proc, fp->handle);
if (ref == NULL) {
...
}
if (ref->node->proc == target_proc) {
...
} else {
struct binder_ref *new_ref;
new_ref = binder_get_ref_for_node(target_proc, ref->node);
if (new_ref == NULL) {
return_error = BR_FAILED_REPLY;
goto err_binder_get_ref_for_node_failed;
}
fp->handle = new_ref->desc;
trace_binder_transaction_ref_to_ref(t, ref, new_ref);
}
} break;
...
}
if (reply) {
BUG_ON(t->buffer->async_transaction != 0);
binder_pop_transaction(target_thread, in_reply_to);
} else if (!(t->flags & TF_ONE_WAY)) {
...
} else {
...
}
// queue the transaction for the app process and wake it from its wait
t->work.type = BINDER_WORK_TRANSACTION;
list_add_tail(&t->work.entry, target_list);
// and queue BINDER_WORK_TRANSACTION_COMPLETE on ServiceManager's own thread
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
list_add_tail(&tcomplete->entry, &thread->todo);
if (target_wait)
wake_up_interruptible(target_wait);
return;
}
As we can see, the ServiceManager process hands its reply data to the binder driver with the BC_REPLY command; the driver then wakes the app process and delivers the data to it as BR_REPLY, after which the app continues with the corresponding handling.
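The missing link is binder_thread_read on the app side. When the awakened app thread pops the BINDER_WORK_TRANSACTION off its todo list, the driver chooses between BR_TRANSACTION and BR_REPLY by checking the buffer's target_node, which binder_transaction left NULL on the reply path above. A condensed sketch, with bookkeeping and error paths omitted:
// condensed from binder_thread_read() in the driver's binder.c;
// w is the binder_work just taken from thread->todo
struct binder_transaction *t = container_of(w, struct binder_transaction, work);
struct binder_transaction_data tr;
uint32_t cmd;

if (t->buffer->target_node) {
    // normal request being delivered to a server thread
    cmd = BR_TRANSACTION;
} else {
    tr.target.ptr = 0;
    tr.cookie = 0;
    // target_node == NULL: this is a reply, so the app sees BR_REPLY
    cmd = BR_REPLY;
}
tr.code = t->code;
tr.flags = t->flags;
tr.data_size = t->buffer->data_size;
tr.offsets_size = t->buffer->offsets_size;
// the app reads the payload in place: it was copied only once, into the
// receiver's mmap'ed buffer, back in binder_transaction
tr.data.ptr.buffer = (binder_uintptr_t)(
    (uintptr_t)t->buffer->data + proc->user_buffer_offset);
tr.data.ptr.offsets = tr.data.ptr.buffer +
    ALIGN(t->buffer->data_size, sizeof(void *));
if (put_user(cmd, (uint32_t __user *)ptr))   // write BR_REPLY into the read buffer
    return -EFAULT;
ptr += sizeof(uint32_t);
if (copy_to_user(ptr, &tr, sizeof(tr)))      // followed by the transaction data
    return -EFAULT;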
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
uint32_t cmd;
int32_t err;
while (1) {
if ((err=talkWithDriver()) < NO_ERROR) break;
cmd = (uint32_t)mIn.readInt32();
switch (cmd) {
...
case BR_REPLY:
{
binder_transaction_data tr;
err = mIn.read(&tr, sizeof(tr));
ALOG_ASSERT(err == NO_ERROR, "Not enough command data for brREPLY");
if (err != NO_ERROR) goto finish;
if (reply) {
if ((tr.flags & TF_STATUS_CODE) == 0) {
reply->ipcSetDataReference(
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(binder_size_t),
freeBuffer, this);
} else {
...
}
} else {
...
}
}
goto finish;
}
}
return err;
}
// here there is only one object: our flat_binder_object
void Parcel::ipcSetDataReference(const uint8_t* data, size_t dataSize,
const binder_size_t* objects, size_t objectsCount, release_func relFunc, void* relCookie)
{
binder_size_t minOffset = 0;
freeDataNoInit();
mError = NO_ERROR;
mData = const_cast<uint8_t*>(data);
mDataSize = mDataCapacity = dataSize;
//ALOGI("setDataReference Setting data size of %p to %lu (pid=%d)", this, mDataSize, getpid());
mDataPos = 0;
ALOGV("setDataReference Setting data pos of %p to %zu", this, mDataPos);
mObjects = const_cast<binder_size_t*>(objects);
mObjectsSize = mObjectsCapacity = objectsCount;
mNextObjectHint = 0;
mOwner = relFunc;
mOwnerCookie = relCookie;
for (size_t i = 0; i < mObjectsSize; i++) {
binder_size_t offset = mObjects[i];
if (offset < minOffset) {
ALOGE("%s: bad object offset %" PRIu64 " < %" PRIu64 "\n",
__func__, (uint64_t)offset, (uint64_t)minOffset);
mObjectsSize = 0;
break;
}
minOffset = offset + sizeof(flat_binder_object);
}
scanForFds();
}
// back to the original getService code
public IBinder getService(String name) throws RemoteException {
// obtain a Parcel for the outgoing data and one for the reply
Parcel data = Parcel.obtain();
Parcel reply = Parcel.obtain();
// write the request data into data
data.writeInterfaceToken(IServiceManager.descriptor);
data.writeString(name);
mRemote.transact(GET_SERVICE_TRANSACTION, data, reply, 0);
IBinder binder = reply.readStrongBinder();
reply.recycle();
data.recycle();
return binder;
}
static jobject android_os_Parcel_readStrongBinder(JNIEnv* env, jclass clazz, jlong nativePtr)
{
Parcel* parcel = reinterpret_cast<Parcel*>(nativePtr);
if (parcel != NULL) {
return javaObjectForIBinder(env, parcel->readStrongBinder());
}
return NULL;
}
sp<IBinder> Parcel::readStrongBinder() const
{
sp<IBinder> val;
unflatten_binder(ProcessState::self(), *this, &val);
return val;
}
status_t unflatten_binder(const sp<ProcessState>& proc,
const Parcel& in, sp<IBinder>* out)
{
const flat_binder_object* flat = in.readObject(false);
if (flat) {
switch (flat->type) {
case BINDER_TYPE_BINDER:
...
case BINDER_TYPE_HANDLE:
*out = proc->getStrongProxyForHandle(flat->handle);
return finish_unflatten_binder(static_cast<BpBinder*>(out->get()), *flat, in);
}
}
return BAD_TYPE;
}
// handles are cached per process; what comes back is BpBinder(handle)
sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)
{
sp<IBinder> result;
AutoMutex _l(mLock);
handle_entry* e = lookupHandleLocked(handle);
if (e != NULL) {
IBinder* b = e->binder;
if (b == NULL || !e->refs->attemptIncWeak(this)) {
b = new BpBinder(handle);
e->binder = b;
if (b) e->refs = b->getWeakRefs();
result = b;
} else {
// This little bit of nastyness is to allow us to add a primary
// reference to the remote proxy when this team doesn't have one
// but another team is sending the handle to us.
result.force_set(b);
e->refs->decWeak(this);
}
}
return result;
}
That completes the source walk-through: the system service (AMS) we finally get back is new BpBinder(handle). Before closing, one question is worth pondering carefully: is the handle value we obtain the same handle value that the ServiceManager process holds? Look back at the BINDER_TYPE_HANDLE branch of binder_transaction above: the driver calls binder_get_ref_for_node to create a reference to the service's node in the receiving process and rewrites fp->handle to new_ref->desc, so handle values are per-process reference numbers, and the value the app receives is in general not the one stored in ServiceManager.
Video: https://pan.baidu.com/s/1CHysxz7sKoLftpzXkbNAmw
Video password: 5cfz