Let's start with a simple native service example:
// Define the capability interface
class IBuddy : public IInterface {
public:
    enum {
        BUDDY_GET_PID = IBinder::FIRST_CALL_TRANSACTION,
    };
    virtual int32_t getPid() = 0;
    DECLARE_META_INTERFACE(Buddy);
};
// Define the interface proxy, used by client applications to call the interface
class BpBuddy : public BpInterface<IBuddy> {
public:
    // Constructor; note the parameter type
    BpBuddy(const sp<IBinder>& impl) : BpInterface<IBuddy>(impl) { }
    virtual int32_t getPid() {
        Parcel data, reply;
        // Write the RPC header (the interface descriptor token)
        data.writeInterfaceToken(IBuddy::getInterfaceDescriptor());
        remote()->transact(BUDDY_GET_PID, data, &reply);
        int32_t res = -1;
        reply.readInt32(&res);
        return res;
    }
};
// This macro must come after the BpBuddy definition, because asInterface creates a BpBuddy
IMPLEMENT_META_INTERFACE(Buddy, "Buddy");
// Server-side (Bn) stub of the interface
class BnBuddy : public BnInterface<IBuddy> {
    virtual status_t onTransact(uint32_t code, const Parcel& data,
            Parcel* reply, uint32_t flags = 0);
};
status_t BnBuddy::onTransact(uint32_t code, const Parcel& data,
        Parcel* reply, uint32_t flags) {
    // Check that the interface descriptor matches
    CHECK_INTERFACE(IBuddy, data, reply);
    switch (code) {
        case BUDDY_GET_PID: {
            int32_t pid = getPid(); // call the getPid() implementation
            reply->writeInt32(pid);
            return NO_ERROR;
        }
        default:
            return BBinder::onTransact(code, data, reply, flags);
    }
}
class BuddyService : public BnBuddy {
virtual int32_t getPid() {// impl: system call, get process ID
return getpid();
}
};
int main(int argc, char **argv)
{
//register the service with the service manager
defaultServiceManager()->addService(String16("BuddyService"), new BuddyService());
ProcessState::self()->startThreadPool();
IPCThreadState::self()->joinThreadPool();
return 0;
}
On the client side, the service is looked up like this:
sp<IServiceManager> sm = defaultServiceManager();
// find the Buddy service and obtain a proxy to call it through
sp<IBinder> binder = sm->getService(String16("BuddyService"));
sp<IBuddy> mBuddy = interface_cast<IBuddy>(binder);
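To round this out, here is how a complete client might look, including the actual remote call. This is only a minimal sketch assuming the classes above and the usual binder headers plus <stdio.h>; the NULL check and the printf are illustrative additions, not part of the original example:
int main(int argc, char **argv)
{
    sp<IServiceManager> sm = defaultServiceManager();
    // look up the service registered above under the name "BuddyService"
    sp<IBinder> binder = sm->getService(String16("BuddyService"));
    if (binder == NULL) return -1;
    sp<IBuddy> mBuddy = interface_cast<IBuddy>(binder);
    // this ends up in BpBuddy::getPid(), which transacts with BuddyService
    printf("buddy pid = %d\n", mBuddy->getPid());
    return 0;
}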
0.1 Components (classes) required for a complete native service
The example above involves four classes: the IBuddy interface, the BpBuddy proxy, the BnBuddy stub, and the BuddyService implementation. Now let's revisit the registration code in main():
int main(int argc, char **argv)
{
//register the service with the service manager
defaultServiceManager()->addService(String16("BuddyService"), new BuddyService());
//start the thread pool and listen for incoming transactions
ProcessState::self()->startThreadPool();
IPCThreadState::self()->joinThreadPool();
return 0;
}
RTFSC: let's walk through the source code step by step.
Step 1: call defaultServiceManager(), which returns an sp<IServiceManager>:
sp<IServiceManager> defaultServiceManager(){
// gDefaultServiceManager is defined in /android/frameworks/native/libs/binder/Static.cpp
if (gDefaultServiceManager != NULL) return gDefaultServiceManager;
{
// take the lock for thread safety
AutoMutex _l(gDefaultServiceManagerLock);
while (gDefaultServiceManager == NULL) {
// this is the key line: what exactly happens here?
gDefaultServiceManager = interface_cast<IServiceManager>(
ProcessState::self()->getContextObject(NULL));
if (gDefaultServiceManager == NULL)
sleep(1);
}
}
return gDefaultServiceManager;
}
// This is a singleton; gProcess is also defined in /android/frameworks/native/libs/binder/Static.cpp
sp<ProcessState> ProcessState::self()
{
Mutex::Autolock _l(gProcessMutex);
if (gProcess != NULL) {
return gProcess;
}
gProcess = new ProcessState;
return gProcess;
}
sp<IBinder> ProcessState::getContextObject(const sp<IBinder>& caller)
{
return getStrongProxyForHandle(0);
}
sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)
{
sp<IBinder> result;
AutoMutex _l(mLock);
// look up the handle_entry for this handle
handle_entry* e = lookupHandleLocked(handle);
if (e != NULL) {
IBinder* b = e->binder;
if (b == NULL || !e->refs->attemptIncWeak(this)) {
if (handle == 0) {
Parcel data;
status_t status = IPCThreadState::self()->transact(
0, IBinder::PING_TRANSACTION, data, NULL, 0);
if (status == DEAD_OBJECT)
return NULL;
}
// here we create and return a BpBinder with handle == 0
b = new BpBinder(handle);
e->binder = b;
if (b) e->refs = b->getWeakRefs();
result = b;
} else {
result.force_set(b);
e->refs->decWeak(this);
}
}
return result;
}
What does the call interface_cast<IServiceManager> actually do?
template<typename INTERFACE>
inline sp<INTERFACE> interface_cast(const sp<IBinder>& obj)
{
return INTERFACE::asInterface(obj);
}
Clearly, this is a direct call to IServiceManager::asInterface. So where is asInterface declared and defined? Let's look at the definition of the IServiceManager class.
// IServiceManager declaration
class IServiceManager : public IInterface
{
public:
DECLARE_META_INTERFACE(ServiceManager);
// ... unrelated code omitted
};
// definition (in IServiceManager.cpp)
IMPLEMENT_META_INTERFACE(ServiceManager, "android.os.IServiceManager");
0.2 The two macros provided by IInterface
// DECLARE_META_INTERFACE defines a static descriptor variable,
// declares a static asInterface() function,
// and declares a member function that returns the interface descriptor
#define DECLARE_META_INTERFACE(INTERFACE) \
static const android::String16 descriptor; \
static android::sp<I##INTERFACE> asInterface( \
const android::sp<android::IBinder>& obj); \
virtual const android::String16& getInterfaceDescriptor() const; \
I##INTERFACE(); \
virtual ~I##INTERFACE();
#define IMPLEMENT_META_INTERFACE(INTERFACE, NAME) \
/* initialize the class's static descriptor variable */ \
const android::String16 I##INTERFACE::descriptor(NAME); \
/* the member function simply returns the static descriptor */ \
const android::String16& \
I##INTERFACE::getInterfaceDescriptor() const { \
return I##INTERFACE::descriptor; \
} \
/* asInterface implementation; for IServiceManager the obj passed in is a BpBinder */ \
android::sp<I##INTERFACE> I##INTERFACE::asInterface( \
const android::sp<android::IBinder>& obj) \
{ \
android::sp<I##INTERFACE> intr; \
if (obj != NULL) { \
intr = static_cast<I##INTERFACE*>( \
obj->queryLocalInterface( \
I##INTERFACE::descriptor).get()); \
if (intr == NULL) { \
intr = new Bp##INTERFACE(obj); \
} \
} \
return intr; \
} \
I##INTERFACE::I##INTERFACE() { } \
I##INTERFACE::~I##INTERFACE() { }
#define CHECK_INTERFACE(interface, data, reply) \
if (!data.checkInterface(this)) { return PERMISSION_DENIED; }
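As a concrete illustration, hand-expanding these two macros for the IBuddy example from the beginning of the article gives roughly the following (this is just the macro text above with INTERFACE = Buddy and NAME = "Buddy", whitespace added for readability):
// DECLARE_META_INTERFACE(Buddy) inside class IBuddy expands to:
static const android::String16 descriptor;
static android::sp<IBuddy> asInterface(const android::sp<android::IBinder>& obj);
virtual const android::String16& getInterfaceDescriptor() const;
IBuddy();
virtual ~IBuddy();

// IMPLEMENT_META_INTERFACE(Buddy, "Buddy") expands to:
const android::String16 IBuddy::descriptor("Buddy");
const android::String16& IBuddy::getInterfaceDescriptor() const {
    return IBuddy::descriptor;
}
android::sp<IBuddy> IBuddy::asInterface(const android::sp<android::IBinder>& obj)
{
    android::sp<IBuddy> intr;
    if (obj != NULL) {
        // a local BnBuddy answers queryLocalInterface(); a BpBinder does not,
        // so for a remote binder we wrap it in a new BpBuddy proxy
        intr = static_cast<IBuddy*>(
                obj->queryLocalInterface(IBuddy::descriptor).get());
        if (intr == NULL) {
            intr = new BpBuddy(obj);
        }
    }
    return intr;
}
IBuddy::IBuddy() { }
IBuddy::~IBuddy() { }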
Therefore, interface_cast<IServiceManager>(ProcessState::self()->getContextObject(NULL)) returns a BpServiceManager whose remote() is a BpBinder with handle 0.
// android/frameworks/native/libs/binder/IServiceManager.cpp
virtual status_t addService(const String16& name, const sp<IBinder>& service, bool allowIsolated)
{
Parcel data, reply;
data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
data.writeString16(name);
data.writeStrongBinder(service);
data.writeInt32(allowIsolated ? 1 : 0);
// calls BpBinder::transact
status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply);
return err == NO_ERROR ? reply.readExceptionCode() : err;
}
Step 2: IPCThreadState::self()->joinThreadPool();
void IPCThreadState::joinThreadPool(bool isMain = true)
{
mOut.writeInt32(isMain ? BC_ENTER_LOOPER : BC_REGISTER_LOOPER);
//...............
status_t result;
// loop, receiving and executing incoming commands
do {
processPendingDerefs();
// now get the next command to be processed, waiting if necessary
result = getAndExecuteCommand();
if (result < NO_ERROR && result != TIMED_OUT && result != -ECONNREFUSED && result != -EBADF) {
abort();
}
// Let this thread exit the thread pool if it is no longer
// needed and it is not the main process thread.
if(result == TIMED_OUT && !isMain) {
break;
}
} while (result != -ECONNREFUSED && result != -EBADF);
mOut.writeInt32(BC_EXIT_LOOPER);
talkWithDriver(false);
}
status_t IPCThreadState::getAndExecuteCommand()
{
status_t result;
int32_t cmd;
result = talkWithDriver();
if (result >= NO_ERROR) {
size_t IN = mIn.dataAvail();
if (IN < sizeof(int32_t)) return result;
cmd = mIn.readInt32();
result = executeCommand(cmd);
//...............
}
return result;
}
Step 3: ProcessState::self()->startThreadPool();
void ProcessState::startThreadPool()
{
AutoMutex _l(mLock);
if (!mThreadPoolStarted) {
mThreadPoolStarted = true;
spawnPooledThread(true);
}
}
String8 ProcessState::makeBinderThreadName() {
int32_t s = android_atomic_add(1, &mThreadPoolSeq);
String8 name;
name.appendFormat("Binder_%X", s);
return name;
}
void ProcessState::spawnPooledThread(bool isMain)
{
if (mThreadPoolStarted) {
String8 name = makeBinderThreadName();
ALOGV("Spawning new pooled thread, name=%s\n", name.string());
sp<Thread> t = new PoolThread(isMain);
t->run(name.string());
}
}
class PoolThread : public Thread
{
public:
PoolThread(bool isMain)
: mIsMain(isMain)
{
}
protected:
virtual bool threadLoop()
{
IPCThreadState::self()->joinThreadPool(mIsMain);
return false;
}
const bool mIsMain;
};
0.3 Obtaining a service through the service manager
Now look at the following client-side code:
sp<IServiceManager> sm = defaultServiceManager();
// find the Buddy service and obtain a proxy to call it through
sp<IBinder> binder = sm->getService(String16("BuddyService"));
sp<IBuddy> mBuddy = interface_cast<IBuddy>(binder);
This follows the same pattern as obtaining IServiceManager through defaultServiceManager(), so we will not repeat the analysis here. The implementations of getService and addService are covered below.
1. ServiceManager
In the native service example above we saw that adding a user-defined native service to the system, and retrieving it from the system, both depend on one class: IServiceManager. IServiceManager is the foundation of Binder communication at the application layer. First, take a look at the IServiceManager class diagram; some details are not drawn there, see the native service class diagram.
1.1 The ServiceManager proxy side
BpServiceManager is the proxy of the IServiceManager interface. As the class diagram shows, IServiceManager has four methods, whose main purpose is to manage the system's services. Note that BpServiceManager's mRemote is a BpBinder, not a BnServiceManager: besides implementing the interface methods, IServiceManager maintains the whole service system, so BpBinder also carries a good deal of IPC-related code. To understand BpBinder in more depth, we pick the two most frequently used IServiceManager methods, addService and getService, for a detailed walk-through.
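For reference, the four interface methods look roughly like this. This is only a sketch of the IServiceManager declaration from this era of AOSP, abridged to the parts we care about:
class IServiceManager : public IInterface
{
public:
    DECLARE_META_INTERFACE(ServiceManager);
    // look up a service, retrying for a few seconds if it is not yet published
    virtual sp<IBinder> getService(const String16& name) const = 0;
    // look up a service without waiting
    virtual sp<IBinder> checkService(const String16& name) const = 0;
    // publish a service under the given name
    virtual status_t addService(const String16& name, const sp<IBinder>& service,
                                bool allowIsolated = false) = 0;
    // list the names of all published services
    virtual Vector<String16> listServices() = 0;
};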
virtual status_t BpServiceManager::addService(const String16& name, const sp<IBinder>& service, bool allowIsolated = false)
{
// Parcel is a key data structure for Binder IPC; we will cover it later
Parcel data, reply;
// write the interface descriptor
data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
// write the service name
data.writeString16(name);
// write the service instance; this is the crucial part that later IPC relies on
data.writeStrongBinder(service);
// write the allowIsolated flag
data.writeInt32(allowIsolated ? 1 : 0);
// call BpBinder::transact; the transaction codes are defined as:
// enum {
// GET_SERVICE_TRANSACTION = IBinder::FIRST_CALL_TRANSACTION,
// CHECK_SERVICE_TRANSACTION,
// ADD_SERVICE_TRANSACTION,
// LIST_SERVICES_TRANSACTION,
//};
status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply);
return err == NO_ERROR ? reply.readExceptionCode() : err;
}
status_t BpBinder::transact(uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags = 0)
{
// Once a binder has died, it will never come back to life.
// mAlive is initialized to 1 in the BpBinder constructor; mHandle == 0 for the
// service manager, and flags defaults to 0
if (mAlive) {
// this hands the transaction over to IPCThreadState
status_t status = IPCThreadState::self()->transact(mHandle, code, data, reply, flags);
if (status == DEAD_OBJECT) mAlive = 0;
return status;
}
return DEAD_OBJECT;
}
Now let's look at the implementation in IPCThreadState:
status_t IPCThreadState::transact(int32_t handle, uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
status_t err = data.errorCheck();
// transaction flags, defined in /kernel/drivers/staging/android/binder.h:
//enum transaction_flags {
//TF_ONE_WAY = 0x01, /* this is a one-way call: async, no return */
//TF_ROOT_OBJECT = 0x04, /* contents are the component's root object */
//TF_STATUS_CODE = 0x08, /* contents are a 32-bit status code */
//TF_ACCEPT_FDS = 0x10, /* allow replies with file descriptors */
//};
flags |= TF_ACCEPT_FDS;
if (err == NO_ERROR) {
// write the data into mOut; BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data) is likewise defined in the kernel source
err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
}
if (err != NO_ERROR) {
if (reply) reply->setError(err);
return (mLastError = err);
}
if ((flags & TF_ONE_WAY) == 0) {
if (reply) { // reply != NULL;
err = waitForResponse(reply);
} else {
Parcel fakeReply;
err = waitForResponse(&fakeReply);
}
} else {
err = waitForResponse(NULL, NULL);
}
return err;
}
The writeTransactionData function packs the request into a binder_transaction_data and queues it, together with the BC_TRANSACTION command, in the mOut Parcel.
status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags, int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
binder_transaction_data tr;
tr.target.handle = handle; // 0
tr.code = code; // ADD_SERVICE_TRANSACTION
tr.flags = binderFlags; // flags == 0|TF_ACCEPT_FDS;
tr.cookie = 0;
tr.sender_pid = 0;
tr.sender_euid = 0;
const status_t err = data.errorCheck();
if (err == NO_ERROR) {
tr.data_size = data.ipcDataSize();
tr.data.ptr.buffer = data.ipcData();
tr.offsets_size = data.ipcObjectsCount()*sizeof(size_t);
tr.data.ptr.offsets = data.ipcObjects();
} else if (statusBuffer) {// but statusBuffer == NULL;
tr.flags |= TF_STATUS_CODE;
*statusBuffer = err;
tr.data_size = sizeof(status_t);
tr.data.ptr.buffer = statusBuffer;
tr.offsets_size = 0;
tr.data.ptr.offsets = NULL;
} else {
return (mLastError = err);
}
// BC_TRANSACTION == cmd
mOut.writeInt32(cmd);
mOut.write(&tr, sizeof(tr));
return NO_ERROR;
}
The packed data structure, binder_transaction_data, is defined in /kernel/drivers/staging/android/binder.h and is one of the key data structures of Binder IPC. Later we will also encounter binder_write_read and flat_binder_object, both defined in the kernel's binder.h as well.
struct binder_transaction_data {
/* The first two are only used for bcTRANSACTION and brTRANSACTION,
* identifying the target and contents of the transaction.
*/
union {
size_t handle; /* target descriptor of command transaction */
void *ptr; /* target descriptor of return transaction */
} target;
void *cookie; /* target object cookie */
unsigned int code; /* transaction command */
/* General information about the transaction. */
unsigned int flags;
pid_t sender_pid;
uid_t sender_euid;
size_t data_size; /* number of bytes of data */
size_t offsets_size; /* number of bytes of offsets */
/* If this transaction is inline, the data immediately
* follows here; otherwise, it ends with a pointer to
* the data buffer.
*/
union {
struct {
/* transaction data */
const void *buffer;
/* offsets from buffer to flat_binder_object structs */
const void *offsets;
} ptr;
uint8_t buf[8];
} data;
};
The name waitForResponse is slightly misleading; a more accurate name would be transactAndWait: it writes the data to the binder driver and then waits for the driver's response.
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult=NULL)
{
int32_t cmd;
int32_t err;
while (1) {
if ((err=talkWithDriver()) < NO_ERROR) break;
// after talkWithDriver returns, the response data can be read from mIn
err = mIn.errorCheck();
if (err < NO_ERROR) break;
if (mIn.dataAvail() == 0) continue;
// read the type of the response message
cmd = mIn.readInt32();
switch (cmd) {
//........................
case BR_REPLY:
{
binder_transaction_data tr;
err = mIn.read(&tr, sizeof(tr));
ALOG_ASSERT(err == NO_ERROR, "Not enough command data for brREPLY");
if (err != NO_ERROR) goto finish;
if (reply) {
if ((tr.flags & TF_STATUS_CODE) == 0) {
// copy the response data into the reply Parcel
reply->ipcSetDataReference(
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer), tr.data_size,
reinterpret_cast<const size_t*>(tr.data.ptr.offsets), tr.offsets_size/sizeof(size_t),
freeBuffer, this);
} else {
//.......................
}
} else {
//........................
continue;
}
}
goto finish;
default:
err = executeCommand(cmd);
if (err != NO_ERROR) goto finish;
break;
}
}
finish:
if (err != NO_ERROR) {
if (acquireResult) *acquireResult = err;
if (reply) reply->setError(err);
mLastError = err;
}
return err;
}
talkWithDriver, as its name suggests, is the function that actually talks to the binder driver.
status_t IPCThreadState::talkWithDriver(bool doReceive = true)
{
if (mProcess->mDriverFD <= 0) {
return -EBADF;
}
// this structure is defined in the kernel: /kernel/drivers/staging/android/binder.h
binder_write_read bwr;
// Is the read buffer empty?
const bool needRead = mIn.dataPosition() >= mIn.dataSize();
// We don't want to write anything if we are still reading
// from data left in the input buffer and the caller
// has requested to read the next data.
// mOut holds the data written earlier by writeTransactionData
const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;
bwr.write_size = outAvail;
bwr.write_buffer = (long unsigned int)mOut.data();
// This is what we'll read.
if (doReceive && needRead) {
bwr.read_size = mIn.dataCapacity();
bwr.read_buffer = (long unsigned int)mIn.data();
} else {
bwr.read_size = 0;
bwr.read_buffer = 0;
}
// Return immediately if there is nothing to do.
if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR;
bwr.write_consumed = 0;
bwr.read_consumed = 0;
status_t err;
do {
// write the data and block waiting to read the response
if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
err = NO_ERROR;
else
err = -errno;
// mDriverFD is opened by open_driver(), called from the ProcessState constructor
if (mProcess->mDriverFD <= 0) {
err = -EBADF;
}
} while (err == -EINTR);
if (err >= NO_ERROR) {
//reset mOut
if (bwr.write_consumed > 0) {
if (bwr.write_consumed < (ssize_t)mOut.dataSize())
mOut.remove(0, bwr.write_consumed);
else
mOut.setDataSize(0);
}
// copy the data returned by the driver into mIn
if (bwr.read_consumed > 0) {
mIn.setDataSize(bwr.read_consumed);
mIn.setDataPosition(0);
}
return NO_ERROR;
}
return err;
}
The data structure actually used when communicating with the binder driver:
/*
* On 64-bit platforms where user code may run in 32-bits the driver must
* translate the buffer (and local binder) addresses appropriately.
*/
// The buffer addresses are carried as plain unsigned longs (an integer whose value
// is a memory address): a crude but efficient way to hand buffers to the driver.
struct binder_write_read {
signed long write_size; /* bytes to write */
signed long write_consumed; /* bytes consumed by driver */
unsigned long write_buffer;
signed long read_size; /* bytes to read */
signed long read_consumed; /* bytes consumed by driver */
unsigned long read_buffer;
};
On the server (BuddyService) side, once the threads are started they wait to read messages from the binder driver, parse each message, and send back the corresponding reply. The function that performs this parsing and replying is executeCommand:
status_t IPCThreadState::executeCommand(int32_t cmd)
{
BBinder* obj;
RefBase::weakref_type* refs;
status_t result = NO_ERROR;
switch (cmd) {
//...... some code omitted
case BR_TRANSACTION:
{
binder_transaction_data tr;
// read the binder_transaction_data tr received from the binder driver
result = mIn.read(&tr, sizeof(tr));
ALOG_ASSERT(result == NO_ERROR,"Not enough command data for brTRANSACTION");
if (result != NO_ERROR) break;
Parcel buffer;
buffer.ipcSetDataReference(
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer), tr.data_size,
reinterpret_cast<const size_t*>(tr.data.ptr.offsets), tr.offsets_size/sizeof(size_t),
freeBuffer, this);
const pid_t origPid = mCallingPid;
const uid_t origUid = mCallingUid;
mCallingPid = tr.sender_pid;
mCallingUid = tr.sender_euid;
//..................
Parcel reply;
if (tr.target.ptr) {
// BBinder:: transact ==> BuddyService::onTransact
sp<BBinder> b((BBinder*)tr.cookie);
const status_t error = b->transact(tr.code, buffer, &reply, tr.flags);
if (error < NO_ERROR) reply.setError(error);
} else {
const status_t error = the_context_object->transact(tr.code, buffer, &reply, tr.flags);
if (error < NO_ERROR) reply.setError(error);
}
if ((tr.flags & TF_ONE_WAY) == 0) {
// the server sends back its reply
sendReply(reply, 0);
} else {
}
mCallingPid = origPid;
mCallingUid = origUid;
}
break;
//...... some code omitted
default:
result = UNKNOWN_ERROR;
break;
}
if (result != NO_ERROR) {
mLastError = result;
}
return result;
}
status_t IPCThreadState::sendReply(const Parcel& reply, uint32_t flags)
{
status_t err;
status_t statusBuffer;
err = writeTransactionData(BC_REPLY, flags, -1, 0, reply, &statusBuffer);
if (err < NO_ERROR) return err;
return waitForResponse(NULL, NULL);
}
getService obtains a service from the service manager:
virtual sp<IBinder> BpServiceManager::getService(const String16& name) const
{
unsigned n;
for (n = 0; n < 5; n++){
sp<IBinder> svc = checkService(name);
if (svc != NULL) return svc;
ALOGI("Waiting for service %s...\n", String8(name).string());
sleep(1);
}
return NULL;
}
virtual sp<IBinder> checkService( const String16& name) const
{
Parcel data, reply;
data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
data.writeString16(name);
remote()->transact(CHECK_SERVICE_TRANSACTION, data, &reply);
return reply.readStrongBinder();
}
getService calls checkService, which in turn goes through remote()->transact; we will not repeat that call path here. What we do want to look at is reply.readStrongBinder().
1.2 Parcel, a key data structure in client/server communication
Parcel defines a large number of data accessors; for the sake of length we pick a few important, frequently used ones to analyze:
status_t Parcel::writeInterfaceToken(const String16& interface)
{
writeInt32(IPCThreadState::self()->getStrictModePolicy() | STRICT_MODE_PENALTY_GATHER);
// currently the interface identification token is just its name as a string
return writeString16(interface);
}
status_t Parcel::writeStrongBinder(const sp<IBinder>& val)
{
return flatten_binder(ProcessState::self(), val, this);
}
status_t flatten_binder(const sp<ProcessState>& proc, const sp<IBinder>& binder, Parcel* out)
{
flat_binder_object obj;
obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
if (binder != NULL) {
// BBinder::localBinder() returns this; BpBinder::remoteBinder() returns this
IBinder *local = binder->localBinder();
if (!local) {
BpBinder *proxy = binder->remoteBinder();
if (proxy == NULL) {
ALOGE("null proxy");
}
const int32_t handle = proxy ? proxy->handle() : 0;
obj.type = BINDER_TYPE_HANDLE;
obj.handle = handle;
obj.cookie = NULL;
} else {
obj.type = BINDER_TYPE_BINDER;
obj.binder = local->getWeakRefs();
obj.cookie = local;
}
} else {
obj.type = BINDER_TYPE_BINDER;
obj.binder = NULL;
obj.cookie = NULL;
}
return finish_flatten_binder(binder, obj, out);
}
inline static status_t finish_flatten_binder(const sp<IBinder>& binder, const flat_binder_object& flat, Parcel* out)
{
return out->writeObject(flat, false);
}
sp<IBinder> Parcel::readStrongBinder() const
{
sp<IBinder> val;
unflatten_binder(ProcessState::self(), *this, &val);
return val;
}
status_t unflatten_binder(const sp<ProcessState>& proc, const Parcel& in, sp<IBinder>* out)
{
const flat_binder_object* flat = in.readObject(false);
if (flat) {
switch (flat->type) {
case BINDER_TYPE_BINDER:
*out = static_cast<IBinder*>(flat->cookie);
return finish_unflatten_binder(NULL, *flat, in);
case BINDER_TYPE_HANDLE:
*out = proc->getStrongProxyForHandle(flat->handle);
return finish_unflatten_binder(
static_cast<BpBinder*>(out->get()), *flat, in);
}
}
return BAD_TYPE;
}
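To see how the two halves fit together, here is a minimal sketch of the strong-binder round trip through a Parcel, assuming a same-process read so that the binder driver never translates the object (the variable names are illustrative):
// In-process sketch: no driver in between, so the flat_binder_object keeps
// type BINDER_TYPE_BINDER and readStrongBinder() recovers the local BBinder.
sp<IBinder> service = new BuddyService();   // a local BBinder
Parcel p;
// flatten_binder() emits a flat_binder_object of type BINDER_TYPE_BINDER,
// with the BBinder pointer stored in the cookie field
p.writeStrongBinder(service);
p.setDataPosition(0);
// unflatten_binder() sees BINDER_TYPE_BINDER and returns the same local object
sp<IBinder> same = p.readStrongBinder();    // same == service
// Across processes the driver rewrites the object to BINDER_TYPE_HANDLE,
// and readStrongBinder() instead returns a BpBinder created by
// ProcessState::getStrongProxyForHandle(flat->handle).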
1.3 The ServiceManager implementation side
//android/frameworks/native/cmds/servicemanager/service_manager.c
int main(int argc, char **argv)
{
// struct binder_state
//{
//int fd;
//void *mapped;
//unsigned mapsize;
//};
struct binder_state *bs;
///* the one magic object */
//#define BINDER_SERVICE_MANAGER ((void*) 0)
void *svcmgr = BINDER_SERVICE_MANAGER;
bs = binder_open(128*1024);
//become the context manager: this creates the binder_node for handle 0
if (binder_become_context_manager(bs)) {
ALOGE("cannot become context manager (%s)\n", strerror(errno));
return -1;
}
svcmgr_handle = svcmgr;
binder_loop(bs, svcmgr_handler);
return 0;
}
struct binder_state *binder_open(unsigned mapsize)
{
struct binder_state *bs;
bs = malloc(sizeof(*bs));
if (!bs) {
errno = ENOMEM;
return 0;
}
// open the binder device and obtain the fd
bs->fd = open("/dev/binder", O_RDWR);
if (bs->fd < 0) {
fprintf(stderr,"binder: cannot open device (%s)\n",
strerror(errno));
goto fail_open;
}
// size of the memory mapping
bs->mapsize = mapsize;
// map the binder memory into this process
bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
if (bs->mapped == MAP_FAILED) {
fprintf(stderr,"binder: cannot map device (%s)\n",
strerror(errno));
goto fail_map;
}
return bs;
//..............
}
void binder_loop(struct binder_state *bs, binder_handler func)
{
int res;
struct binder_write_read bwr;
// a 32 * 4 = 128-byte read buffer
unsigned readbuf[32];
// we only read here, never write, so the write fields are all 0
bwr.write_size = 0;
bwr.write_consumed = 0;
bwr.write_buffer = 0;
// tell the driver that this thread is entering the looper and will read data
readbuf[0] = BC_ENTER_LOOPER;
binder_write(bs, readbuf, sizeof(unsigned));
for (;;) {
bwr.read_size = sizeof(readbuf);
bwr.read_consumed = 0;
bwr.read_buffer = (unsigned) readbuf;
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
if (res < 0) {
ALOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
break;
}
// parse the data that was just read
res = binder_parse(bs, 0, readbuf, bwr.read_consumed, func);
if (res == 0) {
ALOGE("binder_loop: unexpected reply?!\n");
break;
}
if (res < 0) {
ALOGE("binder_loop: io error %d %s\n", res, strerror(errno));
break;
}
}
}
int binder_parse(struct binder_state *bs, struct binder_io *bio, uint32_t *ptr, uint32_t size, binder_handler func)
{
int r = 1;
//size is counted in bytes
uint32_t *end = ptr + (size / 4);
while (ptr < end) {
// mOut.writeInt32(cmd);
// mOut.write(&tr, sizeof(tr));
uint32_t cmd = *ptr++;
// after the increment, ptr points at a binder_transaction_data structure
switch(cmd) {
//...................................
case BR_TRANSACTION: {
// binder_txn and binder_transaction_data are two names for the same memory layout
struct binder_txn *txn = (void *) ptr;
if ((end - ptr) * sizeof(uint32_t) < sizeof(struct binder_txn)) {
ALOGE("parse: txn too small!\n");
return -1;
}
binder_dump_txn(txn);
if (func) {
unsigned rdata[256/4]; // 256 bytes = 64 uint32_t entries
//struct binder_io
//{
// char *data; /* pointer to read/write from */
// uint32_t *offs; /* array of offsets */
// uint32_t data_avail; /* bytes available in data buffer */
// uint32_t offs_avail; /* entries available in offsets array */
//
// char *data0; /* start of data buffer */
// uint32_t *offs0; /* start of offsets buffer */
// uint32_t flags;
// uint32_t unused;
//};
struct binder_io msg;
struct binder_io reply;
int res;
bio_init(&reply, rdata, sizeof(rdata), 4);
bio_init_from_txn(&msg, txn);
// call back svcmgr_handler
res = func(bs, txn, &msg, &reply);
// send reply ==>binder_write==>ioctl.
binder_send_reply(bs, &reply, txn->data, res);
}
ptr += sizeof(*txn) / sizeof(uint32_t);
break;
}
case BR_REPLY: {
struct binder_txn *txn = (void*) ptr;
if ((end - ptr) * sizeof(uint32_t) < sizeof(struct binder_txn)) {
ALOGE("parse: reply too small!\n");
return -1;
}
binder_dump_txn(txn);
if (bio) {
bio_init_from_txn(bio, txn);
bio = 0;
} else {
/* todo FREE BUFFER */
}
ptr += (sizeof(*txn) / sizeof(uint32_t));
r = 0;
break;
}
//............................
default:
ALOGE("parse: OOPS %d\n", cmd);
return -1;
}
}
return r;
}
int svcmgr_handler(struct binder_state *bs,
struct binder_txn *txn,
struct binder_io *msg,
struct binder_io *reply)
{
struct svcinfo *si;
uint16_t *s;
unsigned len;
void *ptr;
uint32_t strict_policy;
int allow_isolated;
if (txn->target != svcmgr_handle)
return -1;
strict_policy = bio_get_uint32(msg);
s = bio_get_string16(msg, &len);
// s is the ServiceManager interface descriptor; svcmgr_id =
//uint16_t svcmgr_id[] = {
//'a','n','d','r','o','i','d','.','o','s','.',
//'I','S','e','r','v','i','c','e','M','a','n','a','g','e','r'
//};
if ((len != (sizeof(svcmgr_id) / 2)) ||
memcmp(svcmgr_id, s, sizeof(svcmgr_id))) {
fprintf(stderr,"invalid id %s\n", str8(s));
return -1;
}
switch(txn->code) {
case SVC_MGR_GET_SERVICE:
case SVC_MGR_CHECK_SERVICE:
// the name of the service being looked up
s = bio_get_string16(msg, &len);
ptr = do_find_service(bs, s, len, txn->sender_euid);
if (!ptr)
break;
bio_put_ref(reply, ptr);
return 0;
case SVC_MGR_ADD_SERVICE:
s = bio_get_string16(msg, &len);
ptr = bio_get_ref(msg);
allow_isolated = bio_get_uint32(msg) ? 1 : 0;
if (do_add_service(bs, s, len, ptr, txn->sender_euid, allow_isolated))
return -1;
break;
case SVC_MGR_LIST_SERVICES: {
unsigned n = bio_get_uint32(msg);
si = svclist;
while ((n-- > 0) && si)
si = si->next;
if (si) {
bio_put_string16(reply, si->name);
return 0;
}
return -1;
}
default:
ALOGE("unknown code %d\n", txn->code);
return -1;
}
bio_put_uint32(reply, 0);
return 0;
}
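svcmgr_handler keeps its bookkeeping in a simple linked list of svcinfo records; do_find_service is essentially a walk over that list. The following is an approximate sketch based on the service_manager.c of this era, abridged (fields such as the death-notification record are omitted, and details may differ):
// Approximate sketch of the svcinfo list and the lookup behind do_find_service.
struct svcinfo
{
    struct svcinfo *next;
    void *ptr;               /* the binder reference published by addService */
    unsigned len;
    uint16_t name[0];        /* UTF-16 service name, len characters */
};

struct svcinfo *find_svc(uint16_t *s16, unsigned len)
{
    struct svcinfo *si;
    for (si = svclist; si; si = si->next) {
        if ((len == si->len) &&
            !memcmp(s16, si->name, len * sizeof(uint16_t)))
            return si;
    }
    return 0;
}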
1.4 The complete IPC flow