012. Binder Service Registration Scenario Analysis: C++ Edition

This article is based on the Android 10 source tree.

# 1. The Server Initiates a Service Registration Request

The server's main function is implemented as follows:

int main(int argc, char const *argv[])
{
    //initialize the Binder driver
    sp<ProcessState> proc(ProcessState::self());
    //obtain the ServiceManager Binder proxy
    sp<IServiceManager> sm = defaultServiceManager();
    //add the hello service
    sm->addService(String16("hello"), new BnHelloService());

    //start the thread pool and wait for remote calls
    ProcessState::self()->startThreadPool();
    IPCThreadState::self()->joinThreadPool();

    return 0;
}

There are four steps in total:

  • Binder initialization
  • Calling defaultServiceManager to obtain the ServiceManager Binder proxy
  • Making the remote call to add the hello service
  • Starting the thread pool and waiting for remote calls

Let's analyze these four steps one by one.

# 1.1 Binder Initialization

sp<ProcessState> proc(ProcessState::self());

ProcessState is a singleton class; its self() method returns the process-wide singleton object, and constructing that singleton performs the Binder initialization.

The implementation is as follows:

// /frameworks/native/libs/binder/Static.h
sp<ProcessState> gProcess;

// /frameworks/native/libs/binder/ProcessState.cpp
//ProcessState is a singleton class
// const char* kDefaultDriver = "/dev/binder";
sp<ProcessState> ProcessState::self()
{
    Mutex::Autolock _l(gProcessMutex);
    if (gProcess != nullptr) {
        return gProcess;
    }
    //call the constructor
    gProcess = new ProcessState(kDefaultDriver);
    return gProcess;
}

ProcessState::ProcessState(const char *driver)
    : mDriverName(String8(driver))
    , mDriverFD(open_driver(driver)) //Point of interest 1: open_driver opens and initializes the Binder driver
    , mVMStart(MAP_FAILED)
    , mThreadCountLock(PTHREAD_MUTEX_INITIALIZER)
    , mThreadCountDecrement(PTHREAD_COND_INITIALIZER)
    , mExecutingThreadsCount(0)
    , mMaxThreads(DEFAULT_MAX_BINDER_THREADS)
    , mStarvationStartTimeMs(0)
    , mManagesContexts(false)
    , mBinderContextCheckFunc(nullptr)
    , mBinderContextUserData(nullptr)
    , mThreadPoolStarted(false)
    , mThreadPoolSeq(1)
    , mCallRestriction(CallRestriction::NONE)
{
    if (mDriverFD >= 0) {
        // mmap the binder, providing a chunk of virtual address space to receive transactions.
        //Point of interest 2: mmap maps the Binder transaction buffer
        mVMStart = mmap(nullptr, BINDER_VM_SIZE, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0);
        if (mVMStart == MAP_FAILED) {
            // *sigh*
            ALOGE("Using %s failed: unable to mmap transaction memory.\n", mDriverName.c_str());
            close(mDriverFD);
            mDriverFD = -1;
            mDriverName.clear();
        }
    }

    LOG_ALWAYS_FATAL_IF(mDriverFD < 0, "Binder driver could not be opened.  Terminating.");
}

Point of interest 1: the call to open_driver:

static int open_driver(const char *driver)
{
    //open the /dev/binder driver
    int fd = open(driver, O_RDWR | O_CLOEXEC);
    if (fd >= 0) {
        int vers = 0;
        //query the Binder version
        status_t result = ioctl(fd, BINDER_VERSION, &vers);
        if (result == -1) {
            ALOGE("Binder ioctl to obtain version failed: %s", strerror(errno));
            close(fd);
            fd = -1;
        }
        //check the Binder protocol version
        if (result != 0 || vers != BINDER_CURRENT_PROTOCOL_VERSION) {
          ALOGE("Binder driver protocol(%d) does not match user space protocol(%d)! ioctl() return value: %d",
                vers, BINDER_CURRENT_PROTOCOL_VERSION, result);
            close(fd);
            fd = -1;
        }
        //set the maximum number of Binder threads
        size_t maxThreads = DEFAULT_MAX_BINDER_THREADS;
        result = ioctl(fd, BINDER_SET_MAX_THREADS, &maxThreads);
        if (result == -1) {
            ALOGE("Binder ioctl to set max threads failed: %s", strerror(errno));
        }
    } else {
        ALOGW("Opening '%s' failed: %s\n", driver, strerror(errno));
    }
    return fd;
}

Point of interest 2: mmap completes the memory mapping:

mVMStart = mmap(nullptr, BINDER_VM_SIZE, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0);

As you can see, the C++ flow is essentially the same as in the C-language example.
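For comparison, the corresponding steps in the C-language example boil down to roughly the following condensed sketch in plain C (the exact mapping size and error handling follow whatever the C example chose; the UAPI header path may differ in your tree):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/android/binder.h>

// Open /dev/binder, check the protocol version, then mmap the receive buffer.
int binder_open_sketch(size_t mapsize, void **mapped)
{
    int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
    if (fd < 0)
        return -1;

    struct binder_version vers;
    if (ioctl(fd, BINDER_VERSION, &vers) < 0 ||
        vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION) {
        close(fd);
        return -1;
    }

    *mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, fd, 0);
    if (*mapped == MAP_FAILED) {
        close(fd);
        return -1;
    }
    return fd;
}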

# 1.2 Calling defaultServiceManager to Obtain the ServiceManager Binder Proxy

Let's look at the implementation of defaultServiceManager:

// /frameworks/native/libs/binder/Static.cpp
sp<IServiceManager> gDefaultServiceManager;

sp<IServiceManager> defaultServiceManager()
{
    if (gDefaultServiceManager != nullptr) return gDefaultServiceManager;

    {
        AutoMutex _l(gDefaultServiceManagerLock);
        while (gDefaultServiceManager == nullptr) {
            //point of interest
            gDefaultServiceManager = interface_cast<IServiceManager>(
                ProcessState::self()->getContextObject(nullptr));
            if (gDefaultServiceManager == nullptr)
                sleep(1);
        }
    }

    return gDefaultServiceManager;
}

Next, let's step through ProcessState::self()->getContextObject(nullptr):

sp<IBinder> ProcessState::getContextObject(const sp<IBinder>& /*caller*/)
{
    return getStrongProxyForHandle(0);
}


sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)
{
    sp<IBinder> result;

    AutoMutex _l(mLock);

    //1
    handle_entry* e = lookupHandleLocked(handle);

    //2
    if (e != nullptr) {
        IBinder* b = e->binder;
        if (b == nullptr || !e->refs->attemptIncWeak(this)) {
            if (handle == 0) {
                Parcel data;
                status_t status = IPCThreadState::self()->transact(
                        0, IBinder::PING_TRANSACTION, data, nullptr, 0);
                if (status == DEAD_OBJECT)
                   return nullptr;
            }
            //3
            b = BpBinder::create(handle);
            e->binder = b;
            if (b) e->refs = b->getWeakRefs();
            result = b;
        } else {
            result.force_set(b);
            e->refs->decWeak(this);
        }
    }

    return result;
}

Vector<handle_entry> mHandleToObject;

ProcessState::handle_entry* ProcessState::lookupHandleLocked(int32_t handle)
{
    const size_t N=mHandleToObject.size();
    if (N <= (size_t)handle) {
        handle_entry e;
        e.binder = nullptr;
        e.refs = nullptr;
        status_t err = mHandleToObject.insertAt(e, N, handle+1-N);
        if (err < NO_ERROR) return nullptr;
    }
    return &mHandleToObject.editItemAt(handle);
}
  1. lookupHandleLocked() looks in the Vector mHandleToObject for a handle_entry whose handle matches. If one exists it is returned; otherwise a new handle_entry for that handle is created, inserted into mHandleToObject, and then returned. mHandleToObject is the Vector that caches this process's IBinder proxy objects.

  2. Clearly e != NULL is true here, so we enter the if (e != NULL) branch. At this point e->binder == NULL and handle == 0, so IPCThreadState::self()->transact() is called to ping the in-kernel Binder driver (PING_TRANSACTION). Since the Binder driver is already up, the ping succeeds.

  3. Next, BpBinder::create(handle) is called (internally it simply news a BpBinder object); the returned BpBinder is stored in e->binder and then returned.

From this analysis we know that ProcessState::self()->getContextObject(nullptr) returns a BpBinder object whose internal mHandle is 0.

Next, the interface_cast<IServiceManager> template function is used to convert the BpBinder into an IServiceManager.

Let's look at interface_cast<IServiceManager>:

template<typename INTERFACE>
inline sp<INTERFACE> interface_cast(const sp<IBinder>& obj)
{
    return INTERFACE::asInterface(obj);
}

//after template instantiation
inline sp<IServiceManager> interface_cast(const sp<IBinder>& obj)
{
    return IServiceManager::asInterface(obj);
}

asInterface is a static method of the IServiceManager class, generated by the IMPLEMENT_META_INTERFACE macro. After macro expansion it looks like this:

::android::sp<IServiceManager> IServiceManager::asInterface(              
            const ::android::sp<::android::IBinder>& obj)               
    {                                                                   
        ::android::sp<IServiceManager> intr;                               
        if (obj != nullptr) {                                           
            intr = static_cast<IServiceManager*>(    
                // BpBinder does not override queryLocalInterface;
                // it inherits IBinder's implementation (frameworks/native/libs/binder/Binder.cpp), which simply returns nullptr
                obj->queryLocalInterface(                               
                        IServiceManager::descriptor).get());               
            if (intr == nullptr) {    //this branch is taken
                //obj is a BpBinder
                intr = new BpServiceManager(obj);                          
            }                                                           
        }                                                               
        return intr;                                                    
    }               

So asInterface effectively news a BpServiceManager and passes in obj, whose type is BpBinder.

Back at the call site, the newly constructed BpServiceManager is assigned to the global variable gDefaultServiceManager; from then on we can make remote calls through this proxy class.
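For reference, the asInterface body shown above is not written by hand. In the AOSP sources it is generated by a pair of macros, roughly as sketched here (see IInterface.h for the real definitions):

// frameworks/native/libs/binder/include/binder/IServiceManager.h
class IServiceManager : public IInterface
{
public:
    // declares descriptor, asInterface(), getInterfaceDescriptor(), ...
    DECLARE_META_INTERFACE(ServiceManager)
    // ...
};

// frameworks/native/libs/binder/IServiceManager.cpp
// generates the asInterface() body expanded above, with
// descriptor = "android.os.IServiceManager"
IMPLEMENT_META_INTERFACE(ServiceManager, "android.os.IServiceManager");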

# 1.3 Making the Remote Call to Add the hello Service

sm->addService(String16("hello"), new BnHelloService());

The actual type of sm is BpServiceManager, and it is through this Binder proxy class that we make the remote call. BnHelloService is the Binder native (server-side) class for the hello service.
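As a reminder, BnHelloService is the hand-written native-side class from the earlier C++ example article. Its typical shape is sketched below; the interface name, method, and transaction code are illustrative rather than the exact code from that article:

// Illustrative sketch of a native-side Binder class for the hello service.
class IHelloService : public IInterface {
public:
    DECLARE_META_INTERFACE(HelloService)  // needs a matching IMPLEMENT_META_INTERFACE in a .cpp
    enum { HELLO = IBinder::FIRST_CALL_TRANSACTION };
    virtual void sayHello() = 0;
};

class BnHelloService : public BnInterface<IHelloService> {
public:
    // Called by the Binder framework when a remote request for this service arrives.
    status_t onTransact(uint32_t code, const Parcel& data,
                        Parcel* reply, uint32_t flags = 0) override;
    void sayHello() override;
};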

Next, let's look at the implementation of addService:

// frameworks/native/libs/binder/include/binder/IServiceManager.h
    //declaration
    //the last two parameters have default values
    virtual status_t addService(const String16& name, const sp<IBinder>& service,
                                bool allowIsolated = false,
                                int dumpsysFlags = DUMP_FLAG_PRIORITY_DEFAULT) = 0;

// frameworks/native/libs/binder/IServiceManager.cpp
    //implementation
    virtual status_t addService(const String16& name, const sp<IBinder>& service,
                                bool allowIsolated, int dumpsysPriority) 
                                {
        Parcel data, reply;
        //Point of interest 1
        //write the header data
        data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
        data.writeString16(name);
        //writeStrongBinder wraps BnHelloService into a flat_binder_object structure
        data.writeStrongBinder(service);
        data.writeInt32(allowIsolated ? 1 : 0);
        data.writeInt32(dumpsysPriority);
        //Point of interest 2
        status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply);
        return err == NO_ERROR ? reply.readExceptionCode() : err;
    }

Point of interest 1: the Parcel data is constructed. Point of interest 2: remote()->transact is called to make the remote call. Next, let's see what remote() is and how the remote call is actually made.

Based on the earlier class-relationship analysis, the remote() function is implemented in BpRefBase, a base class of BpServiceManager (via BpInterface):

// frameworks/native/libs/binder/include/binder/Binder.h

class BpRefBase : public virtual RefBase
{
protected:
   //......

    inline  IBinder*        remote()                { return mRemote; }
    inline  IBinder*        remote() const          { return mRemote; }

private:
    
    //......
    IBinder* const          mRemote;
    //......
};

remote() simply returns the mRemote member, so next let's find out where mRemote is assigned.

From the earlier analysis, IServiceManager::asInterface news a BpServiceManager:

//the call site
//obj is a BpBinder whose internal handle = 0
intr = new BpServiceManager(obj);    

//the constructor implementation
explicit BpServiceManager(const sp<IBinder>& impl)
        : BpInterface<IServiceManager>(impl)
{
}

BpServiceManager then invokes the constructor of its parent class BpInterface<IServiceManager>:

template<typename INTERFACE>
inline BpInterface<INTERFACE>::BpInterface(const sp<IBinder>& remote)
    : BpRefBase(remote)
{
}

which in turn invokes the constructor of its parent class BpRefBase:

BpRefBase::BpRefBase(const sp<IBinder>& o)
    : mRemote(o.get()), mRefs(nullptr), mState(0)
{
    extendObjectLifetime(OBJECT_LIFETIME_WEAK);

    if (mRemote) {
        mRemote->incStrong(this);           // Removed on first IncStrong().
        mRefs = mRemote->createWeak(this);  // Held for our entire lifetime.
    }
}

Here the BpBinder passed in (handle = 0) is assigned to mRemote, so remote() returns that BpBinder.

Coming back, our remote()->transact call is therefore actually BpBinder::transact:

status_t BpBinder::transact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    // Once a binder has died, it will never come back to life.
    if (mAlive) {
        status_t status = IPCThreadState::self()->transact(
            mHandle, code, data, reply, flags);
        if (status == DEAD_OBJECT) mAlive = 0;
        return status;
    }

    return DEAD_OBJECT;
}

This in turn calls transact on the IPCThreadState singleton.

First, the singleton is obtained via self():

static pthread_key_t gTLS = 0;
static bool gHaveTLS = false;

IPCThreadState* IPCThreadState::self()
{
    if (gHaveTLS) {
restart:
        const pthread_key_t k = gTLS;
        IPCThreadState* st = (IPCThreadState*)pthread_getspecific(k);
        if (st) return st;
        return new IPCThreadState;
    }

    if (gShutdown) {
        ALOGW("Calling IPCThreadState::self() during shutdown is dangerous, expect a crash.\n");
        return nullptr;
    }

    pthread_mutex_lock(&gTLSMutex);
    if (!gHaveTLS) {
        int key_create_value = pthread_key_create(&gTLS, threadDestructor);
        if (key_create_value != 0) {
            pthread_mutex_unlock(&gTLSMutex);
            ALOGW("IPCThreadState::self() unable to create TLS key, expect a crash: %s\n",
                    strerror(key_create_value));
            return nullptr;
        }
        gHaveTLS = true;
    }
    pthread_mutex_unlock(&gTLSMutex);
    goto restart;
}

This uses Thread Specific Data (thread-local storage). In a multithreaded program, threads normally share the program's variables: a global variable can be read and modified by every thread. If instead each thread wants its own private copy of such a variable, thread-specific storage is needed. The variable still looks like a global that every thread can use, yet its value is stored separately for each thread. That is what thread-specific data means.

Here is how thread-specific storage is used:

  • Declare a variable of type pthread_key_t.
  • Call pthread_key_create() to create the key. It takes two parameters: the pthread_key_t variable declared above, and a destructor function that is invoked when a thread releases its thread-specific value. The function pointer may be NULL, in which case the system performs the default cleanup.
  • When a thread needs to store its own value, it calls pthread_setspecific(). It takes two parameters: the pthread_key_t key and a void* value, so a value of any type can be stored.
  • To read the stored value back, call pthread_getspecific() with the pthread_key_t key; it returns the value as a void*.

The prototypes of these functions are:

int pthread_key_create(pthread_key_t *key, void (*destructor)(void*));

int pthread_setspecific(pthread_key_t key, const void *value);

void *pthread_getspecific(pthread_key_t key);

Here is an example:

#include <stdio.h>
#include <pthread.h>
#include <unistd.h>

static pthread_key_t key;

// destructor, called for each thread that set a value for this key
void echomsg(void *t)
{
        printf("destructor executed in thread %lu, param=%p\n",
               (unsigned long)pthread_self(), t);
}

void *child1(void *arg)
{
        pthread_t tid = pthread_self();
        printf("thread %lu enter\n", (unsigned long)tid);
        pthread_setspecific(key, (void *)tid);
        sleep(2);
        printf("thread %lu returns %lu, key %u\n", (unsigned long)tid,
               (unsigned long)pthread_getspecific(key), (unsigned int)key);
        sleep(5);
        return NULL;
}

void *child2(void *arg)
{
        pthread_t tid = pthread_self();
        printf("thread %lu enter\n", (unsigned long)tid);
        pthread_setspecific(key, (void *)tid);
        sleep(1);
        printf("thread %lu returns %lu, key %u\n", (unsigned long)tid,
               (unsigned long)pthread_getspecific(key), (unsigned int)key);
        sleep(5);
        return NULL;
}

int main(void)
{
        pthread_t tid1, tid2;
        printf("this is main thread\n");
        pthread_key_create(&key, echomsg);
        pthread_create(&tid1, NULL, child1, NULL);
        pthread_create(&tid2, NULL, child2, NULL);
        sleep(10);
        pthread_key_delete(key);
        printf("main thread exit\n");
        return 0;
}

As the example shows, a pthread_key_t created in any thread is visible to all threads; pthread_key_create() only needs to be called once per process. It looks like a global variable, but only the key itself is global: the value associated with it is different for each thread (set and read via pthread_setspecific() and pthread_getspecific()).

Back to the Binder code: the first time a thread calls self(), execution reaches new IPCThreadState:

//calls the default (and only) constructor
new IPCThreadState

//implementation
IPCThreadState::IPCThreadState()
    : mProcess(ProcessState::self()),
      mWorkSource(kUnsetWorkSource),
      mPropagateWorkSource(false),
      mStrictModePolicy(0),
      mLastTransactionBinderFlags(0),
      mCallRestriction(mProcess->mCallRestriction)
{
    pthread_setspecific(gTLS, this);
    clearCaller();
    mIn.setDataCapacity(256);
    mOut.setDataCapacity(256);
    mIPCThreadStateBase = IPCThreadStateBase::self();
}

The constructor basically just initializes member variables; we don't need to worry about the details for now.

Next comes the part we care about most, transact:

status_t IPCThreadState::transact(int32_t handle,
                                  uint32_t code, const Parcel& data,
                                  Parcel* reply, uint32_t flags)
{
    status_t err;

    flags |= TF_ACCEPT_FDS;

    IF_LOG_TRANSACTIONS() {
        TextOutput::Bundle _b(alog);
        alog << "BC_TRANSACTION thr " << (void*)pthread_self() << " / hand "
            << handle << " / code " << TypeCode(code) << ": "
            << indent << data << dedent << endl;
    }

    LOG_ONEWAY(">>>> SEND from pid %d uid %d %s", getpid(), getuid(),
        (flags & TF_ONE_WAY) == 0 ? "READ REPLY" : "ONE WAY");
    //Point of interest 1
    err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, nullptr);

    if (err != NO_ERROR) {
        if (reply) reply->setError(err);
        return (mLastError = err);
    }

    if ((flags & TF_ONE_WAY) == 0) { //this branch is taken
        if (UNLIKELY(mCallRestriction != ProcessState::CallRestriction::NONE)) {
            if (mCallRestriction == ProcessState::CallRestriction::ERROR_IF_NOT_ONEWAY) {
                ALOGE("Process making non-oneway call but is restricted.");
                CallStack::logStack("non-oneway call", CallStack::getCurrent(10).get(),
                    ANDROID_LOG_ERROR);
            } else /* FATAL_IF_NOT_ONEWAY */ {
                LOG_ALWAYS_FATAL("Process may not make oneway calls.");
            }
        }

        #if 0
        if (code == 4) { // relayout
            ALOGI(">>>>>> CALLING transaction 4");
        } else {
            ALOGI(">>>>>> CALLING transaction %d", code);
        }
        #endif
        if (reply) { //this branch is taken
            //Point of interest 2
            err = waitForResponse(reply);
        } else {
            Parcel fakeReply;
            err = waitForResponse(&fakeReply);
        }
        #if 0
        if (code == 4) { // relayout
            ALOGI("<<<<<< RETURNING transaction 4");
        } else {
            ALOGI("<<<<<< RETURNING transaction %d", code);
        }
        #endif

        IF_LOG_TRANSACTIONS() {
            TextOutput::Bundle _b(alog);
            alog << "BR_REPLY thr " << (void*)pthread_self() << " / hand "
                << handle << ": ";
            if (reply) alog << indent << *reply << dedent << endl;
            else alog << "(none requested)" << endl;
        }
    } else {
        err = waitForResponse(nullptr, nullptr);
    }

    return err;
}


Point of interest 1 mainly constructs the data to be sent:

status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
    int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
    binder_transaction_data tr;

    tr.target.ptr = 0; /* Don't pass uninitialized stack data to a remote process */
    tr.target.handle = handle;
    tr.code = code;
    tr.flags = binderFlags;
    tr.cookie = 0;
    tr.sender_pid = 0;
    tr.sender_euid = 0;

    const status_t err = data.errorCheck();
    if (err == NO_ERROR) {
        tr.data_size = data.ipcDataSize();
        tr.data.ptr.buffer = data.ipcData();
        tr.offsets_size = data.ipcObjectsCount()*sizeof(binder_size_t);
        tr.data.ptr.offsets = data.ipcObjects();
    } else if (statusBuffer) {
        tr.flags |= TF_STATUS_CODE;
        *statusBuffer = err;
        tr.data_size = sizeof(status_t);
        tr.data.ptr.buffer = reinterpret_cast<uintptr_t>(statusBuffer);
        tr.offsets_size = 0;
        tr.data.ptr.offsets = 0;
    } else {
        return (mLastError = err);
    }

    mOut.writeInt32(cmd);
    mOut.write(&tr, sizeof(tr));

    return NO_ERROR;
}


Point of interest 2: making the remote call:

status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
    uint32_t cmd;
    int32_t err;

    while (1) {
        //make the remote call
        //after the call is sent, the current thread blocks; ServiceManager is woken up and receives the data (see the C-language analysis for how ServiceManager handles the request)
        if ((err=talkWithDriver()) < NO_ERROR) break;
        err = mIn.errorCheck();
        if (err < NO_ERROR) break;
        if (mIn.dataAvail() == 0) continue;

        //once ServiceManager has handled the add-service request, it returns data to the server and goes back to its blocked state
        cmd = (uint32_t)mIn.readInt32();

        IF_LOG_COMMANDS() {
            alog << "Processing waitForResponse Command: "
                << getReturnString(cmd) << endl;
        }

        switch (cmd) {
        case BR_TRANSACTION_COMPLETE:
            if (!reply && !acquireResult) goto finish;
            break;

        case BR_DEAD_REPLY:
            err = DEAD_OBJECT;
            goto finish;

        case BR_FAILED_REPLY:
            err = FAILED_TRANSACTION;
            goto finish;

        case BR_ACQUIRE_RESULT:
            {
                ALOG_ASSERT(acquireResult != NULL, "Unexpected brACQUIRE_RESULT");
                const int32_t result = mIn.readInt32();
                if (!acquireResult) continue;
                *acquireResult = result ? NO_ERROR : INVALID_OPERATION;
            }
            goto finish;

        case BR_REPLY:  //this branch is taken
            {   
                binder_transaction_data tr;
                err = mIn.read(&tr, sizeof(tr));
                ALOG_ASSERT(err == NO_ERROR, "Not enough command data for brREPLY");
                if (err != NO_ERROR) goto finish;

                if (reply) { //this branch is taken
                    if ((tr.flags & TF_STATUS_CODE) == 0) {
                        //fill in the reply Parcel
                        reply->ipcSetDataReference(
                            reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                            tr.data_size,
                            reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                            tr.offsets_size/sizeof(binder_size_t),
                            freeBuffer, this);
                    } else {
                        err = *reinterpret_cast<const status_t*>(tr.data.ptr.buffer);
                        freeBuffer(nullptr,
                            reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                            tr.data_size,
                            reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                            tr.offsets_size/sizeof(binder_size_t), this);
                    }
                } else {
                    freeBuffer(nullptr,
                        reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                        tr.data_size,
                        reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                        tr.offsets_size/sizeof(binder_size_t), this);
                    continue;
                }
            }
            goto finish;

        default:
            err = executeCommand(cmd);
            if (err != NO_ERROR) goto finish;
            break;
        }
    }

finish:
    if (err != NO_ERROR) {
        if (acquireResult) *acquireResult = err;
        if (reply) reply->setError(err);
        mLastError = err;
    }

    return err;
}

// This looks complicated, but there are really just three steps:
// 1. Build the data to send
// 2. Make the remote call with ioctl + BINDER_WRITE_READ
// 3. Parse the received data

status_t IPCThreadState::talkWithDriver(bool doReceive)
{
    if (mProcess->mDriverFD <= 0) {
        return -EBADF;
    }

    binder_write_read bwr;

    // Is the read buffer empty?
    const bool needRead = mIn.dataPosition() >= mIn.dataSize();

    // We don't want to write anything if we are still reading
    // from data left in the input buffer and the caller
    // has requested to read the next data.
    const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;

    bwr.write_size = outAvail;
    bwr.write_buffer = (uintptr_t)mOut.data();

    // This is what we'll read.
    if (doReceive && needRead) {
        bwr.read_size = mIn.dataCapacity();
        bwr.read_buffer = (uintptr_t)mIn.data();
    } else {
        bwr.read_size = 0;
        bwr.read_buffer = 0;
    }

    IF_LOG_COMMANDS() {
        TextOutput::Bundle _b(alog);
        if (outAvail != 0) {
            alog << "Sending commands to driver: " << indent;
            const void* cmds = (const void*)bwr.write_buffer;
            const void* end = ((const uint8_t*)cmds)+bwr.write_size;
            alog << HexDump(cmds, bwr.write_size) << endl;
            while (cmds < end) cmds = printCommand(alog, cmds);
            alog << dedent;
        }
        alog << "Size of receive buffer: " << bwr.read_size
            << ", needRead: " << needRead << ", doReceive: " << doReceive << endl;
    }

    // Return immediately if there is nothing to do.
    if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR;

    bwr.write_consumed = 0;
    bwr.read_consumed = 0;
    status_t err;
    do {
        IF_LOG_COMMANDS() {
            alog << "About to read/write, write size = " << mOut.dataSize() << endl;
        }
#if defined(__ANDROID__)
        //make the remote call
        if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
            err = NO_ERROR;
        else
            err = -errno;
#else
        err = INVALID_OPERATION;
#endif
        if (mProcess->mDriverFD <= 0) {
            err = -EBADF;
        }
        IF_LOG_COMMANDS() {
            alog << "Finished read/write, write size = " << mOut.dataSize() << endl;
        }
    } while (err == -EINTR);

    IF_LOG_COMMANDS() {
        alog << "Our err: " << (void*)(intptr_t)err << ", write consumed: "
            << bwr.write_consumed << " (of " << mOut.dataSize()
                        << "), read consumed: " << bwr.read_consumed << endl;
    }

    if (err >= NO_ERROR) {
        if (bwr.write_consumed > 0) {
            if (bwr.write_consumed < mOut.dataSize())
                mOut.remove(0, bwr.write_consumed);
            else {
                mOut.setDataSize(0);
                processPostWriteDerefs();
            }
        }
        if (bwr.read_consumed > 0) {
            mIn.setDataSize(bwr.read_consumed);
            mIn.setDataPosition(0);
        }
        IF_LOG_COMMANDS() {
            TextOutput::Bundle _b(alog);
            alog << "Remaining data size: " << mOut.dataSize() << endl;
            alog << "Received commands from driver: " << indent;
            const void* cmds = mIn.data();
            const void* end = mIn.data() + mIn.dataSize();
            alog << HexDump(cmds, mIn.dataSize()) << endl;
            while (cmds < end) cmds = printReturnCommand(alog, cmds);
            alog << dedent;
        }
        return NO_ERROR;
    } 

    return err;
}

At this point, the analysis of adding a service is complete.
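As a quick sanity check (an optional usage sketch, not part of the original example), the registration can be verified by looking the service back up through ServiceManager; the full lookup path is the subject of the next article:

// After addService returns NO_ERROR, the service should be discoverable by name.
sp<IBinder> binder = defaultServiceManager()->getService(String16("hello"));
if (binder != nullptr) {
    ALOGI("hello service is registered");
}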

# 1.4 Starting the Thread Pool and Waiting for Remote Calls

ProcessState::self()->startThreadPool();
IPCThreadState::self()->joinThreadPool();

startThreadPool:

void ProcessState::startThreadPool()
{
    AutoMutex _l(mLock);
    if (!mThreadPoolStarted) {
        mThreadPoolStarted = true;
        spawnPooledThread(true);
    }
}

void ProcessState::spawnPooledThread(bool isMain)
{
    if (mThreadPoolStarted) {
        String8 name = makeBinderThreadName();
        ALOGV("Spawning new pooled thread, name=%s\n", name.string());
        sp<Thread> t = new PoolThread(isMain);
        t->run(name.string());
    }
}

class PoolThread : public Thread
{
public:
    explicit PoolThread(bool isMain)
        : mIsMain(isMain)
    {
    }
    
protected:
    virtual bool threadLoop()
    {
        IPCThreadState::self()->joinThreadPool(mIsMain);
        return false;
    }
    
    const bool mIsMain;
};

This spawns a new thread that calls IPCThreadState::self()->joinThreadPool:

void IPCThreadState::joinThreadPool(bool isMain)
{
    LOG_THREADPOOL("**** THREAD %p (PID %d) IS JOINING THE THREAD POOL\n", (void*)pthread_self(), getpid());

    mOut.writeInt32(isMain ? BC_ENTER_LOOPER : BC_REGISTER_LOOPER);

    status_t result;
    do { //enter the loop
        processPendingDerefs();
        // now get the next command to be processed, waiting if necessary
        //read and process data
        result = getAndExecuteCommand();

        if (result < NO_ERROR && result != TIMED_OUT && result != -ECONNREFUSED && result != -EBADF) {
            ALOGE("getAndExecuteCommand(fd=%d) returned unexpected error %d, aborting",
                  mProcess->mDriverFD, result);
            abort();
        }

        // Let this thread exit the thread pool if it is no longer
        // needed and it is not the main process thread.
        if(result == TIMED_OUT && !isMain) {
            break;
        }
    } while (result != -ECONNREFUSED && result != -EBADF);

    LOG_THREADPOOL("**** THREAD %p (PID %d) IS LEAVING THE THREAD POOL err=%d\n",
        (void*)pthread_self(), getpid(), result);

    mOut.writeInt32(BC_EXIT_LOOPER);
    talkWithDriver(false);
}

status_t IPCThreadState::getAndExecuteCommand()
{
    status_t result;
    int32_t cmd;

    //read the data
    result = talkWithDriver();
    if (result >= NO_ERROR) {
        size_t IN = mIn.dataAvail();
        if (IN < sizeof(int32_t)) return result;
        cmd = mIn.readInt32();
        IF_LOG_COMMANDS() {
            alog << "Processing top-level Command: "
                 << getReturnString(cmd) << endl;
        }

        pthread_mutex_lock(&mProcess->mThreadCountLock);
        mProcess->mExecutingThreadsCount++;
        if (mProcess->mExecutingThreadsCount >= mProcess->mMaxThreads &&
                mProcess->mStarvationStartTimeMs == 0) {
            mProcess->mStarvationStartTimeMs = uptimeMillis();
        }
        pthread_mutex_unlock(&mProcess->mThreadCountLock);
        
        //process the command
        result = executeCommand(cmd);

        pthread_mutex_lock(&mProcess->mThreadCountLock);
        mProcess->mExecutingThreadsCount--;
        if (mProcess->mExecutingThreadsCount < mProcess->mMaxThreads &&
                mProcess->mStarvationStartTimeMs != 0) {
            int64_t starvationTimeMs = uptimeMillis() - mProcess->mStarvationStartTimeMs;
            if (starvationTimeMs > 100) {
                ALOGE("binder thread pool (%zu threads) starved for %" PRId64 " ms",
                      mProcess->mMaxThreads, starvationTimeMs);
            }
            mProcess->mStarvationStartTimeMs = 0;
        }
        pthread_cond_broadcast(&mProcess->mThreadCountDecrement);
        pthread_mutex_unlock(&mProcess->mThreadCountLock);
    }

    return result;
}

The overall approach is largely the same as binder_loop in the C-language example.
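For reference, binder_loop in the classic C implementation (frameworks/native/cmds/servicemanager/binder.c, which the C-language articles follow) plays the same role as joinThreadPool plus getAndExecuteCommand. A condensed sketch, where binder_state, binder_handler, binder_write, and binder_parse are the types and helpers from that file:

// Mark this thread as a looper, then block in ioctl(BINDER_WRITE_READ)
// and dispatch whatever commands the driver returns.
void binder_loop(struct binder_state *bs, binder_handler func)
{
    struct binder_write_read bwr;
    uint32_t readbuf[32];

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    readbuf[0] = BC_ENTER_LOOPER;               // same command joinThreadPool writes
    binder_write(bs, readbuf, sizeof(uint32_t));

    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;

        if (ioctl(bs->fd, BINDER_WRITE_READ, &bwr) < 0)   // blocks until commands arrive
            break;

        // walk the BR_* commands; a BR_TRANSACTION ends up calling func
        if (binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func) < 0)
            break;
    }
}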

# References

  • pthread_key_t和pthread_key_create()详解
  • 线程私有存储空间--pthread_key_t