# Binder Java Layer: Service Retrieval and Usage Analysis

# Service Retrieval Process

In Binder 程序示例之 Java 篇, we showed that the Client obtains the service with the following code:

IBinder binder = ServiceManager.getService("hello");
IHelloService svr = IHelloService.Stub.asInterface(binder);

The implementation of ServiceManager.getService is as follows:

// frameworks/base/core/java/android/os/ServiceManager.java
@UnsupportedAppUsage
private static Map<String, IBinder> sCache = new ArrayMap<String, IBinder>();

@UnsupportedAppUsage
public static IBinder getService(String name) {
    try {
        // First try the cache
        IBinder service = sCache.get(name);
        if (service != null) {
            return service;
        } else {
            // Cache miss: fetch the service from servicemanager
            return Binder.allowBlocking(rawGetService(name));
        }
    } catch (RemoteException e) {
            Log.e(TAG, "error in getService", e);
    }
    return null;
}

The IBinder object is first looked up in the sCache cache; on a miss, rawGetService(name) is called to fetch it:

    private static IBinder rawGetService(String name) throws RemoteException {
        final long start = sStatLogger.getTime();

        // getIServiceManager() is equivalent to new ServiceManagerProxy(new BinderProxy(0))
        final IBinder binder = getIServiceManager().getService(name);

        final int time = (int) sStatLogger.logDurationStat(Stats.GET_SERVICE, start);

        final int myUid = Process.myUid();
        final boolean isCore = UserHandle.isCore(myUid);

        final long slowThreshold = isCore
                ? GET_SERVICE_SLOW_THRESHOLD_US_CORE
                : GET_SERVICE_SLOW_THRESHOLD_US_NON_CORE;

        synchronized (sLock) {
            sGetServiceAccumulatedUs += time;
            sGetServiceAccumulatedCallCount++;

            final long nowUptime = SystemClock.uptimeMillis();

            // Was a slow call?
            if (time >= slowThreshold) {
                // We do a slow log:
                // - At most once in every SLOW_LOG_INTERVAL_MS
                // - OR it was slower than the previously logged slow call.
                if ((nowUptime > (sLastSlowLogUptime + SLOW_LOG_INTERVAL_MS))
                        || (sLastSlowLogActualTime < time)) {
                    EventLogTags.writeServiceManagerSlow(time / 1000, name);

                    sLastSlowLogUptime = nowUptime;
                    sLastSlowLogActualTime = time;
                }
            }

            // Every GET_SERVICE_LOG_EVERY_CALLS calls, log the total time spent in getService().

            final int logInterval = isCore
                    ? GET_SERVICE_LOG_EVERY_CALLS_CORE
                    : GET_SERVICE_LOG_EVERY_CALLS_NON_CORE;

            if ((sGetServiceAccumulatedCallCount >= logInterval)
                    && (nowUptime >= (sLastStatsLogUptime + STATS_LOG_INTERVAL_MS))) {

                EventLogTags.writeServiceManagerStats(
                        sGetServiceAccumulatedCallCount, // Total # of getService() calls.
                        sGetServiceAccumulatedUs / 1000, // Total time spent in getService() calls.
                        (int) (nowUptime - sLastStatsLogUptime)); // Uptime duration since last log.
                sGetServiceAccumulatedCallCount = 0;
                sGetServiceAccumulatedUs = 0;
                sLastStatsLogUptime = nowUptime;
            }
        }
        return binder;
    }

getIServiceManager() was analyzed earlier; it is equivalent to new ServiceManagerProxy(new BinderProxy(0)). For the details, see Binder Java 层服务注册过程分析.

    // getIServiceManager() is equivalent to new ServiceManagerProxy(new BinderProxy(0))
    @UnsupportedAppUsage
    private static IServiceManager getIServiceManager() {
        if (sServiceManager != null) {
            return sServiceManager;
        }

        // Find the service manager
        sServiceManager = ServiceManagerNative
                .asInterface(Binder.allowBlocking(BinderInternal.getContextObject()));
        return sServiceManager;
    }


As we learned there, the BinderProxy inside ServiceManagerProxy is an object that straddles the native and Java layers.
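
To make that relationship concrete, here is a compilable mini-model (a sketch, not AOSP source; the names mNativeData, BinderProxyNativeData, and mObject follow AOSP, everything else is illustrative):

final class BinderProxySketch {
    // Java side: the real BinderProxy stores an opaque pointer to native memory.
    long mNativeData; // -> C++ struct BinderProxyNativeData { sp<BpBinder> mObject; ... }

    // transact() crosses into JNI (android_os_BinderProxy_transact), which does:
    //   IBinder* target = getBPNativeData(env, obj)->mObject.get(); // a BpBinder
    //   target->transact(code, *data, reply, flags);                // into the driver
}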

The method above returns a ServiceManagerProxy object, and its getService method is then called:

    @UnsupportedAppUsage
    public IBinder getService(String name) throws RemoteException {
        Parcel data = Parcel.obtain();
        Parcel reply = Parcel.obtain();
        data.writeInterfaceToken(IServiceManager.descriptor);
        data.writeString(name);
        // transact on the BinderProxy
        mRemote.transact(GET_SERVICE_TRANSACTION, data, reply, 0);
        IBinder binder = reply.readStrongBinder();
        reply.recycle();
        data.recycle();
        return binder;
    }

Here mRemote is the BinderProxy that was passed in when the ServiceManagerProxy was constructed:

public ServiceManagerProxy(IBinder remote) {
    mRemote = remote;
}

Execution then moves into BinderProxy's transact method:

public boolean transact(int code, Parcel data, Parcel reply, int flags) throws RemoteException {
        Binder.checkParcel(this, code, data, "Unreasonably large binder buffer");

        //......

        try {
            // The key call: delegate to the native implementation
            return transactNative(code, data, reply, flags);
        } finally {
           //......
        }
}

// native method
public native boolean transactNative(int code, Parcel data, Parcel reply,int flags) throws RemoteException;


transact calls transactNative to issue the remote call. transactNative is a native method whose implementation lives in frameworks/base/core/jni/android_util_Binder.cpp:

// obj is the Java BinderProxy
static jboolean android_os_BinderProxy_transact(JNIEnv* env, jobject obj,
        jint code, jobject dataObj, jobject replyObj, jint flags) // throws RemoteException
{
    if (dataObj == NULL) {
        jniThrowNullPointerException(env, NULL);
        return JNI_FALSE;
    }

    // Convert the Java Parcel to its C++ counterpart
    Parcel* data = parcelForJavaObject(env, dataObj);
    if (data == NULL) {
        return JNI_FALSE;
    }

    // Convert the Java Parcel to its C++ counterpart
    Parcel* reply = parcelForJavaObject(env, replyObj);
    if (reply == NULL && replyObj != NULL) {
        return JNI_FALSE;
    }

    // Fetch mObject from the BinderProxy's native BinderProxyNativeData; it is actually a BpBinder
    IBinder* target = getBPNativeData(env, obj)->mObject.get();
    if (target == NULL) {
        jniThrowException(env, "java/lang/IllegalStateException", "Binder has been finalized!");
        return JNI_FALSE;
    }

    ALOGV("Java code calling transact on %p in Java object %p with code %" PRId32 "\n",
            target, obj, code);


    bool time_binder_calls;
    int64_t start_millis;
    if (kEnableBinderSample) {
        // Only log the binder call duration for things on the Java-level main thread.
        // But if we don't
        time_binder_calls = should_time_binder_calls();

        if (time_binder_calls) {
            start_millis = uptimeMillis();
        }
    }

    // BpBinder issues the remote call
    status_t err = target->transact(code, *data, reply, flags);

    if (kEnableBinderSample) {
        if (time_binder_calls) {
            conditionally_log_binder_call(start_millis, target, code);
        }
    }

    if (err == NO_ERROR) {
        return JNI_TRUE;
    } else if (err == UNKNOWN_TRANSACTION) {
        return JNI_FALSE;
    }

    signalExceptionForError(env, obj, err, true /*canThrowRemoteException*/, data->dataSize());
    return JNI_FALSE;
}


As we can see, after this detour the remote call is still issued through the native-layer BpBinder. The details of the BpBinder call are the same as in Binder 服务注册过程情景分析之C++篇 and are not repeated here.

After transact issues the get-service request, the ServiceManager side replies with an integer handle, stored in reply. Back in the Java layer, reply.readStrongBinder() wraps this handle into an IBinder object (in fact a BinderProxy). How reply.readStrongBinder() performs this wrapping is covered in the Parcel analysis.
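
The effect of readStrongBinder is easy to observe with a small sketch (assumes an Android runtime; the class name is made up for illustration). Within one process it returns the original Binder object; across processes the same call produces a BinderProxy wrapping the driver-assigned handle:

import android.os.Binder;
import android.os.IBinder;
import android.os.Parcel;

public class ReadStrongBinderDemo {
    public static void run() {
        Parcel p = Parcel.obtain();
        p.writeStrongBinder(new Binder()); // writer side: a local Binder object
        p.setDataPosition(0);              // rewind before reading
        IBinder b = p.readStrongBinder();  // same process: the very same Binder instance
        p.recycle();
        // Had this Parcel crossed a process boundary, b would be a BinderProxy.
    }
}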

The following method is then called to convert the IBinder object into an IHelloService:

IHelloService svr = IHelloService.Stub.asInterface(binder);

public static com.yuandaima.IHelloService asInterface(android.os.IBinder obj)
    {
      if ((obj==null)) {
        return null;
      }
      android.os.IInterface iin = obj.queryLocalInterface(DESCRIPTOR);
      if (((iin!=null)&&(iin instanceof com.yuandaima.IHelloService))) {
        return ((com.yuandaima.IHelloService)iin);
      }
      // Actually returns an IHelloService.Stub.Proxy, the binder client-side class
      return new com.yuandaima.IHelloService.Stub.Proxy(obj);
    }

At this point we hold the client-side object for the hello service, and through it we can use our custom remote service.
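
Putting the retrieval steps together, a minimal client sketch looks like this (built from the article's example service "hello"; the null check and exception handling are added here for illustration):

try {
    IBinder binder = ServiceManager.getService("hello");
    if (binder == null) {
        // servicemanager has no entry named "hello": the server is not registered yet.
        throw new IllegalStateException("hello service not found");
    }
    // binder is a BinderProxy; asInterface wraps it in the AIDL-generated proxy.
    IHelloService svr = IHelloService.Stub.asInterface(binder);
    svr.sayhello(); // BinderProxy.transact -> BpBinder::transact -> ioctl
} catch (RemoteException e) {
    // The remote side died or the transaction failed.
}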

# Service Usage Process

Through the hello service's proxy class com.yuandaima.IHelloService.Stub.Proxy we can now issue remote calls:

// The actual type behind IHelloService here is com.yuandaima.IHelloService.Stub.Proxy
IHelloService svr = IHelloService.Stub.asInterface(binder);
// Issue the remote call
svr.sayhello();

// The sayhello code is generated by aidl
@Override public void sayhello() throws android.os.RemoteException
      {
        android.os.Parcel _data = android.os.Parcel.obtain();
        android.os.Parcel _reply = android.os.Parcel.obtain();
        try {
          _data.writeInterfaceToken(DESCRIPTOR);
          boolean _status = mRemote.transact(Stub.TRANSACTION_sayhello, _data, _reply, 0);
          if (!_status && getDefaultImpl() != null) {
            getDefaultImpl().sayhello();
            return;
          }
          _reply.readException();
        }
        finally {
          _reply.recycle();
          _data.recycle();
        }
      }

The method builds the two Parcel objects, data and reply, and then issues the remote call via mRemote.transact.
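
For a method with an argument and a return value, the generated proxy follows the same pattern plus marshalling in both directions. The sketch below reconstructs what aidl plausibly generates for sayhello_to (inferred from the server-side onTransact shown later, not copied from the generated file):

@Override public int sayhello_to(java.lang.String name) throws android.os.RemoteException
{
  android.os.Parcel _data = android.os.Parcel.obtain();
  android.os.Parcel _reply = android.os.Parcel.obtain();
  int _result;
  try {
    _data.writeInterfaceToken(DESCRIPTOR);
    _data.writeString(name);                              // marshal the argument
    mRemote.transact(Stub.TRANSACTION_sayhello_to, _data, _reply, 0);
    _reply.readException();                               // rethrows a remote exception, if any
    _result = _reply.readInt();                           // unmarshal the return value
  }
  finally {
    _reply.recycle();
    _data.recycle();
  }
  return _result;
}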

mRemote is passed in through the constructor; its actual type is BinderProxy:

private android.os.IBinder mRemote;
Proxy(android.os.IBinder remote)
{
    mRemote = remote;
}

Next, BinderProxy's transact method is called. We have already walked through this call several times, so here is a summary of the flow:

BinderProxy->transact
    transactNative
        android_os_BinderProxy_transact
            BpBinder->transact
                IPCThreadState::self()->transact
                    ioctl

Layer by layer, the call finally reaches ioctl, which performs the actual remote invocation.

Android has threads dedicated to handling Binder messages; we call them binder threads.

In the Java example program, the Server never explicitly starts a binder thread, so how does it receive the data sent from the remote side?

A Java process's binder threads are created when the process starts. Java processes are started through Process.start(), which sends a socket message to the Zygote process asking it to create a new process. On receiving the message, Zygote calls Zygote.forkAndSpecialize() to fork the child process, which then runs RuntimeInit.nativeZygoteInit. Through its JNI mapping, this ultimately calls onZygoteInit in app_main.cpp:

    virtual void onZygoteInit()
    {
        sp<ProcessState> proc = ProcessState::self();
        ALOGV("App process: starting thread pool.\n");
        proc->startThreadPool();
    }

This is where every Java process in Android starts its binder threads.
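
These pool threads can be observed from Java code. A quick sketch (hypothetical snippet; the exact thread-name format varies across Android versions, commonly Binder_N or Binder:<pid>_N):

for (Thread t : Thread.getAllStackTraces().keySet()) {
    if (t.getName().startsWith("Binder")) {
        System.out.println(t.getName()); // e.g. "Binder:1234_1"
    }
}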

A binder thread reads and parses binder data in IPCThreadState::joinThreadPool:

void IPCThreadState::joinThreadPool(bool isMain)
{
    LOG_THREADPOOL("**** THREAD %p (PID %d) IS JOINING THE THREAD POOL\n", (void*)pthread_self(), getpid());

    mOut.writeInt32(isMain ? BC_ENTER_LOOPER : BC_REGISTER_LOOPER);

    status_t result;
    do { // enter the loop
        processPendingDerefs();
        // now get the next command to be processed, waiting if necessary
        // read the data and process it
        result = getAndExecuteCommand();

        if (result < NO_ERROR && result != TIMED_OUT && result != -ECONNREFUSED && result != -EBADF) {
            ALOGE("getAndExecuteCommand(fd=%d) returned unexpected error %d, aborting",
                  mProcess->mDriverFD, result);
            abort();
        }

        // Let this thread exit the thread pool if it is no longer
        // needed and it is not the main process thread.
        if(result == TIMED_OUT && !isMain) {
            break;
        }
    } while (result != -ECONNREFUSED && result != -EBADF);

    LOG_THREADPOOL("**** THREAD %p (PID %d) IS LEAVING THE THREAD POOL err=%d\n",
        (void*)pthread_self(), getpid(), result);

    mOut.writeInt32(BC_EXIT_LOOPER);
    talkWithDriver(false);
}


When remote data arrives, a binder thread wakes from its sleep:

//IPCThreadState.cpp
status_t IPCThreadState::talkWithDriver(bool doReceive)
{
    //...
    do {
        //...
        // the thread is woken up here
        if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
            err = NO_ERROR;
        else
            //...
    } while (err == -EINTR);

    //...

    if (err >= NO_ERROR) {
        // discard the data that has been written
        if (bwr.write_consumed > 0) {
            if (bwr.write_consumed < (ssize_t)mOut.dataSize())
                mOut.remove(0, bwr.write_consumed);
            else
                mOut.setDataSize(0);
        }
        // set up the data that was read
        if (bwr.read_consumed > 0) {
            mIn.setDataSize(bwr.read_consumed);
            mIn.setDataPosition(0);
        }
        ...
        return NO_ERROR;
    }

    return err;
}

Once awake, the thread stores the data it has read into mIn and then calls executeCommand to process it:

status_t IPCThreadState::getAndExecuteCommand()
{
    status_t result;
    int32_t cmd;

    // Interact with the binder driver; at this point the call has already returned
    result = talkWithDriver();
    if (result >= NO_ERROR) {
        ...
        // read the command from mIn
        cmd = mIn.readInt32();
        ...

        // call executeCommand() to process the data
        result = executeCommand(cmd);
        ...
    }

    return result;
}

Inside executeCommand, BBinder's transact is called to process the received data:

status_t IPCThreadState::executeCommand(int32_t cmd) 
{
    BBinder* obj; 
    RefBase::weakref_type* refs;
    status_t result = NO_ERROR;

    switch (cmd) {
        ...
        case BR_TRANSACTION:
        {
            binder_transaction_data tr;
            result = mIn.read(&tr, sizeof(tr));
            ...

            Parcel buffer;
            buffer.ipcSetDataReference(
                reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                tr.data_size,
                reinterpret_cast<const size_t*>(tr.data.ptr.offsets),
                tr.offsets_size/sizeof(size_t), freeBuffer, this);

            ...

            Parcel reply;
            ...
            if (tr.target.ptr) {
                sp<BBinder> b((BBinder*)tr.cookie);
                const status_t error = b->transact(tr.code, buffer, &reply, tr.flags);
                if (error < NO_ERROR) reply.setError(error);

            } else {
                ...
            }

            if ((tr.flags & TF_ONE_WAY) == 0) {
                sendReply(reply, 0);
            } else {
                ...
            }
            ...

        }
        break;

        ...
    }

    ...
    return result;
}

Here tr.cookie is a pointer whose type is JavaBBinder. When the Client's get-service call returned, the binder driver processed and constructed this pointer and passed it back up to the application layer. (This involves the data-structure design of Parcel and will be explained in the Parcel analysis.)

Next, JavaBBinder's transact function is called; it is implemented in JavaBBinder's parent class, BBinder:

status_t BBinder::transact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    data.setDataPosition(0);

    status_t err = NO_ERROR;
    switch (code) {
        case PING_TRANSACTION:
            reply->writeInt32(pingBinder());
            break;
        default: // this branch is taken
            err = onTransact(code, data, reply, flags);
            break;
    }

    if (reply != nullptr) {
        reply->setDataPosition(0);
    }

    return err;
}

transact then calls JavaBBinder's onTransact function:

    // android_util_Binder.cpp
    virtual status_t onTransact(
        uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags = 0)
    {
        JNIEnv* env = javavm_to_jnienv(mVM);

        ALOGV("onTransact() on %p calling object %p in env %p vm %p\n", this, mObject, env, mVM);

        IPCThreadState* thread_state = IPCThreadState::self();
        const int32_t strict_policy_before = thread_state->getStrictModePolicy();

        // Call the Java-side Binder.execTransact method via JNI
        jboolean res = env->CallBooleanMethod(mObject, gBinderOffsets.mExecTransact,
            code, reinterpret_cast<jlong>(&data), reinterpret_cast<jlong>(reply), flags);

        if (env->ExceptionCheck()) {
            jthrowable excep = env->ExceptionOccurred();
            report_exception(env, excep,
                "*** Uncaught remote exception!  "
                "(Exceptions are not yet supported across processes.)");
            res = JNI_FALSE;

            /* clean up JNI local ref -- we don't return to Java code */
            env->DeleteLocalRef(excep);
        }
        ...
        return res != JNI_FALSE ? NO_ERROR : UNKNOWN_TRANSACTION;
    }


This lands, via JNI, in the Java layer's execTransact method:

    @UnsupportedAppUsage
    private boolean execTransact(int code, long dataObj, long replyObj,
            int flags) {
        // At that point, the parcel request headers haven't been parsed so we do not know what
        // WorkSource the caller has set. Use calling uid as the default.
        final int callingUid = Binder.getCallingUid();
        final long origWorkSource = ThreadLocalWorkSource.setUid(callingUid);
        try {
            // delegate to execTransactInternal
            return execTransactInternal(code, dataObj, replyObj, flags, callingUid);
        } finally {
            ThreadLocalWorkSource.restore(origWorkSource);
        }
    }

execTransact then calls execTransactInternal:

private boolean execTransactInternal(int code, long dataObj, long replyObj, int flags,
            int callingUid) {
        // Make sure the observer won't change while processing a transaction.
        final BinderInternal.Observer observer = sObserver;
        final CallSession callSession =
                observer != null ? observer.callStarted(this, code, UNSET_WORKSOURCE) : null;
        Parcel data = Parcel.obtain(dataObj);
        Parcel reply = Parcel.obtain(replyObj);
        // theoretically, we should call transact, which will call onTransact,
        // but all that does is rewind it, and we just got these from an IPC,
        // so we'll just call it directly.
        boolean res;
        // Log any exceptions as warnings, don't silently suppress them.
        // If the call was FLAG_ONEWAY then these exceptions disappear into the ether.
        final boolean tracingEnabled = Binder.isTracingEnabled();
        try {
            if (tracingEnabled) {
                final String transactionName = getTransactionName(code);
                Trace.traceBegin(Trace.TRACE_TAG_ALWAYS, getClass().getName() + ":"
                        + (transactionName != null ? transactionName : code));
            }
            // invoke the subclass's onTransact implementation
            res = onTransact(code, data, reply, flags);
        } catch (RemoteException|RuntimeException e) {
            if (observer != null) {
                observer.callThrewException(callSession, e);
            }
            if (LOG_RUNTIME_EXCEPTION) {
                Log.w(TAG, "Caught a RuntimeException from the binder stub implementation.", e);
            }
            if ((flags & FLAG_ONEWAY) != 0) {
                if (e instanceof RemoteException) {
                    Log.w(TAG, "Binder call failed.", e);
                } else {
                    Log.w(TAG, "Caught a RuntimeException from the binder stub implementation.", e);
                }
            } else {
                // Clear the parcel before writing the exception
                reply.setDataSize(0);
                reply.setDataPosition(0);
                reply.writeException(e);
            }
            res = true;
        } finally {
            if (tracingEnabled) {
                Trace.traceEnd(Trace.TRACE_TAG_ALWAYS);
            }
            if (observer != null) {
                // The parcel RPC headers have been called during onTransact so we can now access
                // the worksource uid from the parcel.
                final int workSourceUid = sWorkSourceProvider.resolveWorkSourceUid(
                        data.readCallingWorkSourceUid());
                observer.callEnded(callSession, data.dataSize(), reply.dataSize(), workSourceUid);
            }
        }
        checkParcel(this, code, reply, "Unreasonably large binder reply buffer");
        reply.recycle();
        data.recycle();

        // Just in case -- we are done with the IPC, so there should be no more strict
        // mode violations that have gathered for this thread.  Either they have been
        // parceled and are now in transport off to the caller, or we are returning back
        // to the main transaction loop to wait for another incoming transaction.  Either
        // way, strict mode begone!
        StrictMode.clearGatheredViolations();
        return res;
    }

The method is long, but its core is a single line: res = onTransact(code, data, reply, flags);. This dispatches to the onTransact implemented by the subclass, which in our example program is the onTransact method of the IHelloService.Stub class:

    @Override public boolean onTransact(int code, android.os.Parcel data, android.os.Parcel reply, int flags) throws android.os.RemoteException
    {
      java.lang.String descriptor = DESCRIPTOR;
      switch (code)
      {
        case INTERFACE_TRANSACTION:
        {
          reply.writeString(descriptor);
          return true;
        }
        case TRANSACTION_sayhello:
        {
          data.enforceInterface(descriptor);
          this.sayhello();
          reply.writeNoException();
          return true;
        }
        case TRANSACTION_sayhello_to:
        {
          data.enforceInterface(descriptor);
          java.lang.String _arg0;
          _arg0 = data.readString();
          int _result = this.sayhello_to(_arg0);
          reply.writeNoException();
          reply.writeInt(_result);
          return true;
        }
        case TRANSACTION_registerCallback:
        {
          data.enforceInterface(descriptor);
          int _arg0;
          _arg0 = data.readInt();
          com.yuandaima.ICallback _arg1;
          _arg1 = com.yuandaima.ICallback.Stub.asInterface(data.readStrongBinder());
          this.registerCallback(_arg0, _arg1);
          reply.writeNoException();
          return true;
        }
        default:
        {
          return super.onTransact(code, data, reply, flags);
        }
      }
    }

Based on the received code value, this then dispatches to the sayhello method.
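
To close the loop, here is a sketch of the server-side implementation that onTransact dispatches into (reconstructed from the example articles; the method bodies are illustrative, only the signatures are fixed by the AIDL interface):

class HelloService extends IHelloService.Stub {
    private int mCount = 0;

    @Override
    public void sayhello() throws android.os.RemoteException {
        mCount++;
        System.out.println("sayhello: " + mCount); // runs on a binder pool thread
    }

    @Override
    public int sayhello_to(String name) throws android.os.RemoteException {
        mCount++;
        System.out.println("sayhello_to " + name);
        return mCount;
    }

    @Override
    public void registerCallback(int arg, com.yuandaima.ICallback cb)
            throws android.os.RemoteException {
        // Illustrative only: a real server would store cb and invoke it later.
    }
}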

Finally, after execution completes, control returns step by step up the call chain, and sendReply is invoked to send data back to the Client. That path is largely the same as what we have already analyzed, so we will not go over it again.