1. Preface
In the previous article we looked at how a client initiates a request and gets data back when it wants to communicate with the server. Today we'll look at how SMgr, once it receives a client's request, hands out the corresponding proxy object for data communication.
2. SMgr Startup
Let's start with servicemanager's init script, /frameworks/native/cmds/servicemanager/servicemanager.rc
service servicemanager /system/bin/servicemanager
class core animation
user system
group system readproc
critical
onrestart restart healthd
onrestart restart zygote
onrestart restart audioserver
onrestart restart media
onrestart restart surfaceflinger
onrestart restart inputflinger
onrestart restart drm
onrestart restart cameraserver
onrestart restart keystore
onrestart restart gatekeeperd
onrestart restart thermalservice
writepid /dev/cpuset/system-background/tasks
shutdown critical
ServiceManager is parsed out of this rc file and started by the init process; the file specifies the SMgr binary to launch along with its parameters. Note the onrestart lines: other processes cache binder handles that go stale if SMgr dies, so init restarts zygote, surfaceflinger and the other listed services whenever servicemanager restarts.
2.1 SMgr Startup Parameters
SMgr's main function lives in
frameworks/native/cmds/servicemanager/main.cpp
Two versions are shown below: first the older C implementation, then the current C++ rewrite (Android 11 and later), which the rest of this article follows.
int main(int argc, char** argv)
{
struct binder_state *bs;
union selinux_callback cb;
char *driver;
if (argc > 1) {
driver = argv[1];
} else {
driver = "/dev/binder";
}
bs = binder_open(driver, 128*1024);
if (!bs) {
#ifdef VENDORSERVICEMANAGER
ALOGW("failed to open binder driver %s\n", driver);
while (true) {
sleep(UINT_MAX);
}
#else
ALOGE("failed to open binder driver %s\n", driver);
#endif
return -1;
}
if (binder_become_context_manager(bs)) {
ALOGE("cannot become context manager (%s)\n", strerror(errno));
return -1;
}
cb.func_audit = audit_callback;
selinux_set_callback(SELINUX_CB_AUDIT, cb);
cb.func_log = selinux_log_callback;
selinux_set_callback(SELINUX_CB_LOG, cb);
#ifdef VENDORSERVICEMANAGER
sehandle = selinux_android_vendor_service_context_handle();
#else
sehandle = selinux_android_service_context_handle();
#endif
selinux_status_open(true);
if (sehandle == NULL) {
ALOGE("SELinux: Failed to acquire sehandle. Aborting.\n");
abort();
}
if (getcon(&service_manager_context) != 0) {
ALOGE("SELinux: Failed to acquire service_manager context. Aborting.\n");
abort();
}
binder_loop(bs, svcmgr_handler);
return 0;
}
The newer C++ implementation:
int main(int argc, char** argv) {
    // Here argc == 1, the argv array has length 1, argv[0] == "/system/bin/servicemanager"
    if (argc > 2) {
        LOG(FATAL) << "usage: " << argv[0] << " [binder driver]";
    }
    // With argc == 1, the binder driver defaults to /dev/binder
    const char* driver = argc == 2 ? argv[1] : "/dev/binder";
    // Initialize the binder driver
    sp<ProcessState> ps = ProcessState::initWithDriver(driver);
    // No extra binder threads: SMgr handles everything on the main thread
    ps->setThreadPoolMaxThreadCount(0);
    // Abort if SMgr ever issues a blocking (non-oneway) outgoing call
    ps->setCallRestriction(ProcessState::CallRestriction::FATAL_IF_NOT_ONEWAY);
    // Instantiate ServiceManager
    sp<ServiceManager> manager = new ServiceManager(std::make_unique<Access>());
    // Register itself as a service
    if (!manager->addService("manager", manager, false /*allowIsolated*/, IServiceManager::DUMP_FLAG_PRIORITY_DEFAULT).isOk()) {
        LOG(ERROR) << "Could not self register servicemanager";
    }
    // Set the server-side BBinder object
    IPCThreadState::self()->setTheContextObject(manager);
    // Become the binder driver's context manager
    ps->becomeContextManager(nullptr, nullptr);
    // Handle binder transactions through Looper's epoll mechanism
    sp<Looper> looper = Looper::prepare(false /*allowNonCallbacks*/);
    BinderCallback::setupTo(looper);
    ClientCallbackCallback::setupTo(looper, manager);
    while(true) {
        looper->pollAll(-1);
    }
    // should not be reached
    // The loop above never exits; returning here means something went wrong.
    return EXIT_FAILURE;
}
Note: in main, argc is the number of arguments and argv stores each argument, as an array.
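As a concrete illustration of argc/argv here: the vendor-side variant vndservicemanager is started by init with an explicit driver argument (from vndservicemanager.rc), so the very same main sees:
// From vndservicemanager.rc: service vndservicemanager /vendor/bin/vndservicemanager /dev/vndbinder
// Inside main(): argc == 2
//                argv[0] == "/vendor/bin/vndservicemanager"
//                argv[1] == "/dev/vndbinder"   -> driver = "/dev/vndbinder"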
2.2 initWithDriver
We already know the rough flow of binder initialization: binder_open, establishing the mmap memory mapping, and binder_loop.
frameworks/native/libs/binder/ProcessState.cpp
sp<ProcessState> ProcessState::initWithDriver(const char* driver)
{
    Mutex::Autolock _l(gProcessMutex);
    if (gProcess != nullptr) {
        // Allow for initWithDriver to be called repeatedly with the same
        // driver.
        // If already initialized and the requested driver name matches, return the existing singleton
        if (!strcmp(gProcess->getDriverName().c_str(), driver)) {
            return gProcess;
        }
        // Otherwise abort
        LOG_ALWAYS_FATAL("ProcessState was already initialized.");
    }
    // Check that the requested driver exists and is readable
    if (access(driver, R_OK) == -1) {
        ALOGE("Binder driver %s is unavailable. Using /dev/binder instead.", driver);
        // Fall back to the default binder driver
        driver = "/dev/binder";
    }
    gProcess = new ProcessState(driver);
    return gProcess;
}
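The driver is actually opened and mapped in ProcessState's constructor. A simplified sketch of what it does (abridged from the AOSP source; member names and error handling vary across versions):
ProcessState::ProcessState(const char* driver)
    : mDriverName(String8(driver))
    , mDriverFD(open_driver(driver))  // open(driver, O_RDWR | O_CLOEXEC), check BINDER_VERSION, set max threads
{
    if (mDriverFD >= 0) {
        // Map ~1MB (minus two guard pages) of driver-managed buffer space, read-only
        // for userspace; the kernel copies transaction payloads straight into it
        mVMStart = mmap(nullptr, BINDER_VM_SIZE, PROT_READ,
                        MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0);
        if (mVMStart == MAP_FAILED) {
            close(mDriverFD);
            mDriverFD = -1;
        }
    }
}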
2.3 becomeContextManager
Next, ProcessState's becomeContextManager is called to register this process as the Binder driver's context manager.
bool ProcessState::becomeContextManager(context_check_func checkFunc, void* userData)
{
AutoMutex _l(mLock);
mBinderContextCheckFunc = checkFunc;
mBinderContextUserData = userData;
flat_binder_object obj {
.flags = FLAT_BINDER_FLAG_TXN_SECURITY_CTX,
};
int result = ioctl(mDriverFD, BINDER_SET_CONTEXT_MGR_EXT, &obj);
// fallback to original method
if (result != 0) {
android_errorWriteLog(0x534e4554, "121035042");
int dummy = 0;
result = ioctl(mDriverFD, BINDER_SET_CONTEXT_MGR, &dummy);
}
if (result == -1) {
mBinderContextCheckFunc = nullptr;
mBinderContextUserData = nullptr;
ALOGE("Binder ioctl to become context manager failed: %s\n", strerror(errno));
}
return result == 0;
}
As you can see, this calls ioctl with the command code BINDER_SET_CONTEXT_MGR_EXT, falling back to the plain BINDER_SET_CONTEXT_MGR if the kernel doesn't support the extended variant.
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
...
switch (cmd) {
...
case BINDER_SET_CONTEXT_MGR_EXT: {
struct flat_binder_object fbo;
if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
ret = -EINVAL;
goto err;
}
ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
if (ret)
goto err;
break;
}
...
static int binder_ioctl_set_ctx_mgr(struct file *filp,
                                    struct flat_binder_object *fbo)
{
    int ret = 0;
    struct binder_proc *proc = filp->private_data;
    struct binder_context *context = proc->context;
    struct binder_node *new_node;
    kuid_t curr_euid = current_euid();
    mutex_lock(&context->context_mgr_node_lock);
    // The binder context manager can only be set once
    if (context->binder_context_mgr_node) {
        pr_err("BINDER_SET_CONTEXT_MGR already set\n");
        ret = -EBUSY;
        goto out;
    }
    // Check whether the calling process is allowed to set the context manager
    ret = security_binder_set_context_mgr(proc->tsk);
    if (ret < 0)
        goto out;
    // i.e. context->binder_context_mgr_uid != -1
    if (uid_valid(context->binder_context_mgr_uid)) {
        if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
            pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
                   from_kuid(&init_user_ns, curr_euid),
                   from_kuid(&init_user_ns,
                             context->binder_context_mgr_uid));
            ret = -EPERM;
            goto out;
        }
    } else {
        // Record the uid of the process that owns the context manager
        context->binder_context_mgr_uid = curr_euid;
    }
    // Create a new binder node
    new_node = binder_new_node(proc, fbo);
    if (!new_node) {
        ret = -ENOMEM;
        goto out;
    }
    binder_node_lock(new_node);
    new_node->local_weak_refs++;
    new_node->local_strong_refs++;
    new_node->has_strong_ref = 1;
    new_node->has_weak_ref = 1;
    // Install the node as the binder driver's context manager node
    context->binder_context_mgr_node = new_node;
    binder_node_unlock(new_node);
    binder_put_node(new_node);
out:
    mutex_unlock(&context->context_mgr_node_lock);
    return ret;
}
This first checks whether a context manager has already been set, then creates a new binder node via binder_new_node and installs it as the context manager node.
2.4 BinderCallback
As we saw, main sets up a BinderCallback:
class BinderCallback : public LooperCallback {
public:
    static sp<BinderCallback> setupTo(const sp<Looper>& looper) {
        sp<BinderCallback> cb = new BinderCallback;
        int binder_fd = -1;
        // Queue a BC_ENTER_LOOPER command for the binder driver and obtain the binder device's fd
        IPCThreadState::self()->setupPolling(&binder_fd);
        LOG_ALWAYS_FATAL_IF(binder_fd < 0, "Failed to setupPolling: %d", binder_fd);
        // Flush after setupPolling(), to make sure the binder driver
        // knows about this thread handling commands.
        IPCThreadState::self()->flushCommands();
        // Watch the binder file descriptor
        int ret = looper->addFd(binder_fd,
                                Looper::POLL_CALLBACK,
                                Looper::EVENT_INPUT,
                                cb,
                                nullptr /*data*/);
        LOG_ALWAYS_FATAL_IF(ret != 1, "Failed to add binder FD to Looper");
        return cb;
    }
    int handleEvent(int /* fd */, int /* events */, void* /* data */) override {
        IPCThreadState::self()->handlePolledCommands();
        return 1; // Continue receiving callbacks.
    }
};
int IPCThreadState::setupPolling(int* fd)
{
    if (mProcess->mDriverFD < 0) {
        return -EBADF;
    }
    // Queue the BC_ENTER_LOOPER binder command
    mOut.writeInt32(BC_ENTER_LOOPER);
    // Hand back the binder driver's file descriptor
    *fd = mProcess->mDriverFD;
    return 0;
}
As you can see, BinderCallback::setupTo first queues a BC_ENTER_LOOPER command for the binder driver (flushed out by flushCommands) and obtains the binder driver's file descriptor, then calls looper->addFd to watch that descriptor; incoming binder driver messages are received and handled in handleEvent.
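Looper is, at its core, a wrapper around epoll. A conceptually equivalent raw-epoll loop for what SMgr sets up here might look like this (illustrative sketch only; the real Looper additionally manages wake fds, callbacks and timeouts):
#include <sys/epoll.h>
// Illustrative sketch, not the real Looper implementation:
int epfd = epoll_create1(EPOLL_CLOEXEC);
struct epoll_event ev = {};
ev.events = EPOLLIN;          // wake up when the binder fd becomes readable
ev.data.fd = binder_fd;
epoll_ctl(epfd, EPOLL_CTL_ADD, binder_fd, &ev);
for (;;) {
    struct epoll_event out;
    if (epoll_wait(epfd, &out, 1, -1) > 0) {             // blocks, like pollAll(-1)
        IPCThreadState::self()->handlePolledCommands();  // == BinderCallback::handleEvent
    }
}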
2.5 handleEvent
int handleEvent(int /* fd */, int /* events */, void* /* data */) override {
    IPCThreadState::self()->handlePolledCommands();
    return 1; // Continue receiving callbacks.
}
status_t IPCThreadState::handlePolledCommands()
{
    status_t result;
    // Keep looping while there is unconsumed data in the read buffer
    do {
        result = getAndExecuteCommand();
    } while (mIn.dataPosition() < mIn.dataSize());
    // Once all commands have been processed, handle pending BR_DECREFS and BR_RELEASE
    processPendingDerefs();
    flushCommands();
    return result;
}
BinderCallback overrides handleEvent, which delegates to IPCThreadState's handlePolledCommands method.
2.6 getAndExecuteCommand
Let's see how the data is actually processed:
status_t IPCThreadState::getAndExecuteCommand()
{
    status_t result;
    int32_t cmd;
    // Read from / write to the binder driver
    result = talkWithDriver();
    if (result >= NO_ERROR) {
        size_t IN = mIn.dataAvail();
        if (IN < sizeof(int32_t)) return result;
        // Read the BR response code
        cmd = mIn.readInt32();
        ...
        result = executeCommand(cmd);
        ...
    }
    return result;
}
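talkWithDriver, which appears here and again in waitForResponse later, boils down to a single BINDER_WRITE_READ ioctl that flushes our queued BC_* commands and fills the read buffer with BR_* responses. A heavily abridged sketch (buffer bookkeeping and error handling omitted; see IPCThreadState.cpp for the real thing):
status_t IPCThreadState::talkWithDriver(bool doReceive)
{
    binder_write_read bwr;
    // Outgoing commands queued in mOut (e.g. BC_ENTER_LOOPER, BC_REPLY)
    bwr.write_size = mOut.dataSize();
    bwr.write_buffer = (uintptr_t)mOut.data();
    // Room for incoming responses (e.g. BR_TRANSACTION) in mIn
    bwr.read_size = doReceive ? mIn.dataCapacity() : 0;
    bwr.read_buffer = (uintptr_t)mIn.data();
    // A single syscall drives both directions of the protocol
    if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) < 0)
        return -errno;
    // ...consume bwr.write_consumed / bwr.read_consumed and adjust mOut / mIn...
    return NO_ERROR;
}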
2.7 executeCommand
status_t IPCThreadState::executeCommand(int32_t cmd)
{
    BBinder* obj;
    RefBase::weakref_type* refs;
    status_t result = NO_ERROR;
    switch ((uint32_t)cmd) {
    ...
    case BR_TRANSACTION_SEC_CTX:
    case BR_TRANSACTION:
        {
            binder_transaction_data_secctx tr_secctx;
            binder_transaction_data& tr = tr_secctx.transaction_data;
            if (cmd == (int) BR_TRANSACTION_SEC_CTX) {
                result = mIn.read(&tr_secctx, sizeof(tr_secctx));
            } else {
                result = mIn.read(&tr, sizeof(tr));
                tr_secctx.secctx = 0;
            }
            ALOG_ASSERT(result == NO_ERROR,
                "Not enough command data for brTRANSACTION");
            if (result != NO_ERROR) break;
            // Read the transaction data into a Parcel
            Parcel buffer;
            buffer.ipcSetDataReference(
                reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                tr.data_size,
                reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                tr.offsets_size/sizeof(binder_size_t), freeBuffer, this);
            ...
            Parcel reply;
            status_t error;
            // For ServiceManager's binder node, target.ptr is null
            if (tr.target.ptr) {
                // We only have a weak reference on the target object, so we must first try to
                // safely acquire a strong reference before doing anything else with it.
                // For other binder servers, tr.cookie is the pointer to the local BBinder object
                if (reinterpret_cast<RefBase::weakref_type*>(
                        tr.target.ptr)->attemptIncStrong(this)) {
                    error = reinterpret_cast<BBinder*>(tr.cookie)->transact(tr.code, buffer,
                            &reply, tr.flags);
                    reinterpret_cast<BBinder*>(tr.cookie)->decStrong(this);
                } else {
                    error = UNKNOWN_TRANSACTION;
                }
            } else {
                error = the_context_object->transact(tr.code, buffer, &reply, tr.flags);
            }
            //ALOGI("<<<< TRANSACT from pid %d restore pid %d sid %s uid %d\n",
            //    mCallingPid, origPid, (origSid ? origSid : "<N/A>"), origUid);
            if ((tr.flags & TF_ONE_WAY) == 0) {
                // Non-TF_ONE_WAY transactions require a reply
                sendReply(reply, 0);
            } else {
                ...
                // TF_ONE_WAY transactions need no reply; only some logging happens here
            }
        }
        break;
    ...
    if (result != NO_ERROR) {
        mLastError = result;
    }
    return result;
}
2.8 BR_TRANSACTION
The BR_TRANSACTION case handles data arriving from the client: it first reads a binder_transaction_data from the read buffer, then instantiates a Parcel and pulls the actual payload out of the binder_transaction_data.
Next it locates the local BBinder object. For ServiceManager this is the ServiceManager object passed to setTheContextObject in main earlier; for other binder servers it is obtained through tr.cookie. Then BBinder's transact function is called:
status_t BBinder::transact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    // Make sure reading starts from the beginning of the data
    data.setDataPosition(0);
    if (reply != nullptr && (flags & FLAG_CLEAR_BUF)) {
        // Mark this Parcel so its memory is zeroed out on release (a security measure)
        reply->markSensitive();
    }
    status_t err = NO_ERROR;
    // The code is passed in from the binder client's request.
    // It is a contract between client and server,
    // identifying which operation the client is asking the server to perform.
    switch (code) {
    ...
    default:
        err = onTransact(code, data, reply, flags);
        break;
    }
    // In case this is being transacted on in the same process.
    if (reply != nullptr) {
        // Reset the data position to 0 so subsequent reads start from the beginning
        reply->setDataPosition(0);
        if (reply->dataSize() > LOG_REPLIES_OVER_SIZE) {
            ALOGW("Large reply transaction of %zu bytes, interface descriptor %s, code %d",
                  reply->dataSize(), String8(getInterfaceDescriptor()).c_str(), code);
        }
    }
    return err;
}
2.9 onTransact
transact mainly calls onTransact, which subclasses can override. ServiceManager inherits from BnServiceManager, and BnServiceManager overrides onTransact:
ServiceManager -> BnServiceManager -> BnInterface<IServiceManager> -> IServiceManager & BBinder
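For reference, the BnInterface template looks roughly like this (simplified from libbinder's IInterface.h), which is why BnServiceManager is simultaneously an IServiceManager and a BBinder:
// Simplified sketch of libbinder's BnInterface template:
template<typename INTERFACE>
class BnInterface : public INTERFACE, public BBinder
{
public:
    virtual sp<IInterface>  queryLocalInterface(const String16& _descriptor);
    virtual const String16& getInterfaceDescriptor() const;
protected:
    typedef INTERFACE       BaseInterface;
    virtual IBinder*        onAsBinder();
};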
::android::status_t BnServiceManager::onTransact(uint32_t _aidl_code, const ::android::Parcel& _aidl_data, ::android::Parcel* _aidl_reply, uint32_t _aidl_flags) {
    ::android::status_t _aidl_ret_status = ::android::OK;
    switch (_aidl_code) {
    case BnServiceManager::TRANSACTION_getService: {
        // The name parameter
        ::std::string in_name;
        ::android::sp<::android::IBinder> _aidl_return;
        // Interface type check
        if (!(_aidl_data.checkInterface(this))) {
            _aidl_ret_status = ::android::BAD_TYPE;
            break;
        }
        // Read the name parameter
        _aidl_ret_status = _aidl_data.readUtf8FromUtf16(&in_name);
        if (((_aidl_ret_status) != (::android::OK))) {
            break;
        }
        // Make sure all the data has been consumed
        if (auto st = _aidl_data.enforceNoDataAvail();
            !st.isOk()) {
            _aidl_ret_status = st.writeToParcel(_aidl_reply);
            break;
        }
        // Call the real getService implementation
        ::android::binder::Status _aidl_status(getService(in_name, &_aidl_return));
        // Write the status into the reply
        _aidl_ret_status = _aidl_status.writeToParcel(_aidl_reply);
        if (((_aidl_ret_status) != (::android::OK))) {
            break;
        }
        if (!_aidl_status.isOk()) {
            break;
        }
        // Write the return value into the reply
        _aidl_ret_status = _aidl_reply->writeStrongBinder(_aidl_return);
        if (((_aidl_ret_status) != (::android::OK))) {
            break;
        }
    }
    break;
    ...
    }
    if (_aidl_ret_status == ::android::UNEXPECTED_NULL) {
        _aidl_ret_status = ::android::binder::Status::fromExceptionCode(::android::binder::Status::EX_NULL_POINTER).writeOverParcel(_aidl_reply);
    }
    return _aidl_ret_status;
}
Notice the order: the status is written into the reply first, and the return value second; the client-side proxy reads them back in exactly this order.
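For comparison, the matching read side in the AIDL-generated proxy looks roughly like this (heavily abridged sketch, not the verbatim generated code):
// Abridged sketch of the generated proxy method (error handling elided):
::android::binder::Status BpServiceManager::getService(const ::std::string& name, ::android::sp<::android::IBinder>* _aidl_return) {
    ::android::Parcel _aidl_data, _aidl_reply;
    ::android::binder::Status _aidl_status;
    _aidl_data.writeInterfaceToken(getInterfaceDescriptor());
    _aidl_data.writeUtf8AsUtf16(name);
    remote()->transact(BnServiceManager::TRANSACTION_getService, _aidl_data, &_aidl_reply, 0);
    _aidl_status.readFromParcel(_aidl_reply);              // 1) the status first
    if (!_aidl_status.isOk()) return _aidl_status;
    _aidl_reply.readNullableStrongBinder(_aidl_return);    // 2) then the returned binder
    return _aidl_status;
}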
3. getService
Status ServiceManager::getService(const std::string& name, sp<IBinder>* outBinder) {
    *outBinder = tryGetService(name, true);
    // returns ok regardless of result for legacy reasons
    return Status::ok();
}
sp<IBinder> ServiceManager::tryGetService(const std::string& name, bool startIfNotFound) {
    auto ctx = mAccess->getCallingContext();
    // The return value
    sp<IBinder> out;
    Service* service = nullptr;
    // Look the service up in the name-to-service map
    if (auto it = mNameToService.find(name); it != mNameToService.end()) {
        service = &(it->second);
        if (!service->allowIsolated) {
            uid_t appid = multiuser_get_app_id(ctx.uid);
            bool isIsolated = appid >= AID_ISOLATED_START && appid <= AID_ISOLATED_END;
            if (isIsolated) {
                return nullptr;
            }
        }
        // Point the return value at the service's binder object
        out = service->binder;
    }
    if (!mAccess->canFind(ctx, name)) {
        return nullptr;
    }
    if (!out && startIfNotFound) {
        tryStartService(name);
    }
    if (out) {
        // Setting this guarantee each time we hand out a binder ensures that the client-checking
        // loop knows about the event even if the client immediately drops the service
        service->guaranteeClient = true;
    }
    return out;
}
This returns the requested service's binder object, which then gets written into the reply.
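From a native client's point of view, the whole round trip above is triggered by the familiar two-liner (standard libbinder usage, shown here for illustration):
#include <binder/IServiceManager.h>
// Ends up in ServiceManager::getService above, delivered to SMgr
// as a BR_TRANSACTION carrying TRANSACTION_getService:
sp<IServiceManager> sm = defaultServiceManager();
sp<IBinder> binder = sm->getService(String16("activity"));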
3.1 waitForResponse
status_t IPCThreadState::sendReply(const Parcel& reply, uint32_t flags)
{
    status_t err;
    status_t statusBuffer;
    // Package the binder reply and write it into the write buffer
    err = writeTransactionData(BC_REPLY, flags, -1, 0, reply, &statusBuffer);
    if (err < NO_ERROR) return err;
    return waitForResponse(nullptr, nullptr);
}
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
    uint32_t cmd;
    int32_t err;
    while (1) {
        if ((err=talkWithDriver()) < NO_ERROR) break;
        err = mIn.errorCheck();
        if (err < NO_ERROR) break;
        if (mIn.dataAvail() == 0) continue;
        cmd = (uint32_t)mIn.readInt32();
        switch (cmd) {
        ...
        case BR_TRANSACTION_COMPLETE:
            // Both parameters are nullptr here, so jump straight to finish
            if (!reply && !acquireResult) goto finish;
            break;
        ...
        }
    }
finish:
    if (err != NO_ERROR) {
        if (acquireResult) *acquireResult = err;
        if (reply) reply->setError(err);
        mLastError = err;
        logExtendedError();
    }
    return err;
}
After sending the BC_REPLY command to the binder driver, we receive a BR_TRANSACTION_COMPLETE response. Since both arguments we passed to waitForResponse are nullptr, we jump straight out of the loop, ending this binder transaction.
The Looper keeps watching the binder driver's fd, waiting for the next binder message to arrive.
4. Summary
This wraps up the server-side binder communication flow. One detail worth highlighting: in executeCommand, the TF_ONE_WAY flag determines whether the caller expects a reply. A oneway call is fire-and-forget: the server handles the transaction and sends nothing back (only some logging happens).
The other scenario is when the client needs to wait for a response from the remote side, for instance asking SMgr for a service such as AMS and expecting its proxy object back; in that non-oneway case the server answers by calling sendReply().