Overview
I have studied the Binder system on and off three times before it really clicked. Below are the questions I set myself before studying Binder, together with the answers and conclusions I arrived at.
1. Does the caller thread ever take on binder-thread state?
This one is easy to answer, and the short answer is no. During a binder call, the only place that checks binder thread state is the binder_thread_read() function, and the condition it checks is:
wait_for_proc_work = thread->transaction_stack == NULL &&
list_empty(&thread->todo);
For a normal synchronous binder request from a client this condition never becomes true, because transaction_stack is not NULL while the call is outstanding, so the calling thread never takes the worker-thread wait path.
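For context, here is that wait logic in binder_thread_read(), trimmed down from the same generation of binder.c quoted elsewhere in this post (an illustrative excerpt, not the complete function; helper names differ slightly between kernel versions):

	wait_for_proc_work = thread->transaction_stack == NULL &&
				list_empty(&thread->todo);
	/* ... */
	if (wait_for_proc_work) {
		/* Idle worker thread: block on the process-wide work queue. */
		ret = wait_event_freezable_exclusive(proc->wait,
				binder_has_proc_work(proc, thread));
	} else {
		/* Caller in a synchronous transaction: block only on its own
		 * thread->wait until the reply (or targeted work) arrives. */
		ret = wait_event_freezable(thread->wait,
				binder_has_thread_work(thread));
	}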
2. How are passively-registered and actively-registered binder threads told apart?
The Binder driver asks the process to spawn a thread:
case BR_SPAWN_LOOPER:
mProcess->spawnPooledThread(false);
Once the new thread has started, it registers itself with the binder driver:
mOut.writeInt32(isMain ? BC_ENTER_LOOPER : BC_REGISTER_LOOPER);
So BC_ENTER_LOOPER vs. BC_REGISTER_LOOPER is what tells the driver whether the thread was started proactively by the process itself or spawned at the driver's request; the whole path is sketched below.
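Putting the two halves together, the spawn path looks roughly like this, condensed from AOSP's ProcessState.cpp and IPCThreadState.cpp (the class and function names are real, the bodies are abridged and helper names vary a little across releases):

// ProcessState.cpp (abridged): the driver's BR_SPAWN_LOOPER request ends up
// here with isMain == false; the app's own startThreadPool() passes true.
void ProcessState::spawnPooledThread(bool isMain)
{
    if (mThreadPoolStarted) {
        String8 name = makeBinderThreadName();
        sp<Thread> t = new PoolThread(isMain);
        t->run(name.string());
    }
}

// PoolThread just joins the thread pool, remembering how it was started.
bool PoolThread::threadLoop()
{
    IPCThreadState::self()->joinThreadPool(mIsMain);
    return false;
}

// IPCThreadState.cpp (abridged): this is where the distinction is encoded.
void IPCThreadState::joinThreadPool(bool isMain)
{
    // Main (actively started) thread  -> BC_ENTER_LOOPER
    // Driver-requested pool thread    -> BC_REGISTER_LOOPER
    mOut.writeInt32(isMain ? BC_ENTER_LOOPER : BC_REGISTER_LOOPER);
    // ... then loop in getAndExecuteCommand() until told to exit ...
}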
3. How is a binder thread shut down?
Through the BR_FINISHED command; so far I have not found anywhere in the binder driver that actually issues it.
4. What limits the size of transferred data?
It depends on the size Binder passes to mmap(); asynchronous transactions may use at most half of that buffer. When the driver fails to allocate a buffer, the framework checks the Parcel's size and throws an exception if it is too large (TransactionTooLargeException on the Java side). On the emulator the mapping is about 1 MB. A sketch of where that size comes from follows.
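The mapping size is fixed in ProcessState.cpp; the snippet below is condensed from AOSP (the exact BINDER_VM_SIZE expression has varied slightly across releases, so treat the numbers as approximate):

// ProcessState.cpp (abridged)
// Roughly 1 MB minus two guard pages. This is the whole per-process binder
// receive buffer; asynchronous transactions may only consume half of it.
#define BINDER_VM_SIZE ((1 * 1024 * 1024) - sysconf(_SC_PAGE_SIZE) * 2)

ProcessState::ProcessState(const char* driver)
    : mDriverFD(open_driver(driver))
{
    if (mDriverFD >= 0) {
        // Map the driver's receive buffer into our address space; the driver
        // allocates every incoming transaction out of this region.
        mVMStart = mmap(0, BINDER_VM_SIZE, PROT_READ,
                        MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0);
    }
}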
5. If the caller and the service live in the same process, does the call still go cross-process?
if (ref->node->proc == target_proc) {
	if (fp->hdr.type == BINDER_TYPE_HANDLE)
		fp->hdr.type = BINDER_TYPE_BINDER;
	else
		fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
	fp->binder = ref->node->ptr;
	fp->cookie = ref->node->cookie;
	binder_inc_node(ref->node, fp->hdr.type == BINDER_TYPE_BINDER, 0, NULL);
	trace_binder_transaction_ref_to_node(t, ref);
	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "        ref %d desc %d -> node %d u%016llx\n",
		     ref->debug_id, ref->desc, ref->node->debug_id,
		     (u64)ref->node->ptr);
}
Looking the service up is always a cross-process call, because ServiceManager has to resolve the descriptor to a binder object. But when ServiceManager's reply passes back through the binder driver, the driver notices (in the code above) that the binder object being returned lives in the same process as the requesting client, so instead of a handle it returns the information stored in the binder_node, i.e. the BBinder itself. The client then sees that what came back is a BBinder, in other words the service itself, and from then on calls become direct in-process calls into the service's methods. The user-space side of this is sketched below.
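On the user-space side this shows up in Parcel's unflatten_binder(): a BINDER_TYPE_BINDER object is turned straight back into the local IBinder pointer, while a BINDER_TYPE_HANDLE becomes a BpBinder proxy. Condensed from AOSP's Parcel.cpp (older releases use flat->type instead of flat->hdr.type):

// Parcel.cpp (abridged)
status_t unflatten_binder(const sp<ProcessState>& proc,
                          const Parcel& in, sp<IBinder>* out)
{
    const flat_binder_object* flat = in.readObject(false);
    if (flat) {
        switch (flat->hdr.type) {
        case BINDER_TYPE_BINDER:
            // Same-process case: the cookie is the local BBinder's address,
            // so the caller ends up invoking the service directly.
            *out = reinterpret_cast<IBinder*>(flat->cookie);
            return finish_unflatten_binder(NULL, *flat, in);
        case BINDER_TYPE_HANDLE:
            // Cross-process case: look up / create a BpBinder proxy.
            *out = proc->getStrongProxyForHandle(flat->handle);
            return finish_unflatten_binder(
                static_cast<BpBinder*>(out->get()), *flat, in);
        }
    }
    return BAD_TYPE;
}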
6. How do callingUid and callingPid work?
They are set when the thread receives a BR_TRANSACTION:
case BR_TRANSACTION:
    mCallingPid = tr.sender_pid;
    mCallingUid = tr.sender_euid;

int64_t IPCThreadState::clearCallingIdentity()
{
    int64_t token = ((int64_t)mCallingUid << 32) | mCallingPid;
    clearCaller();
    return token;
}

void IPCThreadState::clearCaller()
{
    mCallingPid = getpid();
    mCallingUid = getuid();
}
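A typical use, as a hedged sketch: a service that needs to act on its own behalf rather than the caller's temporarily clears the calling identity and restores it afterwards. The function name doPrivilegedWork is made up for illustration; getCallingUid(), getCallingPid(), clearCallingIdentity() and restoreCallingIdentity() are the real IPCThreadState APIs:

#include <binder/IPCThreadState.h>

using android::IPCThreadState;

// Hypothetical helper, only to illustrate the calling-identity API.
static void doPrivilegedWork()
{
    IPCThreadState* ipc = IPCThreadState::self();

    // Who is calling us? These come from tr.sender_pid / tr.sender_euid above.
    uid_t callerUid = ipc->getCallingUid();
    pid_t callerPid = ipc->getCallingPid();
    (void)callerUid; (void)callerPid;   // e.g. feed these into a permission check

    // Temporarily become "ourselves" for a nested outgoing binder call,
    // then restore the original caller's identity.
    int64_t token = ipc->clearCallingIdentity();
    // ... call into another service here; it will see our own uid/pid ...
    ipc->restoreCallingIdentity(token);
}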
7. How does binder achieve what the hwbinder (HIDL) documentation describes: disabling preemption so that a low-priority thread holding the lock does not hold up high-priority threads?
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	struct binder_context *context = proc->context;
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;

	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
			proc->pid, current->pid, cmd, arg);*/

	trace_binder_ioctl(cmd, arg);

	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;

	binder_lock(context, __func__);
	/* ... rest of binder_ioctl ... */

where binder_lock() is:

static inline void binder_lock(struct binder_context *context, const char *tag)
{
	trace_binder_lock(tag);
	mutex_lock(&context->binder_main_lock);
	preempt_disable();
	trace_binder_locked(tag);
}
binder_lock() calls preempt_disable(), so preemption stays switched off for as long as the single global binder_main_lock is held; the matching binder_unlock() re-enables it. This coarse, preemption-disabled global lock is exactly what the newer binder work replaces with fine-grained locking; see the HIDL binder IPC documentation at https://source.android.com/devices/architecture/hidl/binder-ipc. The unlock side is sketched below.
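For completeness, the matching unlock, written as a sketch mirroring the binder_lock() shown above (the exact signature and ordering differ between kernel trees; some variants take no context argument):

static inline void binder_unlock(struct binder_context *context, const char *tag)
{
	trace_binder_unlock(tag);
	mutex_unlock(&context->binder_main_lock);
	preempt_enable();
}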
8. How to find both ends of an IPC in a bugreport or trace
Analyzing the Binder logs
1. The failed-transaction log
------ BINDER FAILED TRANSACTION LOG (/sys/kernel/debug/binder/failed_transaction_log) ------
0: async from 3439:4795 to 0:0 node 4874126 handle 1373 size 180:0
0: async from 3439:6537 to 0:0 node 6147477 handle 908 size 180:0
0: call from 2956:2956 to 0:0 node 5786939 handle 16 size 60:0
------ 0.002s was the duration of 'BINDER FAILED TRANSACTION LOG' ------
These entries are produced by:
static int binder_failed_transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_device *device;
	struct binder_context *context;

	hlist_for_each_entry(device, &binder_devices, hlist) {
		context = &device->context;
		print_binder_transaction_log(m,
					     &context->transaction_log_failed);
	}
	return 0;
}

which walks the ring buffer of log entries and prints each one with:

seq_printf(m,
	   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d\n",
	   e->debug_id, (e->call_type == 2) ? "reply" :
	   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
	   e->from_thread, e->to_proc, e->to_thread, e->context_name,
	   e->to_node, e->target_handle, e->data_size, e->offsets_size);
Match each field against the format string above (the underlying log-entry structure is sketched right after this list):
1. debug_id: assigned by the driver when the transaction is created (a globally increasing counter), used for debugging
2. call_type: one of three types: reply, async, or call
3. from_proc: the pid of the process that issued the request
4. from_thread: the tid of the thread that issued the request
5. to_proc: the target process; note that a target of 0 means the target was already dead
6. to_thread: the target thread (0 if none was selected)
7. context_name: set when the binder device is initialized, used to distinguish multiple binder devices
8. to_node: the debug_id assigned when the target binder_node was created (globally increasing)
9. target_handle: the handle of the binder_ref in the sending process that points at the target binder_node
10. data_size: the size of the transferred data
11. offsets_size: the size of the transferred binder objects (the offsets array)
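Each entry in /sys/kernel/debug/binder/transaction_log and failed_transaction_log is one of these records, from the same generation of binder.c (newer kernels add a few fields such as return-error codes, so treat the exact layout as version-dependent):

struct binder_transaction_log_entry {
	int debug_id;
	int call_type;        /* 0 = call, 1 = async, 2 = reply */
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	const char *context_name;
};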
2. The transaction log
------ BINDER TRANSACTION LOG (/sys/kernel/debug/binder/transaction_log) ------
9957426: async from 26100:26100 to 583:0 node 4545956 handle 13 size 80:0
9957427: reply from 3439:3467 to 5578:5579 node 0 handle -1 size 4:0
9957428: async from 25126:25126 to 583:0 node 8415269 handle 31 size 80:0
9957429: async from 25126:25126 to 583:0 node 8415269 handle 31 size 80:0
------ 0.002s was the duration of 'BINDER TRANSACTION LOG' ------
These entries are printed by the same seq_printf() shown above for the failed-transaction log, so the fields have exactly the same meaning.
3. Per-process buffer information
binder transactions:
proc 32029
  buffer 8639543: 0000000000000000 size 8:0 delivered
  buffer 8637220: 0000000000000000 size 40:0 delivered
  buffer 8640497: 0000000000000000 size 8:0 delivered
  buffer 8636273: 0000000000000000 size 140:0 delivered
  buffer 8647323: 0000000000000000 size 140:0 delivered
  buffer 8685479: 0000000000000000 size 140:0 delivered
  buffer 8638119: 0000000000000000 size 140:0 delivered
  buffer 8638034: 0000000000000000 size 140:0 delivered
  buffer 8638614: 0000000000000000 size 140:0 delivered
  buffer 8644089: 0000000000000000 size 140:0 delivered
  buffer 8642092: 0000000000000000 size 448:0 delivered
  buffer 8639383: 0000000000000000 size 760:0 delivered
proc 28230
  buffer 8365750: 0000000000000000 size 36:8 delivered
  buffer 8432356: 0000000000000000 size 4:0 delivered
static void print_binder_buffer(struct seq_file *m, const char *prefix,
				struct binder_buffer *buffer)
{
	seq_printf(m, "%s %d: %p size %zd:%zd %s\n",
		   prefix, buffer->debug_id, buffer->data,
		   buffer->data_size, buffer->offsets_size,
		   buffer->transaction ? "active" : "delivered");
}
1. prefix: the string "  buffer" passed in by the caller
2. buffer->debug_id: globally increasing, assigned when the transaction was created, so it also tells you roughly when the buffer was allocated
3. data: the address of the data buffer (printed as 0 once it has been released)
4. data_size: the size of the data
5. offsets_size: the size of the binder objects (the offsets array)
6. active/delivered: "active" means the owning transaction is still in flight; "delivered" means it has been handed to the receiving process, which is responsible for freeing it with BC_FREE_BUFFER
4. Per-command statistics
This part is long, so the meanings are annotated inline. Everything below is a count of how many times each command was executed: BC_* commands are written from user space into the kernel, BR_* commands are returned from the kernel to user space. After the global counters there is a per-process block with the same counters plus a few summary fields; how that block is printed is sketched after the listing.
------ BINDER STATS (/sys/kernel/debug/binder/stats) ------
binder stats:
BC_TRANSACTION: 6032383    (ask the driver to carry out a transaction)
BC_REPLY: 3721603    (send the result of a transaction back)
BC_FREE_BUFFER: 9752109    (ask the driver to free a receive buffer)
BC_INCREFS: 110317    (increase a binder_ref's weak reference count)
BC_ACQUIRE: 110764    (increase a binder_ref's strong reference count)
BC_RELEASE: 98966    (decrease a binder_ref's strong reference count)
BC_DECREFS: 98619    (decrease a binder_ref's weak reference count)
BC_INCREFS_DONE: 92551    (the weak reference on the BBinder has been taken)
BC_ACQUIRE_DONE: 92600    (the strong reference on the BBinder has been taken)
BC_REGISTER_LOOPER: 820    (a thread spawned at the driver's request reports that it has started)
BC_ENTER_LOOPER: 432    (an actively created thread reports that it has entered the loop)
BC_REQUEST_DEATH_NOTIFICATION: 38144    (death-notification registrations; note that a process registers at most one death notification per service node with the driver)
BC_CLEAR_DEATH_NOTIFICATION: 36585    (requests to clear a death notification)
BC_DEAD_BINDER_DONE: 1001    (user space has finished handling a dead-binder notification)
BR_TRANSACTION: 6032245    (transactions delivered to a target process)
BR_REPLY: 3721589    (replies delivered back to callers)
BR_DEAD_REPLY: 96    (the target was already dead when the command was handled)
BR_TRANSACTION_COMPLETE: 9753885    (acknowledgement that a BC_TRANSACTION/BC_REPLY was accepted)
BR_INCREFS: 92563    (ask the server to take a weak reference on its BBinder)
BR_ACQUIRE: 92612    (ask the server to take a strong reference on its BBinder)
BR_RELEASE: 80654    (ask the server to drop a strong reference on its BBinder)
BR_DECREFS: 80597    (ask the server to drop a weak reference on its BBinder)
BR_SPAWN_LOOPER: 830    (ask the process to spawn another binder thread)
BR_DEAD_BINDER: 1193    (death notifications delivered)
BR_CLEAR_DEATH_NOTIFICATION_DONE: 36585    (clearing of a death notification completed)
BR_FAILED_REPLY: 4    (the transaction failed and an error was returned to the caller)
Then come counts of the driver's bookkeeping objects; for each one, "active" is how many currently exist and "total" is how many were ever created:
proc: active 115 total 416    (struct binder_proc: 115 live processes, 416 created in total)
thread: active 1507 total 24563    (struct binder_thread)
node: active 3848 total 92571    (struct binder_node)
ref: active 5445 total 111002    (struct binder_ref)
death: active 925 total 38144    (struct binder_ref_death)
transaction: active 1 total 9753887    (struct binder_transaction)
transaction_complete: active 1 total 9753887    (the binder_work items used for TRANSACTION_COMPLETE)
proc 4274
threads: 5
requested threads: 0+2/15
ready threads 3
free async space 520192
nodes: 2
refs: 7 s 7 w 7
buffers: 0
pending transactions: 0
BC_TRANSACTION: 25
BC_REPLY: 4
BC_FREE_BUFFER: 36
BC_INCREFS: 9
BC_ACQUIRE: 9
BC_RELEASE: 2
BC_DECREFS: 2
BC_INCREFS_DONE: 3
BC_ACQUIRE_DONE: 3
BC_REGISTER_LOOPER: 2
BC_ENTER_LOOPER: 1
BR_TRANSACTION: 15
BR_REPLY: 21
BR_TRANSACTION_COMPLETE: 29
BR_INCREFS: 3
BR_ACQUIRE: 3
BR_RELEASE: 1
BR_DECREFS: 1
BR_SPAWN_LOOPER: 2
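The per-process block above is printed by print_binder_proc_stats(); a trimmed excerpt from the same kernel generation (abridged) shows where the header fields come from:

static void print_binder_proc_stats(struct seq_file *m, struct binder_proc *proc)
{
	struct rb_node *n;
	int count = 0;

	seq_printf(m, "proc %d\n", proc->pid);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;
	seq_printf(m, "  threads: %d\n", count);
	seq_printf(m, "  requested threads: %d+%d/%d\n"
		   "  ready threads %d\n"
		   "  free async space %zd\n",
		   proc->requested_threads,          /* spawn requests not yet answered */
		   proc->requested_threads_started,  /* threads started via BR_SPAWN_LOOPER */
		   proc->max_threads,                /* limit set with BINDER_SET_MAX_THREADS */
		   proc->ready_threads,              /* threads currently waiting for work */
		   proc->free_async_space);          /* remaining async buffer budget */
	/* ... followed by node/ref/buffer/pending counts and the per-process
	 *     BC and BR counters shown above ... */
}

Reading the example above: "requested threads: 0+2/15" means no spawn request is currently outstanding, 2 pool threads have been started at the driver's request, and at most 15 driver-requested threads are allowed. "free async space 520192" is exactly half of the roughly 1 MB mapping (1 MB minus two pages, divided by two), which matches the async limit described in question 4.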
5. Binder state (the most important part)
------ BINDER STATE (/sys/kernel/debug/binder/state) ------
binder state:
dead nodes:
node 9617265: u00000000e5483d80 c00000000e548c8c0 hs 1 hw 1 ls 0 lw 0 is 1 iw 1 proc 3439
node 9616994: u00000000e2b648c0 c00000000dbd884c0 hs 1 hw 1 ls 0 lw 0 is 1 iw 1 proc 3439
node 9618129: u00000000c7fbba60 c00000000c63ff360 hs 1 hw 1 ls 0 lw 0 is 1 iw 1 proc 3439
node 9618112: u00000000c7fbb820 c00000000e548cae0 hs 1 hw 1 ls 0 lw 0 is 1 iw 1 proc 3439
node 9618070: u00000000c7fbb750 c00000000c63ff1a0 hs 1 hw 1 ls 0 lw 0 is 1 iw 1 proc 3439
node 9617712: u00000000c7cf53c0 c00000000c75dd0c0 hs 1 hw 1 ls 0 lw 0 is 1 iw 1 proc 11575
node 9620151: u00000000c56780e0 c00000000c56faf60 hs 1 hw 1 ls 0 lw 0 is 1 iw 1 proc 3439
node 9624600: u00000000dc699420 c00000000cbd583c0 hs 1 hw 1 ls 0 lw 0 is 1 iw 1 proc 3439
node 9661453: u00000000c8153470 c00000000c8b64300 hs 1 hw 1 ls 0 lw 0 is 1 iw 1 proc 3439
node 7721847: u00000000dbe2e1b0 c00000000dbd88900 hs 1 hw 1 ls 0 lw 0 is 1 iw 1 proc 5306
node 8634598: u00000000dc696fa0 c00000000ca6cc140 hs 1 hw 1 ls 0 lw 0 is 1 iw 1 proc 3439
node 8445373: u00000000c78846b0 c00000000c6ebc420 hs 1 hw 1 ls 0 lw 0 is 1 iw 1 proc 3439
node 8000150: u00000000dbe55840 c00000000e55e9660 hs 1 hw 1 ls 0 lw 0 is 1 iw 1 proc 23421
node 7464302: u0000007f867100a0 c0000007f866fb880 hs 1 hw 1 ls 0 lw 0 is 1 iw 1 proc 3439
node 7937014: u00000000dbe7db80 c00000000c7498fc0 hs 1 hw 1 ls 0 lw 0 is 1 iw 1 proc 11575
node 7939303: u00000000c337dbb0 c00000000c7ff2b60 hs 1 hw 1 ls 0 lw 0 is 1 iw 1 proc 3439
node 7910736: u00000000ca443fe0 c00000000e2b65580 hs 1 hw 1 ls 0 lw 0 is 1 iw 1 proc 3439
node 7988204: u00000000dc698ea0 c00000000dbe7efc0 hs 1 hw 1 ls 0 lw 0 is 1 iw 1 proc 3439
node 7941080: u00000000c81e2080 c00000000c81da1c0 hs 1 hw 1 ls 0 lw 0 is 1 iw 1 proc 11575
When a binder process dies or closes the driver while some of its binder_nodes are still referenced by other processes, those binder_nodes are linked onto the context's context->binder_dead_nodes list; what is printed here is the information of the nodes on that list. (The case of a process exiting while its nodes are still referenced deserves closer study; a trimmed excerpt of the release path follows.)
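The relevant release path, abridged from binder_node_release() in the same kernel generation (mainline hangs the node on a global binder_dead_nodes list; the multi-device variant quoted in this post keeps it on the context):

static int binder_node_release(struct binder_node *node, int refs)
{
	list_del_init(&node->work.entry);
	binder_release_work(&node->async_todo);

	if (hlist_empty(&node->refs)) {
		/* No binder_ref points at this node any more: free it right away. */
		kfree(node);
		binder_stats_deleted(BINDER_STAT_NODE);
		return refs;
	}

	/* Other processes still hold binder_refs to the node: detach it from the
	 * dying process and park it on the dead-nodes list dumped above. */
	node->proc = NULL;
	node->local_strong_refs = 0;
	node->local_weak_refs = 0;
	hlist_add_head(&node->dead_node, &binder_dead_nodes);

	/* ... then walk node->refs and queue BINDER_WORK_DEAD_BINDER for every
	 * ref that registered a death notification ... */
	return refs;
}

Each node line itself comes from print_binder_node():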
seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
	   node->debug_id, (u64)node->ptr, (u64)node->cookie,
	   node->has_strong_ref, node->has_weak_ref,
	   node->local_strong_refs, node->local_weak_refs,
	   node->internal_strong_refs, count);
1. debug_id: assigned when the node is created
2. ptr: the address of the weak-reference object of the server-side BBinder
3. cookie: the address of the BBinder itself (the strong reference)
4. hs (has_strong_ref): the driver currently holds a strong reference on the server-side BBinder (BR_ACQUIRE has been issued for this node)
5. hw (has_weak_ref): the driver currently holds a weak reference on the server-side BBinder (BR_INCREFS has been issued)
6. ls (local_strong_refs): strong references on the binder_node held by the server process itself
7. lw (local_weak_refs): weak references on the binder_node held by the server process itself
8. is (internal_strong_refs): strong references held by other processes
9. iw: the number of binder_refs pointing at this node, which serves as the external weak-reference count
10. proc: the pid of each process whose binder_ref still points at this node
This section also contains per-process state information:
proc 4274
thread 4274: l 00
thread 4298: l 00
thread 4304: l 12
thread 4305: l 11
thread 4361: l 11
node 9903843: u0000007f97fe0420 c0000007f866af340 hs 1 hw 1 ls 0 lw 0 is 1 iw 1 proc 3439
node 9903868: u0000007f97fe04e0 c0000007f866af380 hs 1 hw 1 ls 0 lw 0 is 1 iw 1 proc 3439
ref 9903834: desc 0 node 1 s 1 w 1 d 0000000000000000
ref 9903839: desc 1 node 462361 s 1 w 1 d 0000000000000000
ref 9903848: desc 2 node 462338 s 1 w 1 d 0000000000000000
ref 9903849: desc 3 node 462589 s 1 w 1 d 0000000000000000
ref 9903850: desc 4 node 462654 s 1 w 1 d 0000000000000000
ref 9903854: desc 5 node 9903853 s 1 w 1 d 0000000000000000
ref 9903862: desc 6 node 460305 s 1 w 1 d 0000000000000000
The per-process part has three sections: thread info, node info, and ref info.
The thread info is printed as follows:
static void print_binder_thread(struct seq_file *m,
				struct binder_thread *thread,
				int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "  thread %d: l %02x\n", thread->pid, thread->looper);
	header_pos = m->count;
	t = thread->transaction_stack;
	while (t) {
		if (t->from == thread) {
			print_binder_transaction(m,
						 "    outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction(m,
						 "    incoming transaction", t);
			t = t->to_parent;
		} else {
			print_binder_transaction(m, "    bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work(m, "    ", "    pending transaction", w);
	}
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}
1. thread %d: the thread's tid
2. l %02x: the looper state, a bitmask of the flags below (for example, "l 11" = REGISTERED | WAITING and "l 12" = ENTERED | WAITING):
enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_NEED_RETURN = 0x20
};
If the thread has a transaction in progress, the following transaction information is printed as well:
static void print_binder_transaction(struct seq_file *m, const char *prefix,
				     struct binder_transaction *t)
{
	seq_printf(m,
		   "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
		   prefix, t->debug_id, t,
		   t->from ? t->from->proc->pid : 0,
		   t->from ? t->from->pid : 0,
		   t->to_proc ? t->to_proc->pid : 0,
		   t->to_thread ? t->to_thread->pid : 0,
		   t->code, t->flags, t->priority, t->need_reply);
	if (t->buffer == NULL) {
		seq_puts(m, " buffer free\n");
		return;
	}
	if (t->buffer->target_node)
		seq_printf(m, " node %d",
			   t->buffer->target_node->debug_id);
	seq_printf(m, " size %zd:%zd data %p\n",
		   t->buffer->data_size, t->buffer->offsets_size,
		   t->buffer->data);
}
Then comes the binder_work information. All of this is important; it is the evidence you use to match the two ends of a cross-process call. I will not walk through the final matching step here, but the key is to dig the evidence out of the transaction entries (the from pid:tid, to pid:tid, code and node fields). A trimmed copy of print_binder_work() follows for reference.
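The pending-transaction and other work lines come from print_binder_work(), abridged from the same binder.c generation (exact strings may differ slightly by kernel version):

static void print_binder_work(struct seq_file *m, const char *prefix,
			      const char *transaction_prefix,
			      struct binder_work *w)
{
	struct binder_node *node;
	struct binder_transaction *t;

	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
		/* A queued transaction: reuses the same printer shown above. */
		t = container_of(w, struct binder_transaction, work);
		print_binder_transaction(m, transaction_prefix, t);
		break;
	case BINDER_WORK_TRANSACTION_COMPLETE:
		seq_printf(m, "%stransaction complete\n", prefix);
		break;
	case BINDER_WORK_NODE:
		node = container_of(w, struct binder_node, work);
		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
			   prefix, node->debug_id,
			   (u64)node->ptr, (u64)node->cookie);
		break;
	case BINDER_WORK_DEAD_BINDER:
		seq_printf(m, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		seq_printf(m, "%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		seq_printf(m, "%shas cleared death notification\n", prefix);
		break;
	default:
		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
		break;
	}
}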
2. Node info: the same format as the node information described earlier, except that these are the nodes owned by this process.
3. Ref info:
static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
{
	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %p\n",
		   ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
		   ref->node->debug_id, ref->strong, ref->weak, ref->death);
}
1. debug_id: assigned when the binder_ref is created
2. desc: the descriptor, i.e. the handle number of this binder reference within the process
3. the "dead " marker: printed when the referenced node's owning process has died (node->proc == NULL)
4. node: the debug_id of the referenced binder_node
5. s: the strong reference count
6. w: the weak reference count
7. d: the address of the registered death-notification object (binder_ref_death), or 0 if none
Other log information will be covered in a later post.