我是靠谱客的博主 留胡子百褶裙,最近开发中收集的这篇文章主要介绍error page怎么解决_以htop为例 看怎么做Android iOS app cpu使用率监控,觉得挺不错的,现在分享给大家,希望可以做个参考。
概述
- 申明
- 源码
- 进程cpu使用率
- cpu核心使用率
- 总结
- app耗电监控之cpu使用率监控
linux/android 获取进程或系统cpu使用率,可以简单的读取/proc/pid/stat /proc/stat等文件快速搞定。一个cat命令搞定。
而mac os,却比较麻烦,几乎都是通过api编程方式,那是哪些api呢?
android app 和iOS app 怎么做cpu 耗电监控呢?
申明
本文说的全是错的。
源码
- htop 源码: https://github.com/hishamhm/htop.git
- apple 开源Darwin代码
获取单个进程的 utime 和 stime
//DarwinProcess.c (htop)
// Populate a DarwinProcess from a libproc PROC_PIDTASKINFO snapshot and
// accumulate this task's thread counts into the DarwinProcessList totals.
// percent_cpu is the delta of (system+user) time since the previous sample,
// scaled by the core count and divided by the global interval delta.
void DarwinProcess_setFromLibprocPidinfo(DarwinProcess *proc, DarwinProcessList *dpl) {
struct proc_taskinfo pti; // task-level detail struct filled by the kernel
//call bsd fill_taskprocinfo() to get info
if(sizeof(pti) == proc_pidinfo(proc->super.pid, PROC_PIDTASKINFO, 0, &pti, sizeof(pti))) { // query details for this pid
if(0 != proc->utime || 0 != proc->stime) {
// skip the very first sample (no previous values to diff against)
uint64_t diff = (pti.pti_total_system - proc->stime)
+ (pti.pti_total_user - proc->utime);
proc->super.percent_cpu = (double)diff * (double)dpl->super.cpuCount
/ ((double)dpl->global_diff * 100000.0);
// fprintf(stderr, "%f %llu %llu %llu %llu %llu\n", proc->super.percent_cpu,
// proc->stime, proc->utime, pti.pti_total_system, pti.pti_total_user, dpl->global_diff);
// exit(7);
}
proc->super.time = (pti.pti_total_system + pti.pti_total_user) / 10000000;
proc->super.nlwp = pti.pti_threadnum;
proc->super.m_size = pti.pti_virtual_size / 1024 / PAGE_SIZE_KB;
proc->super.m_resident = pti.pti_resident_size / 1024 / PAGE_SIZE_KB;
proc->super.majflt = pti.pti_faults;
proc->super.percent_mem = (double)pti.pti_resident_size * 100.0
/ (double)dpl->host_info.max_mem;
// remember cumulative stime and utime for the next sampling round
proc->stime = pti.pti_total_system;
proc->utime = pti.pti_total_user;
dpl->super.kernelThreads += 0; /*pti.pti_threads_system;*/
dpl->super.userlandThreads += pti.pti_threadnum; /*pti.pti_threads_user;*/
dpl->super.totalTasks += pti.pti_threadnum;
dpl->super.runningTasks += pti.pti_numrunning;
}
}
核心代码:
#define PROC_PIDTASKINFO 4
struct proc_taskinfo pti;
proc_pidinfo(proc->super.pid, PROC_PIDTASKINFO, 0, &pti, sizeof(pti));
//proc_info.h
// Userland-visible task statistics returned by
// proc_pidinfo(pid, PROC_PIDTASKINFO, ...). The field layout matches the
// kernel's struct proc_taskinfo_internal, which is copyout()'d verbatim.
struct proc_taskinfo {
uint64_t pti_virtual_size; /* virtual memory size (bytes) */
uint64_t pti_resident_size; /* resident memory size (bytes) */
// key members: cumulative user/system CPU time for the whole task
uint64_t pti_total_user; /* total time */
uint64_t pti_total_system;
uint64_t pti_threads_user; /* existing threads only */
uint64_t pti_threads_system;
int32_t pti_policy; /* default policy for new threads */
int32_t pti_faults; /* number of page faults */
int32_t pti_pageins; /* number of actual pageins */
int32_t pti_cow_faults; /* number of copy-on-write faults */
int32_t pti_messages_sent; /* number of messages sent */
int32_t pti_messages_received; /* number of messages received */
int32_t pti_syscalls_mach; /* number of mach system calls */
int32_t pti_syscalls_unix; /* number of unix system calls */
int32_t pti_csw; /* number of context switches */
int32_t pti_threadnum; /* number of threads in the task */
int32_t pti_numrunning; /* number of running threads */
int32_t pti_priority; /* task priority*/
};
//proc_info.c
// Kernel entry point behind the proc_pidinfo() syscall. Only the
// PROC_PIDTASKINFO flavor is shown; the pid lookup and the other flavors
// are elided ("...") in this excerpt, so the function body is incomplete.
int
proc_pidinfo(int pid, int flavor, uint64_t arg, user_addr_t buffer, uint32_t buffersize, int32_t * retval)
{
struct proc * p = PROC_NULL;
int error = ENOTSUP;
int gotref = 0;
int findzomb = 0;
int shortversion = 0;
uint32_t size;
int zombie = 0;
int thuniqueid = 0;
int uniqidversion = 0;
boolean_t check_same_user;
...
switch (flavor) {
...
case PROC_PIDTASKINFO: {
struct proc_taskinfo ptinfo;
error = proc_pidtaskinfo(p, &ptinfo);
if (error == 0) {
//copyout is the BSD analogue of Linux's copy_to_user:
//copies the result from kernel space into the userland buffer
error = copyout(&ptinfo, buffer, sizeof(struct proc_taskinfo));
if (error == 0)
*retval = sizeof(struct proc_taskinfo);
}
}
break;
...
}
/*
 * proc_pidtaskinfo - fill *ptinfo with task-wide statistics for process p.
 *
 * Zeroes the output first, then delegates to fill_taskprocinfo(), which
 * writes through the layout-compatible proc_taskinfo_internal view of the
 * same buffer. Always returns 0.
 */
int
proc_pidtaskinfo(proc_t p, struct proc_taskinfo * ptinfo)
{
	task_t task = p->task;

	bzero(ptinfo, sizeof(*ptinfo));
	fill_taskprocinfo(task, (struct proc_taskinfo_internal *)ptinfo);
	return 0;
}
// Kernel-side mirror of userland struct proc_taskinfo. The two layouts
// must stay identical: proc_pidtaskinfo() casts one to the other and the
// bytes are copied out to userland unchanged.
struct proc_taskinfo_internal {
uint64_t pti_virtual_size; /* virtual memory size (bytes) */
uint64_t pti_resident_size; /* resident memory size (bytes) */
uint64_t pti_total_user; /* total time */
uint64_t pti_total_system;
uint64_t pti_threads_user; /* existing threads only */
uint64_t pti_threads_system;
int32_t pti_policy; /* default policy for new threads */
int32_t pti_faults; /* number of page faults */
int32_t pti_pageins; /* number of actual pageins */
int32_t pti_cow_faults; /* number of copy-on-write faults */
int32_t pti_messages_sent; /* number of messages sent */
int32_t pti_messages_received; /* number of messages received */
int32_t pti_syscalls_mach; /* number of mach system calls */
int32_t pti_syscalls_unix; /* number of unix system calls */
int32_t pti_csw; /* number of context switches */
int32_t pti_threadnum; /* number of threads in the task */
int32_t pti_numrunning; /* number of running threads */
int32_t pti_priority; /* task priority*/
};
//bsd_kern.c
// Fill *ptinfo with statistics for `task`: VM sizes from the task's map,
// then cumulative CPU time assembled from the task-level totals plus a
// walk over every live thread, all under task_lock.
void
fill_taskprocinfo(task_t task, struct proc_taskinfo_internal * ptinfo)
{
vm_map_t map;
task_absolutetime_info_data_t tinfo;
thread_t thread;
uint32_t cswitch = 0, numrunning = 0;
uint32_t syscalls_unix = 0;
uint32_t syscalls_mach = 0;
task_lock(task);
map = (task == kernel_task)? kernel_map: task->map;
ptinfo->pti_virtual_size = map->size;
ptinfo->pti_resident_size =
(mach_vm_size_t)(pmap_resident_count(map->pmap))
* PAGE_SIZE_64;
ptinfo->pti_policy = ((task != kernel_task)?
POLICY_TIMESHARE: POLICY_RR);
tinfo.threads_user = tinfo.threads_system = 0;
// task-wide cumulative user/system time (time of already-exited threads)
tinfo.total_user = task->total_user_time;
tinfo.total_system = task->total_system_time;
// walk every live thread and add its per-thread timers
queue_iterate(&task->threads, thread, thread_t, task_threads) {
uint64_t tval;
spl_t x;
if (thread->options & TH_OPT_IDLE_THREAD)
continue;
x = splsched();
thread_lock(thread);
if ((thread->state & TH_RUN) == TH_RUN)
numrunning++;
cswitch += thread->c_switch; // per-thread context switches
// add this thread's user time ...
tval = timer_grab(&thread->user_timer);
tinfo.threads_user += tval;
tinfo.total_user += tval;
tval = timer_grab(&thread->system_timer);
if (thread->precise_user_kernel_time) {
// ... and its system time, when the two are tracked separately
tinfo.threads_system += tval;
tinfo.total_system += tval;
} else {
/* system_timer may represent either sys or user */
tinfo.threads_user += tval;
tinfo.total_user += tval;
}
syscalls_unix += thread->syscalls_unix;
syscalls_mach += thread->syscalls_mach;
thread_unlock(thread);
splx(x);
}
// cumulative task time; CPU-usage accounting consumes these two fields:
ptinfo->pti_total_system = tinfo.total_system;
ptinfo->pti_total_user = tinfo.total_user;
// live-threads-only user/system time (excludes exited threads)
ptinfo->pti_threads_system = tinfo.threads_system;
ptinfo->pti_threads_user = tinfo.threads_user;
ptinfo->pti_faults = task->faults;
ptinfo->pti_pageins = task->pageins;
ptinfo->pti_cow_faults = task->cow_faults;
ptinfo->pti_messages_sent = task->messages_sent;
ptinfo->pti_messages_received = task->messages_received;
ptinfo->pti_syscalls_mach = task->syscalls_mach + syscalls_mach;
ptinfo->pti_syscalls_unix = task->syscalls_unix + syscalls_unix;
ptinfo->pti_csw = task->c_switch + cswitch; // task + live-thread context switches
ptinfo->pti_threadnum = task->thread_count;
ptinfo->pti_numrunning = numrunning;
ptinfo->pti_priority = task->priority;
task_unlock(task);
}
cpu整体耗时
// htop's Darwin-specific process list: extends the generic ProcessList
// with host/VM statistics and the per-CPU load snapshots used for
// delta-based usage computation.
typedef struct DarwinProcessList_ {
ProcessList super;
host_basic_info_data_t host_info;
vm_statistics_data_t vm_stats;
// previous and current cumulative per-CPU load counters; usage
// percentages are computed from the difference between the two samples
processor_cpu_load_info_t prev_load;
processor_cpu_load_info_t curr_load;
uint64_t kernel_threads;
uint64_t user_threads;
uint64_t global_diff;
} DarwinProcessList;
cpu使用率是 抽样后取差值,再计算百分比
// The four per-CPU tick counters the kernel maintains
// (indices into processor_cpu_load_info.cpu_ticks[]).
#define CPU_STATE_MAX 4
#define CPU_STATE_USER 0
#define CPU_STATE_SYSTEM 1
#define CPU_STATE_IDLE 2
#define CPU_STATE_NICE 3
/*
 * Compute one CPU's usage for the given meter: take the per-state tick
 * deltas between the previous and current samples, convert them to
 * percentages, and return the CPU's overall busy percentage.
 */
double Platform_setCPUValues(Meter* mtr, int cpu) {
    /* CPU index 0 is the synthetic average over all cores. */
    if (cpu == 0) {
        return Platform_setCPUAverageValues(mtr);
    }

    DarwinProcessList *dpl = (DarwinProcessList *)mtr->pl;
    processor_cpu_load_info_t prev = &dpl->prev_load[cpu - 1];
    processor_cpu_load_info_t curr = &dpl->curr_load[cpu - 1];

    /* Total ticks elapsed in the interval, summed over every CPU state. */
    double interval = 0;
    for (size_t state = 0; state < CPU_STATE_MAX; ++state) {
        interval += (double)curr->cpu_ticks[state] - (double)prev->cpu_ticks[state];
    }

    /* Shares of NICE / USER / SYSTEM time; IDLE is deliberately not shown. */
    mtr->values[CPU_METER_NICE] =
        ((double)curr->cpu_ticks[CPU_STATE_NICE] - (double)prev->cpu_ticks[CPU_STATE_NICE]) * 100.0 / interval;
    mtr->values[CPU_METER_NORMAL] =
        ((double)curr->cpu_ticks[CPU_STATE_USER] - (double)prev->cpu_ticks[CPU_STATE_USER]) * 100.0 / interval;
    mtr->values[CPU_METER_KERNEL] =
        ((double)curr->cpu_ticks[CPU_STATE_SYSTEM] - (double)prev->cpu_ticks[CPU_STATE_SYSTEM]) * 100.0 / interval;

    Meter_setItems(mtr, 3);

    /* The three visible shares add up to this CPU's overall usage. */
    double busy = mtr->values[CPU_METER_NICE]
                + mtr->values[CPU_METER_NORMAL]
                + mtr->values[CPU_METER_KERNEL];
    return CLAMP(busy, 0.0, 100.0);
}
下面来看下各个cpu的各个状态的ticks怎么得来的
ProcessList_allocateCPULoadInfo(&this->curr_load);
/*
 * Query the Mach host for per-CPU load counters.
 *
 * On success *p points at a kernel-provided array of
 * processor_cpu_load_info entries (one per CPU) and the CPU count is
 * returned. On failure the process is aborted via CRT_fatalError.
 *
 * Fixes vs. the posted excerpt: the fatal-error message had lost its
 * "\n" escape ("infon") during web extraction, and "woule" -> "would".
 */
unsigned ProcessList_allocateCPULoadInfo(processor_cpu_load_info_t *p) {
mach_msg_type_number_t info_size = sizeof(processor_cpu_load_info_t);
unsigned cpu_count;
// TODO Improving the accuracy of the load counts would help a lot.
if(0 != host_processor_info(mach_host_self(), PROCESSOR_CPU_LOAD_INFO, &cpu_count, (processor_info_array_t *)p, &info_size)) {
CRT_fatalError("Unable to retrieve CPU info\n");
}
return cpu_count;
}
关键函数为:
host_processor_info(mach_host_self(), PROCESSOR_CPU_LOAD_INFO, &cpu_count, (processor_info_array_t *)p, &info_size)
//host.c
// Kernel implementation of host_processor_info(): allocates a buffer,
// then queries processor_info() for each CPU on the processor list.
// Parameter validation and the final vm_map_copy handoff are elided
// ("...") in this excerpt, so the body shown is incomplete.
kern_return_t
host_processor_info(host_t host,
processor_flavor_t flavor,
natural_t * out_pcount,
processor_info_array_t * out_array,
mach_msg_type_number_t * out_array_count)
{
kern_return_t result;
processor_t processor;
host_t thost;
processor_info_t info;
unsigned int icount, tcount;
unsigned int pcount, i;
vm_offset_t addr;
vm_size_t size, needed;
vm_map_copy_t copy;
...
result = kmem_alloc(ipc_kernel_map, &addr, size, VM_KERN_MEMORY_IPC); // buffer that will hold the per-CPU info array
...
info = (processor_info_t)addr;
processor = processor_list; // head of the kernel's CPU linked list
tcount = icount;
// query the first CPU
result = processor_info(processor, flavor, &thost, info, &tcount);
if (result != KERN_SUCCESS) {
kmem_free(ipc_kernel_map, addr, size);
return (result);
}
if (pcount > 1) {
for (i = 1; i < pcount; i++) {
simple_lock(&processor_list_lock);
processor = processor->processor_list; // advance to the next CPU in the list
simple_unlock(&processor_list_lock);
info += icount;
tcount = icount;
// query the i-th CPU into the next slot of the array
result = processor_info(processor, flavor, &thost, info, &tcount);
if (result != KERN_SUCCESS) {
kmem_free(ipc_kernel_map, addr, size);
return (result);
}
}
}
...
return (KERN_SUCCESS);
}
关键函数:
processor_info(processor, flavor, &thost, info, &tcount);
//processor.c
// Per-CPU information query. Only the PROCESSOR_CPU_LOAD_INFO flavor is
// shown here; other flavors are elided ("..."). Converts the processor's
// cumulative user/system/idle timers into cpu_ticks[] counts.
kern_return_t
processor_info(
register processor_t processor,
processor_flavor_t flavor,
host_t *host,
processor_info_t info,
mach_msg_type_number_t *count)
{
register int cpu_id, state;
...
cpu_id = processor->cpu_id;
switch (flavor) {
...
case PROCESSOR_CPU_LOAD_INFO:
{
processor_cpu_load_info_t cpu_load_info; // output: per-state tick counters
timer_t idle_state;
uint64_t idle_time_snapshot1, idle_time_snapshot2;
uint64_t idle_time_tstamp1, idle_time_tstamp2;
/*
 * We capture the accumulated idle time twice over
 * the course of this function, as well as the timestamps
 * when each were last updated. Since these are
 * all done using non-atomic racy mechanisms, the
 * most we can infer is whether values are stable.
 * timer_grab() is the only function that can be
 * used reliably on another processor's per-processor
 * data.
 */
if (*count < PROCESSOR_CPU_LOAD_INFO_COUNT)
return (KERN_FAILURE);
cpu_load_info = (processor_cpu_load_info_t) info;
//#1. fill CPU_STATE_USER and CPU_STATE_SYSTEM ticks
if (precise_user_kernel_time) {
cpu_load_info->cpu_ticks[CPU_STATE_USER] =
(uint32_t)(timer_grab(&PROCESSOR_DATA(processor, user_state)) / hz_tick_interval);
cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] =
(uint32_t)(timer_grab(&PROCESSOR_DATA(processor, system_state)) / hz_tick_interval);
} else {
// user/system not tracked separately: report the sum as USER
uint64_t tval = timer_grab(&PROCESSOR_DATA(processor, user_state)) +
timer_grab(&PROCESSOR_DATA(processor, system_state));
cpu_load_info->cpu_ticks[CPU_STATE_USER] = (uint32_t)(tval / hz_tick_interval);
cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = 0;
}
idle_state = &PROCESSOR_DATA(processor, idle_state);
idle_time_snapshot1 = timer_grab(idle_state);
idle_time_tstamp1 = idle_state->tstamp;
/*
 * Idle processors are not continually updating their
 * per-processor idle timer, so it may be extremely
 * out of date, resulting in an over-representation
 * of non-idle time between two measurement
 * intervals by e.g. top(1). If we are non-idle, or
 * have evidence that the timer is being updated
 * concurrently, we consider its value up-to-date.
 */
//#2. fill CPU_STATE_IDLE ticks
if (PROCESSOR_DATA(processor, current_state) != idle_state) {
cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
(uint32_t)(idle_time_snapshot1 / hz_tick_interval);
} else if ((idle_time_snapshot1 != (idle_time_snapshot2 = timer_grab(idle_state))) ||
(idle_time_tstamp1 != (idle_time_tstamp2 = idle_state->tstamp))){
/* Idle timer is being updated concurrently, second stamp is good enough */
cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
(uint32_t)(idle_time_snapshot2 / hz_tick_interval);
} else {
/*
 * Idle timer may be very stale. Fortunately we have established
 * that idle_time_snapshot1 and idle_time_tstamp1 are unchanging
 */
idle_time_snapshot1 += mach_absolute_time() - idle_time_tstamp1;
cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
(uint32_t)(idle_time_snapshot1 / hz_tick_interval);
}
//#3. fill CPU_STATE_NICE ticks
cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0; // nice time is never reported on Darwin
*count = PROCESSOR_CPU_LOAD_INFO_COUNT;
*host = &realhost;
return (KERN_SUCCESS);
}
default:
result = cpu_info(flavor, cpu_id, info, count);
if (result == KERN_SUCCESS)
*host = &realhost;
return (result);
}
}
PROCESSOR_DATA 定义:
// Per-processor private data (excerpt; remaining members elided "...").
// Holds the cumulative state timers that processor_info() converts into
// the cpu_ticks[] counters above.
struct processor_data {
/* Processor state statistics */
timer_data_t idle_state;
timer_data_t system_state;
timer_data_t user_state;
timer_t current_state; /* points to processor's idle, system, or user state timer */
/* Thread execution timers */
timer_t thread_timer; /* points to current thread's user or system timer */
timer_t kernel_timer; /* points to current thread's system_timer */
...
};
typedef struct processor_data processor_data_t;
/*
 * Accessor for a processor's private per-CPU data member.
 * The line-continuation backslash was lost in the web extraction of this
 * excerpt, which left an empty macro followed by stray top-level code;
 * restored here so the macro expands to the member access as intended.
 */
#define PROCESSOR_DATA(processor, member) \
	(processor)->processor_data.member
总结
cpu整体使用率:
host_processor_info(mach_host_self(), PROCESSOR_CPU_LOAD_INFO, &cpu_count, (processor_info_array_t *)p, &info_size)
进程单个使用率:
struct proc_taskinfo pti;
proc_pidinfo(proc->super.pid, PROC_PIDTASKINFO, 0, &pti, sizeof(pti));
app耗电监控之cpu使用率监控
android app 和iOS app 怎么做cpu监控呢?
可以参考上文方法,读取进程下各个线程cpu的utime stime等,如果使用率超过比如80%,那么就需要dump 线程堆栈 方便后续耗时耗电分析。
话说 当年乔布斯重回苹果,收拾原来的mac os烂摊子,曾经和Linux之父 Linus 沟通,想让mac采用Linux作为内核,但是没谈成。不然现在的 mac iphone 就可能和Android 成为兄弟了。
最后
以上就是留胡子百褶裙为你收集整理的error page怎么解决_以htop为例 看怎么做Android iOS app cpu使用率监控的全部内容,希望文章能够帮你解决error page怎么解决_以htop为例 看怎么做Android iOS app cpu使用率监控所遇到的程序开发问题。
如果觉得靠谱客网站的内容还不错,欢迎将靠谱客网站推荐给程序员好友。
本图文内容来源于网友提供,作为学习参考使用,或来自网络收集整理,版权属于原作者所有。
发表评论 取消回复