
Overview

Using timers in the Linux kernel

If you want to do something periodically in the kernel, or do something at a specific point in time, you can use a timer.
A typical example is periodically dumping the contents of some buffer.

Let's look at how to use one first.
Start by defining a struct timer_list object, e.g.: struct timer_list dump_t;
This object is essentially an alarm clock: it holds an expiry time, i.e. when the alarm should go off; a function pointer, which is where the work gets done once the alarm fires;
and a data field (an unsigned long, typically used to carry a pointer to your own context) that is handed to the handler when it runs.

We will go through the usage first and dig a little deeper afterwards.
With the struct timer_list object defined, the next step is to initialize it.
Call init_timer() to do the basic initialization,
then fill in a few of the structure's members:
  init_timer(&dump_t);
  dump_t.function = dump_function;
  dump_t.data = (unsigned long) my_dev;
  dump_t.expires = jiffies + HZ;    // the timer fires 1 second from now; for n seconds use jiffies + n*HZ. Note that the unit is ticks (jiffies)
Is that all?
Not quite: the timer still has to be added to the timer list, i.e. you have to tell the kernel that you have set up this timer:
  add_timer(&dump_t);
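If you prefer to think in milliseconds rather than in ticks, the kernel helper msecs_to_jiffies() from <linux/jiffies.h> does the conversion. A small sketch (the 500 ms interval is just an example value):

  /* fire roughly 500 ms from now; msecs_to_jiffies() converts ms to ticks */
  dump_t.expires = jiffies + msecs_to_jiffies(500);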
  
The remaining piece is what runs when the timer expires, namely the dump_function handler.
Its declaration:
static void dump_function(unsigned long channel);
What it does inside is entirely up to you, depending on what work you want the timer to perform.
Note one thing: this timer fires only once, because jiffies + HZ names a single point in time.
If you want the timer to do its work periodically, you have to re-arm it inside dump_function.
One way to re-arm it:
 del_timer(&dump_t);
 dump_t.function = dump_function;
 dump_t.data = (unsigned long) my_dev;
 dump_t.expires = jiffies + HZ;
 add_timer(&dump_t);
 
 
That basically covers the usage.
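Before digging into the implementation, here is a minimal, self-contained sketch that puts the pieces above together (the names dump_t, my_dev and the one-second period are placeholder choices; mod_timer() is used in the handler to re-arm the timer, which is equivalent to the del_timer()/set expires/add_timer() sequence shown earlier):

#include <linux/module.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list dump_t;
static int my_dev;                        /* stand-in for a real device context */

static void dump_function(unsigned long data)
{
        /* ... dump the buffer belonging to (void *)data here ... */

        /* re-arm for one second later so the timer keeps firing periodically */
        mod_timer(&dump_t, jiffies + HZ);
}

static int __init dump_init(void)
{
        init_timer(&dump_t);
        dump_t.function = dump_function;
        dump_t.data     = (unsigned long)&my_dev;
        dump_t.expires  = jiffies + HZ;   /* first expiry: one second from now */
        add_timer(&dump_t);
        return 0;
}

static void __exit dump_exit(void)
{
        del_timer_sync(&dump_t);          /* wait until the handler is no longer running */
}

module_init(dump_init);
module_exit(dump_exit);
MODULE_LICENSE("GPL");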
Now let's look at how timers are actually implemented.

First, the definition of struct timer_list:
struct timer_list {
 /*
  * All fields that change during normal runtime grouped to the
  * same cacheline
  */
 struct list_head entry;
 unsigned long expires;
 struct tvec_base *base;

 void (*function)(unsigned long);
 unsigned long data;

 int slack;

#ifdef CONFIG_TIMER_STATS
 int start_pid;
 void *start_site;
 char start_comm[16];
#endif
#ifdef CONFIG_LOCKDEP
 struct lockdep_map lockdep_map;
#endif
};

The definition of init_timer:
#define init_timer(timer)      \
 do {        \
  static struct lock_class_key __key;   \
  init_timer_key((timer), #timer, &__key);  \
 } while (0)
 
/**
 * init_timer_key - initialize a timer
 * @timer: the timer to be initialized
 * @name: name of the timer
 * @key: lockdep class key of the fake lock used for tracking timer
 *       sync lock dependencies
 *
 * init_timer_key() must be done to a timer prior calling *any* of the
 * other timer functions.
 */
void init_timer_key(struct timer_list *timer,
      const char *name,
      struct lock_class_key *key)
{
 debug_init(timer);
 __init_timer(timer, name, key);
}

static inline void debug_init(struct timer_list *timer)
{
 debug_timer_init(timer);
 trace_timer_init(timer);
}

static inline void debug_timer_init(struct timer_list *timer)
{
 debug_object_init(timer, &timer_debug_descr);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, struct debug_obj_descr *descr)
{
 if (!debug_objects_enabled)
  return;

 __debug_object_init(addr, descr, 0);
}

static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
 enum debug_obj_state state;
 struct debug_bucket *db;
 struct debug_obj *obj;
 unsigned long flags;

 fill_pool();

 db = get_bucket((unsigned long) addr);

 raw_spin_lock_irqsave(&db->lock, flags);

 obj = lookup_object(addr, db);
 if (!obj) {
  obj = alloc_object(addr, db, descr);
  if (!obj) {
   debug_objects_enabled = 0;
   raw_spin_unlock_irqrestore(&db->lock, flags);
   debug_objects_oom();
   return;
  }
  debug_object_is_on_stack(addr, onstack);
 }

 switch (obj->state) {
 case ODEBUG_STATE_NONE:
 case ODEBUG_STATE_INIT:
 case ODEBUG_STATE_INACTIVE:
  obj->state = ODEBUG_STATE_INIT;
  break;

 case ODEBUG_STATE_ACTIVE:
  debug_print_object(obj, "init");
  state = obj->state;
  raw_spin_unlock_irqrestore(&db->lock, flags);
  debug_object_fixup(descr->fixup_init, addr, state);
  return;

 case ODEBUG_STATE_DESTROYED:
  debug_print_object(obj, "init");
  break;
 default:
  break;
 }

 raw_spin_unlock_irqrestore(&db->lock, flags);
}

static void __init_timer(struct timer_list *timer,
    const char *name,
    struct lock_class_key *key)
{
 timer->entry.next = NULL;
 timer->base = __raw_get_cpu_var(tvec_bases);
 timer->slack = -1;
#ifdef CONFIG_TIMER_STATS
 timer->start_site = NULL;
 timer->start_pid = -1;
 memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
 lockdep_init_map(&timer->lockdep_map, name, key, 0);
}

The following line is worth a closer look:
 timer->base = __raw_get_cpu_var(tvec_bases);
 
Let's see how __raw_get_cpu_var is implemented; it differs between SMP and non-SMP builds.
The SMP case:
#define __raw_get_cpu_var(var) (*__this_cpu_ptr(&(var)))
The non-SMP case:
#define __raw_get_cpu_var(var) (*VERIFY_PERCPU_PTR(&(var)))

Start with the simpler one:
#define VERIFY_PERCPU_PTR(__p) ({   \
 __verify_pcpu_ptr((__p));   \
 (typeof(*(__p)) __kernel __force *)(__p); \
})

/*
 * Macro which verifies @ptr is a percpu pointer without evaluating
 * @ptr.  This is to be used in percpu accessors to verify that the
 * input parameter is a percpu pointer.
 */
#define __verify_pcpu_ptr(ptr) do {     \
 const void __percpu *__vpp_verify = (typeof(ptr))NULL;  \
 (void)__vpp_verify;      \
} while (0)

Now let's look at __this_cpu_ptr:
#define __this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)

/*
 * Add a offset to a pointer but keep the pointer as is.
 *
 * Only S390 provides its own means of moving the pointer.
 */
#ifndef SHIFT_PERCPU_PTR
/* Weird cast keeps both GCC and sparse happy. */
#define SHIFT_PERCPU_PTR(__p, __offset) ({    \
 __verify_pcpu_ptr((__p));     \
 RELOC_HIDE((typeof(*(__p)) __kernel __force *)(__p), (__offset)); \
})
#endif

So the two paths converge again.

That is as far as we will follow the init path for now. It is still somewhat hazy, but the code is laid out here for future study.

Next, let's look at the implementation of add_timer.
/**
 * add_timer - start a timer
 * @timer: the timer to be added
 *
 * The kernel will do a ->function(->data) callback from the
 * timer interrupt at the ->expires point in the future. The
 * current time is 'jiffies'.
 *
 * The timer's ->expires, ->function (and if the handler uses it, ->data)
 * fields must be set prior calling this function.
 *
 * Timers with an ->expires field in the past will be executed in the next
 * timer tick.
 */
void add_timer(struct timer_list *timer)
{
 BUG_ON(timer_pending(timer));
 mod_timer(timer, timer->expires);
}
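Note the BUG_ON(timer_pending(timer)): add_timer() must never be called on a timer that is already queued. Re-arming a timer that might still be pending is what mod_timer() is for, as the comment quoted further down spells out. A quick sketch of the two patterns, reusing the dump_t example from above:

/* first arming: function/data/expires must already be filled in */
dump_t.expires = jiffies + HZ;
add_timer(&dump_t);

/* re-arming a timer that may still be pending:
 * equivalent to del_timer(); set expires; add_timer(); */
mod_timer(&dump_t, jiffies + 2 * HZ);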

/**
 * timer_pending - is a timer pending?
 * @timer: the timer in question
 *
 * timer_pending will tell whether a given timer is currently pending,
 * or not. Callers must ensure serialization wrt. other operations done
 * to this timer, eg. interrupt contexts, or other CPUs on SMP.
 *
 * return value: 1 if the timer is pending, 0 if not.
 */
static inline int timer_pending(const struct timer_list * timer)
{
 return timer->entry.next != NULL;
}

/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expire field of an
 * active timer (if the timer is inactive it will be activated)
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
 expires = apply_slack(timer, expires);

 /*
  * This is a common optimization triggered by the
  * networking code - if the timer is re-modified
  * to be the same thing then just return:
  */
 if (timer_pending(timer) && timer->expires == expires)
  return 1;

 return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
}

/*
 * Decide where to put the timer while taking the slack into account
 *
 * Algorithm:
 *   1) calculate the maximum (absolute) time
 *   2) calculate the highest bit where the expires and new max are different
 *   3) use this bit to make a mask
 *   4) use the bitmask to round down the maximum time, so that all last
 *      bits are zeros
 */
static inline
unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
{
 unsigned long expires_limit, mask;
 int bit;

 if (timer->slack >= 0) {
  expires_limit = expires + timer->slack;
 } else {
  long delta = expires - jiffies;

  if (delta < 256)
   return expires;

  expires_limit = expires + delta / 256;
 }
 mask = expires ^ expires_limit;
 if (mask == 0)
  return expires;

 bit = find_last_bit(&mask, BITS_PER_LONG);

 mask = (1 << bit) - 1;

 expires_limit = expires_limit & ~(mask);

 return expires_limit;
}
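To make the rounding concrete, here is a user-space sketch (all numbers are made-up examples) that replays the slack < 0 branch for a timer set 10000 ticks in the future: roughly 0.4% of extra delay (delta / 256) is allowed, and the expiry is rounded down so the low bits inside that slack window become zero:

#include <stdio.h>

/* highest set bit of a single word, like find_last_bit() on one long */
static int find_last_bit_word(unsigned long w)
{
        int bit = -1;
        while (w) { w >>= 1; bit++; }
        return bit;
}

int main(void)
{
        unsigned long jiffies = 1000000;
        unsigned long expires = jiffies + 10000;               /* 10000 ticks from now */
        long delta = expires - jiffies;
        unsigned long expires_limit = expires + delta / 256;   /* allow ~0.4% extra delay */
        unsigned long mask = expires ^ expires_limit;          /* bits allowed to change */
        int bit = find_last_bit_word(mask);

        mask = (1UL << bit) - 1;
        printf("original %lu, rounded %lu\n", expires, expires_limit & ~mask);
        /* prints: original 1010000, rounded 1010016 */
        return 0;
}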

static inline int
__mod_timer(struct timer_list *timer, unsigned long expires,
      bool pending_only, int pinned)
{
 struct tvec_base *base, *new_base;
 unsigned long flags;
 int ret = 0 , cpu;

 timer_stats_timer_set_start_info(timer);
 BUG_ON(!timer->function);

 base = lock_timer_base(timer, &flags);

 if (timer_pending(timer)) {
  detach_timer(timer, 0);
  if (timer->expires == base->next_timer &&
      !tbase_get_deferrable(timer->base))
   base->next_timer = base->timer_jiffies;
  ret = 1;
 } else {
  if (pending_only)
   goto out_unlock;
 }

 debug_activate(timer, expires);

 cpu = smp_processor_id();

#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
 if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu))
  cpu = get_nohz_timer_target();
#endif
 new_base = per_cpu(tvec_bases, cpu);

 if (base != new_base) {
  /*
   * We are trying to schedule the timer on the local CPU.
   * However we can't change timer's base while it is running,
   * otherwise del_timer_sync() can't detect that the timer's
   * handler yet has not finished. This also guarantees that
   * the timer is serialized wrt itself.
   */
  if (likely(base->running_timer != timer)) {
   /* See the comment in lock_timer_base() */
   timer_set_base(timer, NULL);
   spin_unlock(&base->lock);
   base = new_base;
   spin_lock(&base->lock);
   timer_set_base(timer, base);
  }
 }

 timer->expires = expires;
 if (time_before(timer->expires, base->next_timer) &&
     !tbase_get_deferrable(timer->base))
  base->next_timer = timer->expires;
 internal_add_timer(base, timer);

out_unlock:
 spin_unlock_irqrestore(&base->lock, flags);

 return ret;
}

That is a lot of code, so rather than walking through all of it, let's just look at internal_add_timer.

static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
 unsigned long expires = timer->expires;
 unsigned long idx = expires - base->timer_jiffies;
 struct list_head *vec;

 if (idx < TVR_SIZE) {
  int i = expires & TVR_MASK;
  vec = base->tv1.vec + i;
 } else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
  int i = (expires >> TVR_BITS) & TVN_MASK;
  vec = base->tv2.vec + i;
 } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
  int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
  vec = base->tv3.vec + i;
 } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
  int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
  vec = base->tv4.vec + i;
 } else if ((signed long) idx < 0) {
  /*
   * Can happen if you add a timer with expires == jiffies,
   * or you set a timer to go off in the past
   */
  vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
 } else {
  int i;
  /* If the timeout is larger than 0xffffffff on 64-bit
   * architectures then we use the maximum timeout:
   */
  if (idx > 0xffffffffUL) {
   idx = 0xffffffffUL;
   expires = idx + base->timer_jiffies;
  }
  i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
  vec = base->tv5.vec + i;
 }
 /*
  * Timers are FIFO:
  */
 list_add_tail(&timer->entry, vec);
}
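In other words, tv1 holds timers expiring within the next TVR_SIZE ticks and is indexed directly by the low bits of expires, while tv2..tv5 cover progressively coarser ranges indexed by higher bit groups. A user-space sketch of the bucket selection, assuming the usual values TVR_BITS = 8 and TVN_BITS = 6 (they are smaller when CONFIG_BASE_SMALL is set):

#include <stdio.h>

#define TVR_BITS 8
#define TVN_BITS 6
#define TVR_SIZE (1 << TVR_BITS)
#define TVR_MASK (TVR_SIZE - 1)
#define TVN_MASK ((1 << TVN_BITS) - 1)

static void where(unsigned long timer_jiffies, unsigned long expires)
{
        unsigned long idx = expires - timer_jiffies;

        if (idx < TVR_SIZE)
                printf("expires=%lu -> tv1[%lu]\n", expires, expires & TVR_MASK);
        else if (idx < 1UL << (TVR_BITS + TVN_BITS))
                printf("expires=%lu -> tv2[%lu]\n", expires,
                       (expires >> TVR_BITS) & TVN_MASK);
        else if (idx < 1UL << (TVR_BITS + 2 * TVN_BITS))
                printf("expires=%lu -> tv3[%lu]\n", expires,
                       (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK);
        else
                printf("expires=%lu -> tv4/tv5 (further out)\n", expires);
}

int main(void)
{
        unsigned long now = 1000;

        where(now, now + 100);      /* within 256 ticks   -> tv1 */
        where(now, now + 1000);     /* within 2^14 ticks  -> tv2 */
        where(now, now + 100000);   /* within 2^20 ticks  -> tv3 */
        return 0;
}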

/**
 * list_add_tail - add a new entry
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 */
static inline void list_add_tail(struct list_head *new, struct list_head *head)
{
 __list_add(new, head->prev, head);
}

The implementation of __list_add is straightforward, just basic linked-list manipulation.
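For reference, the plain (non-debug) version in include/linux/list.h simply splices the new entry in between its two neighbours:

static inline void __list_add(struct list_head *new,
                              struct list_head *prev,
                              struct list_head *next)
{
        next->prev = new;
        new->next = next;
        new->prev = prev;
        prev->next = new;
}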

Finally

These are the collected notes on using timers in the Linux kernel; hopefully they are useful as a reference when you run into similar development problems.
