Diffstat (limited to 'kernel/time/timer.c')
-rw-r--r--  kernel/time/timer.c | 49 +++++++++++++++++++++++++++++++++++++++++--------
1 file changed, 41 insertions(+), 8 deletions(-)
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index c16c48de01c5..658051c97a3c 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1252,8 +1252,8 @@ static void expire_timers(struct timer_base *base, struct hlist_head *head)
 	}
 }
 
-static int collect_expired_timers(struct timer_base *base,
-				  struct hlist_head *heads)
+static int __collect_expired_timers(struct timer_base *base,
+				    struct hlist_head *heads)
 {
 	unsigned long clk = base->clk;
 	struct hlist_head *vec;
@@ -1279,9 +1279,9 @@ static int collect_expired_timers(struct timer_base *base,
 
 #ifdef CONFIG_NO_HZ_COMMON
 /*
- * Find the next pending bucket of a level. Search from @offset + @clk upwards
- * and if nothing there, search from start of the level (@offset) up to
- * @offset + clk.
+ * Find the next pending bucket of a level. Search from level start (@offset)
+ * + @clk upwards and if nothing there, search from start of the level
+ * (@offset) up to @offset + clk.
  */
 static int next_pending_bucket(struct timer_base *base, unsigned offset,
			       unsigned clk)
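
For reference, the two-pass search the rewritten comment describes can be
sketched as below. This is an illustrative reconstruction, not part of the
patch; it assumes base->pending_map, LVL_SIZE and the kernel's find_next_bit()
as used elsewhere in this file.

/*
 * Sketch: scan the pending bitmap from @offset + @clk to the end of the
 * level; if nothing is found there, wrap around and scan from @offset up
 * to @offset + @clk. Returns the distance in buckets from @clk, or -1 if
 * the level has no pending timer.
 */
static int next_pending_bucket_sketch(struct timer_base *base,
				      unsigned offset, unsigned clk)
{
	unsigned pos, start = offset + clk;
	unsigned end = offset + LVL_SIZE;

	/* First pass: from @offset + @clk upwards to the level's end. */
	pos = find_next_bit(base->pending_map, end, start);
	if (pos < end)
		return pos - start;

	/* Second pass: wrap around, search from @offset up to @offset + @clk. */
	pos = find_next_bit(base->pending_map, start, offset);
	return pos < start ? pos + LVL_SIZE - start : -1;
}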
@@ -1298,14 +1298,14 @@ static int next_pending_bucket(struct timer_base *base, unsigned offset,
 }
 
 /*
- * Search the first expiring timer in the various clock levels.
+ * Search the first expiring timer in the various clock levels. Caller must
+ * hold base->lock.
  */
 static unsigned long __next_timer_interrupt(struct timer_base *base)
 {
 	unsigned long clk, next, adj;
 	unsigned lvl, offset = 0;
 
-	spin_lock(&base->lock);
 	next = base->clk + NEXT_TIMER_MAX_DELTA;
 	clk = base->clk;
 	for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) {
@@ -1358,7 +1358,6 @@ static unsigned long __next_timer_interrupt(struct timer_base *base)
 		clk >>= LVL_CLK_SHIFT;
 		clk += adj;
 	}
-	spin_unlock(&base->lock);
 	return next;
 }
 
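
With both the lock and unlock lines removed, __next_timer_interrupt() now
relies entirely on its caller for serialization, and only the comment above
records that. If the contract should also be checked at runtime, a lockdep
annotation at the top of the function would do it; this is an illustrative
suggestion, not something the patch adds:

static unsigned long __next_timer_interrupt(struct timer_base *base)
{
	/* Hypothetical addition: assert the documented locking contract. */
	lockdep_assert_held(&base->lock);
	/* ... body as above ... */
}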
@@ -1416,7 +1415,10 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
 	if (cpu_is_offline(smp_processor_id()))
 		return expires;
 
+	spin_lock(&base->lock);
 	nextevt = __next_timer_interrupt(base);
+	spin_unlock(&base->lock);
+
 	if (time_before_eq(nextevt, basej))
 		expires = basem;
 	else
@@ -1424,6 +1426,37 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
 
 	return cmp_next_hrtimer_event(basem, expires);
 }
+
+static int collect_expired_timers(struct timer_base *base,
+				  struct hlist_head *heads)
+{
+	/*
+	 * NOHZ optimization. After a long idle sleep we need to forward the
+	 * base to current jiffies. Avoid a loop by searching the bitfield for
+	 * the next expiring timer.
+	 */
+	if ((long)(jiffies - base->clk) > 2) {
+		unsigned long next = __next_timer_interrupt(base);
+
+		/*
+		 * If the next timer is ahead of time forward to current
+		 * jiffies, otherwise forward to the next expiry time.
+		 */
+		if (time_after(next, jiffies)) {
+			/* The call site will increment clock! */
+			base->clk = jiffies - 1;
+			return 0;
+		}
+		base->clk = next;
+	}
+	return __collect_expired_timers(base, heads);
+}
+#else
+static inline int collect_expired_timers(struct timer_base *base,
+					 struct hlist_head *heads)
+{
+	return __collect_expired_timers(base, heads);
+}
 #endif
 
 /*
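
The forwarding check in the new wrapper leans on wraparound-safe jiffies
arithmetic: subtracting two unsigned counters and casting the result to long
gives their signed distance, the same idiom behind the kernel's time_after().
A base at most two ticks behind keeps taking the normal per-tick path; a
larger gap triggers the bitmap search. A standalone sketch of the idiom
(illustrative userspace C, not kernel code):

#include <stdio.h>

/* Wraparound-safe "a is after b", modeled on the kernel's time_after(). */
static int after(unsigned long a, unsigned long b)
{
	return (long)(b - a) < 0;
}

int main(void)
{
	unsigned long clk = (unsigned long)-2;	/* counter just before wrap */
	unsigned long now = 3;			/* counter already wrapped  */

	/* Signed distance is 5 despite the wrap, so clk is behind now. */
	printf("distance: %ld, now after clk: %d\n",
	       (long)(now - clk), after(now, clk));
	return 0;
}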