author	Marcelo Ricardo Leitner <mleitner@redhat.com>	2014-10-13 14:03:30 -0300
committer	Zefan Li <lizefan@huawei.com>	2014-12-01 18:02:44 +0800
commit	50f1b3d5a47088a67176ae72ffefda3e25873cf8 (patch)
tree	3bf878427cabd4934ce51a68e40382ec935a6c29
parent	b54ca6089908121df8ea66545520c08048db7d80 (diff)
ipv4: disable bh while doing route gc
Further tests revealed that moving the garbage collector to a work queue and protecting it with a spinlock may leave the system prone to soft lockups if the bottom half gets very busy. It was reproduced with a set of firewall rules that REJECTed packets. If the NIC bottom half handler ends up running on the same CPU that is running the garbage collector on a very large cache, the garbage collector will not be able to do its job due to the amount of work needed to handle the REJECTs, and it also won't reschedule.

The fix is to disable bottom halves during the garbage collection, as it was in the first place (most calls to it came from softirqs).

Signed-off-by: Marcelo Ricardo Leitner <mleitner@redhat.com>
Acked-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
Signed-off-by: Zefan Li <lizefan@huawei.com>
-rw-r--r--	net/ipv4/route.c	4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 9e7909eef8d1..6c34bc98bce7 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -998,7 +998,7 @@ static void __do_rt_garbage_collect(int elasticity, int min_interval)
 	 * do not make it too frequently.
 	 */
 
-	spin_lock(&rt_gc_lock);
+	spin_lock_bh(&rt_gc_lock);
 
 	RT_CACHE_STAT_INC(gc_total);
@@ -1101,7 +1101,7 @@ work_done:
 	    dst_entries_get_slow(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh)
 		expire = ip_rt_gc_timeout;
 out:
-	spin_unlock(&rt_gc_lock);
+	spin_unlock_bh(&rt_gc_lock);
 }
 
 static void __rt_garbage_collect(struct work_struct *w)
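
For context, a minimal sketch of the locking pattern the fix applies (not part of the patch; the lock and work-item names below are hypothetical). The _bh variant of the spinlock disables softirq processing on the local CPU for the duration of the critical section, so a busy NIC bottom half cannot preempt and starve the garbage collector on that CPU:

#include <linux/spinlock.h>
#include <linux/workqueue.h>

/* Hypothetical lock and work item, for illustration only. */
static DEFINE_SPINLOCK(example_gc_lock);

static void example_gc_work(struct work_struct *w)
{
	/*
	 * spin_lock_bh() disables bottom halves on this CPU before
	 * taking the lock, so a softirq (e.g. NIC RX handling a
	 * flood of REJECTed packets) cannot run here and starve
	 * this critical section, as plain spin_lock() would allow.
	 */
	spin_lock_bh(&example_gc_lock);

	/* ... scan the cache and free expired entries ... */

	spin_unlock_bh(&example_gc_lock);
}

This also explains why the pre-work-queue code did not need an explicit _bh lock: as the commit message notes, most calls to the garbage collector came from softirqs, where bottom halves are already excluded on the local CPU.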