Diffstat (limited to 'net')
 -rw-r--r--  net/core/net_namespace.c   24
 -rw-r--r--  net/core/rtnetlink.c        4
 -rw-r--r--  net/ipv4/ipmr.c             2
 -rw-r--r--  net/ipv4/tcp_ipv4.c         2
 -rw-r--r--  net/ipv6/ip6mr.c            2
 -rw-r--r--  net/ipv6/ndisc.c            9
 -rw-r--r--  net/ipv6/tcp_ipv6.c        13
 -rw-r--r--  net/iucv/af_iucv.c          4
 -rw-r--r--  net/mac80211/agg-rx.c       8
 -rw-r--r--  net/mac80211/rx.c           7
 -rw-r--r--  net/mac80211/sta_info.h     2
 -rw-r--r--  net/openvswitch/vport.c     4
 -rw-r--r--  net/tipc/core.c             2
13 files changed, 55 insertions, 28 deletions
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index cb5290b8c428..5221f975a4cc 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -349,7 +349,7 @@ static LIST_HEAD(cleanup_list); /* Must hold cleanup_list_lock to touch */
static void cleanup_net(struct work_struct *work)
{
const struct pernet_operations *ops;
- struct net *net, *tmp;
+ struct net *net, *tmp, *peer;
struct list_head net_kill_list;
LIST_HEAD(net_exit_list);
@@ -365,14 +365,6 @@ static void cleanup_net(struct work_struct *work)
list_for_each_entry(net, &net_kill_list, cleanup_list) {
list_del_rcu(&net->list);
list_add_tail(&net->exit_list, &net_exit_list);
- for_each_net(tmp) {
- int id = __peernet2id(tmp, net, false);
-
- if (id >= 0)
- idr_remove(&tmp->netns_ids, id);
- }
- idr_destroy(&net->netns_ids);
-
}
rtnl_unlock();
@@ -398,12 +390,26 @@ static void cleanup_net(struct work_struct *work)
*/
rcu_barrier();
+ rtnl_lock();
/* Finally it is safe to free my network namespace structure */
list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
+ /* Unreference net from all peers (no need to loop over
+ * net_exit_list because idr_destroy() will be called for each
+ * element of this list.
+ */
+ for_each_net(peer) {
+ int id = __peernet2id(peer, net, false);
+
+ if (id >= 0)
+ idr_remove(&peer->netns_ids, id);
+ }
+ idr_destroy(&net->netns_ids);
+
list_del_init(&net->exit_list);
put_user_ns(net->user_ns);
net_drop_ns(net);
}
+ rtnl_unlock();
}
static DECLARE_WORK(net_cleanup_work, cleanup_net);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index ee0608bb3bc0..7ebed55b5f7d 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1932,10 +1932,10 @@ static int rtnl_group_changelink(const struct sk_buff *skb,
struct ifinfomsg *ifm,
struct nlattr **tb)
{
- struct net_device *dev;
+ struct net_device *dev, *aux;
int err;
- for_each_netdev(net, dev) {
+ for_each_netdev_safe(net, dev, aux) {
if (dev->group == group) {
err = do_setlink(skb, dev, ifm, tb, NULL, 0);
if (err < 0)
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 9d78427652d2..92825443fad6 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -268,7 +268,7 @@ static int __net_init ipmr_rules_init(struct net *net)
return 0;
err2:
- kfree(mrt);
+ ipmr_free_table(mrt);
err1:
fib_rules_unregister(ops);
return err;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 5a2dfed4783b..f1756ee02207 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1518,7 +1518,7 @@ void tcp_v4_early_demux(struct sk_buff *skb)
skb->sk = sk;
skb->destructor = sock_edemux;
if (sk->sk_state != TCP_TIME_WAIT) {
- struct dst_entry *dst = sk->sk_rx_dst;
+ struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
if (dst)
dst = dst_check(dst, 0);
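The tcp_v4_early_demux() hunk above replaces a plain load of sk->sk_rx_dst with READ_ONCE(). A minimal userspace sketch of why that matters on a lockless read path follows; the simplified READ_ONCE() macro and the structure and field names are stand-ins for illustration, not the kernel's definitions.

#include <stddef.h>

/* Toy stand-in for the kernel's READ_ONCE() (the real one lives in
 * compiler.h and handles more cases). */
#define READ_ONCE(x)  (*(const volatile typeof(x) *)&(x))

struct dst { int cookie; };

struct sock_like {
	struct dst *rx_dst;	/* written by one CPU, read locklessly by another */
};

static struct dst *demux_path(struct sock_like *sk)
{
	/* Without READ_ONCE() the compiler is free to reload sk->rx_dst
	 * between the NULL check and the use, so the two reads can observe
	 * different values while another CPU updates the pointer.
	 * READ_ONCE() forces a single, non-torn load. */
	struct dst *dst = READ_ONCE(sk->rx_dst);

	if (dst)
		return dst;	/* safe: same pointer value that was checked */
	return NULL;
}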
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 34b682617f50..52028f449a89 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -252,7 +252,7 @@ static int __net_init ip6mr_rules_init(struct net *net)
return 0;
err2:
- kfree(mrt);
+ ip6mr_free_table(mrt);
err1:
fib_rules_unregister(ops);
return err;
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 471ed24aabae..14ecdaf06bf7 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1218,7 +1218,14 @@ static void ndisc_router_discovery(struct sk_buff *skb)
if (rt)
rt6_set_expires(rt, jiffies + (HZ * lifetime));
if (ra_msg->icmph.icmp6_hop_limit) {
- in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
+ /* Only set hop_limit on the interface if it is higher than
+ * the current hop_limit.
+ */
+ if (in6_dev->cnf.hop_limit < ra_msg->icmph.icmp6_hop_limit) {
+ in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
+ } else {
+ ND_PRINTK(2, warn, "RA: Got route advertisement with lower hop_limit than current\n");
+ }
if (rt)
dst_metric_set(&rt->dst, RTAX_HOPLIMIT,
ra_msg->icmph.icmp6_hop_limit);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 5d46832c6f72..1f5e62229aaa 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1411,6 +1411,15 @@ static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
TCP_SKB_CB(skb)->sacked = 0;
}
+static void tcp_v6_restore_cb(struct sk_buff *skb)
+{
+ /* We need to move header back to the beginning if xfrm6_policy_check()
+ * and tcp_v6_fill_cb() are going to be called again.
+ */
+ memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
+ sizeof(struct inet6_skb_parm));
+}
+
static int tcp_v6_rcv(struct sk_buff *skb)
{
const struct tcphdr *th;
@@ -1543,6 +1552,7 @@ do_time_wait:
inet_twsk_deschedule(tw, &tcp_death_row);
inet_twsk_put(tw);
sk = sk2;
+ tcp_v6_restore_cb(skb);
goto process;
}
/* Fall through to ACK */
@@ -1551,6 +1561,7 @@ do_time_wait:
tcp_v6_timewait_ack(sk, skb);
break;
case TCP_TW_RST:
+ tcp_v6_restore_cb(skb);
goto no_tcp_socket;
case TCP_TW_SUCCESS:
;
@@ -1585,7 +1596,7 @@ static void tcp_v6_early_demux(struct sk_buff *skb)
skb->sk = sk;
skb->destructor = sock_edemux;
if (sk->sk_state != TCP_TIME_WAIT) {
- struct dst_entry *dst = sk->sk_rx_dst;
+ struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
if (dst)
dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
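The new tcp_v6_restore_cb() above undoes the memmove() that tcp_v6_fill_cb() performs on skb->cb[]: the IPv6 layer and TCP overlay their per-packet state on the same control-buffer bytes, so looping back to code that expects the IP6CB() view requires copying that state back to the front first. A rough standalone sketch of the aliasing, with invented layouts (the real structures live in include/net/tcp.h and include/linux/ipv6.h and differ in detail):

#include <string.h>

struct fake_skb { char cb[48]; };

/* IP6CB()-style view: lives at the start of cb[] */
struct ipv6_cb { int iif; unsigned short flags; };

/* TCP_SKB_CB()-style view: keeps the IPv6 state at a later offset */
struct tcp_cb {
	unsigned int seq;
	unsigned int end_seq;
	struct ipv6_cb h6;
};

#define FAKE_IP6CB(skb)   ((struct ipv6_cb *)(skb)->cb)
#define FAKE_TCP_CB(skb)  ((struct tcp_cb *)(skb)->cb)

/* Mirrors tcp_v6_fill_cb(): relocate the IPv6 state so TCP can use the
 * front of cb[] for its own fields. */
static void fill_cb(struct fake_skb *skb)
{
	memmove(&FAKE_TCP_CB(skb)->h6, FAKE_IP6CB(skb), sizeof(struct ipv6_cb));
}

/* Mirrors tcp_v6_restore_cb(): before jumping back to code that reads the
 * IP6CB() view (xfrm6_policy_check(), another tcp_v6_fill_cb() pass), the
 * IPv6 state must be moved back to offset 0; memmove() because the two
 * regions can overlap. */
static void restore_cb(struct fake_skb *skb)
{
	memmove(FAKE_IP6CB(skb), &FAKE_TCP_CB(skb)->h6, sizeof(struct ipv6_cb));
}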
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 2e9953b2db84..53d931172088 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1114,10 +1114,8 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
noblock, &err);
else
skb = sock_alloc_send_skb(sk, len, noblock, &err);
- if (!skb) {
- err = -ENOMEM;
+ if (!skb)
goto out;
- }
if (iucv->transport == AF_IUCV_TRANS_HIPER)
skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN);
if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index a48bad468880..7702978a4c99 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -49,8 +49,6 @@ static void ieee80211_free_tid_rx(struct rcu_head *h)
container_of(h, struct tid_ampdu_rx, rcu_head);
int i;
- del_timer_sync(&tid_rx->reorder_timer);
-
for (i = 0; i < tid_rx->buf_size; i++)
__skb_queue_purge(&tid_rx->reorder_buf[i]);
kfree(tid_rx->reorder_buf);
@@ -93,6 +91,12 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
del_timer_sync(&tid_rx->session_timer);
+ /* make sure ieee80211_sta_reorder_release() doesn't re-arm the timer */
+ spin_lock_bh(&tid_rx->reorder_lock);
+ tid_rx->removed = true;
+ spin_unlock_bh(&tid_rx->reorder_lock);
+ del_timer_sync(&tid_rx->reorder_timer);
+
call_rcu(&tid_rx->rcu_head, ieee80211_free_tid_rx);
}
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 944bdc04e913..1eb730bf8752 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -873,9 +873,10 @@ static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
set_release_timer:
- mod_timer(&tid_agg_rx->reorder_timer,
- tid_agg_rx->reorder_time[j] + 1 +
- HT_RX_REORDER_BUF_TIMEOUT);
+ if (!tid_agg_rx->removed)
+ mod_timer(&tid_agg_rx->reorder_timer,
+ tid_agg_rx->reorder_time[j] + 1 +
+ HT_RX_REORDER_BUF_TIMEOUT);
} else {
del_timer(&tid_agg_rx->reorder_timer);
}
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 925e68fe64c7..fb0fc1302a58 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -175,6 +175,7 @@ struct tid_ampdu_tx {
* @reorder_lock: serializes access to reorder buffer, see below.
* @auto_seq: used for offloaded BA sessions to automatically pick head_seq_and
* and ssn.
+ * @removed: this session is removed (but might have been found due to RCU)
*
* This structure's lifetime is managed by RCU, assignments to
* the array holding it must hold the aggregation mutex.
@@ -199,6 +200,7 @@ struct tid_ampdu_rx {
u16 timeout;
u8 dialog_token;
bool auto_seq;
+ bool removed;
};
/**
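The three mac80211 hunks above close a race between tearing down an RX aggregation session and the reorder-release path rearming reorder_timer: the stop path marks the session removed under reorder_lock before the final del_timer_sync(), and the release path only calls mod_timer() if that flag is still clear. A userspace analogue of the pattern, assuming pthreads, with a boolean standing in for the kernel timer (structure and helper names are invented):

#include <pthread.h>
#include <stdbool.h>

struct tid_rx_like {
	pthread_mutex_t reorder_lock;	/* plays the role of tid_rx->reorder_lock */
	bool removed;			/* plays the role of tid_rx->removed */
	bool timer_armed;		/* stands in for reorder_timer */
};

/* Release path (ieee80211_sta_reorder_release() in the patch): rearm only
 * while holding reorder_lock and only if the session still exists. */
static void reorder_release(struct tid_rx_like *tid)
{
	pthread_mutex_lock(&tid->reorder_lock);
	if (!tid->removed)
		tid->timer_armed = true;	/* mod_timer() in the real code */
	pthread_mutex_unlock(&tid->reorder_lock);
}

/* Teardown (___ieee80211_stop_rx_ba_session() in the patch): set removed
 * under the same lock, so a concurrent release either sees the flag or has
 * already finished rearming; only then is cancelling the timer final. */
static void stop_session(struct tid_rx_like *tid)
{
	pthread_mutex_lock(&tid->reorder_lock);
	tid->removed = true;
	pthread_mutex_unlock(&tid->reorder_lock);

	tid->timer_armed = false;	/* del_timer_sync() in the real code */
}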
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index ec2954ffc690..067a3fff1d2c 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -274,10 +274,8 @@ void ovs_vport_del(struct vport *vport)
ASSERT_OVSL();
hlist_del_rcu(&vport->hash_node);
-
- vport->ops->destroy(vport);
-
module_put(vport->ops->owner);
+ vport->ops->destroy(vport);
}
/**
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 935205e6bcfe..be1c9fa60b09 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -152,11 +152,11 @@ out_netlink:
static void __exit tipc_exit(void)
{
tipc_bearer_cleanup();
+ unregister_pernet_subsys(&tipc_net_ops);
tipc_netlink_stop();
tipc_netlink_compat_stop();
tipc_socket_stop();
tipc_unregister_sysctl();
- unregister_pernet_subsys(&tipc_net_ops);
pr_info("Deactivated\n");
}