author	Hidehiro Kawai <hidehiro.kawai.ez@hitachi.com>	2015-07-27 14:55:16 +0900
committer	Corey Minyard <cminyard@mvista.com>	2015-09-03 15:02:29 -0500
commit	06e5e345fea8df24b1d935f98741343df4cab664 (patch)
tree	7ba5bd37d180c796b1518199c057af065d9450c0 /drivers/char/ipmi
parent	82802f968bd3118af04eaeb3814c21d9813be527 (diff)
ipmi: Avoid touching possible corrupted lists in the panic context
When processing queued messages in the panic context, the IPMI driver tries to do it without any locking, to avoid deadlocks. However, this means we can touch a corrupted list if the kernel panicked while manipulating the list. Fortunately, the current `add-tail and del-from-head' style implementation won't touch the corrupted part, but it is inherently risky.

To get rid of the risk, this patch re-initializes the message lists on panic if the related spinlock has already been acquired. As a result, we may lose queued messages, but that is not so painful. Dropping messages on the received-message list is also less problematic, because no one can respond to the received messages anyway.

Signed-off-by: Hidehiro Kawai <hidehiro.kawai.ez@hitachi.com>

Fixed a comment typo.

Signed-off-by: Corey Minyard <cminyard@mvista.com>
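The trylock-then-reinitialize idea is easier to see outside the kernel. Below is a minimal, hypothetical userspace C analogue of the pattern the patch applies: `pthread_mutex_trylock` and a bare pointer stand in for `spin_trylock()` and the driver's `struct list_head` queues; `struct msg_queue` and `drain_on_panic()` are invented for illustration and do not appear in the driver.

```c
/*
 * Userspace sketch (not the kernel code): if the lock that guards a
 * list cannot be taken, assume the list may be mid-update and reset it
 * to an empty state instead of walking it.
 */
#include <pthread.h>
#include <stdio.h>

struct node { struct node *next; };

struct msg_queue {                  /* hypothetical stand-in for intf      */
	pthread_mutex_t lock;       /* plays the role of xmit_msgs_lock    */
	struct node *head;          /* plays the role of the message list  */
};

static void drain_on_panic(struct msg_queue *q)
{
	if (pthread_mutex_trylock(&q->lock) != 0) {
		/* Lock was already held: the list may be corrupt, drop it. */
		q->head = NULL;
	} else {
		/* List is consistent; release the lock and keep the list. */
		pthread_mutex_unlock(&q->lock);
	}
}

int main(void)
{
	struct msg_queue q = { .lock = PTHREAD_MUTEX_INITIALIZER, .head = NULL };

	drain_on_panic(&q);
	printf("queue head after drain: %p\n", (void *)q.head);
	return 0;
}
```

Compile with `cc -pthread`. As in the patch, a failed trylock is treated as evidence that the lock holder was interrupted mid-update, so the queue is simply emptied rather than traversed.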
Diffstat (limited to 'drivers/char/ipmi')
-rw-r--r--	drivers/char/ipmi/ipmi_msghandler.c	17
1 file changed, 17 insertions, 0 deletions
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 6e191ff910e6..cdac5f7037e5 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -4511,6 +4511,23 @@ static int panic_event(struct notifier_block *this,
 			/* Interface is not ready. */
 			continue;
 
+		/*
+		 * If we were interrupted while locking xmit_msgs_lock or
+		 * waiting_rcv_msgs_lock, the corresponding list may be
+		 * corrupted.  In this case, drop items on the list for
+		 * the safety.
+		 */
+		if (!spin_trylock(&intf->xmit_msgs_lock)) {
+			INIT_LIST_HEAD(&intf->xmit_msgs);
+			INIT_LIST_HEAD(&intf->hp_xmit_msgs);
+		} else
+			spin_unlock(&intf->xmit_msgs_lock);
+
+		if (!spin_trylock(&intf->waiting_rcv_msgs_lock))
+			INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
+		else
+			spin_unlock(&intf->waiting_rcv_msgs_lock);
+
 		intf->run_to_completion = 1;
 		intf->handlers->set_run_to_completion(intf->send_info, 1);
 	}