Blame SOURCES/0019-Fix-handling-of-Machine-Check-Exceptions.patch

From 744e4c6a6cbbb9ba0569bf8e3ab50171e974b2e3 Mon Sep 17 00:00:00 2001
From: Denys Vlasenko <dvlasenk@redhat.com>
Date: Mon, 6 Jan 2014 17:18:31 +0100
Subject: [ABRT PATCH 19/27] Fix handling of Machine Check Exceptions.

Closes #764.

If a non-fatal MCE is seen, abrt will detect it as an oops
and alert the user in the usual manner. When the user opens this
abrt problem for reporting, they will see that the "comment"
field is pre-filled with text.
What it says depends on whether the mcelog tool is installed.
If mcelog is installed, the text will say that hardware errors
were detected, and will show the tail of either /var/log/mcelog
or syslog.
Otherwise the text will say that hardware errors
were detected but cannot be usefully diagnosed,
and the user is strongly advised to install the mcelog tool.
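
In shell terms, the mcelog branch of the new koops_event.conf logic
boils down to roughly this (a simplified sketch; the real event
script below also handles the syslog case and the missing-mcelog
case):

    # Only act if the kernel actually logged machine check events
    grep -qFi 'Machine check events logged' dmesg || exit 0
    # Pre-fill the "comment" element from mcelog's log, if it exists
    test -f /var/log/mcelog && {
        echo "The kernel log indicates that hardware errors were detected."
        echo "The last 20 lines of /var/log/mcelog are:"
        tail -n20 /var/log/mcelog 2>&1
    } >comment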

If a fatal MCE is encountered, the kernel always panics
(abrt has no chance of catching the oops), kdump kicks in,
and after reboot abrt reports that a new vmcore was found.
When the user generates a backtrace, they will see oops text
which starts with
"Machine Check Exception: BANK nnn ..." and (hopefully)
is already explanatory enough.
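
For reference, with the vmcore_event.conf change below, extracting
that oops text from a kdump-saved log is essentially:

    # Sketch: pull the MCE/panic text out of the log saved by kdump
    test -f vmcore-dmesg.txt &&
        abrt-dump-oops -o vmcore-dmesg.txt >backtrace

(the full event script falls back to abrt-action-analyze-vmcore
when vmcore-dmesg.txt is absent).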

(Yes, it's weird that the kernel shows human-readable error messages
on fatal MCEs but doesn't do that for non-fatal ones.
This makes fetching MCE info significantly different...
I wish the kernel would show human-readable MCEs in both cases;
we wouldn't need mcelog then... oh well.)

In order to generate a meaningful hash for MCEs,
oops hashing was extended to cover oopses without backtraces.
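
Conceptually, the extension amounts to something like the following
(an illustration only, not abrt's actual hashing code; "duphash" is
the problem element abrt uses for duplicate detection):

    # Idea: with no "Call Trace:" frames available to hash,
    # fall back to hashing the oops reason line so that MCE
    # reports still get a usable duphash.
    if ! grep -q 'Call Trace:' backtrace; then
        head -n1 backtrace | sha1sum | cut -d' ' -f1 >duphash
    fi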

Since MCEs, unlike regular oopses, don't contain the kernel version,
additional magic is added to extract the kernel version
in vmcore event handling.
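
That "magic" is the sed one-liner in the vmcore_event.conf hunk below:
it takes the version field from the last "Linux version ..." banner
in the kdump-saved log and stores it as the "kernel" element:

    # Grab the word following "Linux version" from the last such
    # banner line in the log; create the "kernel" element from it.
    k=`sed -n '/Linux version/ s/.*Linux version \([^ ]*\) .*/\1/p' vmcore-dmesg.txt | tail -n1`
    test "$k" != "" && printf "%s" "$k" >kernel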

Signed-off-by: Denys Vlasenko <dvlasenk@redhat.com>

Related to rhbz#1032077

Signed-off-by: Jakub Filak <jfilak@redhat.com>
---
 src/lib/kernel.c              | 31 +++++++++++++++++++++++++
 src/plugins/koops_event.conf  | 54 +++++++++++++++++++++++++++++++++++++++++++
 src/plugins/vmcore_event.conf | 18 ++++++++++++++-
 3 files changed, 102 insertions(+), 1 deletion(-)

diff --git a/src/lib/kernel.c b/src/lib/kernel.c
index ce8815b..340ec39 100644
--- a/src/lib/kernel.c
+++ b/src/lib/kernel.c
@@ -115,8 +115,29 @@ static const char *const s_koops_suspicious_strings[] = {
      * arch/x86/kernel/cpu/mcheck/p5.c:		"CPU#%d: Machine Check Exception:  0x%8X (type 0x%8X).\n",
      * arch/x86/kernel/cpu/mcheck/mce.c:	pr_emerg(HW_ERR "CPU %d: Machine Check Exception: %Lx Bank %d: %016Lx\n",
      * drivers/edac/sb_edac.c:			printk("CPU %d: Machine Check Exception: %Lx Bank %d: %016Lx\n",
+     *
+     * MCEs can be fatal (they panic the kernel) or not.
+     * Fatal MCEs are delivered as exception#18 to the CPU.
+     * Non-fatal ones sometimes are delivered as exception#18;
+     * other times they are silently recorded in magic MSRs, CPU is not alerted.
+     * Linux kernel periodically (up to 5 mins interval) reads those MSRs
+     * and if MCE is seen there, it is piped in binary form through
+     * /dev/mcelog to whoever listens on it. (Such as mcelog tool in --daemon
+     * mode; but cat </dev/mcelog would do too).
+     *
+     * "Machine Check Exception:" message is printed *only*
+     * for fatal MCEs (so far; future kernels may be different).
+     * It will be caught as vmcore if kdump is configured.
+     *
+     * Non-fatal MCEs have "[Hardware Error]: Machine check events logged"
+     * message in kernel log.
+     * When /dev/mcelog is read, *no additional kernel log messages appear*:
+     * if we want more readable data, we must rely on other tools
+     * (such as mcelog daemon consuming binary /dev/mcelog and writing
+     * human-readable /var/log/mcelog).
      */
     "Machine Check Exception:",
+    "Machine check events logged",
 
     /* X86 TRAPs */
     "divide error:",
@@ -299,6 +320,16 @@ next_line:
             if (strcasestr(curline, "Call Trace:")) /* yes, it must be case-insensitive */
                 inbacktrace = 1;
             else
+            /* Fatal MCEs have a few lines of useful information between
+             * the first "Machine check exception:" line and the final "Kernel panic"
+             * line. Such an oops, of course, is only detectable in kdumps (tested)
+             * or possibly pstore-saved logs (I did not try this yet).
+             * In order to capture all these lines, we treat the final line
+             * as "backtrace" (which is admittedly a hack):
+             */
+            if (strstr(curline, "Kernel panic - not syncing"))
+                inbacktrace = 1;
+            else
             if (strnlen(curline, 9) > 8
              && (  (curline[0] == '(' && curline[1] == '[' && curline[2] == '<')
                || (curline[0] == '[' && curline[1] == '<'))
diff --git a/src/plugins/koops_event.conf b/src/plugins/koops_event.conf
index c0277c8..7dfbe36 100644
--- a/src/plugins/koops_event.conf
+++ b/src/plugins/koops_event.conf
@@ -4,6 +4,60 @@ EVENT=post-create analyzer=Kerneloops
         abrt-action-analyze-oops &&
         dmesg >>dmesg &&
         abrt-action-save-kernel-data
+        abrt-action-save-kernel-data || exit $?
+        #
+        # If it exists, we can save a copy of MCE log here:
+        #test -f /var/log/mcelog && cp /var/log/mcelog .
+        # but in current config, sosreport already does that.
+        #
+        # See if MCEs were seen but mcelog isn't installed or running
+        grep -qFi 'Machine check events logged' dmesg || exit 0
+        #
+        # There was an MCE. IOW: it's not a bug, it's a HW error.
+        # Did mcelog log it to /var/log/mcelog
+        # (RHEL6 by default does this)?
+        test -f /var/log/mcelog &&
+        {
+                # (Ab)use user comment field to inform user about it.
+                echo "The kernel log indicates that hardware errors were detected."
+                echo "/var/log/mcelog file may have more information."
+                echo "The last 20 lines of /var/log/mcelog are:"
+                echo "========================================="
+                # Redirecting stderr in case selinux makes it unreadable
+                # (annoying anyway, but at least user knows what's going on):
+                tail -n20 /var/log/mcelog 2>&1
+                exit 0
+        } >comment
+        #
+        # On RHEL7, mcelog is run so that its output ends up in syslog.
+        # Do we see that?
+        grep -qFi 'mcelog: Hardware event' /var/log/messages &&
+        {
+                echo "The kernel log indicates that hardware errors were detected."
+                echo "System log may have more information."
+                echo "The last 20 mcelog lines of system log are:"
+                echo "========================================="
+                # Redirecting stderr in case selinux makes it unreadable
+                # (annoying anyway, but at least user knows what's going on):
+                grep -Fi 'mcelog:' /var/log/messages 2>&1 | tail -n20
+                exit 0
+        } >comment
+        #
+        # Apparently, there is no running mcelog daemon!
+        # Let the user know that one is needed.
+        {
+        echo "The kernel log indicates that hardware errors were detected."
+        echo "The data was saved by kernel for processing by the mcelog tool."
+        echo "However, neither /var/log/mcelog nor the system log contains mcelog messages."
+        echo "The most likely reason is that mcelog is not installed or not configured"
+        echo "to be started during boot."
+        echo "Without this tool running, the binary data saved by kernel"
+        echo "is of limited usefulness."
+        echo "(You can save this data anyway by running 'cat </dev/mcelog >FILE')."
+        echo "The recommended course of action is to install mcelog."
+        echo "If another hardware error occurs, a user-readable description"
+        echo "of it will be saved in system log or /var/log/mcelog."
+        } >comment
 
 # If you want behavior similar to one provided by kerneloops daemon
 # distributed by kerneloops.org - that is, if you want
diff --git a/src/plugins/vmcore_event.conf b/src/plugins/vmcore_event.conf
index f8de3c5..655d842 100644
--- a/src/plugins/vmcore_event.conf
+++ b/src/plugins/vmcore_event.conf
@@ -1,6 +1,22 @@
 # analyze
 EVENT=analyze_VMcore analyzer=vmcore
-        abrt-action-analyze-vmcore &&
+        # If kdump machinery already extracted dmesg...
+        if test -f vmcore-dmesg.txt; then
+            # ...use that
+            abrt-dump-oops -o vmcore-dmesg.txt >backtrace || exit $?
+            #
+            # Does "kernel" element exist?
+            test -f kernel && exit 0
+            #
+            # Try creating it from vmcore-dmesg.txt:
+            # MCE oopses don't have kernel version in them,
+            # but it should be specified earlier in the log.
+            k=`sed -n '/Linux version/ s/.*Linux version \([^ ]*\) .*/\1/p' vmcore-dmesg.txt | tail -n1`
+            test "$k" != "" && printf "%s" "$k" >kernel
+        else
+            # No vmcore-dmesg.txt, do it the hard way:
+            abrt-action-analyze-vmcore
+        fi &&
         abrt-action-analyze-oops &&
         abrt-action-save-kernel-data
 
-- 
1.8.3.1