I just made a permanent clean fix for this, at least for MD (software RAID). It can easily be modified to fix for LVM too. Edit /etc/grub.d/00_header and change the recordfail section to this:
# grub-mkconfig helper: when quick_boot is enabled, emit a "recordfail"
# function into the generated grub.cfg.  save_env only works where GRUB can
# write the environment block, so the save_env call is suppressed when the
# GRUB directory sits on an MD (software RAID) device or on a filesystem
# GRUB cannot write to.
if [ "$quick_boot" = 1 ]; then
  cat <<EOF
function recordfail {
set recordfail=1
EOF
  # Filesystem type holding the GRUB directory.
  FS="$(grub-probe --target=fs "${grubdir}")"
  # Non-empty only when the GRUB directory lives on a /dev/md* device.
  GRUBMDDEVICE="$(grub-probe --target=disk "${grubdir}" | grep '/dev/md')"
  # Test the captured output directly; checking "$?" after an assignment is
  # fragile because it silently depends on the command substitution's grep.
  if [ -n "$GRUBMDDEVICE" ]; then
    cat <<EOF
# GRUB lacks write support for $GRUBMDDEVICE, so recordfail support is disabled.
EOF
  else
    case "$FS" in
      btrfs | cpiofs | newc | odc | romfs | squash4 | tarfs | zfs)
        cat <<EOF
# GRUB lacks write support for $FS, so recordfail support is disabled.
EOF
        ;;
      *)
        cat <<EOF
if [ -n "\${have_grubenv}" ]; then if [ -z "\${boot_once}" ]; then save_env recordfail; fi; fi
EOF
        ;;
    esac
  fi
  cat <<EOF
}
EOF
fi
I just made a permanent clean fix for this, at least for MD (software RAID). It can easily be modified to fix for LVM too. Edit /etc/grub.d/00_header and change the recordfail section to this:
# grub-mkconfig helper (reconstructed — the pasted copy was corrupted:
# fragments of the FS/GRUBMDDEVICE assignments were fused into the "if"
# line, leaving "FS=" empty and a bare "GRUBMDDEVIC" word).
# When quick_boot is enabled, emit a "recordfail" function into grub.cfg,
# suppressing save_env where GRUB cannot write (MD RAID, read-only fs).
if [ "$quick_boot" = 1 ]; then
  cat <<EOF
function recordfail {
set recordfail=1
EOF
  # Filesystem type holding the GRUB directory.
  FS="$(grub-probe --target=fs "${grubdir}")"
  # Non-empty only when the GRUB directory lives on a /dev/md* device.
  GRUBMDDEVICE="$(grub-probe --target=disk "${grubdir}" | grep '/dev/md')"
  # Test the captured output directly rather than the fragile "$?" idiom.
  if [ -n "$GRUBMDDEVICE" ]; then
    cat <<EOF
# GRUB lacks write support for $GRUBMDDEVICE, so recordfail support is disabled.
EOF
  else
    case "$FS" in
      btrfs | cpiofs | newc | odc | romfs | squash4 | tarfs | zfs)
        cat <<EOF
# GRUB lacks write support for $FS, so recordfail support is disabled.
EOF
        ;;
      *)
        cat <<EOF
if [ -n "\${have_grubenv}" ]; then if [ -z "\${boot_once}" ]; then save_env recordfail; fi; fi
EOF
        ;;
    esac
  fi
  cat <<EOF
}
EOF
fi