# default is set in /etc/sysconfig/system
START_UDEV=no
;;
+ nomdadm)
+ # default is set in /etc/sysconfig/system
+ MDADM=no
+ ;;
+ nomultipath)
+ # default is set in /etc/sysconfig/system
+ DM_MULTIPATH=no
+ ;;
nousb)
nousb=1
;;
done
}
+# launch repair shell
+# which after exiting will reboot
+#
+# $1 - reason: message describing the failure; printed before dropping
+#      to sulogin (or /bin/sh when RUN_SULOGIN_ON_ERR is "no").
+# Never returns: when the shell exits, filesystems are unmounted, root
+# is remounted read-only and the machine is rebooted.
+repair_shell() {
+	# BUGFIX: the reason is passed as the first (and only) argument at
+	# every call site, so it must be read from $1, not $2.
+	local reason="$1"
+
+	# don't use '\n' in nls macro !
+	echo
+	echo
+	echo "$reason"
+	nls "*** Dropping you to a shell; the system will reboot"
+	nls "*** when you leave the shell."
+	echo
+
+	[ "$SELINUX" = "1" ] && disable_selinux
+	if ! is_no "$RUN_SULOGIN_ON_ERR"; then
+		/sbin/sulogin
+	else
+		/bin/sh
+	fi
+
+	run_cmd "Unmounting file systems" umount -a
+	run_cmd "Remounting root filesystem in ro mode" mount -n -o remount,ro /
+	run_cmd "Automatic reboot in progress" reboot
+}
+
check_root_fs() {
show "Checking root filesystem"; started
initlog -c "fsck -C -T -a $fsckoptions /"
# A return of 4 or higher means there were serious problems.
if [ $rc -gt 3 ]; then
[ -e /proc/splash ] && echo "verbose" > /proc/splash
- # don't use '\n' in nls macro !
- echo
- echo
- nls "*** An error occurred during the file system check."
- nls "*** Dropping you to a shell; the system will reboot"
- nls "*** when you leave the shell."
- echo
PS1="$(nls '(Repair filesystem)# ')"; export PS1
- [ "$SELINUX" = "1" ] && disable_selinux
- if ! is_no "$RUN_SULOGIN_ON_ERR"; then
- /sbin/sulogin
- else
- /bin/sh
- fi
+ repair_shell "$(nls '*** An error occurred during the file system check.')"
- run_cmd "Unmounting file systems" umount -a
- mount -n -o remount,ro /
- run_cmd "Automatic reboot in progress" reboot
# A return of 2 or 3 means that filesystem was repaired but we need
# to reboot.
elif [ "$rc" = "2" -o "$rc" = "3" ]; then
fi
}
+# Add raid devices
+# Assemble RAID arrays listed in /etc/mdadm.conf and, on success,
+# activate any LVM volume groups living on top of them.  On assembly
+# failure drops to a repair shell (which reboots on exit).
+# No-op unless /sbin/mdadm and /etc/mdadm.conf both exist.
+init_mdadm() {
+	if [ ! -x /sbin/mdadm -o ! -f /etc/mdadm.conf ]; then
+		return
+	fi
+
+	modprobe -s md
+	local rc=0
+	# /proc/mdstat only exists once the md driver is loaded
+	if [ -f /proc/mdstat ]; then
+		# golvm=1 flags that the LVM-on-RAID activation below should run
+		golvm=0
+
+		# only attempt assembly if the config actually defines arrays
+		if grep -qE "^([[:blank:]]|)ARRAY[[:blank:]]" /etc/mdadm.conf; then
+			show "Starting up RAID devices"; busy
+			/sbin/mdadm --assemble --scan --auto=yes
+			rc=$?
+			# NOTE(review): exit status 2 is treated as success here,
+			# presumably "started, but not all arrays/devices" --
+			# confirm against mdadm(8)
+			if [ "$rc" -eq 0 -o "$rc" -eq 2 ]; then
+				# rc is used later, too so set sane value
+				rc=0
+				deltext; ok
+				golvm=1
+			else
+				deltext; fail
+			fi
+
+		fi
+
+		# A non-zero return means there were problems
+		if [ $rc -gt 0 ]; then
+			# switch bootsplash to verbose so the user sees the messages
+			[ -e /proc/splash ] && echo "verbose" > /proc/splash
+			show "Starting up RAID devices"; fail
+
+			PS1="$(nls '(RAID Repair)# ')"; export PS1
+			# repair_shell reboots; execution does not continue past here
+			repair_shell "$(nls '*** An error occurred during the RAID startup.')"
+		fi
+
+		# LVM on RAID (keep in sync with LVM init)
+		if [ "$golvm" -eq "1" ]; then
+			if [ -x /sbin/vgscan -a -x /sbin/vgchange ]; then
+				run_cmd "Scanning for LVM volume groups (on RAID)" /sbin/vgscan $lvmignorelocking
+				run_cmd "Activating LVM volume groups (on RAID)" /sbin/vgchange -a y $lvmsysinit
+				[ "$lvmversion" = "2" ] && /sbin/vgmknodes
+			fi
+		fi
+		show "Starting up RAID devices"; ok
+	fi
+	return $rc
+}
+
+# Init LVM
+# Detect the installed LVM version, load the matching kernel module(s)
+# and activate all volume groups.  Sets $lvmversion, $lvmignorelocking
+# and $lvmsysinit as globals (init_mdadm reuses them for LVM-on-RAID).
+# No-op unless the LVM tools exist or EVMS_LVM is enabled.
+init_lvm() {
+	if [ ! -x /sbin/vgscan -o ! -x /sbin/vgchange ] && ! is_yes "$EVMS_LVM"; then
+		return
+	fi
+
+	# LVM2=no means "autodetect": parse the major version out of
+	# `vgchange --version`; otherwise assume LVM2 unconditionally
+	if is_no "$LVM2"; then
+		lvmversion=$(LC_ALL=C /sbin/vgchange --version 2>/dev/null | awk '/LVM version:/{if ($3 >= 2) print "2"}')
+	else
+		lvmversion=2
+	fi
+
+	if [ "$lvmversion" = "1" ] ; then
+		modprobe -s lvm-mod
+		lvmignorelocking=""
+		lvmsysinit=""
+	elif [ "$lvmversion" = "2" ] ; then
+		modprobe -s dm-mod
+		# early-boot flags: tolerate a read-only locking dir and skip
+		# actions unsafe before the system is fully up
+		lvmignorelocking="--ignorelockingfailure"
+		lvmsysinit="--sysinit"
+	else
+		# version unknown: load both candidate modules and hope
+		modprobe -s lvm-mod
+		# device mapper (2.5+ and patched 2.4)
+		modprobe -s dm-mod
+		lvmignorelocking=""
+		lvmsysinit=""
+	fi
+
+	run_cmd "Scanning for LVM volume groups" /sbin/vgscan $lvmignorelocking
+	run_cmd "Activating LVM volume groups" /sbin/vgchange -a y $lvmsysinit
+	if [ "$lvmversion" = "2" ]; then
+		/sbin/vgmknodes $lvmignorelocking
+		# display VG statistics
+		/sbin/vgdisplay -s $lvmignorelocking
+	fi
+}
+
# boot logging to /var/log/boot.log. install showconsole package to get it.
if ! is_no "$RC_BOOTLOG" && [ -x /sbin/blogd ]; then
RC_BOOTLOG=1
is_fsmounted devtmpfs /dev || mount -n -t devtmpfs devtmpfs /dev
load_kernel_modules modules.preudev
/sbin/start_udev
- [ -x /sbin/initctl ] && /sbin/initctl -q start udev
+ use_upstart && [ -x /sbin/initctl ] && /sbin/initctl -q start udev
elif [ -x /lib/firmware/firmware-loader.sh ]; then
/sbin/sysctl -e -w kernel.hotplug=/lib/firmware/firmware-loader.sh > /dev/null 2>&1
fi
done
fi
-	if [ -x /sbin/multipath ] && ! is_no "$DM_MULTIPATH"; then
+	# BUGFIX: the rewritten condition dropped the '&&', turning the
+	# executable test into stray arguments of is_no
+	if ! is_no "$DM_MULTIPATH" && [ -x /sbin/multipath ]; then
		# first make nodes that were discarded due (possible) new /dev mount
		modprobe -s dm-mod
		/sbin/dmsetup mknodes
		/sbin/dmsetup ls --target multipath --exec '/sbin/kpartx -a -p p'
	fi
- if [ -x /sbin/dmraid ]; then
+ if ! is_no "$DMRAID" && [ -x /sbin/dmraid ]; then
run_cmd "Activating ATARAID devices" /sbin/dmraid -ay
fi
# Find and activate volume groups:
# EVMS
- if [ -x /sbin/evms_activate ]; then
+ if is_yes "$EVMS_LVM" && [ -x /sbin/evms_activate ]; then
if [ "$(kernelverser)" -lt "002006" ]; then
# Linux 2.4 core modules
modprobe -s evms
fi
fi
- # LVM (keep in sync with LVM starting after RAID run!)
- if ! is_no "$LVM2" && [ -x /sbin/vgscan -a -x /sbin/vgchange ] || is_yes "$EVMS_LVM"; then
- if is_no "$LVM2"; then
- lvmversion=$(LC_ALL=C /sbin/vgchange --version 2>/dev/null | awk '/LVM version:/{if ($3 >= 2) print "2"}')
- else
- lvmversion=2
- fi
- if [ "$lvmversion" = "1" ] ; then
- modprobe -s lvm-mod
- lvmignorelocking=""
- lvmsysinit=""
- elif [ "$lvmversion" = "2" ] ; then
- modprobe -s dm-mod
- lvmignorelocking="--ignorelockingfailure"
- lvmsysinit="--sysinit"
- else
- modprobe -s lvm-mod
- # device mapper (2.5+ and patched 2.4)
- modprobe -s dm-mod
- lvmignorelocking=""
- lvmsysinit=""
- fi
-
- run_cmd "Scanning for LVM volume groups" /sbin/vgscan $lvmignorelocking
- run_cmd "Activating LVM volume groups" /sbin/vgchange -a y $lvmsysinit
- if [ "$lvmversion" = "2" ]; then
- /sbin/vgmknodes $lvmignorelocking
- # display VG statistics
- /sbin/vgdisplay -s $lvmignorelocking
- fi
+ # Init LVM
+ if ! is_no "$LVM2"; then
+ init_lvm
fi
if [ "$delay_cryptsetup" != 0 ]; then
[ $delay_cryptsetup = 0 ] && ok || fail
fi
- # Add raid devices
- if [ -x /sbin/mdadm -a -f /etc/mdadm.conf ] || [ -f /etc/raidtab ]; then
- modprobe -s md
- if [ -f /proc/mdstat ]; then
- goraidtab=1
- golvm=0
- rc=0
- if [ -x /sbin/mdadm -a -f /etc/mdadm.conf ]; then
- if grep -qE "^([[:blank:]]|)ARRAY[[:blank:]]" /etc/mdadm.conf 2>/dev/null; then
- show "Starting up RAID devices"; busy
- /sbin/mdadm --assemble --scan --auto=yes
- rc=$?
- if [ "$rc" -eq 0 -o "$rc" -eq 2 ]; then
- # rc is used later, too so set sane value
- rc=0
- deltext; ok
- goraidtab=0
- golvm=1
- else
- deltext; fail
- fi
-
- fi
- fi
-
- if [ -f /etc/raidtab -a "$goraidtab" -eq 1 ]; then
- for i in $(awk '!/^#/ && /raiddev/{print $2}' /etc/raidtab 2>/dev/null); do
- golvm=1
- RAIDDEV=$(basename $i)
- RAIDSTAT=$(grep "^$RAIDDEV : active" /proc/mdstat 2>/dev/null)
- show "Starting up RAID device %s" $RAIDDEV
- busy
- if [ -z "$RAIDSTAT" ]; then
- # Try raidstart first...if that fails then
- # fall back to raid0run and if that fails too
- # fall back to raidadd, raidrun.
- RESULT=1
- if [ -x /sbin/raidstart ]; then
- /sbin/raidstart $i
- RESULT=$?
- fi
- if [ $RESULT -gt 0 -a -x /sbin/raid0run ]; then
- /sbin/raid0run $i
- RESULT=$?
- fi
- if [ $RESULT -gt 0 -a -x /sbin/raidadd -a -x /sbin/raidrun ]; then
- /sbin/raidadd $i
- /sbin/raidrun $i
- RESULT=$?
- fi
- if [ $RESULT -gt 0 ]; then
- rc=1
- fail
- else
- ok
- fi
- else
- ok
- fi
- done
- fi
-
- # A non-zero return means there were problems
- if [ $rc -gt 0 ]; then
- [ -e /proc/splash ] && echo "verbose" > /proc/splash
- show "Starting up RAID devices"; fail
- echo
- echo
- nls "*** An error occurred during the RAID startup."
- nls "*** Dropping you to a shell; the system will reboot"
- nls "*** when you leave the shell."
- echo
-
- PS1="$(nls '(RAID Repair)# ')"; export PS1
- [ "$SELINUX" = "1" ] && disable_selinux
- if ! is_no "$RUN_SULOGIN_ON_ERR"; then
- /sbin/sulogin
- else
- /bin/sh
- fi
-
- run_cmd "Unmounting file systems" umount -a
- run_cmd "Remounting root filesystem in ro mode" mount -n -o remount,ro /
- run_cmd "Automatic reboot in progress" reboot
- fi
- # LVM on RAID (keep in sync with LVM setting few lines above)
- if [ "$golvm" -eq "1" ]; then
- if [ -x /sbin/vgscan -a -x /sbin/vgchange ]; then
- run_cmd "Scanning for LVM volume groups (on RAID)" /sbin/vgscan $lvmignorelocking
- run_cmd "Activating LVM volume groups (on RAID)" /sbin/vgchange -a y $lvmsysinit
- [ "$lvmversion" = "2" ] && /sbin/vgmknodes
- fi
- fi
- show "Starting up RAID devices"; ok
- fi
+ if ! is_no "$MDADM"; then
+ init_mdadm
fi
_RUN_QUOTACHECK=0
# A return of 2 or higher means there were serious problems
if [ $rc -gt 1 ]; then
[ -e /proc/splash ] && echo "verbose" > /proc/splash
- echo
- echo
- nls "*** An error occurred during the file system check."
- nls "*** Dropping you to a shell; the system will reboot"
- nls "*** when you leave the shell."
- echo
PS1="$(nls '(Repair filesystem)# ')"; export PS1
- [ "$SELINUX" = "1" ] && disable_selinux
- if ! is_no "$RUN_SULOGIN_ON_ERR"; then
- /sbin/sulogin
- else
- /bin/sh
- fi
+ repair_shell "$(nls '*** An error occurred during the file system check.')"
- run_cmd "Unmounting file systems" umount -a
- run_cmd "Remounting root filesystem in ro mode" mount -n -o remount,ro /
- run_cmd "Automatic reboot in progress" reboot
elif [ "$rc" = "1" -a -x /sbin/quotacheck ]; then
_RUN_QUOTACHECK=1
fi
# Mount all other filesystems (except for NFS and /proc, which is already
# mounted). Contrary to standard usage,
# filesystems are NOT unmounted in single user mode.
-
run_cmd "Mounting local filesystems" mount -a -t nonfs,nfs4,smbfs,ncpfs,proc,cifs -O no_netdev
# now we have /usr mounted, recheck if we have gettext and tput available.
# A return of 2 or higher means there were serious problems.
if [ $rc -gt 1 ]; then
[ -e /proc/splash ] && echo "verbose" > /proc/splash
- echo
- echo
- nls "*** An error occurred during the file system check."
- nls "*** Dropping you to a shell; the system will reboot"
- nls "*** when you leave the shell."
- echo
PS1="$(nls '(Repair filesystem)# ')"; export PS1
- [ "$SELINUX" = "1" ] && disable_selinux
- if ! is_no "$RUN_SULOGIN_ON_ERR"; then
- /sbin/sulogin
- else
- /bin/sh
- fi
+ repair_shell "$(nls '*** An error occurred during the file system check.')"
- run_cmd "Unmounting file systems" umount -a
- run_cmd "Remounting root filesystem in ro mode" mount -n -o remount,ro /
- run_cmd "Automatic reboot in progress" reboot
elif [ "$rc" = "1" -a -x /sbin/quotacheck ]; then
_RUN_QUOTACHECK=1
fi
. /etc/rc.d/rc.serial
fi
- if [ -f /proc/sys/kernel/panic -a -n "$PANIC_REBOOT_TIME" -a "$PANIC_REBOOT_TIME" -gt "0" ]; then
+ if [ -n "$PANIC_REBOOT_TIME" -a "$PANIC_REBOOT_TIME" -gt "0" -a -f /proc/sys/kernel/panic ]; then
show 'Setting %s seconds for kernel reboot after panic' "$PANIC_REBOOT_TIME"; busy
# NOTE: you should use /etc/sysctl.conf instead
if sysctl -w kernel.panic=$PANIC_REBOOT_TIME >/dev/null 2>&1; then ok; else fail; fi