initrd.scripts: refactor start_volumes(), improve readability

master
Fabio Erculiani 12 years ago
parent 382691d467
commit f0775326eb

@@ -979,126 +979,103 @@ zfs_init() {
 }
 start_volumes() {
-	#good_msg 'Checking if volumes need to be started...'
 	# Here, we check for /dev/device-mapper, and if it exists, we setup a
 	# a symlink, which should hopefully fix bug #142775 and bug #147015
-	if [ -e /dev/device-mapper ] && [ ! -e /dev/mapper/control ]
-	then
+	if [ -e /dev/device-mapper ] && [ ! -e /dev/mapper/control ]; then
 		mkdir -p /dev/mapper
 		ln -sf /dev/device-mapper /dev/mapper/control
 	fi
-	if [ "${USE_MDADM}" = '1' ]
-	then
-		if [ -e '/sbin/mdadm' ]
-		then
+	if [ "${USE_MDADM}" = "1" ]; then
+		if [ -e "/sbin/mdadm" ]; then
 			/sbin/mdadm --assemble --scan
 		else
 			bad_msg "mdadm not found: skipping mdadm raid assembly!"
 		fi
 	fi
-	if [ "${USE_MULTIPATH_NORMAL}" = '1' ]
-	then
+	if [ "${USE_MULTIPATH_NORMAL}" = "1" ]; then
 		good_msg "Scanning for multipath devices"
 		/sbin/multipath -v 0
+		# TODO(lxnay): horrible sleep!
 		sleep 2
 		good_msg "Activating multipath devices"
 		/sbin/dmsetup ls --target multipath --exec "/sbin/kpartx -a -v"
-		#for MULTIPATH_VOLUMES in /dev/mapper/*; do kpartx -a $MULTIPATH_VOLUMES; done
 	fi
-	if [ "${USE_DMRAID_NORMAL}" = '1' ]
-	then
-		if [ -e '/sbin/dmraid' ]
-		then
+	if [ "${USE_DMRAID_NORMAL}" = "1" ]; then
+		if [ -e "/sbin/dmraid" ]; then
 			good_msg "Activating Device-Mapper RAID(s)"
-			if [ '${DMRAID_OPTS}' = '' ]
-			then
-				/sbin/dmraid -ay
-			else
-				/sbin/dmraid -ay ${DMRAID_OPTS}
-			fi
+			/sbin/dmraid -ay ${DMRAID_OPTS}
+		else
+			bad_msg "/sbin/dmraid not found"
 		fi
 	fi
-	if [ "${USE_LVM_NORMAL}" = '1' ]
-	then
-		if [ -e '/sbin/lvm' ]
-		then
-			if is_mdev
-			then
-				for dev in ${RAID_DEVICES}
-				do
+	if [ "${USE_LVM_NORMAL}" = "1" ]; then
+		if [ -e "/sbin/lvm" ]; then
+			if is_mdev; then
+				for dev in ${RAID_DEVICES}; do
 					setup_md_device "${dev}"
 				done
 			fi
 			# This is needed for /sbin/lvm to accept the following logic
-			lvm_commands="#! /sbin/lvm"
+			local cmds="#! /sbin/lvm"
 			# If there is a cahe, update it. Unbreak at least dmcrypt
-			[ -d /etc/lvm/cache ] && lvm_commands="${lvm_commands} \nvgscan"
+			[ -d /etc/lvm/cache ] && cmds="${cmds} \nvgscan"
 			# To activate volumegroups on all devices in the cache
-			lvm_commands="${lvm_commands} \nvgchange -ay --sysinit"
-			if is_mdev
-			then
+			cmds="${cmds} \nvgchange -ay --sysinit"
+			if is_mdev; then
 				# To create symlinks so users can use
 				# real_root=/dev/vg/root
 				# This needs to run after vgchange, using
 				# vgchange --mknodes is too early.
-				lvm_commands="${lvm_commands} \nvgmknodes --ignorelockingfailure"
+				cmds+="${cmds} \nvgmknodes --ignorelockingfailure"
 			fi
-			# And finally execute it all (/proc/... needed if lvm is compiled without readline)
+			# And finally execute it all (/proc/... needed if lvm
+			# is compiled without readline)
 			good_msg "Scanning for and activating Volume Groups"
-			printf "%b\n" "${lvm_commands}" | /sbin/lvm /proc/self/fd/0
+			printf "%b\n" "${cmds}" | /sbin/lvm /proc/self/fd/0
 		else
-			bad_msg "vgscan or vgchange not found: skipping LVM volume group activation!"
+			bad_msg "/sbin/lvm not found: skipping LVM activation"
 		fi
 	fi
-	if [ "${USE_ZFS}" = '1' ]
-	then
+	if [ "${USE_ZFS}" = "1" ]; then
 		# Avoid race involving asynchronous module loading
-		if call_func_timeout wait_for_zfs 5
-		then
+		if call_func_timeout wait_for_zfs 5; then
 			bad_msg "Cannot import ZFS pool because /dev/zfs is missing"
-		elif [ -z "${ZFS_POOL}" ]
-		then
+
+		elif [ -z "${ZFS_POOL}" ]; then
 			good_msg "Importing ZFS pools"
 			/sbin/zpool import -N -a ${ZPOOL_FORCE}
-
-			if [ "$?" = '0' ]
-			then
+			if [ "${?}" = "0" ]; then
 				good_msg "Importing ZFS pools succeeded"
 			else
 				bad_msg "Imported ZFS pools failed"
 			fi
-		else
-			if [ "$(zpool list -H -o name ${ZFS_POOL} 2>&1)" = "$ZFS_POOL" ]
-			then
+
+		else
+			local pools=$(zpool list -H -o name ${ZFS_POOL} 2>&1)
+			if [ "${pools}" = "${ZFS_POOL}" ]; then
 				good_msg "ZFS pool ${ZFS_POOL} already imported."
-				if [ -n "${CRYPT_ROOT}" -o -n "${CRYPT_SWAP}" ]
-				then
+				if [ -n "${CRYPT_ROOT}" ] || [ -n "${CRYPT_SWAP}" ]; then
 					good_msg "LUKS detected. Reimporting ${ZFS_POOL}"
 					/sbin/zpool export -f "${ZFS_POOL}"
 					/sbin/zpool import -N ${ZPOOL_FORCE} "${ZFS_POOL}"
 				fi
 			else
 				good_msg "Importing ZFS pool ${ZFS_POOL}"
 				/sbin/zpool import -N ${ZPOOL_FORCE} "${ZFS_POOL}"
-				if [ "$?" = '0' ]
-				then
+				if [ "${?}" = "0" ]; then
 					good_msg "Import of ${ZFS_POOL} succeeded"
 				else
 					bad_msg "Import of ${ZFS_POOL} failed"
@@ -1121,8 +1098,7 @@ start_iscsi() {
 		iscsistart -b
 	fi
-	if [ -n "${ISCSI_INITIATORNAME}" ] && [ -n "${ISCSI_TARGET}" ] && [ -n "${ISCSI_ADDRESS}" ]
-	then
+	if [ -n "${ISCSI_INITIATORNAME}" ] && [ -n "${ISCSI_TARGET}" ] && [ -n "${ISCSI_ADDRESS}" ]; then
 		good_msg "Activating iSCSI via cmdline"
-		if [ "${ISCSI_TGPT}" ]
