"Live RAID" Setup for Ubuntu 12

Who?

iWeb has some good deals on dedicated servers. For 2-3x what a hosted VM costs, you can get a machine all your own, no shary-shary with anybody. (Check out iWeb's warehouse clearance section.) What's especially nice is you can have the CPU virtualization features turned on and run your own little VM farm. Setting up a VirtualBox farm will be the subject of a future article.

What?

A scary procedure whereby a running Linux system with 2 independent disks is converted to a software RAID-1 setup without reinstalling the OS, while (fingers x'd) preserving all data.

Why?

iWeb's bare metal jobs generally come with 2 identical disks, but without RAID. For whatever reason, hardware RAID based systems are way more expensive. Well, in the Ubuntu 10 era, iWeb sold servers with software RAID on the cheap...that's why I switched to them. For Ubuntu 12 they stopped. I had an Ubuntu 10 server with software RAID-1 on iWeb and really wanted to move it to an Ubuntu 12 box with more RAM, but that option was gone. They still had the 2-disk server I wanted, just no RAID.

When?

Only when you've vetted your particular setup with a trial run on a VM. Note that if my config had failed on iWeb, they would have charged me for a wipe/reinstall. Caveat emptor...

Woa-there-pardner, is this safe?

Well, yes and no. First, big disclaimer: DO NOT TRY THIS ON A SYSTEM WITH UN-BACKED UP DATA, A RUNNING PRODUCTION SYSTEM, OR ANY SYSTEM THAT YOU AREN'T WILLING TO WIPE IF YOU FAIL. Second, BRING UP A VM WITH THE DISKS AND PARTITIONING YOU'RE WORKING WITH AND EXPERIMENT THERE, MAKE SURE YOU'VE GOT IT DOWN TO A "T". Rolling back to snapshot is so much easier!

Once again...

...don't try this without a net!
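If you want a concrete way to rehearse, something like the following VirtualBox sketch gets you a throwaway VM with two blank disks and a snapshot to fall back on. The VM name, disk sizes, and file names are just placeholders, and the exact VBoxManage flags can vary a bit between VirtualBox versions, so treat this as a starting point rather than gospel:

VBoxManage createvm --name raidtest --ostype Ubuntu_64 --register
VBoxManage modifyvm raidtest --memory 1024
VBoxManage createhd --filename raidtest-disk1.vdi --size 10240
VBoxManage createhd --filename raidtest-disk2.vdi --size 10240
VBoxManage storagectl raidtest --name SATA --add sata
VBoxManage storageattach raidtest --storagectl SATA --port 0 --device 0 --type hdd --medium raidtest-disk1.vdi
VBoxManage storageattach raidtest --storagectl SATA --port 1 --device 0 --type hdd --medium raidtest-disk2.vdi
# install Ubuntu 12 in the VM with the same partition layout as your real box, then:
VBoxManage snapshot raidtest take pre-raid
# ...and when an experiment goes sideways:
VBoxManage snapshot raidtest restore pre-raid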

So, without further warning, here it is, in the form of a lightly commented shell script:

# inspired by:
# http://www.howtoforge.com/how-to-set-up-software-raid1-on-a-running-system-incl-grub2-configuration-ubuntu-10.04
# http://unix.stackexchange.com/questions/5297/debian-grub2-moving-root-partition-to-new-drive

#
# !!! IMPORTANT, DANGER, WARNING !!!
#
# 1. Don't do this on an un-backed-up system
# 2. Don't do this on a system you aren't willing to wipe and start over on
# 3. Assumes a starting disk layout like the one below; if yours differs, the
#    script will fail and possibly leave your system unbootable
# 4. You have been warned!
#
# Filesystem     1K-blocks    Used Available Use% Mounted on
# /dev/sda7      476591956 1046424 451335984   1% /
# udev             8188212       4   8188208   1% /dev
# tmpfs            3278808     384   3278424   1% /run
# none                5120       0      5120   0% /run/lock
# none             8197012       0   8197012   0% /run/shm
# /dev/sdb1      480720616  202664 456098676   1% /disk2
# /dev/sda1         186663   29975    147051  17% /boot
# /dev/sda6        1968588   35720   1832868   2% /tmp

apt-get install --yes mdadm
# accept default values for postfix setup
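# (if you'd rather skip the postfix prompts entirely, the non-interactive form
#  below should work just as well)
# DEBIAN_FRONTEND=noninteractive apt-get install --yes mdadm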

modprobe linear
modprobe multipath
modprobe raid0
modprobe raid1
modprobe raid5
modprobe raid6
modprobe raid10

cat /proc/mdstat
# and make sure you have:
# Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10] 
# unused devices: <none>
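# (a quick scriptable version of that check, if you prefer)
grep -q raid1 /proc/mdstat && echo "raid1 personality loaded" || echo "raid1 personality MISSING"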

umount /dev/sdb1
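# (optional but cheap insurance: stash a copy of sda's partition table before
#  cloning it onto sdb; the path below is just an example)
sfdisk -d /dev/sda > /root/sda-partition-table.backup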
sfdisk -d /dev/sda | sfdisk --force /dev/sdb

fdisk /dev/sdb <<EOF
t
1
fd
t
5
fd
t
6
fd
t
7
fd
w
EOF
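# sanity check: all four sdb partitions should now show type "fd" (Linux raid autodetect)
fdisk -l /dev/sdb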

# precautionary
mdadm --zero-superblock /dev/sdb1
mdadm --zero-superblock /dev/sdb5
mdadm --zero-superblock /dev/sdb6
mdadm --zero-superblock /dev/sdb7
# the following message is expected unless the partition was already part of
# an md array (i.e. there's no old superblock to zero):
# mdadm: Unrecognised md component device - /dev/sdb7

mdadm --create /dev/md1 --level=1 --raid-disks=2 missing /dev/sdb1 <<<"y"
mdadm --create /dev/md5 --level=1 --raid-disks=2 missing /dev/sdb5 <<<"y"
mdadm --create /dev/md6 --level=1 --raid-disks=2 missing /dev/sdb6 <<<"y"
mdadm --create /dev/md7 --level=1 --raid-disks=2 missing /dev/sdb7 <<<"y"
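# (the "missing" keyword above deliberately leaves the first slot of each array
#  empty; the matching /dev/sda partitions get added at the very end, once we've
#  booted off the new arrays)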

cat /proc/mdstat
# should yield:
# Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10]
# md7 : active raid1 sdb7[1]
#       81170304 blocks super 1.2 [2/1] [_U]
#
# md6 : active raid1 sdb6[1]
#       1950656 blocks super 1.2 [2/1] [_U]
#
# md5 : active raid1 sdb5[1]
#       498368 blocks super 1.2 [2/1] [_U]
#
# md1 : active raid1 sdb1[1]
#       194368 blocks super 1.2 [2/1] [_U]
#
# unused devices: <none>

mkfs.ext4 /dev/md1
mkswap /dev/md5
mkfs.ext4 /dev/md6
mkfs.ext4 /dev/md7
cp /etc/mdadm/mdadm.conf /etc/mdadm/mdadm.conf.orig
mdadm --examine --scan >> /etc/mdadm/mdadm.conf
tail /etc/mdadm/mdadm.conf
# should see:
# MAILADDR root
#
# # definitions of existing MD arrays
#
# # This file was auto-generated on Sun, 03 Feb 2013 11:57:14 -0500
# # by mkconf $Id$
# ARRAY /dev/md/1 metadata=1.2 UUID=a924d704:9fa6d8c0:99017e2c:88d17a7f name=cl-t066-446cl:1
# ARRAY /dev/md/5 metadata=1.2 UUID=793b63d7:59040307:4ae1b663:5ea98ae7 name=cl-t066-446cl:5
# ARRAY /dev/md/6 metadata=1.2 UUID=dd1fce40:09b7e35d:b3508c29:ec998795 name=cl-t066-446cl:6
# ARRAY /dev/md/7 metadata=1.2 UUID=47178134:35fbd6d3:dc179dba:d87e4042 name=cl-t066-446cl:7

# copy the contents of the sda filesystems onto the new md arrays
mkdir /mnt/md1 && mount /dev/md1 /mnt/md1 && cd /boot && cp -dpRx . /mnt/md1
mkdir /mnt/md6 && mount /dev/md6 /mnt/md6 && cd /tmp  && cp -dpRx . /mnt/md6
mkdir /mnt/md7 && mount /dev/md7 /mnt/md7 && cd /     && cp -dpRx . /mnt/md7
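# (rsync is a reasonable alternative for the big root copy; -x keeps it on one
#  filesystem so /proc, /sys, /dev and /disk2 aren't dragged along. Untested in
#  this particular run, so consider it a sketch.)
# rsync -aHx / /mnt/md7/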

# chroot to new raid filesystem for update-grub
mount -o bind /dev /mnt/md7/dev
mount -t proc none  /mnt/md7/proc
mount -t sysfs none /mnt/md7/sys
chroot /mnt/md7
mount /dev/md1 /boot
mount /dev/md6 /tmp
dpkg-reconfigure mdadm # IMPORTANT: answer "yes" to booting when the array is degraded
update-grub
update-initramfs -u
# you may see the following, it's ok:
# mdadm: cannot open /dev/md/1: No such file or directory
# mdadm: cannot open /dev/md/5: No such file or directory
# mdadm: cannot open /dev/md/6: No such file or directory
# mdadm: cannot open /dev/md/7: No such file or directory
grub-install /dev/sda
grub-install /dev/sdb
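# both grub-installs should finish with something like:
# Installation finished. No error reported.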

# how you update /etc/fstab depends on whether you mount via volume UUIDs or /dev entries
cp /etc/fstab /etc/fstab.orig

# option 1: UUIDs
# update fstab with the new UUIDs
blkid | grep md
vi /etc/fstab
# copy/paste the mount lines, comment the originals, and put the new UUIDs in the copies, e.g.,
# # / was on /dev/sda7 during installation
# UUID=f22531c1-8c4c-49f2-b02a-6406a0954cac /               ext4    errors=remount-ro 0       1
# # /boot was on /dev/sda1 during installation
# UUID=4957f741-d8e6-4ff8-99fc-91a3055f5f88 /boot           ext4    defaults        0       2
# # /disk2 was on /dev/sdb1 during installation
# #UUID=fba25463-ed65-417f-9920-b0bd7457a197 /disk2          ext4    defaults        0       2
# # /tmp was on /dev/sda6 during installation
# UUID=b3bea78a-acb3-4486-8bcd-7ef6bec3f629 /tmp            ext4    defaults        0       2
# # swap was on /dev/sda5 during installation
# UUID=3ac3d2df-932c-4185-8434-5c833074ab21 none            swap    sw              0       0

# option 2: /dev entries
# change /dev/sda# entries to /dev/md# entries, and comment out /disk2, e.g.,
# # <file system> <mount point>   <type>  <options>       <dump>  <pass>
# proc            /proc           proc    nodev,noexec,nosuid 0       0
# /dev/md7       /               ext4    errors=remount-ro 0       1
# /dev/md1       /boot           ext4    defaults        0       2
# /dev/md6       /tmp            ext4    defaults        0       2
# /dev/md5       none            swap    sw              0       0
# #/dev/sdb1       /disk2          ext4    defaults       1       2
vi /etc/fstab
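# (or, if you'd rather not hand-edit, here's a sed sketch of option 2; it assumes
#  the exact sda partition numbers shown above, so eyeball /etc/fstab before rebooting)
# sed -i.bak -e 's|^/dev/sda7\b|/dev/md7|' -e 's|^/dev/sda1\b|/dev/md1|' -e 's|^/dev/sda6\b|/dev/md6|' -e 's|^/dev/sda5\b|/dev/md5|' -e 's|^/dev/sdb1\b|#&|' /etc/fstab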

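# if you're still inside the chroot from the grub step, drop back out of it
# (i.e. run "exit") before rebooting, so the reboot is issued from the real root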
# reboot onto (degraded) raid array, and cross fingers!
reboot

# see if we have md happiness...
df # should yield:
# Filesystem     1K-blocks    Used Available Use% Mounted on
# /dev/md7       476462752 1048212 451211552   1% /
# udev             8188008       4   8188004   1% /dev
# tmpfs            3278808     428   3278380   1% /run
# none                5120       8      5112   1% /run/lock
# none             8197012       0   8197012   0% /run/shm
# /dev/md6         1967504   35720   1831836   2% /tmp
# /dev/md1          186491   30385    146478  18% /boot

# Finally, change the partition types on sda and add its partitions to the arrays; the rebuild should start automatically
fdisk /dev/sda <<EOF
t
1
fd
t
5
fd
t
6
fd
t
7
fd
w
EOF
mdadm --add /dev/md1 /dev/sda1
mdadm --add /dev/md5 /dev/sda5
mdadm --add /dev/md6 /dev/sda6
mdadm --add /dev/md7 /dev/sda7

# recommended
vi /etc/aliases # and add "root: myemail@domain.tld" to bottom
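newaliases # rebuild postfix's alias database so mdadm's failure mail actually reaches you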

# watch the rebuild
watch cat /proc/mdstat
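# optional: once things look healthy, have mdadm send a test alert for each
# array so you know the notification mail path really works
mdadm --monitor --scan --oneshot --test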

# fine