Merge remote-tracking branch 'upstream/master'
diff --git a/build_lxc.sh b/build_lxc.sh
index c1ab995..787c4bd 100755
--- a/build_lxc.sh
+++ b/build_lxc.sh
@@ -69,6 +69,11 @@
 # Destroy the old container
 lxc-destroy -n $CONTAINER
 
+# If this call is only meant to TERMINATE the container, exit now
+if [ "$TERMINATE" = "1" ]; then
+    exit
+fi
+
 # Create the container
 lxc-create -n $CONTAINER -t natty -f $LXC_CONF
 
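Note: with the guard above, build_lxc.sh doubles as a teardown tool. A
hypothetical invocation (CONTAINER is read from the environment, as the
run_lxc helper in the next file shows):

    TERMINATE=1 CONTAINER=STACKMASTER ./build_lxc.sh

This destroys the named container and exits before reaching lxc-create.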
diff --git a/build_lxc_multi.sh b/build_lxc_multi.sh
index efa7deb..50be4f5 100755
--- a/build_lxc_multi.sh
+++ b/build_lxc_multi.sh
@@ -4,16 +4,21 @@
 COMPUTE_HOSTS=${COMPUTE_HOSTS:-192.168.1.53,192.168.1.54}
 
 # Networking params
-NAMESERVER=${NAMESERVER:-192.168.2.1}
+NAMESERVER=${NAMESERVER:-192.168.1.1}
 GATEWAY=${GATEWAY:-192.168.1.1}
+NETMASK=${NETMASK:-255.255.255.0}
+FLOATING_RANGE=${FLOATING_RANGE:-192.168.1.196/30}
+
+# Setting this to 1 shuts down and destroys our containers without relaunching.
+TERMINATE=${TERMINATE:-0}
 
 # Variables common amongst all hosts in the cluster
-COMMON_VARS="MYSQL_HOST=$HEAD_HOST RABBIT_HOST=$HEAD_HOST GLANCE_HOSTPORT=$HEAD_HOST:9292 NET_MAN=FlatDHCPManager FLAT_INTERFACE=eth0"
+COMMON_VARS="MYSQL_HOST=$HEAD_HOST RABBIT_HOST=$HEAD_HOST GLANCE_HOSTPORT=$HEAD_HOST:9292 NET_MAN=FlatDHCPManager FLAT_INTERFACE=eth0 FLOATING_RANGE=$FLOATING_RANGE MULTI_HOST=1"
 
 # Helper to launch containers
 function run_lxc {
     # For some reason container names with periods can cause issues :/
-    CONTAINER=$1 CONTAINER_IP=$2 CONTAINER_GATEWAY=$GATEWAY NAMESERVER=$NAMESERVER STACKSH_PARAMS="$COMMON_VARS $3" ./build_lxc.sh
+    CONTAINER=$1 CONTAINER_IP=$2 CONTAINER_NETMASK=$NETMASK CONTAINER_GATEWAY=$GATEWAY NAMESERVER=$NAMESERVER TERMINATE=$TERMINATE STACKSH_PARAMS="$COMMON_VARS $3" ./build_lxc.sh
 }
 
 # Launch the head node - headnode uses a non-ip domain name,
@@ -21,10 +26,12 @@
 run_lxc STACKMASTER $HEAD_HOST "ENABLED_SERVICES=g-api,g-reg,key,n-api,n-sch,n-vnc,dash,mysql,rabbit"
 
 # Wait till the head node is up
-while ! wget -q -O- http://$HEAD_HOST | grep -q username; do
-    echo "Waiting for head node ($HEAD_HOST) to start..."
-    sleep 5
-done
+if [ ! "$TERMINATE" = "1" ]; then
+    while ! wget -q -O- http://$HEAD_HOST | grep -q username; do
+        echo "Waiting for head node ($HEAD_HOST) to start..."
+        sleep 5
+    done
+fi
 
 # Launch the compute hosts
 for compute_host in ${COMPUTE_HOSTS//,/ }; do
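Note: the loop header splits the comma-separated COMPUTE_HOSTS list with bash
pattern substitution, replacing every comma with a space:

    $ COMPUTE_HOSTS=192.168.1.53,192.168.1.54
    $ echo ${COMPUTE_HOSTS//,/ }
    192.168.1.53 192.168.1.54

And because TERMINATE is forwarded through run_lxc, the whole cluster can be
torn down without relaunching via:

    TERMINATE=1 ./build_lxc_multi.sh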
diff --git a/stack.sh b/stack.sh
index b47729e..e9218b8 100755
--- a/stack.sh
+++ b/stack.sh
@@ -308,6 +308,9 @@
 if [ -n "$FLAT_INTERFACE" ]; then
     add_nova_flag "--flat_interface=$FLAT_INTERFACE"
 fi
+if [ -n "$MULTI_HOST" ]; then
+    add_nova_flag "--multi_host=$MULTI_HOST"
+fi
 
 # create a new named screen to store things in
 screen -d -m -S nova -t nova
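Note: add_nova_flag is defined earlier in stack.sh, outside this hunk. It
presumably appends one flag per line to nova's flagfile, along the lines of
this sketch (the flagfile path here is an assumption):

    function add_nova_flag {
        # assumed location of nova's flagfile
        echo "$1" >> $NOVA_DIR/bin/nova.conf
    }

With MULTI_HOST=1 passed in via COMMON_VARS, this yields a --multi_host=1
line in that file.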
@@ -319,7 +322,7 @@
     # device - used to manage qcow images)
     sudo modprobe nbd || true
     sudo modprobe kvm || true
-    # user needs to be member of libvirtd group for nova-compute to use libvirt
+    # The user needs to be a member of the libvirtd group for nova-compute to use libvirt.
     sudo usermod -a -G libvirtd `whoami`
     # if kvm wasn't running before we need to restart libvirt to enable it
     sudo /etc/init.d/libvirt-bin restart
@@ -386,9 +389,9 @@
 # so send the start command by forcing text into the window.
 # Only run the services specified in ``ENABLED_SERVICES``
 
-NL=`echo -ne '\015'`
-
+# Helper to launch a service in a named window of the detached ``nova`` screen session
 function screen_it {
+    NL=`echo -ne '\015'`
     if [[ "$ENABLED_SERVICES" =~ "$1" ]]; then
         screen -S nova -X screen -t $1
         screen -S nova -p $1 -X stuff "$2$NL"
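Note: \015 is a carriage return, so "stuff" types the command into the named
window and the trailing $NL presses enter, starting the service inside the
detached ``nova`` session created above. For one enabled service the function
expands to roughly:

    screen -S nova -X screen -t n-api
    screen -S nova -p n-api -X stuff "$NOVA_DIR/bin/nova-api$NL"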
@@ -399,7 +402,13 @@
 screen_it g-reg "cd $GLANCE_DIR; bin/glance-registry --config-file=etc/glance-registry.conf"
 screen_it key "$KEYSTONE_DIR/bin/keystone --config-file $KEYSTONE_CONF"
 screen_it n-api "$NOVA_DIR/bin/nova-api"
-screen_it n-cpu "$NOVA_DIR/bin/nova-compute"
+# Launching nova-compute should be as simple as running ``nova-compute``, but
+# we have to do a little more than that in our script.  Since we add our user
+# to the ``libvirtd`` group above, nova-compute would otherwise run in the
+# context of our original shell, where that group membership has not yet
+# taken effect.  Piping the command into ``newgrp`` executes it in a shell
+# that already has the ``libvirtd`` group.
+screen_it n-cpu "echo $NOVA_DIR/bin/nova-compute | newgrp libvirtd"
 screen_it n-net "$NOVA_DIR/bin/nova-network"
 screen_it n-sch "$NOVA_DIR/bin/nova-scheduler"
 # nova-vncproxy binds a privileged port, and so needs sudo
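Note: the newgrp trick can be sanity-checked on its own. newgrp starts a
child shell whose current group is libvirtd and runs whatever arrives on
stdin (the "stack" group shown here is illustrative):

    $ id -gn
    stack
    $ echo 'id -gn' | newgrp libvirtd
    libvirtd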
@@ -418,14 +427,13 @@
 
     # extract ami-tty/image, aki-tty/image & ari-tty/image
     mkdir -p $FILES/images
-    cd $FILES/images
-    tar -zxf $DEST/tty.tgz
+    tar -zxf $FILES/tty.tgz -C $FILES/images
 
     # add images to glance 
     # FIXME: kernel/ramdisk is hardcoded - use return result from add
-    glance add name="tty-kernel" is_public=true container_format=aki disk_format=aki < aki-tty/image 
-    glance add name="tty-ramdisk" is_public=true container_format=ari disk_format=ari < ari-tty/image 
-    glance add name="tty" is_public=true container_format=ami disk_format=ami kernel_id=1 ramdisk_id=2 < ami-tty/image
+    glance add name="tty-kernel" is_public=true container_format=aki disk_format=aki < $FILES/images/aki-tty/image 
+    glance add name="tty-ramdisk" is_public=true container_format=ari disk_format=ari < $FILES/images/ari-tty/image 
+    glance add name="tty" is_public=true container_format=ami disk_format=ami kernel_id=1 ramdisk_id=2 < $FILES/images/ami-tty/image
 fi
 
 # Using the cloud
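Note: the tar change above drops the cd. With GNU tar, -C switches into the
target directory before extracting, so the script's working directory stays
put and the glance add redirections can use full $FILES/images/... paths.
Assuming $FILES is an absolute path:

    tar -zxf $FILES/tty.tgz -C $FILES/images
    # behaves like: (cd $FILES/images && tar -zxf $FILES/tty.tgz)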