Merge "Accept Quantums rootwrap.conf in etc/quantum/rootwrap.conf"
diff --git a/functions b/functions
index f8f63c5..fe50547 100644
--- a/functions
+++ b/functions
@@ -858,26 +858,69 @@
 }
 
 
+# _run_process() is designed to be backgrounded by run_process() to simulate a
+# fork.  It does the dirty work of closing the extra file descriptors and
+# preparing log files to produce the same logs as screen_it().  The log filename
+# is derived from the service name and the global (and now misnamed) SCREEN_LOGDIR.
+# _run_process service "command-line"
+function _run_process() {
+    local service=$1
+    local command="$2"
+
+    # Undo logging redirections and close the extra descriptors
+    exec 1>&3
+    exec 2>&3
+    exec 3>&-
+    exec 6>&-
+
+    if [[ -n ${SCREEN_LOGDIR} ]]; then
+        exec 1>&${SCREEN_LOGDIR}/screen-${service}.${CURRENT_LOG_TIME}.log 2>&1
+        ln -sf ${SCREEN_LOGDIR}/screen-${service}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${service}.log
+
+        # TODO(dtroyer): Hack to get stdout from the Python interpreter for the logs.
+        export PYTHONUNBUFFERED=1
+    fi
+
+    exec /bin/bash -c "$command"
+    die "$service exec failure: $command"
+}
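
For context, `exec 1>&3` only works if the parent shell has already duplicated its original stdout onto fd 3. A minimal sketch of that setup, loosely modeled on stack.sh's logging section (the tee pipeline and fd numbers here are illustrative assumptions, not part of this patch):

```bash
# Illustrative parent-side setup assumed by _run_process():
exec 3>&1                           # keep a copy of the original stdout on fd 3
exec 6>&1                           # extra copy used elsewhere for summary output
exec 1> >(tee -a "$LOGFILE") 2>&1   # route stdout/stderr through the log
```

A child inheriting these descriptors can then restore a clean stdout with `exec 1>&3` and drop the extras with `3>&-`/`6>&-`, which is exactly what the function above does before setting up its own log redirection.
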
+
+
+# run_process() launches a child process that closes the extra file descriptors
+# and then exec's the passed-in command.  This is meant to duplicate the
+# semantics of screen_it() without screen.  The child PID is echoed so the
+# caller can record it, e.g. in $SERVICE_DIR/$SCREEN_NAME/$service.pid
+# run_process service "command-line"
+function run_process() {
+    local service=$1
+    local command="$2"
+
+    # Spawn the child process
+    _run_process "$service" "$command" &
+    echo $!
+}
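
Because `_run_process()` ends in `exec`, the PID echoed by `run_process()` stays valid for the service itself: the exec replaces the backgrounded wrapper in the same process. A hedged usage sketch (the service name, command, and paths are examples only):

```bash
# Illustrative: launch a service without screen and record its PID
SCREEN_NAME=${SCREEN_NAME:-stack}
SERVICE_DIR=${SERVICE_DIR:-$DEST/status}
mkdir -p "$SERVICE_DIR/$SCREEN_NAME"

run_process n-api "cd $NOVA_DIR && ./bin/nova-api" \
    >"$SERVICE_DIR/$SCREEN_NAME/n-api.pid"

# Later: stop the service via the recorded PID
kill "$(cat "$SERVICE_DIR/$SCREEN_NAME/n-api.pid")"
```
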
+
+
 # Helper to launch a service in a named screen
 # screen_it service "command-line"
 function screen_it {
     SCREEN_NAME=${SCREEN_NAME:-stack}
     SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
-    SCREEN_DEV=`trueorfalse True $SCREEN_DEV`
+    USE_SCREEN=$(trueorfalse True $USE_SCREEN)
 
     if is_service_enabled $1; then
         # Append the service to the screen rc file
         screen_rc "$1" "$2"
 
-        screen -S $SCREEN_NAME -X screen -t $1
+        if [[ "$USE_SCREEN" = "True" ]]; then
+            screen -S $SCREEN_NAME -X screen -t $1
 
-        if [[ -n ${SCREEN_LOGDIR} ]]; then
-            screen -S $SCREEN_NAME -p $1 -X logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log
-            screen -S $SCREEN_NAME -p $1 -X log on
-            ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log
-        fi
+            if [[ -n ${SCREEN_LOGDIR} ]]; then
+                screen -S $SCREEN_NAME -p $1 -X logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log
+                screen -S $SCREEN_NAME -p $1 -X log on
+                ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log
+            fi
 
-        if [[ "$SCREEN_DEV" = "True" ]]; then
             # sleep to allow bash to be ready to receive the command - we are
             # creating a new window in screen and then sending characters, so if
             # bash isn't running by the time we send the command, nothing happens
@@ -886,7 +929,8 @@
             NL=`echo -ne '\015'`
             screen -S $SCREEN_NAME -p $1 -X stuff "$2 || touch \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\"$NL"
         else
-            screen -S $SCREEN_NAME -p $1 -X exec /bin/bash -c "$2 || touch \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\""
+            # Spawn directly without screen
+            run_process "$1" "$2" >$SERVICE_DIR/$SCREEN_NAME/$1.pid
         fi
     fi
 }
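
In screen mode the stuffed command line records a dead service by touching `$SERVICE_DIR/$SCREEN_NAME/$1.failure`, so callers can poll for that marker. A small sketch of such a check (this helper is hypothetical, not part of the patch):

```bash
# Hypothetical check for a service that died after screen_it launched it
function is_service_failed {
    local service=$1
    [[ -e "$SERVICE_DIR/$SCREEN_NAME/$service.failure" ]]
}

if is_service_failed n-cpu; then
    echo "n-cpu exited; see its screen window or log for details"
fi
```
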
diff --git a/lib/baremetal b/lib/baremetal
index 57048a1..5326dd1 100644
--- a/lib/baremetal
+++ b/lib/baremetal
@@ -33,7 +33,7 @@
 # baremetal driver uses that to push a disk image onto the node(s).
 #
 # Below we define various defaults which control the behavior of the
-# baremetal compute service, and inform it of the hardware it will contorl.
+# baremetal compute service, and inform it of the hardware it will control.
 #
 # Below that, various functions are defined, which are called by devstack
 # in the following order:
@@ -395,7 +395,7 @@
        ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} \
        ${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}"
 
-    # override DEFAULT_IMAGE_NAME so that tempest can find the image 
+    # override DEFAULT_IMAGE_NAME so that tempest can find the image
     # that we just uploaded in glance
     DEFAULT_IMAGE_NAME="${IMAGE_NAME%.img}"
 }
diff --git a/lib/nova b/lib/nova
index 23346b7..f0c8315 100644
--- a/lib/nova
+++ b/lib/nova
@@ -66,6 +66,59 @@
 QEMU_CONF=/etc/libvirt/qemu.conf
 
 
+# Nova Network Configuration
+# --------------------------
+
+# Set defaults according to the virt driver
+if [ "$VIRT_DRIVER" = 'xenserver' ]; then
+    PUBLIC_INTERFACE_DEFAULT=eth3
+    GUEST_INTERFACE_DEFAULT=eth1
+    # Allow ``build_domU.sh`` to specify the flat network bridge via kernel args
+    FLAT_NETWORK_BRIDGE_DEFAULT=$(grep -o 'flat_network_bridge=[[:alnum:]]*' /proc/cmdline | cut -d= -f 2 | sort -u)
+elif [ "$VIRT_DRIVER" = 'baremetal' ]; then
+    NETWORK_MANAGER=${NETWORK_MANAGER:-FlatManager}
+    PUBLIC_INTERFACE_DEFAULT=eth0
+    FLAT_INTERFACE=${FLAT_INTERFACE:-eth0}
+    FLAT_NETWORK_BRIDGE_DEFAULT=br100
+    STUB_NETWORK=${STUB_NETWORK:-False}
+else
+    PUBLIC_INTERFACE_DEFAULT=br100
+    GUEST_INTERFACE_DEFAULT=eth0
+    FLAT_NETWORK_BRIDGE_DEFAULT=br100
+fi
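
The grep/cut pipeline above extracts the bridge name that `build_domU.sh` passes on the kernel command line. For example (the `/proc/cmdline` contents are illustrative):

```bash
# Given a domU kernel command line such as:
#   ro root=/dev/xvda1 flat_network_bridge=xapi1
# the pipeline yields "xapi1":
echo 'ro root=/dev/xvda1 flat_network_bridge=xapi1' \
    | grep -o 'flat_network_bridge=[[:alnum:]]*' | cut -d= -f 2 | sort -u
```
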
+
+NETWORK_MANAGER=${NETWORK_MANAGER:-${NET_MAN:-FlatDHCPManager}}
+PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-$PUBLIC_INTERFACE_DEFAULT}
+VLAN_INTERFACE=${VLAN_INTERFACE:-$GUEST_INTERFACE_DEFAULT}
+FLAT_NETWORK_BRIDGE=${FLAT_NETWORK_BRIDGE:-$FLAT_NETWORK_BRIDGE_DEFAULT}
+EC2_DMZ_HOST=${EC2_DMZ_HOST:-$SERVICE_HOST}
+
+# If you are using the FlatDHCP network mode on multiple hosts, set the
+# ``FLAT_INTERFACE`` variable but make sure that the interface doesn't already
+# have an IP or you risk breaking things.
+#
+# **DHCP Warning**:  If your flat interface device uses DHCP, there will be a
+# hiccup while the network is moved from the flat interface to the flat network
+# bridge.  This will happen when you launch your first instance.  Upon launch
+# you will lose all connectivity to the node, and the VM launch will probably
+# fail.
+#
+# If you are running on a single node and don't need to access the VMs from
+# devices other than that node, you can set ``FLAT_INTERFACE=``.
+# This will stop nova from bridging any interfaces into ``FLAT_NETWORK_BRIDGE``.
+FLAT_INTERFACE=${FLAT_INTERFACE-$GUEST_INTERFACE_DEFAULT}
+
+# ``MULTI_HOST`` is a mode where each compute node runs its own network node.  This
+# allows network operations and routing for a VM to occur on the server that is
+# running the VM - removing a single point of failure and bandwidth bottleneck.
+MULTI_HOST=$(trueorfalse False $MULTI_HOST)
+
+# The test floating pool and range are used for testing.  They are defined
+# here until the admin APIs can replace nova-manage.
+TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test}
+TEST_FLOATING_RANGE=${TEST_FLOATING_RANGE:-192.168.253.0/29}
+
+
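
Since these settings now live in lib/nova, they can still be overridden from localrc exactly as before. An illustrative localrc fragment (all values are examples):

```bash
# localrc example: multi-host FlatDHCP with explicit interfaces
NETWORK_MANAGER=FlatDHCPManager
PUBLIC_INTERFACE=eth0
FLAT_INTERFACE=eth1
FLAT_NETWORK_BRIDGE=br100
MULTI_HOST=True
```
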
 # Entry Points
 # ------------
 
@@ -439,6 +492,49 @@
         # Replace the first '=' with ' ' for iniset syntax
         iniset $NOVA_CONF DEFAULT ${I/=/ }
     done
+
+    # All nova-compute workers need to know the vnc configuration options
+    # These settings don't hurt anything if n-xvnc and n-novnc are disabled
+    if is_service_enabled n-cpu; then
+        NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_auto.html"}
+        iniset $NOVA_CONF DEFAULT novncproxy_base_url "$NOVNCPROXY_URL"
+        XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:6081/console"}
+        iniset $NOVA_CONF DEFAULT xvpvncproxy_base_url "$XVPVNCPROXY_URL"
+        SPICEHTML5PROXY_URL=${SPICEHTML5PROXY_URL:-"http://$SERVICE_HOST:6082/spice_auto.html"}
+        iniset $NOVA_CONF spice html5proxy_base_url "$SPICEHTML5PROXY_URL"
+    fi
+    if [ "$VIRT_DRIVER" = 'xenserver' ]; then
+        VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=169.254.0.1}
+    else
+        VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=127.0.0.1}
+    fi
+
+    if is_service_enabled n-novnc || is_service_enabled n-xvnc; then
+        # Address on which instance vncservers will listen on compute hosts.
+        # For multi-host, this should be the management IP of the compute host.
+        VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1}
+        iniset $NOVA_CONF DEFAULT vnc_enabled true
+        iniset $NOVA_CONF DEFAULT vncserver_listen "$VNCSERVER_LISTEN"
+        iniset $NOVA_CONF DEFAULT vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS"
+    else
+        iniset $NOVA_CONF DEFAULT vnc_enabled false
+    fi
+
+    if is_service_enabled n-spice; then
+        # Address on which instance spiceservers will listen on compute hosts.
+        # For multi-host, this should be the management IP of the compute host.
+        SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS=127.0.0.1}
+        SPICESERVER_LISTEN=${SPICESERVER_LISTEN=127.0.0.1}
+        iniset $NOVA_CONF spice enabled true
+        iniset $NOVA_CONF spice server_listen "$SPICESERVER_LISTEN"
+        iniset $NOVA_CONF spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS"
+    else
+        iniset $NOVA_CONF spice enabled false
+    fi
+
+    iniset $NOVA_CONF DEFAULT ec2_dmz_host "$EC2_DMZ_HOST"
+    iniset_rpc_backend nova $NOVA_CONF DEFAULT
+    iniset $NOVA_CONF DEFAULT glance_api_servers "$GLANCE_HOSTPORT"
 }
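
For reference, with the defaults above and n-novnc enabled, these `iniset` calls produce a nova.conf fragment along these lines (the `SERVICE_HOST` value 192.168.1.10 is illustrative):

```ini
[DEFAULT]
vnc_enabled = true
vncserver_listen = 127.0.0.1
vncserver_proxyclient_address = 127.0.0.1
novncproxy_base_url = http://192.168.1.10:6080/vnc_auto.html
xvpvncproxy_base_url = http://192.168.1.10:6081/console
ec2_dmz_host = 192.168.1.10

[spice]
enabled = false
html5proxy_base_url = http://192.168.1.10:6082/spice_auto.html
```
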
 
 # create_nova_cache_dir() - Part of the init_nova() process
@@ -450,7 +546,7 @@
 }
 
 function create_nova_conf_nova_network() {
-    iniset $NOVA_CONF DEFAULT network_manager "nova.network.manager.$NET_MAN"
+    iniset $NOVA_CONF DEFAULT network_manager "nova.network.manager.$NETWORK_MANAGER"
     iniset $NOVA_CONF DEFAULT public_interface "$PUBLIC_INTERFACE"
     iniset $NOVA_CONF DEFAULT vlan_interface "$VLAN_INTERFACE"
     iniset $NOVA_CONF DEFAULT flat_network_bridge "$FLAT_NETWORK_BRIDGE"
diff --git a/lib/quantum b/lib/quantum
index 2ef883a..09cde64 100644
--- a/lib/quantum
+++ b/lib/quantum
@@ -53,7 +53,7 @@
 # that must be set in localrc for connectivity across hosts with
 # Quantum.
 #
-# With Quantum networking the NET_MAN variable is ignored.
+# With Quantum networking the NETWORK_MANAGER variable is ignored.
 
 
 # Save trace setting
@@ -365,13 +365,13 @@
 # Start running processes, including screen
 function start_quantum_agents() {
     # Start up the quantum agents if enabled
-    screen_it q-agt "python $AGENT_BINARY --config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE"
-    screen_it q-dhcp "python $AGENT_DHCP_BINARY --config-file $QUANTUM_CONF --config-file=$Q_DHCP_CONF_FILE"
-    screen_it q-meta "python $AGENT_META_BINARY --config-file $QUANTUM_CONF --config-file=$Q_META_CONF_FILE"
-    screen_it q-l3 "python $AGENT_L3_BINARY --config-file $QUANTUM_CONF --config-file=$Q_L3_CONF_FILE"
+    screen_it q-agt "cd $QUANTUM_DIR && python $AGENT_BINARY --config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE"
+    screen_it q-dhcp "cd $QUANTUM_DIR && python $AGENT_DHCP_BINARY --config-file $QUANTUM_CONF --config-file=$Q_DHCP_CONF_FILE"
+    screen_it q-l3 "cd $QUANTUM_DIR && python $AGENT_L3_BINARY --config-file $QUANTUM_CONF --config-file=$Q_L3_CONF_FILE"
+    screen_it q-meta "cd $QUANTUM_DIR && python $AGENT_META_BINARY --config-file $QUANTUM_CONF --config-file=$Q_META_CONF_FILE"
 
     if is_service_enabled q-lbaas; then
-        screen_it q-lbaas "python $AGENT_LBAAS_BINARY --config-file $QUANTUM_CONF --config-file=$LBAAS_AGENT_CONF_FILENAME"
+        screen_it q-lbaas "cd $QUANTUM_DIR && python $AGENT_LBAAS_BINARY --config-file $QUANTUM_CONF --config-file=$LBAAS_AGENT_CONF_FILENAME"
     fi
 }
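
The added `cd $QUANTUM_DIR` runs each agent from the source checkout rather than from whatever directory the launcher happened to be in. Expanded, one of the lines above looks roughly like this (`AGENT_DHCP_BINARY` is left unexpanded because its value depends on the plugin setup; the config paths assume the DevStack defaults and are illustrative):

```bash
# Illustrative expansion of the q-dhcp line, assuming default paths
cd /opt/stack/quantum && python $AGENT_DHCP_BINARY \
    --config-file /etc/quantum/quantum.conf \
    --config-file=/etc/quantum/dhcp_agent.ini
```
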
 
@@ -415,6 +415,7 @@
     cp $QUANTUM_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE
 
     iniset /$Q_PLUGIN_CONF_FILE DATABASE sql_connection `database_connection_url $Q_DB_NAME`
+    iniset $QUANTUM_CONF DEFAULT state_path $DATA_DIR/quantum
 
     _quantum_setup_rootwrap
 }
@@ -534,7 +535,6 @@
 
     iniset $QUANTUM_CONF DEFAULT verbose True
     iniset $QUANTUM_CONF DEFAULT debug True
-    iniset $QUANTUM_CONF DEFAULT state_path $DATA_DIR/quantum
     iniset $QUANTUM_CONF DEFAULT policy_file $Q_POLICY_FILE
     iniset $QUANTUM_CONF DEFAULT allow_overlapping_ips $Q_ALLOW_OVERLAPPING_IP
 
diff --git a/stack.sh b/stack.sh
index 14bb161..9a87a5f 100755
--- a/stack.sh
+++ b/stack.sh
@@ -278,11 +278,6 @@
 # https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1023755
 CINDER_SECURE_DELETE=`trueorfalse True $CINDER_SECURE_DELETE`
 
-# Name of the LVM volume group to use/create for iscsi volumes
-VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes}
-VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-}
-INSTANCE_NAME_PREFIX=${INSTANCE_NAME_PREFIX:-instance-}
-
 # Generic helper to configure passwords
 function read_password {
     XTRACE=$(set +o | grep xtrace)
@@ -326,64 +321,6 @@
 }
 
 
-# Nova Network Configuration
-# --------------------------
-
-# FIXME: more documentation about why these are important options.  Also
-# we should make sure we use the same variable names as the option names.
-
-if [ "$VIRT_DRIVER" = 'xenserver' ]; then
-    PUBLIC_INTERFACE_DEFAULT=eth3
-    # Allow ``build_domU.sh`` to specify the flat network bridge via kernel args
-    FLAT_NETWORK_BRIDGE_DEFAULT=$(grep -o 'flat_network_bridge=[[:alnum:]]*' /proc/cmdline | cut -d= -f 2 | sort -u)
-    GUEST_INTERFACE_DEFAULT=eth1
-elif [ "$VIRT_DRIVER" = 'baremetal' ]; then
-    PUBLIC_INTERFACE_DEFAULT=eth0
-    FLAT_NETWORK_BRIDGE_DEFAULT=br100
-    FLAT_INTERFACE=${FLAT_INTERFACE:-eth0}
-    FORCE_DHCP_RELEASE=${FORCE_DHCP_RELEASE:-False}
-    NET_MAN=${NET_MAN:-FlatManager}
-    STUB_NETWORK=${STUB_NETWORK:-False}
-else
-    PUBLIC_INTERFACE_DEFAULT=br100
-    FLAT_NETWORK_BRIDGE_DEFAULT=br100
-    GUEST_INTERFACE_DEFAULT=eth0
-fi
-
-PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-$PUBLIC_INTERFACE_DEFAULT}
-NET_MAN=${NET_MAN:-FlatDHCPManager}
-EC2_DMZ_HOST=${EC2_DMZ_HOST:-$SERVICE_HOST}
-FLAT_NETWORK_BRIDGE=${FLAT_NETWORK_BRIDGE:-$FLAT_NETWORK_BRIDGE_DEFAULT}
-VLAN_INTERFACE=${VLAN_INTERFACE:-$GUEST_INTERFACE_DEFAULT}
-FORCE_DHCP_RELEASE=${FORCE_DHCP_RELEASE:-True}
-
-# Test floating pool and range are used for testing.  They are defined
-# here until the admin APIs can replace nova-manage
-TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test}
-TEST_FLOATING_RANGE=${TEST_FLOATING_RANGE:-192.168.253.0/29}
-
-# ``MULTI_HOST`` is a mode where each compute node runs its own network node.  This
-# allows network operations and routing for a VM to occur on the server that is
-# running the VM - removing a SPOF and bandwidth bottleneck.
-MULTI_HOST=`trueorfalse False $MULTI_HOST`
-
-# If you are using the FlatDHCP network mode on multiple hosts, set the
-# ``FLAT_INTERFACE`` variable but make sure that the interface doesn't already
-# have an IP or you risk breaking things.
-#
-# **DHCP Warning**:  If your flat interface device uses DHCP, there will be a
-# hiccup while the network is moved from the flat interface to the flat network
-# bridge.  This will happen when you launch your first instance.  Upon launch
-# you will lose all connectivity to the node, and the VM launch will probably
-# fail.
-#
-# If you are running on a single node and don't need to access the VMs from
-# devices other than that node, you can set ``FLAT_INTERFACE=``
-# This will stop nova from bridging any interfaces into ``FLAT_NETWORK_BRIDGE``.
-FLAT_INTERFACE=${FLAT_INTERFACE-$GUEST_INTERFACE_DEFAULT}
-
-## FIXME(ja): should/can we check that FLAT_INTERFACE is sane?
-
 # Database Configuration
 # ----------------------
 
@@ -800,8 +737,17 @@
 # Configure screen
 # ----------------
 
-if [ -z "$SCREEN_HARDSTATUS" ]; then
-    SCREEN_HARDSTATUS='%{= .} %-Lw%{= .}%> %n%f %t*%{= .}%+Lw%< %-=%{g}(%{d}%H/%l%{g})'
+USE_SCREEN=$(trueorfalse True $USE_SCREEN)
+if [[ "$USE_SCREEN" == "True" ]]; then
+    # Create a new named screen to run processes in
+    screen -d -m -S $SCREEN_NAME -t shell -s /bin/bash
+    sleep 1
+
+    # Set a reasonable status bar
+    if [ -z "$SCREEN_HARDSTATUS" ]; then
+        SCREEN_HARDSTATUS='%{= .} %-Lw%{= .}%> %n%f %t*%{= .}%+Lw%< %-=%{g}(%{d}%H/%l%{g})'
+    fi
+    screen -r $SCREEN_NAME -X hardstatus alwayslastline "$SCREEN_HARDSTATUS"
 fi
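
When `USE_SCREEN=True`, the named session is created detached; it can be inspected later with standard screen commands (the session name `stack` is the default):

```bash
screen -ls        # list sessions; expect one named "stack"
screen -r stack   # reattach to it; detach again with Ctrl-a d
```
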
 
 # Clear screen rc file
@@ -810,12 +756,6 @@
     echo -n > $SCREENRC
 fi
 
-# Create a new named screen to run processes in
-screen -d -m -S $SCREEN_NAME -t shell -s /bin/bash
-sleep 1
-
-# Set a reasonable status bar
-screen -r $SCREEN_NAME -X hardstatus alwayslastline "$SCREEN_HARDSTATUS"
 
 # Initialize the directory for service status check
 init_service_check
@@ -980,48 +920,6 @@
     elif is_service_enabled n-net; then
         create_nova_conf_nova_network
     fi
-    # All nova-compute workers need to know the vnc configuration options
-    # These settings don't hurt anything if n-xvnc and n-novnc are disabled
-    if is_service_enabled n-cpu; then
-        NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_auto.html"}
-        iniset $NOVA_CONF DEFAULT novncproxy_base_url "$NOVNCPROXY_URL"
-        XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:6081/console"}
-        iniset $NOVA_CONF DEFAULT xvpvncproxy_base_url "$XVPVNCPROXY_URL"
-        SPICEHTML5PROXY_URL=${SPICEHTML5PROXY_URL:-"http://$SERVICE_HOST:6082/spice_auto.html"}
-        iniset $NOVA_CONF spice html5proxy_base_url "$SPICEHTML5PROXY_URL"
-    fi
-    if [ "$VIRT_DRIVER" = 'xenserver' ]; then
-        VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=169.254.0.1}
-    else
-        VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=127.0.0.1}
-    fi
-
-    if is_service_enabled n-novnc || is_service_enabled n-xvnc ; then
-      # Address on which instance vncservers will listen on compute hosts.
-      # For multi-host, this should be the management ip of the compute host.
-      VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1}
-      iniset $NOVA_CONF DEFAULT vnc_enabled true
-      iniset $NOVA_CONF DEFAULT vncserver_listen "$VNCSERVER_LISTEN"
-      iniset $NOVA_CONF DEFAULT vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS"
-    else
-      iniset $NOVA_CONF DEFAULT vnc_enabled false
-    fi
-
-    if is_service_enabled n-spice; then
-      # Address on which instance spiceservers will listen on compute hosts.
-      # For multi-host, this should be the management ip of the compute host.
-      SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS=127.0.0.1}
-      SPICESERVER_LISTEN=${SPICESERVER_LISTEN=127.0.0.1}
-      iniset $NOVA_CONF spice enabled true
-      iniset $NOVA_CONF spice server_listen "$SPICESERVER_LISTEN"
-      iniset $NOVA_CONF spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS"
-    else
-      iniset $NOVA_CONF spice enabled false
-    fi
-
-    iniset $NOVA_CONF DEFAULT ec2_dmz_host "$EC2_DMZ_HOST"
-    iniset_rpc_backend nova $NOVA_CONF DEFAULT
-    iniset $NOVA_CONF DEFAULT glance_api_servers "$GLANCE_HOSTPORT"
 
 
     # XenServer
diff --git a/stackrc b/stackrc
index d418a0e..19674ed 100644
--- a/stackrc
+++ b/stackrc
@@ -30,8 +30,8 @@
 # stuffing text into the screen windows so that a developer can use
 # ctrl-c, up-arrow, enter to restart the service. Starting services
 # this way is slightly unreliable, and a bit slower, so this can
-# be disabled for automated testing by setting this value to false.
-SCREEN_DEV=True
+# be disabled for automated testing by setting this value to False.
+USE_SCREEN=True
 
 # Repositories
 # ------------
@@ -196,5 +196,13 @@
 # 5Gb default volume backing file size
 VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-5130M}
 
+# Name of the LVM volume group to use/create for iscsi volumes
+VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes}
+VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-}
+INSTANCE_NAME_PREFIX=${INSTANCE_NAME_PREFIX:-instance-}
+
 PRIVATE_NETWORK_NAME=${PRIVATE_NETWORK_NAME:-"private"}
 PUBLIC_NETWORK_NAME=${PUBLIC_NETWORK_NAME:-"nova"}
+
+# Backward compatibility: honor the old SCREEN_DEV name until it is eradicated from CI
+USE_SCREEN=${SCREEN_DEV:-$USE_SCREEN}
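
To take the new non-screen path, automated jobs only need one localrc line; the compatibility assignment above keeps older CI that still exports `SCREEN_DEV` working:

```bash
# localrc example: disable screen for automated testing
USE_SCREEN=False
```
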
diff --git a/tools/build_bm_multi.sh b/tools/build_bm_multi.sh
index f1242ee..52b9b4e 100755
--- a/tools/build_bm_multi.sh
+++ b/tools/build_bm_multi.sh
@@ -6,7 +6,7 @@
 SHELL_AFTER_RUN=no
 
 # Variables common amongst all hosts in the cluster
-COMMON_VARS="MYSQL_HOST=$HEAD_HOST RABBIT_HOST=$HEAD_HOST GLANCE_HOSTPORT=$HEAD_HOST:9292 NET_MAN=FlatDHCPManager FLAT_INTERFACE=eth0 FLOATING_RANGE=$FLOATING_RANGE MULTI_HOST=1 SHELL_AFTER_RUN=$SHELL_AFTER_RUN"
+COMMON_VARS="MYSQL_HOST=$HEAD_HOST RABBIT_HOST=$HEAD_HOST GLANCE_HOSTPORT=$HEAD_HOST:9292 NETWORK_MANAGER=FlatDHCPManager FLAT_INTERFACE=eth0 FLOATING_RANGE=$FLOATING_RANGE MULTI_HOST=1 SHELL_AFTER_RUN=$SHELL_AFTER_RUN"
 
 # Helper to launch containers
 function run_bm {