Merge "Enable cinder to work with qpid"
diff --git a/AUTHORS b/AUTHORS
index 6141d67..4f771ce 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -29,11 +29,13 @@
 Justin Shepherd <galstrom21@gmail.com>
 Ken Pepple <ken.pepple@rabbityard.com>
 Kiall Mac Innes <kiall@managedit.ie>
+Osamu Habuka <xiu.yushen@gmail.com>
 Russell Bryant <rbryant@redhat.com>
 Scott Moser <smoser@ubuntu.com>
 Thierry Carrez <thierry@openstack.org>
 Todd Willey <xtoddx@gmail.com>
 Tres Henry <tres@treshenry.net>
+Vincent Untz <vuntz@suse.com>
 Vishvananda Ishaya <vishvananda@gmail.com>
 Yun Mao <yunmao@gmail.com>
 Yong Sheng Gong <gongysh@cn.ibm.com>
diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh
index 38fac12..8a4f9c1 100755
--- a/exercises/aggregates.sh
+++ b/exercises/aggregates.sh
@@ -7,14 +7,15 @@
 #  *  Updating Aggregate details
 #  *  Testing Aggregate metadata
 #  *  Testing Aggregate delete
-#  *  TODO(johngar) - test adding a host (idealy with two hosts)
+#  *  Testing General Aggregates (https://blueprints.launchpad.net/nova/+spec/general-host-aggregates)
+#  *  Testing add/remove hosts (with one host)
 
 echo "**************************************************"
 echo "Begin DevStack Exercise: $0"
 echo "**************************************************"
 
 # This script exits on an error so that errors don't compound and you see
-# only the first error that occured.
+# only the first error that occurred.
 set -o errexit
 
 # Print the commands being run so that we can see the command that triggers
@@ -47,6 +48,7 @@
 # ===================
 
 AGGREGATE_NAME=test_aggregate_$RANDOM
+AGGREGATE2_NAME=test_aggregate_$RANDOM
 AGGREGATE_A_ZONE=nova
 
 exit_if_aggregate_present() {
@@ -63,6 +65,7 @@
 exit_if_aggregate_present $AGGREGATE_NAME
 
 AGGREGATE_ID=`nova aggregate-create $AGGREGATE_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE_NAME " | get_field 1`
+AGGREGATE2_ID=`nova aggregate-create $AGGREGATE2_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE2_NAME " | get_field 1`
 
 # check aggregate created
 nova aggregate-list | grep -q " $AGGREGATE_NAME " || die "Aggregate $AGGREGATE_NAME not created"
@@ -120,13 +123,23 @@
 # Test aggregate-add/remove-host
 # ==============================
 if [ "$VIRT_DRIVER" == "xenserver" ]; then
-    echo "TODO(johngarbutt) add tests for add/remove host from aggregate"
+    echo "TODO(johngarbutt) add tests for add/remove host from pool aggregate"
 fi
-
+HOST=`nova host-list | grep compute | get_field 1`
+# Make sure we can add two aggregates to the same host
+nova aggregate-add-host $AGGREGATE_ID $HOST
+nova aggregate-add-host $AGGREGATE2_ID $HOST
+if nova aggregate-add-host $AGGREGATE2_ID $HOST; then
+    echo "ERROR: could add duplicate host to a single aggregate"
+    exit -1
+fi
+nova aggregate-remove-host $AGGREGATE2_ID $HOST
+nova aggregate-remove-host $AGGREGATE_ID $HOST
 
 # Test aggregate-delete
 # =====================
 nova aggregate-delete $AGGREGATE_ID
+nova aggregate-delete $AGGREGATE2_ID
 exit_if_aggregate_present $AGGREGATE_NAME
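The duplicate-add check above relies on inverting the exit status of nova aggregate-add-host. If more negative tests are added later, a small helper keeps them readable; a minimal sketch (the helper name is hypothetical, not part of DevStack's functions file):

    # Run a command that is expected to fail; abort if it unexpectedly succeeds
    function assert_fails() {
        if "$@"; then
            echo "ERROR: command succeeded but was expected to fail: $*"
            exit 1
        fi
    }

    assert_fails nova aggregate-add-host $AGGREGATE2_ID $HOST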
 
 
diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh
index 6a0937a..7fe81ba 100755
--- a/exercises/boot_from_volume.sh
+++ b/exercises/boot_from_volume.sh
@@ -46,6 +46,8 @@
 # Default floating IP pool name
 DEFAULT_FLOATING_POOL=${DEFAULT_FLOATING_POOL:-nova}
 
+# Default user
+DEFAULT_INSTANCE_USER=${DEFAULT_INSTANCE_USER:-cirros}
 
 # Launching servers
 # =================
@@ -150,7 +152,7 @@
 # To do this, ssh to the builder instance, mount volume, and build a volume-backed image.
 STAGING_DIR=/tmp/stage
 CIRROS_DIR=/tmp/cirros
-ssh -o StrictHostKeyChecking=no -i $KEY_FILE cirros@$FLOATING_IP << EOF
+ssh -o StrictHostKeyChecking=no -i $KEY_FILE ${DEFAULT_INSTANCE_USER}@$FLOATING_IP << EOF
 set -o errexit
 set -o xtrace
 sudo mkdir -p $STAGING_DIR
@@ -168,10 +170,10 @@
 fi
 
 # Copy cirros onto the volume
-scp -o StrictHostKeyChecking=no -i $KEY_FILE cirros-0.3.0-x86_64-rootfs.img.gz cirros@$FLOATING_IP:$STAGING_DIR
+scp -o StrictHostKeyChecking=no -i $KEY_FILE cirros-0.3.0-x86_64-rootfs.img.gz ${DEFAULT_INSTANCE_USER}@$FLOATING_IP:$STAGING_DIR
 
 # Unpack cirros into volume
-ssh -o StrictHostKeyChecking=no -i $KEY_FILE cirros@$FLOATING_IP << EOF
+ssh -o StrictHostKeyChecking=no -i $KEY_FILE ${DEFAULT_INSTANCE_USER}@$FLOATING_IP << EOF
 set -o errexit
 set -o xtrace
 cd $STAGING_DIR
@@ -221,7 +223,7 @@
 fi
 
 # Make sure our volume-backed instance launched
-ssh -o StrictHostKeyChecking=no -i $KEY_FILE cirros@$FLOATING_IP << EOF
+ssh -o StrictHostKeyChecking=no -i $KEY_FILE ${DEFAULT_INSTANCE_USER}@$FLOATING_IP << EOF
 echo "success!"
 EOF
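The ssh and scp calls above all go through DEFAULT_INSTANCE_USER, so an image whose login account is not cirros only needs an override; for example (assuming the exercise picks it up from localrc, and the user name is an assumption about the guest image):

    # localrc
    DEFAULT_INSTANCE_USER=ubuntu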
 
diff --git a/exercises/euca.sh b/exercises/euca.sh
index 4a538c6..9f7aed1 100755
--- a/exercises/euca.sh
+++ b/exercises/euca.sh
@@ -40,12 +40,15 @@
 # Instance type to create
 DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
 
+# Boot this image, use first AMI-format image if unset
+DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami}
+
 
 # Launching a server
 # ==================
 
 # Find a machine image to boot
-IMAGE=`euca-describe-images | grep machine | cut -f2 | head -n1`
+IMAGE=`euca-describe-images | grep machine | grep ${DEFAULT_IMAGE_NAME} | cut -f2 | head -n1`
 
 # Define secgroup
 SECGROUP=euca_secgroup
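Since the image lookup now also filters on DEFAULT_IMAGE_NAME, it can come back empty when nothing matches; a slightly more defensive variant of the same lookup might guard the result (a sketch reusing the die helper already used by the exercises; the message text is illustrative):

    IMAGE=`euca-describe-images | grep machine | grep ${DEFAULT_IMAGE_NAME} | cut -f2 | head -n1`
    if [ -z "$IMAGE" ]; then
        die "No machine image matching ${DEFAULT_IMAGE_NAME} found"
    fi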
diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh
index 51019a3..02259c0 100755
--- a/exercises/floating_ips.sh
+++ b/exercises/floating_ips.sh
@@ -185,7 +185,7 @@
 nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0 || die "Failure deleting security group rule from $SECGROUP"
 
 # FIXME (anthony): make xs support security groups
-if [ "$VIRT_DRIVER" != "xenserver" ]; then
+if [ "$VIRT_DRIVER" != "xenserver" -a "$VIRT_DRIVER" != "openvz" ]; then
     # test we can aren't able to ping our floating ip within ASSOCIATE_TIMEOUT seconds
     if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then
         print "Security group failure - ping should not be allowed!"
diff --git a/functions b/functions
index b66dc15..7a7406d 100644
--- a/functions
+++ b/functions
@@ -9,6 +9,18 @@
 set +o xtrace
 
 
+# Return 0 if the given address is in the network, or 1 if the
+# address is not in the network or the netaddr library is not
+# installed.
+function address_in_net() {
+    python -c "
+import netaddr
+import sys
+sys.exit(netaddr.IPAddress('$1') not in netaddr.IPNetwork('$2'))
+"
+}
+
+
 # apt-get wrapper to set arguments correctly
 # apt_get operation package [package ...]
 function apt_get() {
@@ -17,6 +29,7 @@
     [[ "$(id -u)" = "0" ]] && sudo="env"
     $sudo DEBIAN_FRONTEND=noninteractive \
         http_proxy=$http_proxy https_proxy=$https_proxy \
+        no_proxy=$no_proxy \
         apt-get --option "Dpkg::Options::=--force-confold" --assume-yes "$@"
 }
 
@@ -486,6 +499,7 @@
     $SUDO_PIP PIP_DOWNLOAD_CACHE=${PIP_DOWNLOAD_CACHE:-/var/cache/pip} \
         HTTP_PROXY=$http_proxy \
         HTTPS_PROXY=$https_proxy \
+        NO_PROXY=$no_proxy \
         $CMD_PIP install --use-mirrors $@
 }
 
@@ -521,6 +535,7 @@
         $SUDO_CMD \
             HTTP_PROXY=$http_proxy \
             HTTPS_PROXY=$https_proxy \
+            NO_PROXY=$no_proxy \
             python setup.py develop \
     )
 }
@@ -574,6 +589,7 @@
     local sudo="sudo"
     [[ "$(id -u)" = "0" ]] && sudo="env"
     $sudo http_proxy=$http_proxy https_proxy=$https_proxy \
+        no_proxy=$no_proxy \
         yum install -y "$@"
 }
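A quick usage sketch for the address_in_net helper added above (the addresses are illustrative; the check only gives a meaningful answer when the python netaddr module is installed):

    if address_in_net 10.0.0.5 10.0.0.0/24; then
        echo "10.0.0.5 is inside 10.0.0.0/24"
    fi
    address_in_net 172.24.4.225 10.0.0.0/24 || echo "172.24.4.225 is outside 10.0.0.0/24"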
 
diff --git a/openrc b/openrc
index 4430e82..08ef98b 100644
--- a/openrc
+++ b/openrc
@@ -41,6 +41,10 @@
 # or NOVA_PASSWORD.
 export OS_PASSWORD=${ADMIN_PASSWORD:-secrete}
 
+# Don't cache the auth credentials in a keyring by default.  Development and
+# testing are much easier with this off.
+export OS_NO_CACHE=${OS_NO_CACHE:-1}
+
 # Set api HOST_IP endpoint.  SERVICE_HOST may also be used to specify the endpoint,
 # which is convenient for some localrc configurations.
 HOST_IP=${HOST_IP:-127.0.0.1}
diff --git a/stack.sh b/stack.sh
index 3827d77..c3ccbf4 100755
--- a/stack.sh
+++ b/stack.sh
@@ -2,7 +2,7 @@
 
 # ``stack.sh`` is an opinionated OpenStack developer installation.  It
 # installs and configures various combinations of **Glance**, **Horizon**,
-# **Keystone**, **Melange**, **Nova**, **Quantum** and **Swift**
+# **Keystone**, **Nova**, **Quantum** and **Swift**
 
 # This script allows you to specify configuration options of what git
 # repositories to use, enabled services, network configuration and various
@@ -60,16 +60,21 @@
 source $TOP_DIR/stackrc
 
 # HTTP and HTTPS proxy servers are supported via the usual environment variables
-# ``http_proxy`` and ``https_proxy``.  They can be set in ``localrc`` if necessary
+# ``http_proxy`` and ``https_proxy``.  Additionally, if you would like to reach
+# a specific server directly rather than through the proxy, you can set the
+# ``no_proxy`` environment variable.  They can be set in ``localrc`` if necessary
 # or on the command line::
 #
-#     http_proxy=http://proxy.example.com:3128/ ./stack.sh
+#     http_proxy=http://proxy.example.com:3128/ no_proxy=repo.example.net ./stack.sh
 if [[ -n "$http_proxy" ]]; then
     export http_proxy=$http_proxy
 fi
 if [[ -n "$https_proxy" ]]; then
     export https_proxy=$https_proxy
 fi
+if [[ -n "$no_proxy" ]]; then
+    export no_proxy=$no_proxy
+fi
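As noted above, the proxy settings may also live in localrc instead of being passed on the command line; a hypothetical example (host names are placeholders):

    # localrc
    http_proxy=http://proxy.example.com:3128/
    https_proxy=http://proxy.example.com:3128/
    no_proxy=repo.example.net,127.0.0.1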
 
 # Destination path for installation ``DEST``
 DEST=${DEST:-/opt/stack}
@@ -155,9 +160,13 @@
     else
         rpm -qa | grep sudo || install_package sudo
     fi
+    if ! getent group stack >/dev/null; then
+        echo "Creating a group called stack"
+        groupadd stack
+    fi
     if ! getent passwd stack >/dev/null; then
         echo "Creating a user called stack"
-        useradd -U -s /bin/bash -d $DEST -m stack
+        useradd -g stack -s /bin/bash -d $DEST -m stack
     fi
 
     echo "Giving stack user passwordless sudo priviledges"
@@ -247,8 +256,6 @@
 SWIFTCLIENT_DIR=$DEST/python-swiftclient
 QUANTUM_DIR=$DEST/quantum
 QUANTUM_CLIENT_DIR=$DEST/python-quantumclient
-MELANGE_DIR=$DEST/melange
-MELANGECLIENT_DIR=$DEST/python-melangeclient
 
 # Default Quantum Plugin
 Q_PLUGIN=${Q_PLUGIN:-openvswitch}
@@ -257,42 +264,45 @@
 # Default Quantum Host
 Q_HOST=${Q_HOST:-localhost}
 # Which Quantum API nova should use
-NOVA_USE_QUANTUM_API=${NOVA_USE_QUANTUM_API:-v1}
 # Default admin username
 Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-quantum}
 # Default auth strategy
 Q_AUTH_STRATEGY=${Q_AUTH_STRATEGY:-keystone}
 
 
-# Default Melange Port
-M_PORT=${M_PORT:-9898}
-# Default Melange Host
-M_HOST=${M_HOST:-localhost}
-# Melange MAC Address Range
-M_MAC_RANGE=${M_MAC_RANGE:-FE-EE-DD-00-00-00/24}
-
 # Name of the lvm volume group to use/create for iscsi volumes
 VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes}
 VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-}
 INSTANCE_NAME_PREFIX=${INSTANCE_NAME_PREFIX:-instance-}
 
-# Nova hypervisor configuration.  We default to libvirt with **kvm** but will
-# drop back to **qemu** if we are unable to load the kvm module.  ``stack.sh`` can
-# also install an **LXC** based system.
-VIRT_DRIVER=${VIRT_DRIVER:-libvirt}
-LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm}
-
 # Nova supports pluggable schedulers.  ``FilterScheduler`` should work in most
 # cases.
 SCHEDULER=${SCHEDULER:-nova.scheduler.filter_scheduler.FilterScheduler}
 
-HOST_IP_IFACE=${HOST_IP_IFACE:-eth0}
-# Use the eth0 IP unless an explicit is set by ``HOST_IP`` environment variable
+# Set fixed and floating range here so we can make sure not to use addresses
+# from either range when attempting to guess the ip to use for the host
+FIXED_RANGE=${FIXED_RANGE:-10.0.0.0/24}
+FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.224/28}
+
+# Find the interface used for the default route
+HOST_IP_IFACE=${HOST_IP_IFACE:-$(ip route | sed -n '/^default/{ s/.*dev \(\w\+\)\s\+.*/\1/; p; }')}
+# Search for an IP unless an explicit is set by ``HOST_IP`` environment variable
 if [ -z "$HOST_IP" -o "$HOST_IP" == "dhcp" ]; then
-    HOST_IP=`LC_ALL=C ip -f inet addr show ${HOST_IP_IFACE} | awk '/inet/ {split($2,parts,"/");  print parts[1]}' | head -n1`
-    if [ "$HOST_IP" = "" ]; then
+    HOST_IP=""
+    HOST_IPS=`LC_ALL=C ip -f inet addr show ${HOST_IP_IFACE} | awk '/inet/ {split($2,parts,"/");  print parts[1]}'`
+    for IP in $HOST_IPS; do
+        # Attempt to filter out ip addresses that are part of the fixed and
+        # floating range. Note that this method only works if the 'netaddr'
+        # python library is installed. If it is not installed, an error
+        # will be printed and the first ip from the interface will be used.
+        if ! (address_in_net $IP $FIXED_RANGE || address_in_net $IP $FLOATING_RANGE); then
+            HOST_IP=$IP
+            break;
+        fi
+    done
+    if [ "$HOST_IP" == "" ]; then
         echo "Could not determine host ip address."
-        echo "Either localrc specified dhcp on ${HOST_IP_IFACE} or defaulted to eth0"
+        echo "Either localrc specified dhcp on ${HOST_IP_IFACE} or the defaulted interface had no usable ip"
         exit 1
     fi
 fi
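The sed expression pulls the device name out of the "default via <gateway> dev <interface> ..." line printed by ip route; an equivalent extraction, shown only for clarity and assuming the same output format, could be written with awk:

    HOST_IP_IFACE=${HOST_IP_IFACE:-$(ip route | awk '/^default/ {for (i = 1; i < NF; i++) if ($i == "dev") print $(i + 1)}' | head -1)}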
@@ -371,11 +381,8 @@
 fi
 
 PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-$PUBLIC_INTERFACE_DEFAULT}
-PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-br100}
-FIXED_RANGE=${FIXED_RANGE:-10.0.0.0/24}
 FIXED_NETWORK_SIZE=${FIXED_NETWORK_SIZE:-256}
 NETWORK_GATEWAY=${NETWORK_GATEWAY:-10.0.0.1}
-FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.224/28}
 NET_MAN=${NET_MAN:-FlatDHCPManager}
 EC2_DMZ_HOST=${EC2_DMZ_HOST:-$SERVICE_HOST}
 FLAT_NETWORK_BRIDGE=${FLAT_NETWORK_BRIDGE:-$FLAT_NETWORK_BRIDGE_DEFAULT}
@@ -421,14 +428,6 @@
 #
 # With Quantum networking the NET_MAN variable is ignored.
 
-# Using Melange IPAM:
-#
-# Make sure that quantum and melange are enabled in ENABLED_SERVICES.
-# If they are then the melange IPAM lib will be set in the QuantumManager.
-# Adding m-svc to ENABLED_SERVICES will start the melange service on this
-# host.
-
-
 # MySQL & (RabbitMQ or Qpid)
 # --------------------------
 
@@ -787,13 +786,6 @@
     # quantum
     git_clone $QUANTUM_REPO $QUANTUM_DIR $QUANTUM_BRANCH
 fi
-if is_service_enabled m-svc; then
-    # melange
-    git_clone $MELANGE_REPO $MELANGE_DIR $MELANGE_BRANCH
-fi
-if is_service_enabled melange; then
-    git_clone $MELANGECLIENT_REPO $MELANGECLIENT_DIR $MELANGECLIENT_BRANCH
-fi
 if is_service_enabled cinder; then
     install_cinder
 fi
@@ -831,12 +823,6 @@
     setup_develop $QUANTUM_CLIENT_DIR
     setup_develop $QUANTUM_DIR
 fi
-if is_service_enabled m-svc; then
-    setup_develop $MELANGE_DIR
-fi
-if is_service_enabled melange; then
-    setup_develop $MELANGECLIENT_DIR
-fi
 if is_service_enabled cinder; then
     configure_cinder
 fi
@@ -1118,20 +1104,12 @@
         Q_PLUGIN_CONF_PATH=etc/quantum/plugins/openvswitch
         Q_PLUGIN_CONF_FILENAME=ovs_quantum_plugin.ini
         Q_DB_NAME="ovs_quantum"
-        if [[ "$NOVA_USE_QUANTUM_API" = "v1" ]]; then
-            Q_PLUGIN_CLASS="quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPlugin"
-        elif [[ "$NOVA_USE_QUANTUM_API" = "v2" ]]; then
-            Q_PLUGIN_CLASS="quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPluginV2"
-        fi
+        Q_PLUGIN_CLASS="quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPluginV2"
     elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
         Q_PLUGIN_CONF_PATH=etc/quantum/plugins/linuxbridge
         Q_PLUGIN_CONF_FILENAME=linuxbridge_conf.ini
         Q_DB_NAME="quantum_linux_bridge"
-        if [[ "$NOVA_USE_QUANTUM_API" = "v1" ]]; then
-            Q_PLUGIN_CLASS="quantum.plugins.linuxbridge.LinuxBridgePlugin.LinuxBridgePlugin"
-        elif [[ "$NOVA_USE_QUANTUM_API" = "v2" ]]; then
-            Q_PLUGIN_CLASS="quantum.plugins.linuxbridge.lb_quantum_plugin.LinuxBridgePluginV2"
-        fi
+        Q_PLUGIN_CLASS="quantum.plugins.linuxbridge.lb_quantum_plugin.LinuxBridgePluginV2"
     else
         echo "Unknown Quantum plugin '$Q_PLUGIN'.. exiting"
         exit 1
@@ -1155,20 +1133,15 @@
         sudo sed -i -e "s/.*enable_tunneling = .*$/enable_tunneling = $OVS_ENABLE_TUNNELING/g" /$Q_PLUGIN_CONF_FILE
     fi
 
-    if [[ "$NOVA_USE_QUANTUM_API" = "v1" ]]; then
-        iniset /$Q_PLUGIN_CONF_FILE AGENT target_v2_api False
-    elif [[ "$NOVA_USE_QUANTUM_API" = "v2" ]]; then
-        iniset /$Q_PLUGIN_CONF_FILE AGENT target_v2_api True
-    fi
+    Q_CONF_FILE=/etc/quantum/quantum.conf
+    cp $QUANTUM_DIR/etc/quantum.conf $Q_CONF_FILE
 fi
 
 # Quantum service (for controller node)
 if is_service_enabled q-svc; then
-    Q_CONF_FILE=/etc/quantum/quantum.conf
     Q_API_PASTE_FILE=/etc/quantum/api-paste.ini
     Q_POLICY_FILE=/etc/quantum/policy.json
 
-    cp $QUANTUM_DIR/etc/quantum.conf $Q_CONF_FILE
     cp $QUANTUM_DIR/etc/api-paste.ini $Q_API_PASTE_FILE
     cp $QUANTUM_DIR/etc/policy.json $Q_POLICY_FILE
 
@@ -1190,8 +1163,6 @@
     iniset $Q_API_PASTE_FILE filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME
     iniset $Q_API_PASTE_FILE filter:authtoken admin_user $Q_ADMIN_USERNAME
     iniset $Q_API_PASTE_FILE filter:authtoken admin_password $SERVICE_PASSWORD
-
-    screen_it q-svc "cd $QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server --config-file $Q_CONF_FILE --config-file /$Q_PLUGIN_CONF_FILE"
 fi
 
 # Quantum agent (for compute nodes)
@@ -1212,11 +1183,9 @@
        # Start up the quantum <-> linuxbridge agent
        # set the default network interface
        QUANTUM_LB_PRIVATE_INTERFACE=${QUANTUM_LB_PRIVATE_INTERFACE:-$GUEST_INTERFACE_DEFAULT}
-       sudo sed -i -e "s/^physical_interface = .*$/physical_interface = $QUANTUM_LB_PRIVATE_INTERFACE/g" /$Q_PLUGIN_CONF_FILE
+       iniset /$Q_PLUGIN_CONF_FILE LINUX_BRIDGE physical_interface_mappings default:$QUANTUM_LB_PRIVATE_INTERFACE
        AGENT_BINARY="$QUANTUM_DIR/quantum/plugins/linuxbridge/agent/linuxbridge_quantum_agent.py"
     fi
-    # Start up the quantum agent
-    screen_it q-agt "sudo python $AGENT_BINARY --config-file $Q_CONF_FILE --config-file /$Q_PLUGIN_CONF_FILE"
 fi
 
 # Quantum DHCP
@@ -1225,9 +1194,7 @@
 
     Q_DHCP_CONF_FILE=/etc/quantum/dhcp_agent.ini
 
-    if [[ -e $QUANTUM_DIR/etc/dhcp_agent.ini ]]; then
-      sudo cp $QUANTUM_DIR/etc/dhcp_agent.ini $Q_DHCP_CONF_FILE
-    fi
+    cp $QUANTUM_DIR/etc/dhcp_agent.ini $Q_DHCP_CONF_FILE
 
     # Set verbose
     iniset $Q_DHCP_CONF_FILE DEFAULT verbose True
@@ -1246,32 +1213,27 @@
     elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
         iniset $Q_DHCP_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver
     fi
-    # Start up the quantum agent
-    screen_it q-dhcp "sudo python $AGENT_DHCP_BINARY --config-file=$Q_DHCP_CONF_FILE"
 fi
 
-# Melange service
-if is_service_enabled m-svc; then
-    if is_service_enabled mysql; then
-        mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS melange;'
-        mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE melange CHARACTER SET utf8;'
-    else
-        echo "mysql must be enabled in order to use the $Q_PLUGIN Quantum plugin."
-        exit 1
+# Quantum RPC support - must be updated prior to starting any of the services
+if is_service_enabled quantum; then
+    iniset $Q_CONF_FILE DEFAULT control_exchange quantum
+    if is_service_enabled qpid ; then
+        iniset $Q_CONF_FILE DEFAULT rpc_backend quantum.openstack.common.rpc.impl_qpid
+    elif [ -n "$RABBIT_HOST" ] &&  [ -n "$RABBIT_PASSWORD" ]; then
+        iniset $Q_CONF_FILE DEFAULT rabbit_host $RABBIT_HOST
+        iniset $Q_CONF_FILE DEFAULT rabbit_password $RABBIT_PASSWORD
     fi
-    MELANGE_CONFIG_FILE=$MELANGE_DIR/etc/melange/melange.conf
-    cp $MELANGE_CONFIG_FILE.sample $MELANGE_CONFIG_FILE
-    sed -i -e "s/^sql_connection =.*$/sql_connection = mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/melange?charset=utf8/g" $MELANGE_CONFIG_FILE
-    cd $MELANGE_DIR && PYTHONPATH=.:$PYTHONPATH python $MELANGE_DIR/bin/melange-manage --config-file=$MELANGE_CONFIG_FILE db_sync
-    screen_it m-svc "cd $MELANGE_DIR && PYTHONPATH=.:$PYTHONPATH python $MELANGE_DIR/bin/melange-server --config-file=$MELANGE_CONFIG_FILE"
-    echo "Waiting for melange to start..."
-    if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://127.0.0.1:9898; do sleep 1; done"; then
-      echo "melange-server did not start"
-      exit 1
-    fi
-    melange mac_address_range create cidr=$M_MAC_RANGE
 fi
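For reference, when qpid is enabled the iniset calls above should leave /etc/quantum/quantum.conf looking roughly like this (rendered values are illustrative):

    [DEFAULT]
    control_exchange = quantum
    rpc_backend = quantum.openstack.common.rpc.impl_qpid

With RabbitMQ instead, rpc_backend is left at its default and rabbit_host / rabbit_password are filled in from RABBIT_HOST and RABBIT_PASSWORD.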
 
+# Start the Quantum services
+screen_it q-svc "cd $QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server --config-file $Q_CONF_FILE --config-file /$Q_PLUGIN_CONF_FILE"
+
+# Start up the quantum agent
+screen_it q-agt "sudo python $AGENT_BINARY --config-file $Q_CONF_FILE --config-file /$Q_PLUGIN_CONF_FILE"
+
+# Start up the quantum DHCP agent
+screen_it q-dhcp "sudo python $AGENT_DHCP_BINARY --config-file $Q_CONF_FILE --config-file=$Q_DHCP_CONF_FILE"
 
 # Nova
 # ----
@@ -1466,7 +1428,7 @@
     sudo rm -rf $NOVA_DIR/instances/*
 fi
 
-if is_service_enabled n-net; then
+if is_service_enabled n-net q-dhcp; then
     # Delete traces of nova networks from prior runs
     sudo killall dnsmasq || true
     clean_iptables
@@ -1784,6 +1746,7 @@
 
         # Setup the tgt configuration file
         if [[ ! -f /etc/tgt/conf.d/nova.conf ]]; then
+           sudo mkdir -p /etc/tgt/conf.d
            echo "include $NOVA_DIR/volumes/*" | sudo tee /etc/tgt/conf.d/nova.conf
         fi
 
@@ -1817,28 +1780,13 @@
 add_nova_opt "s3_host=$SERVICE_HOST"
 add_nova_opt "s3_port=$S3_SERVICE_PORT"
 if is_service_enabled quantum; then
-    if [[ "$NOVA_USE_QUANTUM_API" = "v1" ]]; then
-        add_nova_opt "network_manager=nova.network.quantum.manager.QuantumManager"
-        add_nova_opt "quantum_connection_host=$Q_HOST"
-        add_nova_opt "quantum_connection_port=$Q_PORT"
-        add_nova_opt "quantum_use_dhcp=True"
-
-        if is_service_enabled melange; then
-            add_nova_opt "quantum_ipam_lib=nova.network.quantum.melange_ipam_lib"
-            add_nova_opt "use_melange_mac_generation=True"
-            add_nova_opt "melange_host=$M_HOST"
-            add_nova_opt "melange_port=$M_PORT"
-        fi
-
-    elif [[ "$NOVA_USE_QUANTUM_API" = "v2" ]]; then
-        add_nova_opt "network_api_class=nova.network.quantumv2.api.API"
-        add_nova_opt "quantum_admin_username=$Q_ADMIN_USERNAME"
-        add_nova_opt "quantum_admin_password=$SERVICE_PASSWORD"
-        add_nova_opt "quantum_admin_auth_url=$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0"
-        add_nova_opt "quantum_auth_strategy=$Q_AUTH_STRATEGY"
-        add_nova_opt "quantum_admin_tenant_name=$SERVICE_TENANT_NAME"
-        add_nova_opt "quantum_url=http://$Q_HOST:$Q_PORT"
-    fi
+    add_nova_opt "network_api_class=nova.network.quantumv2.api.API"
+    add_nova_opt "quantum_admin_username=$Q_ADMIN_USERNAME"
+    add_nova_opt "quantum_admin_password=$SERVICE_PASSWORD"
+    add_nova_opt "quantum_admin_auth_url=$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0"
+    add_nova_opt "quantum_auth_strategy=$Q_AUTH_STRATEGY"
+    add_nova_opt "quantum_admin_tenant_name=$SERVICE_TENANT_NAME"
+    add_nova_opt "quantum_url=http://$Q_HOST:$Q_PORT"
 
     if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
         NOVA_VIF_DRIVER="nova.virt.libvirt.vif.LibvirtOpenVswitchDriver"
@@ -1957,6 +1905,13 @@
     # Need to avoid crash due to new firewall support
     XEN_FIREWALL_DRIVER=${XEN_FIREWALL_DRIVER:-"nova.virt.firewall.IptablesFirewallDriver"}
     add_nova_opt "firewall_driver=$XEN_FIREWALL_DRIVER"
+elif [ "$VIRT_DRIVER" = 'openvz' ]; then
+    # TODO(deva): OpenVZ driver does not yet work if compute_driver is set here.
+    #             Replace connection_type when this is fixed.
+    #             add_nova_opt "compute_driver=openvz.connection.OpenVzConnection"
+    add_nova_opt "connection_type=openvz"
+    LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"}
+    add_nova_opt "firewall_driver=$LIBVIRT_FIREWALL_DRIVER"
 else
     add_nova_opt "compute_driver=libvirt.LibvirtDriver"
     LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"}
@@ -2131,25 +2086,23 @@
 
 # If we're using Quantum (i.e. q-svc is enabled), network creation has to
 # happen after we've started the Quantum service.
-if is_service_enabled mysql && is_service_enabled nova; then
-    if [[ "$NOVA_USE_QUANTUM_API" = "v1" ]]; then
-        # Create a small network
-        $NOVA_DIR/bin/nova-manage network create private $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS
+if is_service_enabled q-svc; then
+    TENANT_ID=$(keystone tenant-list | grep " demo " | get_field 1)
 
-        # Create some floating ips
-        $NOVA_DIR/bin/nova-manage floating create $FLOATING_RANGE
+    # Create a small network
+    # Since the quantum command is executed in the admin context at this point,
+    # --tenant_id needs to be specified.
+    NET_ID=$(quantum net-create --tenant_id $TENANT_ID net1 | grep ' id ' | get_field 2)
+    quantum subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE
+elif is_service_enabled mysql && is_service_enabled nova; then
+    # Create a small network
+    $NOVA_DIR/bin/nova-manage network create private $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS
 
-        # Create a second pool
-        $NOVA_DIR/bin/nova-manage floating create --ip_range=$TEST_FLOATING_RANGE --pool=$TEST_FLOATING_POOL
-    elif [[ "$NOVA_USE_QUANTUM_API" = "v2" ]]; then
-        TENANT_ID=$(keystone tenant-list | grep " demo " | get_field 1)
+    # Create some floating ips
+    $NOVA_DIR/bin/nova-manage floating create $FLOATING_RANGE
 
-        # Create a small network
-        # Since quantum command is executed in admin context at this point,
-        # --tenant_id needs to be specified.
-        NET_ID=$(quantum net-create --tenant_id $TENANT_ID net1 | grep ' id ' | get_field 2)
-        quantum subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE
-    fi
+    # Create a second pool
+    $NOVA_DIR/bin/nova-manage floating create --ip_range=$TEST_FLOATING_RANGE --pool=$TEST_FLOATING_POOL
 fi
 
 # Launching nova-compute should be as simple as running ``nova-compute`` but
@@ -2212,6 +2165,14 @@
             wget -c $image_url -O $FILES/$IMAGE_FNAME
         fi
 
+        # OpenVZ-format images are provided as .tar.gz but are not decompressed prior to loading
+        if [[ "$image_url" =~ 'openvz' ]]; then
+            IMAGE="$FILES/${IMAGE_FNAME}"
+            IMAGE_NAME="${IMAGE_FNAME%.tar.gz}"
+            glance --os-auth-token $TOKEN --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --public --container-format ami --disk-format ami < "$IMAGE"
+            continue
+        fi
+
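The image name is derived with plain parameter expansion; for the OpenVZ template referenced in stackrc it works out as follows:

    IMAGE_FNAME=ubuntu-11.10-x86_64.tar.gz
    echo "${IMAGE_FNAME%.tar.gz}"    # prints: ubuntu-11.10-x86_64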
         KERNEL=""
         RAMDISK=""
         DISK_FORMAT=""
@@ -2258,19 +2219,19 @@
         esac
 
         if [ "$CONTAINER_FORMAT" = "bare" ]; then
-            glance --os-auth-token $TOKEN --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --public --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < <(zcat --force "${IMAGE}")
+            glance --os-auth-token $TOKEN --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public=True --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < <(zcat --force "${IMAGE}")
         else
             # Use glance client to add the kernel the root filesystem.
             # We parse the results of the first upload to get the glance ID of the
             # kernel for use when uploading the root filesystem.
             KERNEL_ID=""; RAMDISK_ID="";
             if [ -n "$KERNEL" ]; then
-                KERNEL_ID=$(glance --os-auth-token $TOKEN --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-kernel" --public --container-format aki --disk-format aki < "$KERNEL" | grep ' id ' | get_field 2)
+                KERNEL_ID=$(glance --os-auth-token $TOKEN --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-kernel" --is-public=True --container-format aki --disk-format aki < "$KERNEL" | grep ' id ' | get_field 2)
             fi
             if [ -n "$RAMDISK" ]; then
-                RAMDISK_ID=$(glance --os-auth-token $TOKEN --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-ramdisk" --public --container-format ari --disk-format ari < "$RAMDISK" | grep ' id ' | get_field 2)
+                RAMDISK_ID=$(glance --os-auth-token $TOKEN --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-ramdisk" --is-public=True --container-format ari --disk-format ari < "$RAMDISK" | grep ' id ' | get_field 2)
             fi
-            glance --os-auth-token $TOKEN --os-image-url http://$GLANCE_HOSTPORT image-create --name "${IMAGE_NAME%.img}" --public --container-format ami --disk-format ami ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} ${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}"
+            glance --os-auth-token $TOKEN --os-image-url http://$GLANCE_HOSTPORT image-create --name "${IMAGE_NAME%.img}" --is-public=True --container-format ami --disk-format ami ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} ${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}"
         fi
     done
 fi
diff --git a/stackrc b/stackrc
index 3bbc475..c906f95 100644
--- a/stackrc
+++ b/stackrc
@@ -91,13 +91,16 @@
 TEMPEST_REPO=${GIT_BASE}/openstack/tempest.git
 TEMPEST_BRANCH=master
 
-# melange service
-MELANGE_REPO=${GIT_BASE}/openstack/melange.git
-MELANGE_BRANCH=master
+# Nova hypervisor configuration.  We default to libvirt with **kvm** but will
+# drop back to **qemu** if we are unable to load the kvm module.  ``stack.sh`` can
+# also install an **LXC** or **OpenVZ** based system.
+VIRT_DRIVER=${VIRT_DRIVER:-libvirt}
+LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm}
 
-# python melange client library
-MELANGECLIENT_REPO=${GIT_BASE}/openstack/python-melangeclient.git
-MELANGECLIENT_BRANCH=master
+# allow local overrides of env variables
+if [ -f $RC_DIR/localrc ]; then
+    source $RC_DIR/localrc
+fi
 
 # Specify a comma-separated list of uec images to download and install into glance.
 # supported urls here are:
@@ -114,19 +117,27 @@
 #      http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-rootfs.img.gz
 #IMAGE_URLS="http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz" # old ttylinux-uec image
 #IMAGE_URLS="http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img" # cirros full disk image
-case "$LIBVIRT_TYPE" in
-    lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc
-        DEFAULT_IMAGE_NAME=cirros-0.3.0-x86_64-rootfs
-        IMAGE_URLS="http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-rootfs.img.gz";;
-    *)  # otherwise, use the uec style image (with kernel, ramdisk, disk)
-        DEFAULT_IMAGE_NAME=cirros-0.3.0-x86_64-uec
-        IMAGE_URLS="http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-uec.tar.gz";;
+#
+# Set default image based on LIBVIRT_TYPE or VIRT_DRIVER, which may be set in localrc
+# but allow DEFAULT_IMAGE_NAME and IMAGE_URLS to be set directly in localrc, too.
+case "$VIRT_DRIVER" in
+    openvz) # OpenVZ uses its own format of image, and does not support uec style images
+        DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ubuntu-11.10-x86_64}
+        IMAGE_URLS=${IMAGE_URLS:-"http://download.openvz.org/template/precreated/ubuntu-11.10-x86_64.tar.gz"};;
+    libvirt)
+        case "$LIBVIRT_TYPE" in
+            lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc
+                DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.0-x86_64-rootfs}
+                IMAGE_URLS=${IMAGE_URLS:-"http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-rootfs.img.gz"};;
+            *) # otherwise, use the uec style image (with kernel, ramdisk, disk)
+                DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.0-x86_64-uec}
+                IMAGE_URLS=${IMAGE_URLS:-"http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-uec.tar.gz"};;
+        esac
+        ;;
+    *) # otherwise, use the uec style image (with kernel, ramdisk, disk)
+        DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.0-x86_64-uec}
+        IMAGE_URLS=${IMAGE_URLS:-"http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-uec.tar.gz"};;
 esac
 
-# allow local overrides of env variables
-if [ -f $RC_DIR/localrc ]; then
-    source $RC_DIR/localrc
-fi
-
 # 5Gb default volume backing file size
 VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-5130M}
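Because localrc is now sourced before the image selection and every default uses the ${VAR:-...} form, a localrc can either just switch the hypervisor or pin the image outright; a hypothetical example (the explicit image name and URL are placeholders):

    # localrc
    VIRT_DRIVER=openvz
    # or bypass the case statement's defaults entirely:
    #DEFAULT_IMAGE_NAME=my-openvz-template
    #IMAGE_URLS=http://mirror.example.com/my-openvz-template.tar.gz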
diff --git a/tools/configure_tempest.sh b/tools/configure_tempest.sh
index 4d029d8..d502248 100755
--- a/tools/configure_tempest.sh
+++ b/tools/configure_tempest.sh
@@ -67,15 +67,20 @@
 # Glance should already contain images to be used in tempest
 # testing. Here we simply look for images stored in Glance
 # and set the appropriate variables for use in the tempest config
-# We ignore ramdisk and kernel images and set the IMAGE_UUID to
-# the first image returned and set IMAGE_UUID_ALT to the second,
+# We ignore ramdisk and kernel images and look for the default image,
+# DEFAULT_IMAGE_NAME.  If it is not found, we set IMAGE_UUID to the
+# first image returned and IMAGE_UUID_ALT to the second,
 # if there is more than one returned...
 # ... Also ensure we only take active images, so we don't get snapshots in process
 IMAGE_LINES=`glance image-list`
 IFS="$(echo -e "\n\r")"
 IMAGES=""
 for line in $IMAGE_LINES; do
-    IMAGES="$IMAGES `echo $line | grep -v "^\(ID\|+--\)" | grep -v "\(aki\|ari\)" | grep 'active' | cut -d' ' -f2`"
+    if [ -z $DEFAULT_IMAGE_NAME ]; then
+        IMAGES="$IMAGES `echo $line | grep -v "^\(ID\|+--\)" | grep -v "\(aki\|ari\)" | grep 'active' | cut -d' ' -f2`"
+    else
+        IMAGES="$IMAGES `echo $line | grep -v "^\(ID\|+--\)" | grep -v "\(aki\|ari\)" | grep 'active' | grep "$DEFAULT_IMAGE_NAME" | cut -d' ' -f2`"
+    fi
 done
 # Create array of image UUIDs...
 IFS=" "
@@ -127,9 +132,31 @@
 ALT_TENANT_NAME=${ALT_TENANT_NAME:-alt_demo}
 ALT_PASSWORD=$OS_PASSWORD
 
-# TODO(jaypipes): Support configurable flavor refs here...
-FLAVOR_REF=1
-FLAVOR_REF_ALT=2
+# Check Nova for existing flavors and, if set, look for the
+# DEFAULT_INSTANCE_TYPE and use that. Otherwise, just use the first flavor.
+FLAVOR_LINES=`nova flavor-list`
+IFS="$(echo -e "\n\r")"
+FLAVORS=""
+for line in $FLAVOR_LINES; do
+    if [ -z $DEFAULT_INSTANCE_TYPE ]; then
+        FLAVORS="$FLAVORS `echo $line | grep -v "^\(|\s*ID\|+--\)" | cut -d' ' -f2`"
+    else
+        FLAVORS="$FLAVORS `echo $line | grep -v "^\(|\s*ID\|+--\)" | grep "$DEFAULT_INSTANCE_TYPE" | cut -d' ' -f2`"
+    fi
+done
+IFS=" "
+FLAVORS=($FLAVORS)
+NUM_FLAVORS=${#FLAVORS[*]}
+echo "Found $NUM_FLAVORS flavors"
+if [[ $NUM_FLAVORS -eq 0 ]]; then
+    echo "Found no valid flavors to use!"
+    exit 1
+fi
+FLAVOR_REF=${FLAVORS[0]}
+FLAVOR_REF_ALT=$FLAVOR_REF
+if [[ $NUM_FLAVORS -gt 1 ]]; then
+    FLAVOR_REF_ALT=${FLAVORS[1]}
+fi
 
 # Do any of the following need to be configurable?
 COMPUTE_CATALOG_TYPE=compute
@@ -141,7 +168,8 @@
 BUILD_INTERVAL=3
 BUILD_TIMEOUT=400
 RUN_SSH=True
-SSH_USER=$OS_USERNAME
+# Check for DEFAULT_INSTANCE_USER and try to connect with that account
+SSH_USER=${DEFAULT_INSTANCE_USER:-$OS_USERNAME}
 NETWORK_FOR_SSH=private
 IP_VERSION_FOR_SSH=4
 SSH_TIMEOUT=4
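Both lookups honour the same DEFAULT_* variables used by the exercises, so tempest can be pinned to a specific image, flavor and guest account in one place; a sketch, assuming the variables reach the script via localrc or the environment as elsewhere in DevStack:

    # localrc
    DEFAULT_IMAGE_NAME=cirros-0.3.0-x86_64-uec
    DEFAULT_INSTANCE_TYPE=m1.tiny
    DEFAULT_INSTANCE_USER=cirros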
diff --git a/unstack.sh b/unstack.sh
index 6a55a0a..64de915 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -54,7 +54,12 @@
         echo "iSCSI target cleanup needed:"
         echo "$TARGETS"
     fi
-    stop_service tgt
+
+    if [[ "$os_PACKAGE" = "deb" ]]; then
+        stop_service tgt
+    else
+        stop_service tgtd
+    fi
 fi
 
 if [[ -n "$UNSTACK_ALL" ]]; then