Merge "add command for Add icmp  tcp/22 to default security group"
diff --git a/files/keystone_data.sh b/files/keystone_data.sh
index 2a8d070..3791917 100755
--- a/files/keystone_data.sh
+++ b/files/keystone_data.sh
@@ -101,7 +101,7 @@
 		--service_id $KEYSTONE_SERVICE \
 		--publicurl "http://$SERVICE_HOST:\$(public_port)s/v2.0" \
 		--adminurl "http://$SERVICE_HOST:\$(admin_port)s/v2.0" \
-		--internalurl "http://$SERVICE_HOST:\$(admin_port)s/v2.0"
+		--internalurl "http://$SERVICE_HOST:\$(public_port)s/v2.0"
 fi
 
 # Nova
@@ -123,9 +123,9 @@
         keystone endpoint-create \
             --region RegionOne \
             --service_id $NOVA_SERVICE \
-            --publicurl "http://$SERVICE_HOST:\$(compute_port)s/v1.1/\$(tenant_id)s" \
-            --adminurl "http://$SERVICE_HOST:\$(compute_port)s/v1.1/\$(tenant_id)s" \
-            --internalurl "http://$SERVICE_HOST:\$(compute_port)s/v1.1/\$(tenant_id)s"
+            --publicurl "http://$SERVICE_HOST:\$(compute_port)s/v2/\$(tenant_id)s" \
+            --adminurl "http://$SERVICE_HOST:\$(compute_port)s/v2/\$(tenant_id)s" \
+            --internalurl "http://$SERVICE_HOST:\$(compute_port)s/v2/\$(tenant_id)s"
     fi
     # Nova needs ResellerAdmin role to download images when accessing
     # swift through the s3 api. The admin role in swift allows a user
@@ -197,9 +197,9 @@
         keystone endpoint-create \
             --region RegionOne \
             --service_id $GLANCE_SERVICE \
-            --publicurl "http://$SERVICE_HOST:9292/v1" \
-            --adminurl "http://$SERVICE_HOST:9292/v1" \
-            --internalurl "http://$SERVICE_HOST:9292/v1"
+            --publicurl "http://$SERVICE_HOST:9292" \
+            --adminurl "http://$SERVICE_HOST:9292" \
+            --internalurl "http://$SERVICE_HOST:9292"
     fi
 fi
 
@@ -223,7 +223,7 @@
             --region RegionOne \
             --service_id $SWIFT_SERVICE \
             --publicurl "http://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s" \
-            --adminurl "http://$SERVICE_HOST:8080/v1" \
+            --adminurl "http://$SERVICE_HOST:8080" \
             --internalurl "http://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s"
     fi
 fi
diff --git a/functions b/functions
index c109eae..664cfa0 100644
--- a/functions
+++ b/functions
@@ -536,7 +536,11 @@
     if [[ -z "$os_PACKAGE" ]]; then
         GetOSVersion
     fi
+
     if [[ "$os_PACKAGE" = "deb" ]]; then
+        [[ "$NO_UPDATE_REPOS" = "True" ]] || apt_get update
+        NO_UPDATE_REPOS=True
+
         apt_get install "$@"
     else
         yum_install "$@"
@@ -544,6 +548,26 @@
 }
 
 
+# Distro-agnostic function to tell if a package is installed
+# is_package_installed package [package ...]
+function is_package_installed() {
+    if [[ -z "$@" ]]; then
+        return 1
+    fi
+
+    if [[ -z "$os_PACKAGE" ]]; then
+        GetOSVersion
+    fi
+    if [[ "$os_PACKAGE" = "deb" ]]; then
+        dpkg -l "$@" > /dev/null
+        return $?
+    else
+        rpm --quiet -q "$@"
+        return $?
+    fi
+}
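+# Example usage (this exact pattern appears in ``stack.sh``):
+#   is_package_installed sudo || install_package sudo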
+
+
 # Test if the named environment variable is set and not zero length
 # is_set env-var
 function is_set() {
diff --git a/lib/keystone b/lib/keystone
new file mode 100644
index 0000000..a0cc601
--- /dev/null
+++ b/lib/keystone
@@ -0,0 +1,172 @@
+# lib/keystone
+# Functions to control the configuration and operation of **Keystone**
+
+# Dependencies:
+# ``functions`` file
+# ``BASE_SQL_CONN``
+# ``SERVICE_HOST``
+# ``SERVICE_TOKEN``
+# ``S3_SERVICE_PORT`` (template backend only)
+
+
+# ``stack.sh`` calls the entry points in this order:
+#
+# install_keystone
+# configure_keystone
+# init_keystone
+# start_keystone
+# stop_keystone
+# cleanup_keystone
+
+# Print the commands being run so that we can see the command that triggers
+# an error.  It is also useful for following along as the install occurs.
+set -o xtrace
+
+
+# Defaults
+# --------
+
+# <define global variables here that belong to this project>
+
+# Set up default directories
+KEYSTONE_DIR=$DEST/keystone
+KEYSTONE_CONF_DIR=${KEYSTONE_CONF_DIR:-/etc/keystone}
+KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf
+
+KEYSTONECLIENT_DIR=$DEST/python-keystoneclient
+
+# Select the backend for Keystone's service catalog
+KEYSTONE_CATALOG_BACKEND=${KEYSTONE_CATALOG_BACKEND:-template}
+KEYSTONE_CATALOG=$KEYSTONE_CONF_DIR/default_catalog.templates
+
+# Set Keystone interface configuration
+KEYSTONE_API_PORT=${KEYSTONE_API_PORT:-5000}
+KEYSTONE_AUTH_HOST=${KEYSTONE_AUTH_HOST:-$SERVICE_HOST}
+KEYSTONE_AUTH_PORT=${KEYSTONE_AUTH_PORT:-35357}
+KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-http}
+KEYSTONE_SERVICE_HOST=${KEYSTONE_SERVICE_HOST:-$SERVICE_HOST}
+KEYSTONE_SERVICE_PORT=${KEYSTONE_SERVICE_PORT:-5000}
+KEYSTONE_SERVICE_PROTOCOL=${KEYSTONE_SERVICE_PROTOCOL:-http}
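+# Any of the above defaults can be overridden in ``localrc``, e.g.
+# (illustrative values):
+#   KEYSTONE_CATALOG_BACKEND=sql
+#   KEYSTONE_AUTH_PROTOCOL=https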
+
+
+# Entry Points
+# ------------
+
+# cleanup_keystone() - Remove residual data files, anything left over from previous
+# runs that a clean run would need to clean up
+function cleanup_keystone() {
+    # kill instances (nova)
+    # delete image files (glance)
+    # This function intentionally left blank
+    :
+}
+
+# configure_keystoneclient() - Set config files, create data dirs, etc
+function configure_keystoneclient() {
+    setup_develop $KEYSTONECLIENT_DIR
+}
+
+# configure_keystone() - Set config files, create data dirs, etc
+function configure_keystone() {
+    setup_develop $KEYSTONE_DIR
+
+    if [[ ! -d $KEYSTONE_CONF_DIR ]]; then
+        sudo mkdir -p $KEYSTONE_CONF_DIR
+        sudo chown `whoami` $KEYSTONE_CONF_DIR
+    fi
+
+    if [[ "$KEYSTONE_CONF_DIR" != "$KEYSTONE_DIR/etc" ]]; then
+        cp -p $KEYSTONE_DIR/etc/keystone.conf.sample $KEYSTONE_CONF
+        cp -p $KEYSTONE_DIR/etc/policy.json $KEYSTONE_CONF_DIR
+    fi
+
+    # Rewrite stock ``keystone.conf``
+    iniset $KEYSTONE_CONF DEFAULT admin_token "$SERVICE_TOKEN"
+    iniset $KEYSTONE_CONF sql connection "$BASE_SQL_CONN/keystone?charset=utf8"
+    iniset $KEYSTONE_CONF ec2 driver "keystone.contrib.ec2.backends.sql.Ec2"
+    sed -e "
+        /^pipeline.*ec2_extension crud_/s|ec2_extension crud_extension|ec2_extension s3_extension crud_extension|;
+    " -i $KEYSTONE_CONF
+
+    # Append the S3 bits
+    iniset $KEYSTONE_CONF filter:s3_extension paste.filter_factory "keystone.contrib.s3:S3Extension.factory"
+
+    if [[ "$KEYSTONE_CATALOG_BACKEND" = "sql" ]]; then
+        # Configure ``keystone.conf`` to use sql
+        iniset $KEYSTONE_CONF catalog driver keystone.catalog.backends.sql.Catalog
+        inicomment $KEYSTONE_CONF catalog template_file
+    else
+        cp -p $FILES/default_catalog.templates $KEYSTONE_CATALOG
+
+        # Add swift endpoints to service catalog if swift is enabled
+        if is_service_enabled swift; then
+            echo "catalog.RegionOne.object_store.publicURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG
+            echo "catalog.RegionOne.object_store.adminURL = http://%SERVICE_HOST%:8080/" >> $KEYSTONE_CATALOG
+            echo "catalog.RegionOne.object_store.internalURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG
+            echo "catalog.RegionOne.object_store.name = Swift Service" >> $KEYSTONE_CATALOG
+        fi
+
+        # Add quantum endpoints to service catalog if quantum is enabled
+        if is_service_enabled quantum; then
+            echo "catalog.RegionOne.network.publicURL = http://%SERVICE_HOST%:$Q_PORT/" >> $KEYSTONE_CATALOG
+            echo "catalog.RegionOne.network.adminURL = http://%SERVICE_HOST%:$Q_PORT/" >> $KEYSTONE_CATALOG
+            echo "catalog.RegionOne.network.internalURL = http://%SERVICE_HOST%:$Q_PORT/" >> $KEYSTONE_CATALOG
+            echo "catalog.RegionOne.network.name = Quantum Service" >> $KEYSTONE_CATALOG
+        fi
+
+        sudo sed -e "
+            s,%SERVICE_HOST%,$SERVICE_HOST,g;
+            s,%S3_SERVICE_PORT%,$S3_SERVICE_PORT,g;
+        " -i $KEYSTONE_CATALOG
+
+        # Configure ``keystone.conf`` to use templates
+        iniset $KEYSTONE_CONF catalog driver "keystone.catalog.backends.templated.TemplatedCatalog"
+        iniset $KEYSTONE_CONF catalog template_file "$KEYSTONE_CATALOG"
+    fi
+
+    # Set up logging
+    LOGGING_ROOT="devel"
+    if [ "$SYSLOG" != "False" ]; then
+        LOGGING_ROOT="$LOGGING_ROOT,production"
+    fi
+    KEYSTONE_LOG_CONFIG="--log-config $KEYSTONE_CONF_DIR/logging.conf"
+    cp $KEYSTONE_DIR/etc/logging.conf.sample $KEYSTONE_CONF_DIR/logging.conf
+    iniset $KEYSTONE_CONF_DIR/logging.conf logger_root level "DEBUG"
+    iniset $KEYSTONE_CONF_DIR/logging.conf logger_root handlers "$LOGGING_ROOT"
+
+}
+
+# init_keystone() - Initialize databases, etc.
+function init_keystone() {
+    # (Re)create keystone database
+    mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS keystone;'
+    mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE keystone CHARACTER SET utf8;'
+
+    # Initialize keystone database
+    $KEYSTONE_DIR/bin/keystone-manage db_sync
+
+    # Set up certificates
+    $KEYSTONE_DIR/bin/keystone-manage pki_setup
+}
+
+# install_keystoneclient() - Collect source and prepare
+function install_keystoneclient() {
+    git_clone $KEYSTONECLIENT_REPO $KEYSTONECLIENT_DIR $KEYSTONECLIENT_BRANCH
+}
+
+# install_keystone() - Collect source and prepare
+function install_keystone() {
+    git_clone $KEYSTONE_REPO $KEYSTONE_DIR $KEYSTONE_BRANCH
+}
+
+# start_keystone() - Start running processes, including screen
+function start_keystone() {
+    # Start Keystone in a screen window
+    screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d --debug"
+}
+
+# stop_keystone() - Stop running processes
+function stop_keystone() {
+    # Kill the Keystone screen window
+    screen -S $SCREEN_NAME -p key -X kill
+}
diff --git a/lib/quantum b/lib/quantum
new file mode 100644
index 0000000..1025d2b
--- /dev/null
+++ b/lib/quantum
@@ -0,0 +1,37 @@
+# lib/quantum
+# Functions specific to Quantum
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+# Configures keystone integration for quantum service and agents
+function quantum_setup_keystone() {
+    local conf_file=$1
+    local section=$2
+    local use_auth_url=$3
+    if [[ -n $use_auth_url ]]; then
+        iniset $conf_file $section auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0"
+    else
+        iniset $conf_file $section auth_host $KEYSTONE_SERVICE_HOST
+        iniset $conf_file $section auth_port $KEYSTONE_AUTH_PORT
+        iniset $conf_file $section auth_protocol $KEYSTONE_SERVICE_PROTOCOL
+    fi
+    iniset $conf_file $section admin_tenant_name $SERVICE_TENANT_NAME
+    iniset $conf_file $section admin_user $Q_ADMIN_USERNAME
+    iniset $conf_file $section admin_password $SERVICE_PASSWORD
+}
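+# Example: configure a paste pipeline directly, or pass a non-empty third
+# argument for agents that expect a single auth_url setting:
+#   quantum_setup_keystone $Q_API_PASTE_FILE filter:authtoken
+#   quantum_setup_keystone $Q_DHCP_CONF_FILE DEFAULT set_auth_url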
+
+function quantum_setup_ovs_bridge() {
+    local bridge=$1
+    for PORT in `sudo ovs-vsctl --no-wait list-ports $bridge`; do
+        if [[ "$PORT" =~ tap* ]]; then echo `sudo ip link delete $PORT` > /dev/null; fi
+        sudo ovs-vsctl --no-wait del-port $bridge $PORT
+    done
+    sudo ovs-vsctl --no-wait -- --if-exists del-br $bridge
+    sudo ovs-vsctl --no-wait add-br $bridge
+    sudo ovs-vsctl --no-wait br-set-external-id $bridge bridge-id $bridge
+}
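+# Example: (re)create the integration bridge used by the OVS agent:
+#   quantum_setup_ovs_bridge br-int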
+
+# Restore xtrace
+$XTRACE
diff --git a/stack.sh b/stack.sh
index 617acf2..c352a27 100755
--- a/stack.sh
+++ b/stack.sh
@@ -182,11 +182,7 @@
     sleep $ROOTSLEEP
 
     # Give the non-root user the ability to run as **root** via ``sudo``
-    if [[ "$os_PACKAGE" = "deb" ]]; then
-        dpkg -l sudo || apt_get update && install_package sudo
-    else
-        rpm -qa | grep sudo || install_package sudo
-    fi
+    is_package_installed sudo || install_package sudo
     if ! getent group stack >/dev/null; then
         echo "Creating a group called stack"
         groupadd stack
@@ -215,12 +211,7 @@
     exit 1
 else
     # We're not **root**, make sure ``sudo`` is available
-    if [[ "$os_PACKAGE" = "deb" ]]; then
-        CHECK_SUDO_CMD="dpkg -l sudo"
-    else
-        CHECK_SUDO_CMD="rpm -q sudo"
-    fi
-    $CHECK_SUDO_CMD || die "Sudo is required.  Re-run stack.sh as root ONE TIME ONLY to set up sudo."
+    is_package_installed sudo || die "Sudo is required.  Re-run stack.sh as root ONE TIME ONLY to set up sudo."
 
     # UEC images ``/etc/sudoers`` does not have a ``#includedir``, add one
     sudo grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers ||
@@ -262,60 +253,17 @@
 sudo chown `whoami` $DATA_DIR
 
 
-# Configure Projects
-# ==================
-
-# Get project function libraries
-source $TOP_DIR/lib/cinder
-source $TOP_DIR/lib/n-vol
-source $TOP_DIR/lib/ceilometer
-source $TOP_DIR/lib/heat
-
-# Set the destination directories for OpenStack projects
-NOVA_DIR=$DEST/nova
-HORIZON_DIR=$DEST/horizon
-GLANCE_DIR=$DEST/glance
-GLANCECLIENT_DIR=$DEST/python-glanceclient
-KEYSTONE_DIR=$DEST/keystone
-NOVACLIENT_DIR=$DEST/python-novaclient
-KEYSTONECLIENT_DIR=$DEST/python-keystoneclient
-OPENSTACKCLIENT_DIR=$DEST/python-openstackclient
-NOVNC_DIR=$DEST/noVNC
-SWIFT_DIR=$DEST/swift
-SWIFT3_DIR=$DEST/swift3
-SWIFTCLIENT_DIR=$DEST/python-swiftclient
-QUANTUM_DIR=$DEST/quantum
-QUANTUM_CLIENT_DIR=$DEST/python-quantumclient
-
-# Default Quantum Plugin
-Q_PLUGIN=${Q_PLUGIN:-openvswitch}
-# Default Quantum Port
-Q_PORT=${Q_PORT:-9696}
-# Default Quantum Host
-Q_HOST=${Q_HOST:-localhost}
-# Which Quantum API nova should use
-# Default admin username
-Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-quantum}
-# Default auth strategy
-Q_AUTH_STRATEGY=${Q_AUTH_STRATEGY:-keystone}
-# Use namespace or not
-Q_USE_NAMESPACE=${Q_USE_NAMESPACE:-True}
-
-# Name of the LVM volume group to use/create for iscsi volumes
-VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes}
-VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-}
-INSTANCE_NAME_PREFIX=${INSTANCE_NAME_PREFIX:-instance-}
-
-# Nova supports pluggable schedulers.  The default ``FilterScheduler``
-# should work in most cases.
-SCHEDULER=${SCHEDULER:-nova.scheduler.filter_scheduler.FilterScheduler}
+# Common Configuration
+# ====================
 
 # Set fixed and floating range here so we can make sure not to use addresses
 # from either range when attempting to guess the IP to use for the host.
 # Note that setting FIXED_RANGE may be necessary when running DevStack
-# in an OpenStack cloud that uses eith of these address ranges internally.
-FIXED_RANGE=${FIXED_RANGE:-10.0.0.0/24}
+# in an OpenStack cloud that uses either of these address ranges internally.
 FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.224/28}
+FIXED_RANGE=${FIXED_RANGE:-10.0.0.0/24}
+FIXED_NETWORK_SIZE=${FIXED_NETWORK_SIZE:-256}
+NETWORK_GATEWAY=${NETWORK_GATEWAY:-10.0.0.1}
 
 # Find the interface used for the default route
 HOST_IP_IFACE=${HOST_IP_IFACE:-$(ip route | sed -n '/^default/{ s/.*dev \(\w\+\)\s\+.*/\1/; p; }')}
@@ -356,6 +304,57 @@
 # Service startup timeout
 SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60}
 
+
+# Configure Projects
+# ==================
+
+# Get project function libraries
+source $TOP_DIR/lib/keystone
+source $TOP_DIR/lib/cinder
+source $TOP_DIR/lib/n-vol
+source $TOP_DIR/lib/ceilometer
+source $TOP_DIR/lib/heat
+source $TOP_DIR/lib/quantum
+
+# Set the destination directories for OpenStack projects
+NOVA_DIR=$DEST/nova
+HORIZON_DIR=$DEST/horizon
+GLANCE_DIR=$DEST/glance
+GLANCECLIENT_DIR=$DEST/python-glanceclient
+NOVACLIENT_DIR=$DEST/python-novaclient
+OPENSTACKCLIENT_DIR=$DEST/python-openstackclient
+NOVNC_DIR=$DEST/noVNC
+SWIFT_DIR=$DEST/swift
+SWIFT3_DIR=$DEST/swift3
+SWIFTCLIENT_DIR=$DEST/python-swiftclient
+QUANTUM_DIR=$DEST/quantum
+QUANTUM_CLIENT_DIR=$DEST/python-quantumclient
+
+# Default Quantum Plugin
+Q_PLUGIN=${Q_PLUGIN:-openvswitch}
+# Default Quantum Port
+Q_PORT=${Q_PORT:-9696}
+# Default Quantum Host
+Q_HOST=${Q_HOST:-localhost}
+# Which Quantum API nova should use
+# Default admin username
+Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-quantum}
+# Default auth strategy
+Q_AUTH_STRATEGY=${Q_AUTH_STRATEGY:-keystone}
+# Use namespace or not
+Q_USE_NAMESPACE=${Q_USE_NAMESPACE:-True}
+# Meta data IP
+Q_META_DATA_IP=${Q_META_DATA_IP:-}
+
+# Name of the LVM volume group to use/create for iscsi volumes
+VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes}
+VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-}
+INSTANCE_NAME_PREFIX=${INSTANCE_NAME_PREFIX:-instance-}
+
+# Nova supports pluggable schedulers.  The default ``FilterScheduler``
+# should work in most cases.
+SCHEDULER=${SCHEDULER:-nova.scheduler.filter_scheduler.FilterScheduler}
+
 # Generic helper to configure passwords
 function read_password {
     set +o xtrace
@@ -416,8 +415,6 @@
 fi
 
 PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-$PUBLIC_INTERFACE_DEFAULT}
-FIXED_NETWORK_SIZE=${FIXED_NETWORK_SIZE:-256}
-NETWORK_GATEWAY=${NETWORK_GATEWAY:-10.0.0.1}
 NET_MAN=${NET_MAN:-FlatDHCPManager}
 EC2_DMZ_HOST=${EC2_DMZ_HOST:-$SERVICE_HOST}
 FLAT_NETWORK_BRIDGE=${FLAT_NETWORK_BRIDGE:-$FLAT_NETWORK_BRIDGE_DEFAULT}
@@ -444,23 +441,28 @@
 # fail.
 #
 # If you are running on a single node and don't need to access the VMs from
-# devices other than that node, you can set the flat interface to the same
-# value as ``FLAT_NETWORK_BRIDGE``.  This will stop the network hiccup from
-# occurring.
-FLAT_INTERFACE=${FLAT_INTERFACE:-$GUEST_INTERFACE_DEFAULT}
+# devices other than that node, you can set FLAT_INTERFACE= (empty).
+# This will stop nova from bridging any interfaces into FLAT_NETWORK_BRIDGE.
+FLAT_INTERFACE=${FLAT_INTERFACE-$GUEST_INTERFACE_DEFAULT}
 
 ## FIXME(ja): should/can we check that FLAT_INTERFACE is sane?
 
 # Using Quantum networking:
 #
-# Make sure that quantum is enabled in ENABLED_SERVICES.  If it is the network
-# manager will be set to the QuantumManager.  If you want to run Quantum on
-# this host, make sure that q-svc is also in ENABLED_SERVICES.
-#
-# If you're planning to use the Quantum openvswitch plugin, set Q_PLUGIN to
-# "openvswitch" and make sure the q-agt service is enabled in
+# Make sure that quantum is enabled in ENABLED_SERVICES.  If you want
+# to run Quantum on this host, make sure that q-svc is also in
 # ENABLED_SERVICES.
 #
+# If you're planning to use the Quantum openvswitch plugin, set
+# Q_PLUGIN to "openvswitch" and make sure the q-agt service is enabled
+# in ENABLED_SERVICES.  If you're planning to use the Quantum
+# linuxbridge plugin, set Q_PLUGIN to "linuxbridge" and make sure the
+# q-agt service is enabled in ENABLED_SERVICES.
+#
+# See "Quantum Network Configuration" below for additional variables
+# that must be set in localrc for connectivity across hosts with
+# Quantum.
+#
 # With Quantum networking the NET_MAN variable is ignored.
 
 
@@ -560,14 +562,6 @@
 # Set the tenant for service accounts in Keystone
 SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service}
 
-# Set Keystone interface configuration
-KEYSTONE_API_PORT=${KEYSTONE_API_PORT:-5000}
-KEYSTONE_AUTH_HOST=${KEYSTONE_AUTH_HOST:-$SERVICE_HOST}
-KEYSTONE_AUTH_PORT=${KEYSTONE_AUTH_PORT:-35357}
-KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-http}
-KEYSTONE_SERVICE_HOST=${KEYSTONE_SERVICE_HOST:-$SERVICE_HOST}
-KEYSTONE_SERVICE_PORT=${KEYSTONE_SERVICE_PORT:-5000}
-KEYSTONE_SERVICE_PROTOCOL=${KEYSTONE_SERVICE_PROTOCOL:-http}
 
 
 # Horizon
@@ -651,7 +645,6 @@
 
 # Install package requirements
 if [[ "$os_PACKAGE" = "deb" ]]; then
-    apt_get update
     install_package $(get_packages $FILES/apts)
 else
     install_package $(get_packages $FILES/rpms)
@@ -710,14 +703,6 @@
     install_package mysql-server
 fi
 
-if is_service_enabled quantum; then
-    if [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
-        # Install deps
-        # FIXME add to files/apts/quantum, but don't install if not needed!
-        install_package python-configobj
-    fi
-fi
-
 if is_service_enabled horizon; then
     if [[ "$os_PACKAGE" = "deb" ]]; then
         # Install apache2, which is NOPRIME'd
@@ -792,10 +777,11 @@
 # Check Out Source
 # ----------------
 
+install_keystoneclient
+
 git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH
 
 # Check out the client libs that are used most
-git_clone $KEYSTONECLIENT_REPO $KEYSTONECLIENT_DIR $KEYSTONECLIENT_BRANCH
 git_clone $NOVACLIENT_REPO $NOVACLIENT_DIR $NOVACLIENT_BRANCH
 git_clone $OPENSTACKCLIENT_REPO $OPENSTACKCLIENT_DIR $OPENSTACKCLIENT_BRANCH
 git_clone $GLANCECLIENT_REPO $GLANCECLIENT_DIR $GLANCECLIENT_BRANCH
@@ -803,7 +789,7 @@
 # glance, swift middleware and nova api needs keystone middleware
 if is_service_enabled key g-api n-api swift; then
     # unified auth system (manages accounts/tokens)
-    git_clone $KEYSTONE_REPO $KEYSTONE_DIR $KEYSTONE_BRANCH
+    install_keystone
 fi
 if is_service_enabled swift; then
     # storage service
@@ -850,11 +836,11 @@
 
 # Set up our checkouts so they are installed into python path
 # allowing ``import nova`` or ``import glance.client``
-setup_develop $KEYSTONECLIENT_DIR
+configure_keystoneclient
 setup_develop $NOVACLIENT_DIR
 setup_develop $OPENSTACKCLIENT_DIR
 if is_service_enabled key g-api n-api swift; then
-    setup_develop $KEYSTONE_DIR
+    configure_keystone
 fi
 if is_service_enabled swift; then
     setup_develop $SWIFT_DIR
@@ -985,6 +971,36 @@
 screen -r $SCREEN_NAME -X hardstatus alwayslastline "$SCREEN_HARDSTATUS"
 
 
+# Keystone
+# --------
+
+if is_service_enabled key; then
+    configure_keystone
+    init_keystone
+    start_keystone
+    echo "Waiting for keystone to start..."
+    if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= curl -s $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_API_PORT/v2.0/ >/dev/null; do sleep 1; done"; then
+      echo "keystone did not start"
+      exit 1
+    fi
+
+    # ``keystone_data.sh`` creates services, admin and demo users, and roles.
+    SERVICE_ENDPOINT=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0
+
+    ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD \
+    SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT SERVICE_HOST=$SERVICE_HOST \
+    S3_SERVICE_PORT=$S3_SERVICE_PORT KEYSTONE_CATALOG_BACKEND=$KEYSTONE_CATALOG_BACKEND \
+    DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES HEAT_API_PORT=$HEAT_API_PORT \
+        bash -x $FILES/keystone_data.sh
+
+    # Set up auth creds now that keystone is bootstrapped
+    export OS_AUTH_URL=$SERVICE_ENDPOINT
+    export OS_TENANT_NAME=admin
+    export OS_USERNAME=admin
+    export OS_PASSWORD=$ADMIN_PASSWORD
+fi
+
+
 # Horizon
 # -------
 
@@ -1137,6 +1153,72 @@
 # -------
 
 if is_service_enabled quantum; then
+    #
+    # Quantum Network Configuration
+    #
+    # The following variables control the Quantum openvswitch and
+    # linuxbridge plugins' allocation of tenant networks and
+    # availability of provider networks. If these are not configured
+    # in localrc, tenant networks will be local to the host (with no
+    # remote connectivity), and no physical resources will be
+    # available for the allocation of provider networks.
+
+    # To use GRE tunnels for tenant networks, set to True in
+    # localrc. GRE tunnels are only supported by the openvswitch
+    # plugin, and currently only on Ubuntu.
+    ENABLE_TENANT_TUNNELS=${ENABLE_TENANT_TUNNELS:-False}
+
+    # If using GRE tunnels for tenant networks, specify the range of
+    # tunnel IDs from which tenant networks are allocated. Can be
+    # overridden in localrc if necessary.
+    TENANT_TUNNEL_RANGES=${TENANT_TUNNEL_RANGES:-1:1000}
+
+    # To use VLANs for tenant networks, set to True in localrc. VLANs
+    # are supported by the openvswitch and linuxbridge plugins, each
+    # requiring additional configuration described below.
+    ENABLE_TENANT_VLANS=${ENABLE_TENANT_VLANS:-False}
+
+    # If using VLANs for tenant networks, set in localrc to specify
+    # the range of VLAN VIDs from which tenant networks are
+    # allocated. An external network switch must be configured to
+    # trunk these VLANs between hosts for multi-host connectivity.
+    #
+    # Example: TENANT_VLAN_RANGE=1000:1999
+    TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-}
+
+    # If using VLANs for tenant networks, or if using flat or VLAN
+    # provider networks, set in localrc to the name of the physical
+    # network, and also configure OVS_PHYSICAL_BRIDGE for the
+    # openvswitch agent or LB_PHYSICAL_INTERFACE for the linuxbridge
+    # agent, as described below.
+    #
+    # Example: PHYSICAL_NETWORK=default
+    PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-}
+
+    # With the openvswitch plugin, if using VLANs for tenant networks,
+    # or if using flat or VLAN provider networks, set in localrc to
+    # the name of the OVS bridge to use for the physical network. The
+    # bridge will be created if it does not already exist, but a
+    # physical interface must be manually added to the bridge as a
+    # port for external connectivity.
+    #
+    # Example: OVS_PHYSICAL_BRIDGE=br-eth1
+    OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-}
+
+    # With the linuxbridge plugin, if using VLANs for tenant networks,
+    # or if using flat or VLAN provider networks, set in localrc to
+    # the name of the network interface to use for the physical
+    # network.
+    #
+    # Example: LB_PHYSICAL_INTERFACE=eth1
+    LB_PHYSICAL_INTERFACE=${LB_PHYSICAL_INTERFACE:-}
+
+    # With the openvswitch plugin, set to True in localrc to enable
+    # provider GRE tunnels when ENABLE_TENANT_TUNNELS is False.
+    #
+    # Example: OVS_ENABLE_TUNNELING=True
+    OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-$ENABLE_TENANT_TUNNELS}
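+
+    # Putting it together, a multi-host VLAN setup might use the
+    # following localrc settings (illustrative values from the examples
+    # above):
+    #   ENABLE_TENANT_VLANS=True
+    #   TENANT_VLAN_RANGE=1000:1999
+    #   PHYSICAL_NETWORK=default
+    #   OVS_PHYSICAL_BRIDGE=br-eth1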
+
     # Put config files in ``/etc/quantum`` for everyone to find
     if [[ ! -d /etc/quantum ]]; then
         sudo mkdir -p /etc/quantum
@@ -1163,23 +1245,7 @@
     Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME
     cp $QUANTUM_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE
 
-    sudo sed -i -e "s/^sql_connection =.*$/sql_connection = mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/$Q_DB_NAME?charset=utf8/g" /$Q_PLUGIN_CONF_FILE
-
-    OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-True}
-    if [[ "$Q_PLUGIN" = "openvswitch" && "$OVS_ENABLE_TUNNELING" = "True" ]]; then
-        OVS_VERSION=`ovs-vsctl --version | head -n 1 | awk '{print $4;}'`
-        if [ $OVS_VERSION \< "1.4" ] && ! is_service_enabled q-svc ; then
-            echo "You are running OVS version $OVS_VERSION."
-            echo "OVS 1.4+ is required for tunneling between multiple hosts."
-            exit 1
-        fi
-        if [[ "$OVS_DEFAULT_BRIDGE" = "" ]]; then
-            iniset /$Q_PLUGIN_CONF_FILE OVS network_vlan_ranges ""
-        else
-            iniset /$Q_PLUGIN_CONF_FILE OVS network_vlan_ranges default
-        fi
-        iniset /$Q_PLUGIN_CONF_FILE OVS tunnel_id_ranges 1:1000
-    fi
+    iniset /$Q_PLUGIN_CONF_FILE DATABASE sql_connection mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/$Q_DB_NAME?charset=utf8
 
     Q_CONF_FILE=/etc/quantum/quantum.conf
     cp $QUANTUM_DIR/etc/quantum.conf $Q_CONF_FILE
@@ -1205,45 +1271,103 @@
     iniset $Q_CONF_FILE DEFAULT core_plugin $Q_PLUGIN_CLASS
 
     iniset $Q_CONF_FILE DEFAULT auth_strategy $Q_AUTH_STRATEGY
-    iniset $Q_API_PASTE_FILE filter:authtoken auth_host $KEYSTONE_SERVICE_HOST
-    iniset $Q_API_PASTE_FILE filter:authtoken auth_port $KEYSTONE_AUTH_PORT
-    iniset $Q_API_PASTE_FILE filter:authtoken auth_protocol $KEYSTONE_SERVICE_PROTOCOL
-    iniset $Q_API_PASTE_FILE filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME
-    iniset $Q_API_PASTE_FILE filter:authtoken admin_user $Q_ADMIN_USERNAME
-    iniset $Q_API_PASTE_FILE filter:authtoken admin_password $SERVICE_PASSWORD
+    quantum_setup_keystone $Q_API_PASTE_FILE filter:authtoken
+
+    # Configure plugin
+    if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
+        if [[ "$ENABLE_TENANT_TUNNELS" = "True" ]]; then
+            iniset /$Q_PLUGIN_CONF_FILE OVS tenant_network_type gre
+            iniset /$Q_PLUGIN_CONF_FILE OVS tunnel_id_ranges $TENANT_TUNNEL_RANGES
+        elif [[ "$ENABLE_TENANT_VLANS" = "True" ]]; then
+            iniset /$Q_PLUGIN_CONF_FILE OVS tenant_network_type vlan
+        else
+            echo "WARNING - The openvswitch plugin is using local tenant networks, with no connectivity between hosts."
+        fi
+
+        # Override OVS_VLAN_RANGES and OVS_BRIDGE_MAPPINGS in localrc
+        # for more complex physical network configurations.
+        if [[ "$OVS_VLAN_RANGES" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]]; then
+            OVS_VLAN_RANGES=$PHYSICAL_NETWORK
+            if [[ "$TENANT_VLAN_RANGE" != "" ]]; then
+                OVS_VLAN_RANGES=$OVS_VLAN_RANGES:$TENANT_VLAN_RANGE
+            fi
+        fi
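+        # e.g. PHYSICAL_NETWORK=default with TENANT_VLAN_RANGE=1000:1999
+        # yields OVS_VLAN_RANGES=default:1000:1999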
+        if [[ "$OVS_VLAN_RANGES" != "" ]]; then
+            iniset /$Q_PLUGIN_CONF_FILE OVS network_vlan_ranges $OVS_VLAN_RANGES
+        fi
+
+        # Enable tunnel networks if selected
+        if [[ $OVS_ENABLE_TUNNELING = "True" ]]; then
+            iniset /$Q_PLUGIN_CONF_FILE OVS enable_tunneling True
+        fi
+    elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
+        if [[ "$ENABLE_TENANT_VLANS" = "True" ]]; then
+            iniset /$Q_PLUGIN_CONF_FILE VLANS tenant_network_type vlan
+        else
+            echo "WARNING - The linuxbridge plugin is using local tenant networks, with no connectivity between hosts."
+        fi
+
+        # Override LB_VLAN_RANGES and LB_INTERFACE_MAPPINGS in localrc
+        # for more complex physical network configurations.
+        if [[ "$LB_VLAN_RANGES" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]]; then
+            LB_VLAN_RANGES=$PHYSICAL_NETWORK
+            if [[ "$TENANT_VLAN_RANGE" != "" ]]; then
+                LB_VLAN_RANGES=$LB_VLAN_RANGES:$TENANT_VLAN_RANGE
+            fi
+        fi
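+        # e.g. PHYSICAL_NETWORK=default with TENANT_VLAN_RANGE=1000:1999
+        # yields LB_VLAN_RANGES=default:1000:1999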
+        if [[ "$LB_VLAN_RANGES" != "" ]]; then
+            iniset /$Q_PLUGIN_CONF_FILE VLANS network_vlan_ranges $LB_VLAN_RANGES
+        fi
+    fi
 fi
 
 # Quantum agent (for compute nodes)
 if is_service_enabled q-agt; then
+    # Configure agent for plugin
     if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
         # Set up integration bridge
         OVS_BRIDGE=${OVS_BRIDGE:-br-int}
-        for PORT in `sudo ovs-vsctl --no-wait list-ports $OVS_BRIDGE`; do
-            if [[ "$PORT" =~ tap* ]]; then echo `sudo ip link delete $PORT` > /dev/null; fi
-            sudo ovs-vsctl --no-wait del-port $OVS_BRIDGE $PORT
-        done
-        sudo ovs-vsctl --no-wait -- --if-exists del-br $OVS_BRIDGE
-        sudo ovs-vsctl --no-wait add-br $OVS_BRIDGE
-        sudo ovs-vsctl --no-wait br-set-external-id $OVS_BRIDGE bridge-id br-int
-        if [[ "$OVS_ENABLE_TUNNELING" == "True" ]]; then
+        quantum_setup_ovs_bridge $OVS_BRIDGE
+
+        # Set up agent for tunneling
+        if [[ "$OVS_ENABLE_TUNNELING" = "True" ]]; then
+            # Verify tunnels are supported
+            # REVISIT - also check kernel module support for GRE and patch ports
+            OVS_VERSION=`ovs-vsctl --version | head -n 1 | awk '{print $4;}'`
+            if [ $OVS_VERSION \< "1.4" ] && ! is_service_enabled q-svc ; then
+                echo "You are running OVS version $OVS_VERSION."
+                echo "OVS 1.4+ is required for tunneling between multiple hosts."
+                exit 1
+            fi
+            iniset /$Q_PLUGIN_CONF_FILE OVS enable_tunneling True
             iniset /$Q_PLUGIN_CONF_FILE OVS local_ip $HOST_IP
-        else
-            # Need bridge if not tunneling
-            OVS_DEFAULT_BRIDGE=${OVS_DEFAULT_BRIDGE:-br-$GUEST_INTERFACE_DEFAULT}
         fi
-        if [[ "$OVS_DEFAULT_BRIDGE" = "" ]]; then
-            iniset /$Q_PLUGIN_CONF_FILE OVS bridge_mappings ""
-        else
+
+        # Set up physical network bridge mappings.  Override
+        # OVS_VLAN_RANGES and OVS_BRIDGE_MAPPINGS in localrc for more
+        # complex physical network configurations.
+        if [[ "$OVS_BRIDGE_MAPPINGS" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$OVS_PHYSICAL_BRIDGE" != "" ]]; then
+            OVS_BRIDGE_MAPPINGS=$PHYSICAL_NETWORK:$OVS_PHYSICAL_BRIDGE
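+            # e.g. PHYSICAL_NETWORK=default with OVS_PHYSICAL_BRIDGE=br-eth1
+            # yields OVS_BRIDGE_MAPPINGS=default:br-eth1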
+
             # Configure bridge manually with physical interface as port for multi-node
-            sudo ovs-vsctl --no-wait -- --may-exist add-br $OVS_DEFAULT_BRIDGE
-            iniset /$Q_PLUGIN_CONF_FILE OVS bridge_mappings default:$OVS_DEFAULT_BRIDGE
+            sudo ovs-vsctl --no-wait -- --may-exist add-br $OVS_PHYSICAL_BRIDGE
         fi
+        if [[ "$OVS_BRIDGE_MAPPINGS" != "" ]]; then
+            iniset /$Q_PLUGIN_CONF_FILE OVS bridge_mappings $OVS_BRIDGE_MAPPINGS
+        fi
+
         AGENT_BINARY="$QUANTUM_DIR/quantum/plugins/openvswitch/agent/ovs_quantum_agent.py"
     elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
-       # Start up the quantum <-> linuxbridge agent
-       # set the default network interface
-       QUANTUM_LB_PRIVATE_INTERFACE=${QUANTUM_LB_PRIVATE_INTERFACE:-$GUEST_INTERFACE_DEFAULT}
-       iniset /$Q_PLUGIN_CONF_FILE LINUX_BRIDGE physical_interface_mappings default:$QUANTUM_LB_PRIVATE_INTERFACE
+        # Set up physical network interface mappings.  Override
+        # LB_VLAN_RANGES and LB_INTERFACE_MAPPINGS in localrc for more
+        # complex physical network configurations.
+        if [[ "$LB_INTERFACE_MAPPINGS" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$LB_PHYSICAL_INTERFACE" != "" ]]; then
+            LB_INTERFACE_MAPPINGS=$PHYSICAL_NETWORK:$LB_PHYSICAL_INTERFACE
+        fi
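+        # e.g. PHYSICAL_NETWORK=default with LB_PHYSICAL_INTERFACE=eth1
+        # yields LB_INTERFACE_MAPPINGS=default:eth1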
+        if [[ "$LB_INTERFACE_MAPPINGS" != "" ]]; then
+            iniset /$Q_PLUGIN_CONF_FILE LINUX_BRIDGE physical_interface_mappings $LB_INTERFACE_MAPPINGS
+        fi
+
        AGENT_BINARY="$QUANTUM_DIR/quantum/plugins/linuxbridge/agent/linuxbridge_quantum_agent.py"
     fi
 fi
@@ -1264,10 +1388,7 @@
 
     # Update database
     iniset $Q_DHCP_CONF_FILE DEFAULT db_connection "mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/$Q_DB_NAME?charset=utf8"
-    iniset $Q_DHCP_CONF_FILE DEFAULT auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0"
-    iniset $Q_DHCP_CONF_FILE DEFAULT admin_tenant_name $SERVICE_TENANT_NAME
-    iniset $Q_DHCP_CONF_FILE DEFAULT admin_user $Q_ADMIN_USERNAME
-    iniset $Q_DHCP_CONF_FILE DEFAULT admin_password $SERVICE_PASSWORD
+    quantum_setup_keystone $Q_DHCP_CONF_FILE DEFAULT set_auth_url
 
     if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
         iniset $Q_DHCP_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver
@@ -1276,6 +1397,45 @@
     fi
 fi
 
+# Quantum L3
+if is_service_enabled q-l3; then
+    AGENT_L3_BINARY="$QUANTUM_DIR/bin/quantum-l3-agent"
+    PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex}
+    Q_L3_CONF_FILE=/etc/quantum/l3_agent.ini
+
+    cp $QUANTUM_DIR/etc/l3_agent.ini $Q_L3_CONF_FILE
+
+    # Set verbose
+    iniset $Q_L3_CONF_FILE DEFAULT verbose True
+    # Set debug
+    iniset $Q_L3_CONF_FILE DEFAULT debug True
+
+    iniset $Q_L3_CONF_FILE DEFAULT metadata_ip $Q_META_DATA_IP
+    iniset $Q_L3_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE
+    iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE
+
+    quantum_setup_keystone $Q_L3_CONF_FILE DEFAULT set_auth_url
+    if [[ "$Q_PLUGIN" == "openvswitch" ]]; then
+        iniset $Q_L3_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver
+        # Set up external bridge
+        # Create it if it does not exist
+        sudo ovs-vsctl --no-wait -- --may-exist add-br $PUBLIC_BRIDGE
+        sudo ovs-vsctl --no-wait br-set-external-id $PUBLIC_BRIDGE bridge-id $PUBLIC_BRIDGE
+        # remove internal ports
+        for PORT in `sudo ovs-vsctl --no-wait list-ports $PUBLIC_BRIDGE`; do
+            TYPE=$(sudo ovs-vsctl get interface $PORT type)
+            if [[ "$TYPE" == "internal" ]]; then
+                echo `sudo ip link delete $PORT` > /dev/null
+                sudo ovs-vsctl --no-wait del-port $PUBLIC_BRIDGE $PORT
+            fi
+        done
+        # ensure no IP is configured on the public bridge
+        sudo ip addr flush dev $PUBLIC_BRIDGE
+    elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
+        iniset $Q_L3_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver
+    fi
+fi
+
 # Quantum RPC support - must be updated prior to starting any of the services
 if is_service_enabled quantum; then
     iniset $Q_CONF_FILE DEFAULT control_exchange quantum
@@ -1289,16 +1449,6 @@
     fi
 fi
 
-# Start the Quantum services
-screen_it q-svc "cd $QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server --config-file $Q_CONF_FILE --config-file /$Q_PLUGIN_CONF_FILE"
-
-# Start up the quantum agent
-screen_it q-agt "sudo python $AGENT_BINARY --config-file $Q_CONF_FILE --config-file /$Q_PLUGIN_CONF_FILE"
-
-# Start up the quantum agent
-screen_it q-dhcp "sudo python $AGENT_DHCP_BINARY --config-file $Q_CONF_FILE --config-file=$Q_DHCP_CONF_FILE"
-
-
 # Nova
 # ----
 
@@ -1311,28 +1461,23 @@
 
 cp -p $NOVA_DIR/etc/nova/policy.json $NOVA_CONF_DIR
 
-# If Nova ships the new rootwrap filters files, deploy them
-# (owned by root) and add a parameter to ``$NOVA_ROOTWRAP``
-ROOTWRAP_SUDOER_CMD="$NOVA_ROOTWRAP"
-if [[ -d $NOVA_DIR/etc/nova/rootwrap.d ]]; then
-    # Wipe any existing rootwrap.d files first
-    if [[ -d $NOVA_CONF_DIR/rootwrap.d ]]; then
-        sudo rm -rf $NOVA_CONF_DIR/rootwrap.d
-    fi
-    # Deploy filters to /etc/nova/rootwrap.d
-    sudo mkdir -m 755 $NOVA_CONF_DIR/rootwrap.d
-    sudo cp $NOVA_DIR/etc/nova/rootwrap.d/*.filters $NOVA_CONF_DIR/rootwrap.d
-    sudo chown -R root:root $NOVA_CONF_DIR/rootwrap.d
-    sudo chmod 644 $NOVA_CONF_DIR/rootwrap.d/*
-    # Set up rootwrap.conf, pointing to /etc/nova/rootwrap.d
-    sudo cp $NOVA_DIR/etc/nova/rootwrap.conf $NOVA_CONF_DIR/
-    sudo sed -e "s:^filters_path=.*$:filters_path=$NOVA_CONF_DIR/rootwrap.d:" -i $NOVA_CONF_DIR/rootwrap.conf
-    sudo chown root:root $NOVA_CONF_DIR/rootwrap.conf
-    sudo chmod 0644 $NOVA_CONF_DIR/rootwrap.conf
-    # Specify rootwrap.conf as first parameter to nova-rootwrap
-    NOVA_ROOTWRAP="$NOVA_ROOTWRAP $NOVA_CONF_DIR/rootwrap.conf"
-    ROOTWRAP_SUDOER_CMD="$NOVA_ROOTWRAP *"
+# Deploy new rootwrap filter files (owned by root).
+# Wipe any existing rootwrap.d files first
+if [[ -d $NOVA_CONF_DIR/rootwrap.d ]]; then
+    sudo rm -rf $NOVA_CONF_DIR/rootwrap.d
 fi
+# Deploy filters to /etc/nova/rootwrap.d
+sudo mkdir -m 755 $NOVA_CONF_DIR/rootwrap.d
+sudo cp $NOVA_DIR/etc/nova/rootwrap.d/*.filters $NOVA_CONF_DIR/rootwrap.d
+sudo chown -R root:root $NOVA_CONF_DIR/rootwrap.d
+sudo chmod 644 $NOVA_CONF_DIR/rootwrap.d/*
+# Set up rootwrap.conf, pointing to /etc/nova/rootwrap.d
+sudo cp $NOVA_DIR/etc/nova/rootwrap.conf $NOVA_CONF_DIR/
+sudo sed -e "s:^filters_path=.*$:filters_path=$NOVA_CONF_DIR/rootwrap.d:" -i $NOVA_CONF_DIR/rootwrap.conf
+sudo chown root:root $NOVA_CONF_DIR/rootwrap.conf
+sudo chmod 0644 $NOVA_CONF_DIR/rootwrap.conf
+# Specify rootwrap.conf as first parameter to nova-rootwrap
+ROOTWRAP_SUDOER_CMD="$NOVA_ROOTWRAP $NOVA_CONF_DIR/rootwrap.conf *"
 
 # Set up the rootwrap sudoers for nova
 TEMPFILE=`mktemp`
@@ -1794,7 +1939,7 @@
 add_nova_opt "verbose=True"
 add_nova_opt "auth_strategy=keystone"
 add_nova_opt "allow_resize_to_same_host=True"
-add_nova_opt "root_helper=sudo $NOVA_ROOTWRAP"
+add_nova_opt "rootwrap_config=$NOVA_CONF_DIR/rootwrap.conf"
 add_nova_opt "compute_scheduler_driver=$SCHEDULER"
 add_nova_opt "dhcpbridge_flagfile=$NOVA_CONF_DIR/$NOVA_CONF"
 add_nova_opt "fixed_range=$FIXED_RANGE"
@@ -1992,119 +2137,16 @@
     fi
 fi
 
-if is_service_enabled key; then
-    # (Re)create keystone database
-    mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS keystone;'
-    mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE keystone CHARACTER SET utf8;'
-
-    KEYSTONE_CONF_DIR=${KEYSTONE_CONF_DIR:-/etc/keystone}
-    KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf
-    KEYSTONE_CATALOG_BACKEND=${KEYSTONE_CATALOG_BACKEND:-template}
-
-    if [[ ! -d $KEYSTONE_CONF_DIR ]]; then
-        sudo mkdir -p $KEYSTONE_CONF_DIR
-        sudo chown `whoami` $KEYSTONE_CONF_DIR
-    fi
-
-    if [[ "$KEYSTONE_CONF_DIR" != "$KEYSTONE_DIR/etc" ]]; then
-        cp -p $KEYSTONE_DIR/etc/keystone.conf.sample $KEYSTONE_CONF
-        cp -p $KEYSTONE_DIR/etc/policy.json $KEYSTONE_CONF_DIR
-    fi
-
-    # Rewrite stock ``keystone.conf``
-    iniset $KEYSTONE_CONF DEFAULT admin_token "$SERVICE_TOKEN"
-    iniset $KEYSTONE_CONF sql connection "$BASE_SQL_CONN/keystone?charset=utf8"
-    iniset $KEYSTONE_CONF ec2 driver "keystone.contrib.ec2.backends.sql.Ec2"
-    sed -e "
-        /^pipeline.*ec2_extension crud_/s|ec2_extension crud_extension|ec2_extension s3_extension crud_extension|;
-    " -i $KEYSTONE_CONF
-    # Append the S3 bits
-    iniset $KEYSTONE_CONF filter:s3_extension paste.filter_factory "keystone.contrib.s3:S3Extension.factory"
-
-    if [[ "$KEYSTONE_CATALOG_BACKEND" = "sql" ]]; then
-        # Configure ``keystone.conf`` to use sql
-        iniset $KEYSTONE_CONF catalog driver keystone.catalog.backends.sql.Catalog
-        inicomment $KEYSTONE_CONF catalog template_file
-    else
-        KEYSTONE_CATALOG=$KEYSTONE_CONF_DIR/default_catalog.templates
-        cp -p $FILES/default_catalog.templates $KEYSTONE_CATALOG
-
-        # Add swift endpoints to service catalog if swift is enabled
-        if is_service_enabled swift; then
-            echo "catalog.RegionOne.object_store.publicURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG
-            echo "catalog.RegionOne.object_store.adminURL = http://%SERVICE_HOST%:8080/" >> $KEYSTONE_CATALOG
-            echo "catalog.RegionOne.object_store.internalURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG
-            echo "catalog.RegionOne.object_store.name = Swift Service" >> $KEYSTONE_CATALOG
-        fi
-
-        # Add quantum endpoints to service catalog if quantum is enabled
-        if is_service_enabled quantum; then
-            echo "catalog.RegionOne.network.publicURL = http://%SERVICE_HOST%:$Q_PORT/" >> $KEYSTONE_CATALOG
-            echo "catalog.RegionOne.network.adminURL = http://%SERVICE_HOST%:$Q_PORT/" >> $KEYSTONE_CATALOG
-            echo "catalog.RegionOne.network.internalURL = http://%SERVICE_HOST%:$Q_PORT/" >> $KEYSTONE_CATALOG
-            echo "catalog.RegionOne.network.name = Quantum Service" >> $KEYSTONE_CATALOG
-        fi
-
-        sudo sed -e "
-            s,%SERVICE_HOST%,$SERVICE_HOST,g;
-            s,%S3_SERVICE_PORT%,$S3_SERVICE_PORT,g;
-        " -i $KEYSTONE_CATALOG
-
-        # Configure ``keystone.conf`` to use templates
-        iniset $KEYSTONE_CONF catalog driver "keystone.catalog.backends.templated.TemplatedCatalog"
-        iniset $KEYSTONE_CONF catalog template_file "$KEYSTONE_CATALOG"
-    fi
-
-    # Set up logging
-    LOGGING_ROOT="devel"
-    if [ "$SYSLOG" != "False" ]; then
-        LOGGING_ROOT="$LOGGING_ROOT,production"
-    fi
-    KEYSTONE_LOG_CONFIG="--log-config $KEYSTONE_CONF_DIR/logging.conf"
-    cp $KEYSTONE_DIR/etc/logging.conf.sample $KEYSTONE_CONF_DIR/logging.conf
-    iniset $KEYSTONE_CONF_DIR/logging.conf logger_root level "DEBUG"
-    iniset $KEYSTONE_CONF_DIR/logging.conf logger_root handlers "devel,production"
-
-    # Initialize keystone database
-    $KEYSTONE_DIR/bin/keystone-manage db_sync
-
-    # Set up certificates
-    $KEYSTONE_DIR/bin/keystone-manage pki_setup
-
-    # Launch keystone and wait for it to answer before continuing
-    screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d --debug"
-    echo "Waiting for keystone to start..."
-    if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= curl -s $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_API_PORT/v2.0/ >/dev/null; do sleep 1; done"; then
-      echo "keystone did not start"
-      exit 1
-    fi
-
-    # ``keystone_data.sh`` creates services, admin and demo users, and roles.
-    SERVICE_ENDPOINT=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0
-
-    ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD \
-    SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT SERVICE_HOST=$SERVICE_HOST \
-    S3_SERVICE_PORT=$S3_SERVICE_PORT KEYSTONE_CATALOG_BACKEND=$KEYSTONE_CATALOG_BACKEND \
-    DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES HEAT_API_PORT=$HEAT_API_PORT \
-        bash -x $FILES/keystone_data.sh
-
-    # Set up auth creds now that keystone is bootstrapped
-    export OS_AUTH_URL=$SERVICE_ENDPOINT
-    export OS_TENANT_NAME=admin
-    export OS_USERNAME=admin
-    export OS_PASSWORD=$ADMIN_PASSWORD
-
-    # Create an access key and secret key for nova ec2 register image
-    if is_service_enabled swift3 && is_service_enabled nova; then
-        NOVA_USER_ID=$(keystone user-list | grep ' nova ' | get_field 1)
-        NOVA_TENANT_ID=$(keystone tenant-list | grep " $SERVICE_TENANT_NAME " | get_field 1)
-        CREDS=$(keystone ec2-credentials-create --user_id $NOVA_USER_ID --tenant_id $NOVA_TENANT_ID)
-        ACCESS_KEY=$(echo "$CREDS" | awk '/ access / { print $4 }')
-        SECRET_KEY=$(echo "$CREDS" | awk '/ secret / { print $4 }')
-        add_nova_opt "s3_access_key=$ACCESS_KEY"
-        add_nova_opt "s3_secret_key=$SECRET_KEY"
-        add_nova_opt "s3_affix_tenant=True"
-    fi
+# Create an access key and secret key for nova's EC2 image registration
+if is_service_enabled key && is_service_enabled swift3 && is_service_enabled nova; then
+    NOVA_USER_ID=$(keystone user-list | grep ' nova ' | get_field 1)
+    NOVA_TENANT_ID=$(keystone tenant-list | grep " $SERVICE_TENANT_NAME " | get_field 1)
+    CREDS=$(keystone ec2-credentials-create --user_id $NOVA_USER_ID --tenant_id $NOVA_TENANT_ID)
+    ACCESS_KEY=$(echo "$CREDS" | awk '/ access / { print $4 }')
+    SECRET_KEY=$(echo "$CREDS" | awk '/ secret / { print $4 }')
+    add_nova_opt "s3_access_key=$ACCESS_KEY"
+    add_nova_opt "s3_secret_key=$SECRET_KEY"
+    add_nova_opt "s3_affix_tenant=True"
 fi
 
 screen_it zeromq "cd $NOVA_DIR && $NOVA_DIR/bin/nova-rpc-zmq-receiver"
@@ -2120,16 +2162,44 @@
     fi
 fi
 
-# If we're using Quantum (i.e. q-svc is enabled), network creation has to
-# happen after we've started the Quantum service.
 if is_service_enabled q-svc; then
+    # Start the Quantum service
+    screen_it q-svc "cd $QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server --config-file $Q_CONF_FILE --config-file /$Q_PLUGIN_CONF_FILE"
+    echo "Waiting for Quantum to start..."
+    if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://127.0.0.1:9696; do sleep 1; done"; then
+      echo "Quantum did not start"
+      exit 1
+    fi
+
+    # Configure Quantum elements
+    # Configure internal network & subnet
+
     TENANT_ID=$(keystone tenant-list | grep " demo " | get_field 1)
 
     # Create a small network
     # Since quantum command is executed in admin context at this point,
     # ``--tenant_id`` needs to be specified.
     NET_ID=$(quantum net-create --tenant_id $TENANT_ID net1 | grep ' id ' | get_field 2)
-    quantum subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE
+    SUBNET_ID=$(quantum subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2)
+    if is_service_enabled q-l3; then
+        # Create a router, and add the private subnet as one of its interfaces
+        ROUTER_ID=$(quantum router-create --tenant_id $TENANT_ID router1 | grep ' id ' | get_field 2)
+        quantum router-interface-add $ROUTER_ID $SUBNET_ID
+        # Create an external network, and a subnet. Configure the external network as router gw
+        EXT_NET_ID=$(quantum net-create ext_net -- --router:external=True | grep ' id ' | get_field 2)
+        EXT_GW_IP=$(quantum subnet-create --ip_version 4 $EXT_NET_ID $FLOATING_RANGE -- --enable_dhcp=False | grep 'gateway_ip' | get_field 2)
+        quantum router-gateway-set $ROUTER_ID $EXT_NET_ID
+        if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
+            CIDR_LEN=${FLOATING_RANGE#*/}
+            sudo ip addr add $EXT_GW_IP/$CIDR_LEN dev $PUBLIC_BRIDGE
+            sudo ip link set $PUBLIC_BRIDGE up
+        fi
+        if [[ "$Q_USE_NAMESPACE" == "False" ]]; then
+            # Explicitly set router id in l3 agent configuration
+            iniset $Q_L3_CONF_FILE DEFAULT router_id $ROUTER_ID
+        fi
+    fi
+
 elif is_service_enabled mysql && is_service_enabled nova; then
     # Create a small network
     $NOVA_BIN_DIR/nova-manage network create private $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS
@@ -2141,6 +2211,11 @@
     $NOVA_BIN_DIR/nova-manage floating create --ip_range=$TEST_FLOATING_RANGE --pool=$TEST_FLOATING_POOL
 fi
 
+# Start up the quantum agents if enabled
+screen_it q-agt "sudo python $AGENT_BINARY --config-file $Q_CONF_FILE --config-file /$Q_PLUGIN_CONF_FILE"
+screen_it q-dhcp "sudo python $AGENT_DHCP_BINARY --config-file $Q_CONF_FILE --config-file=$Q_DHCP_CONF_FILE"
+screen_it q-l3 "sudo python $AGENT_L3_BINARY --config-file $Q_CONF_FILE --config-file=$Q_L3_CONF_FILE"
+
 # The group **libvirtd** is added to the current user in this script.
 # Use 'sg' to execute nova-compute as a member of the **libvirtd** group.
 # ``screen_it`` checks ``is_service_enabled``, it is not needed here
diff --git a/tests/functions.sh b/tests/functions.sh
index f111a48..3a0f319 100755
--- a/tests/functions.sh
+++ b/tests/functions.sh
@@ -239,3 +239,44 @@
 test_disable_negated_services 'b,a,-a' 'b'
 test_disable_negated_services 'a,b,-a' 'b'
 test_disable_negated_services 'a,-a,b' 'b'
+
+
+echo "Testing is_package_installed()"
+
+if [[ -z "$os_PACKAGE" ]]; then
+    GetOSVersion
+fi
+
+if [[ "$os_PACKAGE" = "deb" ]]; then
+    is_package_installed dpkg
+    VAL=$?
+else
+    is_package_installed rpm
+    VAL=$?
+fi
+if [[ "$VAL" -eq 0 ]]; then
+    echo "OK"
+else
+    echo "is_package_installed() on existing package failed"
+fi
+
+if [[ "$os_PACKAGE" = "deb" ]]; then
+    is_package_installed dpkg bash
+    VAL=$?
+else
+    is_package_installed rpm bash
+    VAL=$?
+fi
+if [[ "$VAL" -eq 0 ]]; then
+    echo "OK"
+else
+    echo "is_package_installed() on more than one existing package failed"
+fi
+
+is_package_installed zzzZZZzzz
+VAL=$?
+if [[ "$VAL" -ne 0 ]]; then
+    echo "OK"
+else
+    echo "is_package_installed() on non-existing package failed"
+fi