Merge "Add Quantum NEC OpenFlow plugin support"
diff --git a/README.md b/README.md
index a738554..d8538c2 100644
--- a/README.md
+++ b/README.md
@@ -103,3 +103,51 @@
 If you are enabling `swift3` in `ENABLED_SERVICES` devstack will install the swift3 middleware emulation. Swift will be configured to act as a S3 endpoint for Keystone so effectively replacing the `nova-objectstore`.
 
 Only Swift proxy server is launched in the screen session all other services are started in background and managed by `swift-init` tool.
+
+# Quantum
+
+Basic Setup
+
+In order to enable Quantum in a single node setup, you'll need the following settings in your `localrc` :
+
+    disable_service n-net
+    enable_service q-svc
+    enable_service q-agt
+    enable_service q-dhcp
+    enable_service q-l3
+    enable_service q-meta
+    enable_service quantum
+    # Optional, to enable tempest configuration as part of devstack
+    enable_service tempest
+
+Then run stack.sh as normal.
+
+If tempest has been successfully configured, a basic set of smoke tests can be run as follows:
+
+    $ cd /opt/stack/tempest
+    $ nosetests tempest/tests/network/test_network_basic_ops.py
+
+Multi-Node Setup
+
+A more interesting setup involves running multiple compute nodes, with Quantum networks connecting VMs on different compute nodes.
+You should run at least one "controller node", which should have a `stackrc` that includes at least:
+
+    disable_service n-net
+    enable_service q-svc
+    enable_service q-agt
+    enable_service q-dhcp
+    enable_service q-l3
+    enable_service q-meta
+    enable_service quantum
+
+You likely want to change your `localrc` to run a scheduler that will balance VMs across hosts:
+
+    SCHEDULER=nova.scheduler.simple.SimpleScheduler
+
+You can then run many compute nodes, each of which should have a `stackrc` which includes the following, with the IP address of the above controller node:
+
+    ENABLED_SERVICES=n-cpu,rabbit,g-api,quantum,q-agt
+    SERVICE_HOST=[IP of controller node]
+    MYSQL_HOST=$SERVICE_HOST
+    RABBIT_HOST=$SERVICE_HOST
+    Q_HOST=$SERVICE_HOST
diff --git a/files/apts/n-api b/files/apts/n-api
index ad943ff..0f08daa 100644
--- a/files/apts/n-api
+++ b/files/apts/n-api
@@ -1,2 +1 @@
-gcc  # temporary because this pulls in glance to get the client without running the glance prereqs
 python-dateutil
diff --git a/files/horizon_settings.py b/files/horizon_settings.py
deleted file mode 100644
index ce92e2c..0000000
--- a/files/horizon_settings.py
+++ /dev/null
@@ -1,169 +0,0 @@
-import os
-
-from django.utils.translation import ugettext_lazy as _
-
-DEBUG = True
-TEMPLATE_DEBUG = DEBUG
-PROD = False
-USE_SSL = False
-
-# Set SSL proxy settings:
-# For Django 1.4+ pass this header from the proxy after terminating the SSL,
-# and don't forget to strip it from the client's request.
-# For more information see:
-# https://docs.djangoproject.com/en/1.4/ref/settings/#secure-proxy-ssl-header
-# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
-
-# Specify a regular expression to validate user passwords.
-# HORIZON_CONFIG = {
-#     "password_validator": {
-#         "regex": '.*',
-#         "help_text": _("Your password does not meet the requirements.")
-#     },
-#    'help_url': "http://docs.openstack.org"
-# }
-
-LOCAL_PATH = os.path.dirname(os.path.abspath(__file__))
-
-# FIXME: We need to change this to mysql, instead of sqlite.
-DATABASES = {
-    'default': {
-        'ENGINE': 'django.db.backends.sqlite3',
-        'NAME': os.path.join(LOCAL_PATH, 'dashboard_openstack.sqlite3'),
-        'TEST_NAME': os.path.join(LOCAL_PATH, 'test.sqlite3'),
-    },
-}
-
-# Set custom secret key:
-# You can either set it to a specific value or you can let horizion generate a
-# default secret key that is unique on this machine, e.i. regardless of the
-# amount of Python WSGI workers (if used behind Apache+mod_wsgi): However, there
-# may be situations where you would want to set this explicitly, e.g. when
-# multiple dashboard instances are distributed on different machines (usually
-# behind a load-balancer). Either you have to make sure that a session gets all
-# requests routed to the same dashboard instance or you set the same SECRET_KEY
-# for all of them.
-from horizon.utils import secret_key
-SECRET_KEY = secret_key.generate_or_read_from_file(os.path.join(LOCAL_PATH, '.secret_key_store'))
-
-# We recommend you use memcached for development; otherwise after every reload
-# of the django development server, you will have to login again. To use
-# memcached set CACHE_BACKED to something like 'memcached://127.0.0.1:11211/'
-CACHE_BACKEND = 'dummy://'
-SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'
-
-# Send email to the console by default
-EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
-# Or send them to /dev/null
-#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
-
-# django-mailer uses a different settings attribute
-MAILER_EMAIL_BACKEND = EMAIL_BACKEND
-
-# Configure these for your outgoing email host
-# EMAIL_HOST = 'smtp.my-company.com'
-# EMAIL_PORT = 25
-# EMAIL_HOST_USER = 'djangomail'
-# EMAIL_HOST_PASSWORD = 'top-secret!'
-
-# For multiple regions uncomment this configuration, and add (endpoint, title).
-# AVAILABLE_REGIONS = [
-#     ('http://cluster1.example.com:5000/v2.0', 'cluster1'),
-#     ('http://cluster2.example.com:5000/v2.0', 'cluster2'),
-# ]
-
-OPENSTACK_HOST = "127.0.0.1"
-OPENSTACK_KEYSTONE_URL = "http://%s:5000/v2.0" % OPENSTACK_HOST
-OPENSTACK_KEYSTONE_DEFAULT_ROLE = "Member"
-
-# Disable SSL certificate checks (useful for self-signed certificates):
-# OPENSTACK_SSL_NO_VERIFY = True
-
-HORIZON_CONFIG = {
-    'dashboards': ('project', 'admin', 'settings',),
-    'default_dashboard': 'project',
-}
-
-# The OPENSTACK_KEYSTONE_BACKEND settings can be used to identify the
-# capabilities of the auth backend for Keystone.
-# If Keystone has been configured to use LDAP as the auth backend then set
-# can_edit_user to False and name to 'ldap'.
-#
-# TODO(tres): Remove these once Keystone has an API to identify auth backend.
-OPENSTACK_KEYSTONE_BACKEND = {
-    'name': 'native',
-    'can_edit_user': True
-}
-
-OPENSTACK_HYPERVISOR_FEATURES = {
-    'can_set_mount_point': True
-}
-
-# OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints
-# in the Keystone service catalog. Use this setting when Horizon is running
-# external to the OpenStack environment. The default is 'internalURL'.
-#OPENSTACK_ENDPOINT_TYPE = "publicURL"
-
-# The number of objects (Swift containers/objects or images) to display
-# on a single page before providing a paging element (a "more" link)
-# to paginate results.
-API_RESULT_LIMIT = 1000
-API_RESULT_PAGE_SIZE = 20
-
-SWIFT_PAGINATE_LIMIT = 100
-
-# The timezone of the server. This should correspond with the timezone
-# of your entire OpenStack installation, and hopefully be in UTC.
-TIME_ZONE = "UTC"
-
-#LOGGING = {
-#        'version': 1,
-#        # When set to True this will disable all logging except
-#        # for loggers specified in this configuration dictionary. Note that
-#        # if nothing is specified here and disable_existing_loggers is True,
-#        # django.db.backends will still log unless it is disabled explicitly.
-#        'disable_existing_loggers': False,
-#        'handlers': {
-#            'null': {
-#                'level': 'DEBUG',
-#                'class': 'django.utils.log.NullHandler',
-#                },
-#            'console': {
-#                # Set the level to "DEBUG" for verbose output logging.
-#                'level': 'INFO',
-#                'class': 'logging.StreamHandler',
-#                },
-#            },
-#        'loggers': {
-#            # Logging from django.db.backends is VERY verbose, send to null
-#            # by default.
-#            'django.db.backends': {
-#                'handlers': ['null'],
-#                'propagate': False,
-#                },
-#            'horizon': {
-#                'handlers': ['console'],
-#                'propagate': False,
-#            },
-#            'openstack_dashboard': {
-#                'handlers': ['console'],
-#                'propagate': False,
-#            },
-#            'novaclient': {
-#                'handlers': ['console'],
-#                'propagate': False,
-#            },
-#            'keystoneclient': {
-#                'handlers': ['console'],
-#                'propagate': False,
-#            },
-#            'glanceclient': {
-#                'handlers': ['console'],
-#                'propagate': False,
-#            },
-#            'nose.plugins.manager': {
-#                'handlers': ['console'],
-#                'propagate': False,
-#            }
-#        }
-#}
diff --git a/functions b/functions
index fe50547..edc4bf9 100644
--- a/functions
+++ b/functions
@@ -781,9 +781,9 @@
     if is_ubuntu; then
         apt_get purge "$@"
     elif is_fedora; then
-        yum remove -y "$@"
+        sudo yum remove -y "$@"
     elif is_suse; then
-        rpm -e "$@"
+        sudo rpm -e "$@"
     else
         exit_distro_not_supported "uninstalling packages"
     fi
@@ -1148,6 +1148,12 @@
             DISK_FORMAT=qcow2
             CONTAINER_FORMAT=bare
             ;;
+        *.iso)
+            IMAGE="$FILES/${IMAGE_FNAME}"
+            IMAGE_NAME=$(basename "$IMAGE" ".iso")
+            DISK_FORMAT=iso
+            CONTAINER_FORMAT=bare
+            ;;
         *) echo "Do not know what to do with $IMAGE_FNAME"; false;;
     esac
 
@@ -1350,5 +1356,5 @@
 
 
 # Local variables:
-# -*- mode: Shell-script -*-
+# mode: shell-script
 # End:
diff --git a/lib/baremetal b/lib/baremetal
index 5326dd1..24cce9f 100644
--- a/lib/baremetal
+++ b/lib/baremetal
@@ -434,3 +434,7 @@
 
 # Restore xtrace
 $XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/ceilometer b/lib/ceilometer
index d90694c..58cafd1 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -138,3 +138,7 @@
 
 # Restore xtrace
 $XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/cinder b/lib/cinder
index b3e1904..deace68 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -53,6 +53,11 @@
 # Support for multi lvm backend configuration (default is no support)
 CINDER_MULTI_LVM_BACKEND=$(trueorfalse False $CINDER_MULTI_LVM_BACKEND)
 
+# Should cinder perform secure deletion of volumes?
+# Defaults to true, can be set to False to avoid this bug when testing:
+# https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1023755
+CINDER_SECURE_DELETE=`trueorfalse True $CINDER_SECURE_DELETE`
+
 # Name of the lvm volume groups to use/create for iscsi volumes
 # VOLUME_GROUP2 is used only if CINDER_MULTI_LVM_BACKEND = True
 VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes}
@@ -118,9 +123,6 @@
 
 # configure_cinder() - Set config files, create data dirs, etc
 function configure_cinder() {
-    setup_develop $CINDER_DIR
-    setup_develop $CINDERCLIENT_DIR
-
     if [[ ! -d $CINDER_CONF_DIR ]]; then
         sudo mkdir -p $CINDER_CONF_DIR
     fi
@@ -362,7 +364,13 @@
 # install_cinder() - Collect source and prepare
 function install_cinder() {
     git_clone $CINDER_REPO $CINDER_DIR $CINDER_BRANCH
+    setup_develop $CINDER_DIR
+}
+
+# install_cinderclient() - Collect source and prepare
+function install_cinderclient() {
     git_clone $CINDERCLIENT_REPO $CINDERCLIENT_DIR $CINDERCLIENT_BRANCH
+    setup_develop $CINDERCLIENT_DIR
 }
 
 # apply config.d approach (e.g. Oneiric does not have this)
@@ -426,3 +434,7 @@
 
 # Restore xtrace
 $XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/database b/lib/database
index 79b77a2..cbe886f 100644
--- a/lib/database
+++ b/lib/database
@@ -114,3 +114,7 @@
 
 # Restore xtrace
 $XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/databases/mysql b/lib/databases/mysql
index 0633ab0..30450b1 100644
--- a/lib/databases/mysql
+++ b/lib/databases/mysql
@@ -139,3 +139,7 @@
 
 # Restore xtrace
 $MY_XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/databases/postgresql b/lib/databases/postgresql
index efc206f..b64de2c 100644
--- a/lib/databases/postgresql
+++ b/lib/databases/postgresql
@@ -90,3 +90,7 @@
 
 # Restore xtrace
 $PG_XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/glance b/lib/glance
index edf6982..3376400 100644
--- a/lib/glance
+++ b/lib/glance
@@ -62,15 +62,8 @@
     sudo rm -rf $GLANCE_CACHE_DIR $GLANCE_IMAGE_DIR $GLANCE_AUTH_CACHE_DIR
 }
 
-# configure_glanceclient() - Set config files, create data dirs, etc
-function configure_glanceclient() {
-    setup_develop $GLANCECLIENT_DIR
-}
-
 # configure_glance() - Set config files, create data dirs, etc
 function configure_glance() {
-    setup_develop $GLANCE_DIR
-
     if [[ ! -d $GLANCE_CONF_DIR ]]; then
         sudo mkdir -p $GLANCE_CONF_DIR
     fi
@@ -116,6 +109,15 @@
     iniset_rpc_backend glance $GLANCE_API_CONF DEFAULT
     iniset $GLANCE_API_CONF keystone_authtoken signing_dir $GLANCE_AUTH_CACHE_DIR/api
 
+    # Store the images in swift if enabled.
+    if is_service_enabled s-proxy; then
+        iniset $GLANCE_API_CONF DEFAULT default_store swift
+        iniset $GLANCE_API_CONF DEFAULT swift_store_auth_address $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/
+        iniset $GLANCE_API_CONF DEFAULT swift_store_user $SERVICE_TENANT_NAME:glance
+        iniset $GLANCE_API_CONF DEFAULT swift_store_key $SERVICE_PASSWORD
+        iniset $GLANCE_API_CONF DEFAULT swift_store_create_container_on_put True
+    fi
+
     cp -p $GLANCE_DIR/etc/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI
 
     cp -p $GLANCE_DIR/etc/glance-api-paste.ini $GLANCE_API_PASTE_INI
@@ -171,11 +173,13 @@
 # install_glanceclient() - Collect source and prepare
 function install_glanceclient() {
     git_clone $GLANCECLIENT_REPO $GLANCECLIENT_DIR $GLANCECLIENT_BRANCH
+    setup_develop $GLANCECLIENT_DIR
 }
 
 # install_glance() - Collect source and prepare
 function install_glance() {
     git_clone $GLANCE_REPO $GLANCE_DIR $GLANCE_BRANCH
+    setup_develop $GLANCE_DIR
 }
 
 # start_glance() - Start running processes, including screen
@@ -197,3 +201,7 @@
 
 # Restore xtrace
 $XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/heat b/lib/heat
index 56d6f39..88535c3 100644
--- a/lib/heat
+++ b/lib/heat
@@ -184,3 +184,7 @@
 
 # Restore xtrace
 $XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/horizon b/lib/horizon
index 9c96b58..b63e1f8 100644
--- a/lib/horizon
+++ b/lib/horizon
@@ -29,6 +29,10 @@
 # Set up default directories
 HORIZON_DIR=$DEST/horizon
 
+# local_settings.py is used to customize Dashboard settings.
+# The example file in Horizon repo is used by default.
+HORIZON_SETTINGS=${HORIZON_SETTINGS:-$HORIZON_DIR/openstack_dashboard/local/local_settings.py.example}
+
 # Allow overriding the default Apache user and group, default to
 # current user and his default group.
 APACHE_USER=${APACHE_USER:-$USER}
@@ -77,7 +81,7 @@
 
     # ``local_settings.py`` is used to override horizon default settings.
     local_settings=$HORIZON_DIR/openstack_dashboard/local/local_settings.py
-    cp $FILES/horizon_settings.py $local_settings
+    cp $HORIZON_SETTINGS $local_settings
 
     # enable loadbalancer dashboard in case service is enabled
     if is_service_enabled q-lbaas; then
@@ -174,3 +178,7 @@
 
 # Restore xtrace
 $XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/keystone b/lib/keystone
index 17e0866..0fbc7d7 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -59,6 +59,9 @@
 KEYSTONE_SERVICE_PORT_INT=${KEYSTONE_SERVICE_PORT_INT:-5001}
 KEYSTONE_SERVICE_PROTOCOL=${KEYSTONE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
 
+# Set the tenant for service accounts in Keystone
+SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service}
+
 
 # Entry Points
 # ------------
@@ -72,15 +75,8 @@
     :
 }
 
-# configure_keystoneclient() - Set config files, create data dirs, etc
-function configure_keystoneclient() {
-    setup_develop $KEYSTONECLIENT_DIR
-}
-
 # configure_keystone() - Set config files, create data dirs, etc
 function configure_keystone() {
-    setup_develop $KEYSTONE_DIR
-
     if [[ ! -d $KEYSTONE_CONF_DIR ]]; then
         sudo mkdir -p $KEYSTONE_CONF_DIR
     fi
@@ -302,6 +298,7 @@
 # install_keystoneclient() - Collect source and prepare
 function install_keystoneclient() {
     git_clone $KEYSTONECLIENT_REPO $KEYSTONECLIENT_DIR $KEYSTONECLIENT_BRANCH
+    setup_develop $KEYSTONECLIENT_DIR
 }
 
 # install_keystone() - Collect source and prepare
@@ -311,6 +308,7 @@
         install_ldap
     fi
     git_clone $KEYSTONE_REPO $KEYSTONE_DIR $KEYSTONE_BRANCH
+    setup_develop $KEYSTONE_DIR
 }
 
 # start_keystone() - Start running processes, including screen
@@ -343,3 +341,7 @@
 
 # Restore xtrace
 $XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/ldap b/lib/ldap
index 0a0d197..53f6837 100644
--- a/lib/ldap
+++ b/lib/ldap
@@ -37,6 +37,12 @@
     #update ldap olcdb
     sudo ldapmodify -Y EXTERNAL -H ldapi:/// -f $TMP_MGR_DIFF_FILE
 
+    # On Fedora we need to manually add cosine and inetorgperson schemas
+    if is_fedora; then
+        sudo ldapadd -Y EXTERNAL -H ldapi:/// -f /etc/openldap/schema/cosine.ldif
+        sudo ldapadd -Y EXTERNAL -H ldapi:/// -f /etc/openldap/schema/inetorgperson.ldif
+    fi
+
     # add our top level ldap nodes
     if ldapsearch -x -w $LDAP_PASSWORD -H ldap://localhost -D dc=Manager,dc=openstack,dc=org -x -b dc=openstack,dc=org | grep -q "Success" ; then
         printf "LDAP already configured for OpenStack\n"
@@ -70,3 +76,7 @@
 
 # Restore xtrace
 $XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/nova b/lib/nova
index f0c8315..8d045b5 100644
--- a/lib/nova
+++ b/lib/nova
@@ -65,6 +65,9 @@
 
 QEMU_CONF=/etc/libvirt/qemu.conf
 
+NOVNC_DIR=$DEST/noVNC
+SPICE_DIR=$DEST/spice-html5
+
 
 # Nova Network Configuration
 # --------------------------
@@ -106,7 +109,7 @@
 # If you are running on a single node and don't need to access the VMs from
 # devices other than that node, you can set ``FLAT_INTERFACE=``
 # This will stop nova from bridging any interfaces into ``FLAT_NETWORK_BRIDGE``.
-FLAT_INTERFACE=${FLAT_INTERFACE-$GUEST_INTERFACE_DEFAULT}
+FLAT_INTERFACE=${FLAT_INTERFACE:-$GUEST_INTERFACE_DEFAULT}
 
 # ``MULTI_HOST`` is a mode where each compute node runs its own network node.  This
 # allows network operations and routing for a VM to occur on the server that is
@@ -153,8 +156,11 @@
         fi
 
         # Logout and delete iscsi sessions
-        sudo iscsiadm --mode node | grep $VOLUME_NAME_PREFIX | cut -d " " -f2 | xargs sudo iscsiadm --mode node --logout || true
-        sudo iscsiadm --mode node | grep $VOLUME_NAME_PREFIX | cut -d " " -f2 | sudo iscsiadm --mode node --op delete || true
+        tgts=$(sudo iscsiadm --mode node | grep $VOLUME_NAME_PREFIX | cut -d ' ' -f2)
+        for target in $tgts; do
+            sudo iscsiadm --mode node -T $target --logout || true
+        done
+        sudo iscsiadm --mode node --op delete || true
 
         # Clean out the instances directory.
         sudo rm -rf $NOVA_INSTANCES_PATH/*
@@ -163,11 +169,6 @@
     sudo rm -rf $NOVA_STATE_PATH $NOVA_AUTH_CACHE_DIR
 }
 
-# configure_novaclient() - Set config files, create data dirs, etc
-function configure_novaclient() {
-    setup_develop $NOVACLIENT_DIR
-}
-
 # configure_nova_rootwrap() - configure Nova's rootwrap
 function configure_nova_rootwrap() {
     # Deploy new rootwrap filters files (owned by root).
@@ -198,8 +199,6 @@
 
 # configure_nova() - Set config files, create data dirs, etc
 function configure_nova() {
-    setup_develop $NOVA_DIR
-
     # Put config files in ``/etc/nova`` for everyone to find
     if [[ ! -d $NOVA_CONF_DIR ]]; then
         sudo mkdir -p $NOVA_CONF_DIR
@@ -221,9 +220,9 @@
         # Get the sample configuration file in place
         cp $NOVA_DIR/etc/nova/api-paste.ini $NOVA_CONF_DIR
 
-        iniset $NOVA_API_PASTE_INI filter:authtoken auth_host $SERVICE_HOST
+        iniset $NOVA_API_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST
         if is_service_enabled tls-proxy; then
-            iniset $NOVA_API_PASTE_INI filter:authtoken auth_protocol $SERVICE_PROTOCOL
+            iniset $NOVA_API_PASTE_INI filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
         fi
         iniset $NOVA_API_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME
         iniset $NOVA_API_PASTE_INI filter:authtoken admin_user nova
@@ -352,7 +351,8 @@
         # ----------------
 
         # Nova stores each instance in its own directory.
-        mkdir -p $NOVA_INSTANCES_PATH
+        sudo mkdir -p $NOVA_INSTANCES_PATH
+        sudo chown -R $STACK_USER $NOVA_INSTANCES_PATH
 
         # You can specify a different disk to be mounted and used for backing the
         # virtual machines.  If there is a partition labeled nova-instances we
@@ -590,6 +590,7 @@
 # install_novaclient() - Collect source and prepare
 function install_novaclient() {
     git_clone $NOVACLIENT_REPO $NOVACLIENT_DIR $NOVACLIENT_BRANCH
+    setup_develop $NOVACLIENT_DIR
 }
 
 # install_nova() - Collect source and prepare
@@ -620,6 +621,7 @@
     fi
 
     git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH
+    setup_develop $NOVA_DIR
 }
 
 # start_nova_api() - Start the API process ahead of other things
@@ -656,6 +658,11 @@
     screen_it n-xvnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-xvpvncproxy --config-file $NOVA_CONF"
     screen_it n-spice "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $NOVA_CONF --web $SPICE_DIR"
     screen_it n-cauth "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-consoleauth"
+
+    # Start nova-objectstore only if the swift3 service is not enabled;
+    # when swift3 is enabled, Swift will act as the s3 objectstore.
+    is_service_enabled swift3 || \
+        screen_it n-obj "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-objectstore"
 }
 
 # stop_nova() - Stop running processes (non-screen)
@@ -668,3 +675,7 @@
 
 # Restore xtrace
 $XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/quantum b/lib/quantum
index efdd43d..9ad1538 100644
--- a/lib/quantum
+++ b/lib/quantum
@@ -179,7 +179,7 @@
 # Agent loadbalancer service plugin functions
 # -------------------------------------------
 # Hardcoding for 1 service plugin for now
-source $TOP_DIR/lib/quantum_plugins/agent_loadbalancer
+source $TOP_DIR/lib/quantum_plugins/services/agent_loadbalancer
 
 # Use security group or not
 if has_quantum_plugin_security_group; then
@@ -761,3 +761,7 @@
 
 # Restore xtrace
 $XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/quantum_plugins/agent_loadbalancer b/lib/quantum_plugins/services/agent_loadbalancer
similarity index 60%
rename from lib/quantum_plugins/agent_loadbalancer
rename to lib/quantum_plugins/services/agent_loadbalancer
index 87e7aaa..b6528b0 100644
--- a/lib/quantum_plugins/agent_loadbalancer
+++ b/lib/quantum_plugins/services/agent_loadbalancer
@@ -7,6 +7,7 @@
 
 
 AGENT_LBAAS_BINARY="$QUANTUM_DIR/bin/quantum-lbaas-agent"
+AGENT_LBAAS_PLUGIN=quantum.plugins.services.agent_loadbalancer.plugin.LoadBalancerPlugin
 
 function quantum_agent_lbaas_install_agent_packages() {
     if is_ubuntu || is_fedora; then
@@ -19,9 +20,9 @@
 
 function quantum_agent_lbaas_configure_common() {
     if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then
-        Q_SERVICE_PLUGIN_CLASSES="quantum.plugins.services.agent_loadbalancer.plugin.LoadBalancerPlugin"
+        Q_SERVICE_PLUGIN_CLASSES=$AGENT_LBAAS_PLUGIN
     else
-        Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,quantum.plugins.services.agent_loadbalancer.plugin.LoadBalancerPlugin"
+        Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$AGENT_LBAAS_PLUGIN"
     fi
 }
 
@@ -31,13 +32,9 @@
 
     LBAAS_AGENT_CONF_FILENAME="$LBAAS_AGENT_CONF_PATH/lbaas_agent.ini"
 
-    cp $QUANTUM_DIR/etc/lbaas_agent.ini /$LBAAS_AGENT_CONF_FILENAME
+    cp $QUANTUM_DIR/etc/lbaas_agent.ini $LBAAS_AGENT_CONF_FILENAME
 
-    if [[ $Q_PLUGIN == 'linuxbridge' || $Q_PLUGIN == 'brocade' ]]; then
-        iniset $LBAAS_AGENT_CONF_FILENAME DEFAULT interface_driver "quantum.agent.linux.interface.BridgeInterfaceDriver"
-    else
-        iniset $LBAAS_AGENT_CONF_FILENAME DEFAULT interface_driver "quantum.agent.linux.interface.OVSInterfaceDriver"
-    fi
+    quantum_plugin_setup_interface_driver $LBAAS_AGENT_CONF_FILENAME
 
     if is_fedora; then
         iniset $LBAAS_AGENT_CONF_FILENAME DEFAULT user_group "nobody"
diff --git a/lib/rpc_backend b/lib/rpc_backend
index bbd51f0..d08cb01 100644
--- a/lib/rpc_backend
+++ b/lib/rpc_backend
@@ -159,3 +159,7 @@
 
 # Restore xtrace
 $XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/swift b/lib/swift
index 2c87d21..eb57477 100644
--- a/lib/swift
+++ b/lib/swift
@@ -28,6 +28,7 @@
 SWIFT_DIR=$DEST/swift
 SWIFTCLIENT_DIR=$DEST/python-swiftclient
 SWIFT_AUTH_CACHE_DIR=${SWIFT_AUTH_CACHE_DIR:-/var/cache/swift}
+SWIFT3_DIR=$DEST/swift3
 
 # TODO: add logging to different location.
 
@@ -40,6 +41,12 @@
 # TODO(dtroyer): remove SWIFT_CONFIG_DIR after cutting stable/grizzly
 SWIFT_CONF_DIR=${SWIFT_CONF_DIR:-${SWIFT_CONFIG_DIR:-/etc/swift}}
 
+if is_service_enabled s-proxy && is_service_enabled swift3; then
+    # If we are using swift3, we can default the s3 port to swift instead
+    # of nova-objectstore
+    S3_SERVICE_PORT=${S3_SERVICE_PORT:-8080}
+fi
+
 # DevStack will create a loop-back disk formatted as XFS to store the
 # swift data. Set ``SWIFT_LOOPBACK_DISK_SIZE`` to the disk size in
 # kilobytes.
@@ -72,9 +79,9 @@
 # Port bases used in port number calclution for the service "nodes"
 # The specified port number will be used, the additinal ports calculated by
 # base_port + node_num * 10
-OBJECT_PORT_BASE=6013
-CONTAINER_PORT_BASE=6011
-ACCOUNT_PORT_BASE=6012
+OBJECT_PORT_BASE=${OBJECT_PORT_BASE:-6013}
+CONTAINER_PORT_BASE=${CONTAINER_PORT_BASE:-6011}
+ACCOUNT_PORT_BASE=${ACCOUNT_PORT_BASE:-6012}
 
 
 # Entry Points
@@ -99,59 +106,11 @@
     local swift_node_config
     local swift_log_dir
 
-    setup_develop $SWIFT_DIR
-
     # Make sure to kill all swift processes first
     swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true
 
-    # First do a bit of setup by creating the directories and
-    # changing the permissions so we can run it as our user.
-
-    USER_GROUP=$(id -g)
-    sudo mkdir -p ${SWIFT_DATA_DIR}/{drives,cache,run,logs}
-    sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_DIR}
-
-    # Create a loopback disk and format it to XFS.
-    if [[ -e ${SWIFT_DATA_DIR}/drives/images/swift.img ]]; then
-        if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then
-            sudo umount ${SWIFT_DATA_DIR}/drives/sdb1
-            sudo rm -f ${SWIFT_DATA_DIR}/drives/images/swift.img
-        fi
-    fi
-
-    mkdir -p  ${SWIFT_DATA_DIR}/drives/images
-    sudo touch  ${SWIFT_DATA_DIR}/drives/images/swift.img
-    sudo chown $USER: ${SWIFT_DATA_DIR}/drives/images/swift.img
-
-    dd if=/dev/zero of=${SWIFT_DATA_DIR}/drives/images/swift.img \
-        bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE}
-
-    # Make a fresh XFS filesystem
-    mkfs.xfs -f -i size=1024  ${SWIFT_DATA_DIR}/drives/images/swift.img
-
-    # Mount the disk with mount options to make it as efficient as possible
-    mkdir -p ${SWIFT_DATA_DIR}/drives/sdb1
-    if ! egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then
-        sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8  \
-            ${SWIFT_DATA_DIR}/drives/images/swift.img ${SWIFT_DATA_DIR}/drives/sdb1
-    fi
-
-    # Create a link to the above mount and
-    # create all of the directories needed to emulate a few different servers
-    for node_number in ${SWIFT_REPLICAS_SEQ}; do
-        sudo ln -sf ${SWIFT_DATA_DIR}/drives/sdb1/$node_number ${SWIFT_DATA_DIR}/$node_number;
-        drive=${SWIFT_DATA_DIR}/drives/sdb1/${node_number}
-        node=${SWIFT_DATA_DIR}/${node_number}/node
-        node_device=${node}/sdb1
-        [[ -d $node ]] && continue
-        [[ -d $drive ]] && continue
-        sudo install -o ${USER} -g $USER_GROUP -d $drive
-        sudo install -o ${USER} -g $USER_GROUP -d $node_device
-        sudo chown -R $USER: ${node}
-    done
-
-   sudo mkdir -p ${SWIFT_CONF_DIR}/{object,container,account}-server
-   sudo chown -R $USER: ${SWIFT_CONF_DIR}
+    sudo mkdir -p ${SWIFT_CONF_DIR}/{object,container,account}-server
+    sudo chown -R $USER: ${SWIFT_CONF_DIR}
 
     if [[ "$SWIFT_CONF_DIR" != "/etc/swift" ]]; then
         # Some swift tools are hard-coded to use ``/etc/swift`` and are apparently not going to be fixed.
@@ -320,9 +279,55 @@
         tee /etc/rsyslog.d/10-swift.conf
 }
 
-# configure_swiftclient() - Set config files, create data dirs, etc
-function configure_swiftclient() {
-    setup_develop $SWIFTCLIENT_DIR
+# create_swift_disk() - Create Swift backing disk
+function create_swift_disk() {
+    local node_number
+
+    # First do a bit of setup by creating the directories and
+    # changing the permissions so we can run it as our user.
+
+    USER_GROUP=$(id -g)
+    sudo mkdir -p ${SWIFT_DATA_DIR}/{drives,cache,run,logs}
+    sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_DIR}
+
+    # Create a loopback disk and format it to XFS.
+    if [[ -e ${SWIFT_DATA_DIR}/drives/images/swift.img ]]; then
+        if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then
+            sudo umount ${SWIFT_DATA_DIR}/drives/sdb1
+            sudo rm -f ${SWIFT_DATA_DIR}/drives/images/swift.img
+        fi
+    fi
+
+    mkdir -p ${SWIFT_DATA_DIR}/drives/images
+    sudo touch ${SWIFT_DATA_DIR}/drives/images/swift.img
+    sudo chown $USER: ${SWIFT_DATA_DIR}/drives/images/swift.img
+
+    dd if=/dev/zero of=${SWIFT_DATA_DIR}/drives/images/swift.img \
+        bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE}
+
+    # Make a fresh XFS filesystem
+    mkfs.xfs -f -i size=1024  ${SWIFT_DATA_DIR}/drives/images/swift.img
+
+    # Mount the disk with mount options to make it as efficient as possible
+    mkdir -p ${SWIFT_DATA_DIR}/drives/sdb1
+    if ! egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then
+        sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8  \
+            ${SWIFT_DATA_DIR}/drives/images/swift.img ${SWIFT_DATA_DIR}/drives/sdb1
+    fi
+
+    # Create a link to the above mount and
+    # create all of the directories needed to emulate a few different servers
+    for node_number in ${SWIFT_REPLICAS_SEQ}; do
+        sudo ln -sf ${SWIFT_DATA_DIR}/drives/sdb1/$node_number ${SWIFT_DATA_DIR}/$node_number;
+        drive=${SWIFT_DATA_DIR}/drives/sdb1/${node_number}
+        node=${SWIFT_DATA_DIR}/${node_number}/node
+        node_device=${node}/sdb1
+        [[ -d $node ]] && continue
+        [[ -d $drive ]] && continue
+        sudo install -o ${USER} -g $USER_GROUP -d $drive
+        sudo install -o ${USER} -g $USER_GROUP -d $node_device
+        sudo chown -R $USER: ${node}
+    done
 }
 
 # init_swift() - Initialize rings
@@ -331,6 +336,9 @@
     # Make sure to kill all swift processes first
     swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true
 
+    # Forcibly re-create the backing filesystem
+    create_swift_disk
+
     # This is where we create three different rings for swift with
     # different object servers binding on different ports.
     pushd ${SWIFT_CONF_DIR} >/dev/null && {
@@ -359,13 +367,14 @@
 
 function install_swift() {
     git_clone $SWIFT_REPO $SWIFT_DIR $SWIFT_BRANCH
+    setup_develop $SWIFT_DIR
 }
 
 function install_swiftclient() {
     git_clone $SWIFTCLIENT_REPO $SWIFTCLIENT_DIR $SWIFTCLIENT_BRANCH
+    setup_develop $SWIFTCLIENT_DIR
 }
 
-
 # start_swift() - Start running processes, including screen
 function start_swift() {
     # (re)start rsyslog
@@ -414,3 +423,7 @@
 
 # Restore xtrace
 $XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/tempest b/lib/tempest
index 85e643e..c1dc3a3 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -294,3 +294,7 @@
 
 # Restore xtrace
 $XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/template b/lib/template
index 02de5ce..72904fe 100644
--- a/lib/template
+++ b/lib/template
@@ -78,3 +78,7 @@
 
 # Restore xtrace
 $XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/tls b/lib/tls
index 202edef..fb8f4b9 100644
--- a/lib/tls
+++ b/lib/tls
@@ -316,3 +316,7 @@
 
     stud $STUD_PROTO -f $f_host,$f_port -b $b_host,$b_port $DEVSTACK_CERT 2>/dev/null
 }
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/openrc b/openrc
index 3ef44fd..8af2854 100644
--- a/openrc
+++ b/openrc
@@ -27,8 +27,8 @@
 source $RC_DIR/stackrc
 
 # Load the last env variables if available
-if [[ -r $TOP_DIR/.stackenv ]]; then
-    source $TOP_DIR/.stackenv
+if [[ -r $RC_DIR/.stackenv ]]; then
+    source $RC_DIR/.stackenv
 fi
 
 # Get some necessary configuration
diff --git a/stack.sh b/stack.sh
index 9a87a5f..1010b4e 100755
--- a/stack.sh
+++ b/stack.sh
@@ -269,14 +269,12 @@
 # Set the destination directories for OpenStack projects
 HORIZON_DIR=$DEST/horizon
 OPENSTACKCLIENT_DIR=$DEST/python-openstackclient
-NOVNC_DIR=$DEST/noVNC
-SPICE_DIR=$DEST/spice-html5
-SWIFT3_DIR=$DEST/swift3
 
-# Should cinder perform secure deletion of volumes?
-# Defaults to true, can be set to False to avoid this bug when testing:
-# https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1023755
-CINDER_SECURE_DELETE=`trueorfalse True $CINDER_SECURE_DELETE`
+
+# Interactive Configuration
+# -------------------------
+
+# Do all interactive config up front before the logging spew begins
 
 # Generic helper to configure passwords
 function read_password {
@@ -322,7 +320,6 @@
 
 
 # Database Configuration
-# ----------------------
 
 # To select between database backends, add the following to ``localrc``:
 #
@@ -335,8 +332,7 @@
 initialize_database_backends && echo "Using $DATABASE_TYPE database backend" || echo "No database enabled"
 
 
-# RabbitMQ or Qpid
-# --------------------------
+# Queue Configuration
 
 # Rabbit connection info
 if is_service_enabled rabbit; then
@@ -344,53 +340,45 @@
     read_password RABBIT_PASSWORD "ENTER A PASSWORD TO USE FOR RABBIT."
 fi
 
-if is_service_enabled s-proxy; then
-    # If we are using swift3, we can default the s3 port to swift instead
-    # of nova-objectstore
-    if is_service_enabled swift3;then
-        S3_SERVICE_PORT=${S3_SERVICE_PORT:-8080}
+
+# Keystone
+
+if is_service_enabled key; then
+    # The ``SERVICE_TOKEN`` is used to bootstrap the Keystone database.  It is
+    # just a string and is not a 'real' Keystone token.
+    read_password SERVICE_TOKEN "ENTER A SERVICE_TOKEN TO USE FOR THE SERVICE ADMIN TOKEN."
+    # Services authenticate to Identity with servicename/``SERVICE_PASSWORD``
+    read_password SERVICE_PASSWORD "ENTER A SERVICE_PASSWORD TO USE FOR THE SERVICE AUTHENTICATION."
+    # Horizon currently truncates usernames and passwords at 20 characters
+    read_password ADMIN_PASSWORD "ENTER A PASSWORD TO USE FOR HORIZON AND KEYSTONE (20 CHARS OR LESS)."
+
+    # Keystone can now optionally install OpenLDAP by enabling the ``ldap``
+    # service in ``localrc`` (e.g. ``enable_service ldap``).
+    # To clean out the Keystone contents in OpenLDAP set ``KEYSTONE_CLEAR_LDAP``
+    # to ``yes`` (e.g. ``KEYSTONE_CLEAR_LDAP=yes``) in ``localrc``.  To enable the
+    # Keystone Identity Driver (``keystone.identity.backends.ldap.Identity``)
+    # set ``KEYSTONE_IDENTITY_BACKEND`` to ``ldap`` (e.g.
+    # ``KEYSTONE_IDENTITY_BACKEND=ldap``) in ``localrc``.
+
+    # only request ldap password if the service is enabled
+    if is_service_enabled ldap; then
+        read_password LDAP_PASSWORD "ENTER A PASSWORD TO USE FOR LDAP"
     fi
+fi
+
+
+# Swift
+
+if is_service_enabled s-proxy; then
     # We only ask for Swift Hash if we have enabled swift service.
     # ``SWIFT_HASH`` is a random unique string for a swift cluster that
     # can never change.
     read_password SWIFT_HASH "ENTER A RANDOM SWIFT HASH."
 fi
 
-# Set default port for nova-objectstore
-S3_SERVICE_PORT=${S3_SERVICE_PORT:-3333}
 
-
-# Keystone
-# --------
-
-# The ``SERVICE_TOKEN`` is used to bootstrap the Keystone database.  It is
-# just a string and is not a 'real' Keystone token.
-read_password SERVICE_TOKEN "ENTER A SERVICE_TOKEN TO USE FOR THE SERVICE ADMIN TOKEN."
-# Services authenticate to Identity with servicename/``SERVICE_PASSWORD``
-read_password SERVICE_PASSWORD "ENTER A SERVICE_PASSWORD TO USE FOR THE SERVICE AUTHENTICATION."
-# Horizon currently truncates usernames and passwords at 20 characters
-read_password ADMIN_PASSWORD "ENTER A PASSWORD TO USE FOR HORIZON AND KEYSTONE (20 CHARS OR LESS)."
-# Keystone can now optionally install OpenLDAP by adding ldap to the list
-# of enabled services in the localrc file (e.g. ENABLED_SERVICES=key,ldap).
-# If OpenLDAP has already been installed but you need to clear out
-# the Keystone contents of LDAP set KEYSTONE_CLEAR_LDAP to yes
-# (e.g. KEYSTONE_CLEAR_LDAP=yes ) in the localrc file.  To enable the
-# Keystone Identity Driver (keystone.identity.backends.ldap.Identity)
-# set KEYSTONE_IDENTITY_BACKEND to ldap (e.g. KEYSTONE_IDENTITY_BACKEND=ldap)
-# in the localrc file.
-
-
-# only request ldap password if the service is enabled
-if is_service_enabled ldap; then
-    read_password LDAP_PASSWORD "ENTER A PASSWORD TO USE FOR LDAP"
-fi
-
-# Set the tenant for service accounts in Keystone
-SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service}
-
-
-# Log files
-# ---------
+# Configure logging
+# -----------------
 
 # Draw a spinner so the user knows something is happening
 function spinner() {
@@ -576,9 +564,11 @@
 # Grab clients first
 install_keystoneclient
 install_glanceclient
+install_cinderclient
 install_novaclient
 # Check out the client libs that are used most
 git_clone $OPENSTACKCLIENT_REPO $OPENSTACKCLIENT_DIR $OPENSTACKCLIENT_BRANCH
+setup_develop $OPENSTACKCLIENT_DIR
 
 # glance, swift middleware and nova api needs keystone middleware
 if is_service_enabled key g-api n-api s-proxy; then
@@ -638,45 +628,43 @@
 
 echo_summary "Configuring OpenStack projects"
 
-# Set up our checkouts so they are installed into python path
-# allowing ``import nova`` or ``import glance.client``
-configure_keystoneclient
-configure_novaclient
-setup_develop $OPENSTACKCLIENT_DIR
+# Set up our checkouts so they are installed in the python path
+
 if is_service_enabled key g-api n-api s-proxy; then
     configure_keystone
 fi
+
 if is_service_enabled s-proxy; then
     configure_swift
-    configure_swiftclient
     if is_service_enabled swift3; then
         setup_develop $SWIFT3_DIR
     fi
 fi
+
 if is_service_enabled g-api n-api; then
     configure_glance
 fi
 
-# Do this _after_ glance is installed to override the old binary
-# TODO(dtroyer): figure out when this is no longer necessary
-configure_glanceclient
-
 if is_service_enabled nova; then
     # First clean up old instances
     cleanup_nova
     configure_nova
 fi
+
 if is_service_enabled horizon; then
     configure_horizon
 fi
+
 if is_service_enabled quantum; then
     setup_quantumclient
     setup_quantum
 fi
+
 if is_service_enabled heat; then
     configure_heat
     configure_heatclient
 fi
+
 if is_service_enabled cinder; then
     configure_cinder
 fi
@@ -698,6 +686,7 @@
     # don't be naive and add to existing line!
 fi
 
+
 # Syslog
 # ------
 
@@ -831,17 +820,7 @@
 
 if is_service_enabled g-reg; then
     echo_summary "Configuring Glance"
-
     init_glance
-
-    # Store the images in swift if enabled.
-    if is_service_enabled s-proxy; then
-        iniset $GLANCE_API_CONF DEFAULT default_store swift
-        iniset $GLANCE_API_CONF DEFAULT swift_store_auth_address $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/
-        iniset $GLANCE_API_CONF DEFAULT swift_store_user $SERVICE_TENANT_NAME:glance
-        iniset $GLANCE_API_CONF DEFAULT swift_store_key $SERVICE_PASSWORD
-        iniset $GLANCE_API_CONF DEFAULT swift_store_create_container_on_put True
-    fi
 fi
 
 
@@ -992,6 +971,7 @@
     fi
 fi
 
+
 # Launch Services
 # ===============
 
@@ -1021,7 +1001,10 @@
     iniset $NOVA_CONF DEFAULT s3_affix_tenant "True"
 fi
 
-screen_it zeromq "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-rpc-zmq-receiver"
+if is_service_enabled zeromq; then
+    echo_summary "Starting zeromq receiver"
+    screen_it zeromq "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-rpc-zmq-receiver"
+fi
 
 # Launch the nova-api and wait for it to answer before continuing
 if is_service_enabled n-api; then
@@ -1066,12 +1049,6 @@
     start_ceilometer
 fi
 
-# Starting the nova-objectstore only if swift3 service is not enabled.
-# Swift will act as s3 objectstore.
-is_service_enabled swift3 || \
-    screen_it n-obj "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-objectstore"
-
-
 # Configure and launch heat engine, api and metadata
 if is_service_enabled heat; then
     # Initialize heat, including replacing nova flavors
@@ -1081,6 +1058,7 @@
     start_heat
 fi
 
+
 # Create account rc files
 # =======================
 
@@ -1191,6 +1169,7 @@
 # Check the status of running services
 service_check
 
+
 # Fin
 # ===
 
diff --git a/stackrc b/stackrc
index 19674ed..5a4c580 100644
--- a/stackrc
+++ b/stackrc
@@ -20,7 +20,18 @@
 # screen tabs. To change the default list, use the ``enable_service`` and
 # ``disable_service`` functions in ``localrc``.
 # For example, to enable Swift add this to ``localrc``:
-# enable_service swift
+#  enable_service swift
+# In order to enable Quantum (a single node setup) add the following
+# settings in ``localrc``:
+#  disable_service n-net
+#  enable_service q-svc
+#  enable_service q-agt
+#  enable_service q-dhcp
+#  enable_service q-l3
+#  enable_service q-meta
+#  enable_service quantum
+#  # Optional, to enable tempest configuration as part of devstack
+#  enable_service tempest
 ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,cinder,c-sch,c-api,c-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,rabbit,tempest,mysql
 
 # Set the default Nova APIs to enable
@@ -201,8 +212,16 @@
 VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-}
 INSTANCE_NAME_PREFIX=${INSTANCE_NAME_PREFIX:-instance-}
 
+# Set default port for nova-objectstore
+S3_SERVICE_PORT=${S3_SERVICE_PORT:-3333}
+
+# Common network names
 PRIVATE_NETWORK_NAME=${PRIVATE_NETWORK_NAME:-"private"}
 PUBLIC_NETWORK_NAME=${PUBLIC_NETWORK_NAME:-"nova"}
 
 # Compatibility until it's eradicated from CI
 USE_SCREEN=${SCREEN_DEV:-$USE_SCREEN}
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/tools/xen/functions b/tools/xen/functions
new file mode 100644
index 0000000..5b4a661
--- /dev/null
+++ b/tools/xen/functions
@@ -0,0 +1,55 @@
+#!/bin/bash
+
+function xapi_plugin_location {
+    for PLUGIN_DIR in "/etc/xapi.d/plugins/" "/usr/lib/xcp/plugins/"
+    do
+        if [ -d $PLUGIN_DIR ]
+        then
+            echo $PLUGIN_DIR
+            return 0
+        fi
+    done
+    return 1
+}
+
+function zip_snapshot_location {
+    echo $1 | sed "s:\.git$::;s:$:/zipball/$2:g"
+}
+
+function create_directory_for_kernels {
+    mkdir -p "/boot/guest"
+}
+
+function extract_remote_zipball {
+    local ZIPBALL_URL=$1
+
+    local LOCAL_ZIPBALL=$(mktemp)
+    local EXTRACTED_FILES=$(mktemp -d)
+
+    (
+        wget -nv $ZIPBALL_URL -O $LOCAL_ZIPBALL --no-check-certificate
+        unzip -q -o $LOCAL_ZIPBALL -d $EXTRACTED_FILES
+        rm -f $LOCAL_ZIPBALL
+    ) >&2
+
+    echo "$EXTRACTED_FILES"
+}
+
+function find_xapi_plugins_dir {
+    find $1 -path '*/xapi.d/plugins' -type d -print
+}
+
+function install_xapi_plugins_from_zipball {
+    local XAPI_PLUGIN_DIR
+    local EXTRACTED_FILES
+    local EXTRACTED_PLUGINS_DIR
+
+    XAPI_PLUGIN_DIR=$(xapi_plugin_location)
+
+    EXTRACTED_FILES=$(extract_remote_zipball $1)
+    EXTRACTED_PLUGINS_DIR=$(find_xapi_plugins_dir $EXTRACTED_FILES)
+
+    cp -pr $EXTRACTED_PLUGINS_DIR/* $XAPI_PLUGIN_DIR
+    rm -rf $EXTRACTED_FILES
+    chmod a+x ${XAPI_PLUGIN_DIR}*
+}
diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh
index 0c0e1e2..7c3b839 100755
--- a/tools/xen/install_os_domU.sh
+++ b/tools/xen/install_os_domU.sh
@@ -28,6 +28,9 @@
 # Include onexit commands
 . $THIS_DIR/scripts/on_exit.sh
 
+# xapi functions
+. $THIS_DIR/functions
+
 
 #
 # Get Settings
@@ -43,48 +46,26 @@
   xe "$cmd" --minimal "$@"
 }
 
-
 #
 # Prepare Dom0
 # including installing XenAPI plugins
 #
 
 cd $THIS_DIR
-if [ -f ./master ]
-then
-    rm -rf ./master
-    rm -rf ./nova
-fi
 
-# get nova
-NOVA_ZIPBALL_URL=${NOVA_ZIPBALL_URL:-$(echo $NOVA_REPO | sed "s:\.git$::;s:$:/zipball/$NOVA_BRANCH:g")}
-wget -nv $NOVA_ZIPBALL_URL -O nova-zipball --no-check-certificate
-unzip -q -o nova-zipball  -d ./nova
+# Install plugins
 
-# install xapi plugins
-XAPI_PLUGIN_DIR=/etc/xapi.d/plugins/
-if [ ! -d $XAPI_PLUGIN_DIR ]; then
-    # the following is needed when using xcp-xapi
-    XAPI_PLUGIN_DIR=/usr/lib/xcp/plugins/
-fi
-cp -pr ./nova/*/plugins/xenserver/xenapi/etc/xapi.d/plugins/* $XAPI_PLUGIN_DIR
+## Nova plugins
+NOVA_ZIPBALL_URL=${NOVA_ZIPBALL_URL:-$(zip_snapshot_location $NOVA_REPO $NOVA_BRANCH)}
+install_xapi_plugins_from_zipball $NOVA_ZIPBALL_URL
 
-# Install the netwrap xapi plugin to support agent control of dom0 networking
+## Install the netwrap xapi plugin to support agent control of dom0 networking
 if [[ "$ENABLED_SERVICES" =~ "q-agt" && "$Q_PLUGIN" = "openvswitch" ]]; then
-    if [ -f ./quantum ]; then
-        rm -rf ./quantum
-    fi
-    # get quantum
-    QUANTUM_ZIPBALL_URL=${QUANTUM_ZIPBALL_URL:-$(echo $QUANTUM_REPO | sed "s:\.git$::;s:$:/zipball/$QUANTUM_BRANCH:g")}
-    wget -nv $QUANTUM_ZIPBALL_URL -O quantum-zipball --no-check-certificate
-    unzip -q -o quantum-zipball  -d ./quantum
-    cp -pr ./quantum/*/quantum/plugins/openvswitch/agent/xenapi/etc/xapi.d/plugins/* $XAPI_PLUGIN_DIR
+    QUANTUM_ZIPBALL_URL=${QUANTUM_ZIPBALL_URL:-$(zip_snapshot_location $QUANTUM_REPO $QUANTUM_BRANCH)}
+    install_xapi_plugins_from_zipball $QUANTUM_ZIPBALL_URL
 fi
 
-chmod a+x ${XAPI_PLUGIN_DIR}*
-
-mkdir -p /boot/guest
-
+create_directory_for_kernels
 
 #
 # Configure Networking
diff --git a/tools/xen/mocks b/tools/xen/mocks
new file mode 100644
index 0000000..b006558
--- /dev/null
+++ b/tools/xen/mocks
@@ -0,0 +1,59 @@
+#!/bin/bash
+
+test ! -e "$LIST_OF_ACTIONS" && {
+    echo "Mocking is not set up properly."
+    echo "LIST_OF_ACTIONS should point to an existing file."
+    exit 1
+}
+
+test ! -e "$LIST_OF_DIRECTORIES" && {
+    echo "Mocking is not set up properly."
+    echo "LIST_OF_DIRECTORIES should point to an existing file."
+    exit 1
+}
+
+function mktemp {
+    if test "${1:-}" = "-d";
+    then
+        echo "tempdir"
+    else
+        echo "tempfile"
+    fi
+}
+
+function wget {
+    echo "wget $@" >> $LIST_OF_ACTIONS
+}
+
+function mkdir {
+    if test "${1:-}" = "-p";
+    then
+        echo "$2" >> $LIST_OF_DIRECTORIES
+    fi
+}
+
+function unzip {
+    echo "Random rubbish from unzip"
+    echo "unzip $@" >> $LIST_OF_ACTIONS
+}
+
+function rm {
+    echo "rm $@" >> $LIST_OF_ACTIONS
+}
+
+function [ {
+    if test "${1:-}" = "-d";
+    then
+        echo "[ $@" >> $LIST_OF_ACTIONS
+        for directory in $(cat $LIST_OF_DIRECTORIES)
+        do
+            if test "$directory" = "$2"
+            then
+                return 0
+            fi
+        done
+        return 1
+    fi
+    echo "Mock test does not implement the requested function"
+    exit 1
+}
diff --git a/tools/xen/test_functions.sh b/tools/xen/test_functions.sh
new file mode 100755
index 0000000..6817ec3
--- /dev/null
+++ b/tools/xen/test_functions.sh
@@ -0,0 +1,134 @@
+#!/bin/bash
+
+# Tests for functions.
+#
+# The tests are sourcing the mocks file to mock out various functions. The
+# mocking-out always happens in a sub-shell, thus it does not have impact on
+# the functions defined here.
+
+# To run the tests, please run:
+#
+# ./test_functions.sh run_tests
+#
+# To only print out the discovered test functions, run:
+#
+# ./test_functions.sh
+
+. functions
+
+# Setup
+function before_each_test {
+    LIST_OF_DIRECTORIES=$(mktemp)
+    truncate -s 0 $LIST_OF_DIRECTORIES
+
+    LIST_OF_ACTIONS=$(mktemp)
+    truncate -s 0 $LIST_OF_ACTIONS
+}
+
+# Teardown
+function after_each_test {
+    rm -f $LIST_OF_DIRECTORIES
+    rm -f $LIST_OF_ACTIONS
+}
+
+# Helpers
+function given_directory_exists {
+    echo "$1" >> $LIST_OF_DIRECTORIES
+}
+
+function assert_directory_exists {
+    grep "$1" $LIST_OF_DIRECTORIES
+}
+
+function assert_previous_command_failed {
+    [ "$?" != "0" ] || exit 1
+}
+
+# Tests
+function test_plugin_directory_on_xenserver {
+    given_directory_exists "/etc/xapi.d/plugins/"
+
+    PLUGDIR=$(. mocks && xapi_plugin_location)
+
+    [ "/etc/xapi.d/plugins/" = "$PLUGDIR" ]
+}
+
+function test_plugin_directory_on_xcp {
+    given_directory_exists "/usr/lib/xcp/plugins/"
+
+    PLUGDIR=$(. mocks && xapi_plugin_location)
+
+    [ "/usr/lib/xcp/plugins/" = "$PLUGDIR" ]
+}
+
+function test_no_plugin_directory_found {
+    set +e
+
+    local IGNORE
+    IGNORE=$(. mocks && xapi_plugin_location)
+
+    assert_previous_command_failed
+
+    grep "[ -d /etc/xapi.d/plugins/ ]" $LIST_OF_ACTIONS
+    grep "[ -d /usr/lib/xcp/plugins/ ]" $LIST_OF_ACTIONS
+}
+
+function test_zip_snapshot_location {
+    diff \
+    <(zip_snapshot_location "https://github.com/openstack/nova.git" "master") \
+    <(echo "https://github.com/openstack/nova/zipball/master")
+}
+
+function test_create_directory_for_kernels {
+    (. mocks && create_directory_for_kernels)
+
+    assert_directory_exists "/boot/guest"
+}
+
+function test_extract_remote_zipball {
+    local RESULT=$(. mocks && extract_remote_zipball "someurl")
+
+    diff <(cat $LIST_OF_ACTIONS) - << EOF
+wget -nv someurl -O tempfile --no-check-certificate
+unzip -q -o tempfile -d tempdir
+rm -f tempfile
+EOF
+
+    [ "$RESULT" = "tempdir" ]
+}
+
+function test_find_nova_plugins {
+    local tmpdir=$(mktemp -d)
+
+    mkdir -p "$tmpdir/blah/blah/u/xapi.d/plugins"
+
+    [ "$tmpdir/blah/blah/u/xapi.d/plugins" = $(find_xapi_plugins_dir $tmpdir) ]
+
+    rm -rf $tmpdir
+}
+
+# Test runner
+[ "$1" = "" ] && {
+    grep -e "^function *test_" $0 | cut -d" " -f2
+}
+
+[ "$1" = "run_tests" ] && {
+    for testname in $($0)
+    do
+        echo "$testname"
+        before_each_test
+        (
+            set -eux
+            $testname
+        )
+        if [ "$?" != "0" ]
+        then
+            echo "FAIL"
+            exit 1
+        else
+            echo "PASS"
+        fi
+
+        after_each_test
+    done
+}