Merge "Change default PUBLIC_NETWORK_NAME/DEFAULT_FLOATING_POOL name"
diff --git a/files/apts/n-api b/files/apts/n-api
index ad943ff..0f08daa 100644
--- a/files/apts/n-api
+++ b/files/apts/n-api
@@ -1,2 +1 @@
-gcc  # temporary because this pulls in glance to get the client without running the glance prereqs
 python-dateutil
diff --git a/files/apts/trema b/files/apts/trema
new file mode 100644
index 0000000..e33ccd3
--- /dev/null
+++ b/files/apts/trema
@@ -0,0 +1,15 @@
+# Trema
+gcc
+make
+ruby1.8
+rubygems1.8
+ruby1.8-dev
+libpcap-dev
+libsqlite3-dev
+
+# Sliceable Switch
+sqlite3
+libdbi-perl
+libdbd-sqlite3-perl
+apache2
+libjson-perl
diff --git a/files/horizon_settings.py b/files/horizon_settings.py
deleted file mode 100644
index ce92e2c..0000000
--- a/files/horizon_settings.py
+++ /dev/null
@@ -1,169 +0,0 @@
-import os
-
-from django.utils.translation import ugettext_lazy as _
-
-DEBUG = True
-TEMPLATE_DEBUG = DEBUG
-PROD = False
-USE_SSL = False
-
-# Set SSL proxy settings:
-# For Django 1.4+ pass this header from the proxy after terminating the SSL,
-# and don't forget to strip it from the client's request.
-# For more information see:
-# https://docs.djangoproject.com/en/1.4/ref/settings/#secure-proxy-ssl-header
-# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
-
-# Specify a regular expression to validate user passwords.
-# HORIZON_CONFIG = {
-#     "password_validator": {
-#         "regex": '.*',
-#         "help_text": _("Your password does not meet the requirements.")
-#     },
-#    'help_url': "http://docs.openstack.org"
-# }
-
-LOCAL_PATH = os.path.dirname(os.path.abspath(__file__))
-
-# FIXME: We need to change this to mysql, instead of sqlite.
-DATABASES = {
-    'default': {
-        'ENGINE': 'django.db.backends.sqlite3',
-        'NAME': os.path.join(LOCAL_PATH, 'dashboard_openstack.sqlite3'),
-        'TEST_NAME': os.path.join(LOCAL_PATH, 'test.sqlite3'),
-    },
-}
-
-# Set custom secret key:
-# You can either set it to a specific value or you can let horizion generate a
-# default secret key that is unique on this machine, e.i. regardless of the
-# amount of Python WSGI workers (if used behind Apache+mod_wsgi): However, there
-# may be situations where you would want to set this explicitly, e.g. when
-# multiple dashboard instances are distributed on different machines (usually
-# behind a load-balancer). Either you have to make sure that a session gets all
-# requests routed to the same dashboard instance or you set the same SECRET_KEY
-# for all of them.
-from horizon.utils import secret_key
-SECRET_KEY = secret_key.generate_or_read_from_file(os.path.join(LOCAL_PATH, '.secret_key_store'))
-
-# We recommend you use memcached for development; otherwise after every reload
-# of the django development server, you will have to login again. To use
-# memcached set CACHE_BACKED to something like 'memcached://127.0.0.1:11211/'
-CACHE_BACKEND = 'dummy://'
-SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'
-
-# Send email to the console by default
-EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
-# Or send them to /dev/null
-#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
-
-# django-mailer uses a different settings attribute
-MAILER_EMAIL_BACKEND = EMAIL_BACKEND
-
-# Configure these for your outgoing email host
-# EMAIL_HOST = 'smtp.my-company.com'
-# EMAIL_PORT = 25
-# EMAIL_HOST_USER = 'djangomail'
-# EMAIL_HOST_PASSWORD = 'top-secret!'
-
-# For multiple regions uncomment this configuration, and add (endpoint, title).
-# AVAILABLE_REGIONS = [
-#     ('http://cluster1.example.com:5000/v2.0', 'cluster1'),
-#     ('http://cluster2.example.com:5000/v2.0', 'cluster2'),
-# ]
-
-OPENSTACK_HOST = "127.0.0.1"
-OPENSTACK_KEYSTONE_URL = "http://%s:5000/v2.0" % OPENSTACK_HOST
-OPENSTACK_KEYSTONE_DEFAULT_ROLE = "Member"
-
-# Disable SSL certificate checks (useful for self-signed certificates):
-# OPENSTACK_SSL_NO_VERIFY = True
-
-HORIZON_CONFIG = {
-    'dashboards': ('project', 'admin', 'settings',),
-    'default_dashboard': 'project',
-}
-
-# The OPENSTACK_KEYSTONE_BACKEND settings can be used to identify the
-# capabilities of the auth backend for Keystone.
-# If Keystone has been configured to use LDAP as the auth backend then set
-# can_edit_user to False and name to 'ldap'.
-#
-# TODO(tres): Remove these once Keystone has an API to identify auth backend.
-OPENSTACK_KEYSTONE_BACKEND = {
-    'name': 'native',
-    'can_edit_user': True
-}
-
-OPENSTACK_HYPERVISOR_FEATURES = {
-    'can_set_mount_point': True
-}
-
-# OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints
-# in the Keystone service catalog. Use this setting when Horizon is running
-# external to the OpenStack environment. The default is 'internalURL'.
-#OPENSTACK_ENDPOINT_TYPE = "publicURL"
-
-# The number of objects (Swift containers/objects or images) to display
-# on a single page before providing a paging element (a "more" link)
-# to paginate results.
-API_RESULT_LIMIT = 1000
-API_RESULT_PAGE_SIZE = 20
-
-SWIFT_PAGINATE_LIMIT = 100
-
-# The timezone of the server. This should correspond with the timezone
-# of your entire OpenStack installation, and hopefully be in UTC.
-TIME_ZONE = "UTC"
-
-#LOGGING = {
-#        'version': 1,
-#        # When set to True this will disable all logging except
-#        # for loggers specified in this configuration dictionary. Note that
-#        # if nothing is specified here and disable_existing_loggers is True,
-#        # django.db.backends will still log unless it is disabled explicitly.
-#        'disable_existing_loggers': False,
-#        'handlers': {
-#            'null': {
-#                'level': 'DEBUG',
-#                'class': 'django.utils.log.NullHandler',
-#                },
-#            'console': {
-#                # Set the level to "DEBUG" for verbose output logging.
-#                'level': 'INFO',
-#                'class': 'logging.StreamHandler',
-#                },
-#            },
-#        'loggers': {
-#            # Logging from django.db.backends is VERY verbose, send to null
-#            # by default.
-#            'django.db.backends': {
-#                'handlers': ['null'],
-#                'propagate': False,
-#                },
-#            'horizon': {
-#                'handlers': ['console'],
-#                'propagate': False,
-#            },
-#            'openstack_dashboard': {
-#                'handlers': ['console'],
-#                'propagate': False,
-#            },
-#            'novaclient': {
-#                'handlers': ['console'],
-#                'propagate': False,
-#            },
-#            'keystoneclient': {
-#                'handlers': ['console'],
-#                'propagate': False,
-#            },
-#            'glanceclient': {
-#                'handlers': ['console'],
-#                'propagate': False,
-#            },
-#            'nose.plugins.manager': {
-#                'handlers': ['console'],
-#                'propagate': False,
-#            }
-#        }
-#}
diff --git a/functions b/functions
index fe50547..edc4bf9 100644
--- a/functions
+++ b/functions
@@ -781,9 +781,9 @@
     if is_ubuntu; then
         apt_get purge "$@"
     elif is_fedora; then
-        yum remove -y "$@"
+        sudo yum remove -y "$@"
     elif is_suse; then
-        rpm -e "$@"
+        sudo rpm -e "$@"
     else
         exit_distro_not_supported "uninstalling packages"
     fi
@@ -1148,6 +1148,12 @@
             DISK_FORMAT=qcow2
             CONTAINER_FORMAT=bare
             ;;
+        *.iso)
+            IMAGE="$FILES/${IMAGE_FNAME}"
+            IMAGE_NAME=$(basename "$IMAGE" ".iso")
+            DISK_FORMAT=iso
+            CONTAINER_FORMAT=bare
+            ;;
         *) echo "Do not know what to do with $IMAGE_FNAME"; false;;
     esac
 
@@ -1350,5 +1356,5 @@
 
 
 # Local variables:
-# -*- mode: Shell-script -*-
+# mode: shell-script
 # End:
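
The new *.iso branch above slots into the existing extension dispatch that maps an image file name onto Glance disk/container formats. An abridged standalone sketch of that dispatch (illustrative only; IMAGE_FNAME, DISK_FORMAT and CONTAINER_FORMAT are the same variables used by the surrounding function):

    case "$IMAGE_FNAME" in
        *.qcow2) DISK_FORMAT=qcow2; CONTAINER_FORMAT=bare ;;
        *.iso)   DISK_FORMAT=iso;   CONTAINER_FORMAT=bare ;;
        *) echo "Do not know what to do with $IMAGE_FNAME"; false ;;
    esac
    # e.g. IMAGE_FNAME=freedos.iso  ->  DISK_FORMAT=iso, CONTAINER_FORMAT=bare
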
diff --git a/lib/baremetal b/lib/baremetal
index 5326dd1..24cce9f 100644
--- a/lib/baremetal
+++ b/lib/baremetal
@@ -434,3 +434,7 @@
 
 # Restore xtrace
 $XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/ceilometer b/lib/ceilometer
index d90694c..58cafd1 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -138,3 +138,7 @@
 
 # Restore xtrace
 $XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/cinder b/lib/cinder
index 7688ad9..deace68 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -123,9 +123,6 @@
 
 # configure_cinder() - Set config files, create data dirs, etc
 function configure_cinder() {
-    setup_develop $CINDER_DIR
-    setup_develop $CINDERCLIENT_DIR
-
     if [[ ! -d $CINDER_CONF_DIR ]]; then
         sudo mkdir -p $CINDER_CONF_DIR
     fi
@@ -367,7 +364,13 @@
 # install_cinder() - Collect source and prepare
 function install_cinder() {
     git_clone $CINDER_REPO $CINDER_DIR $CINDER_BRANCH
+    setup_develop $CINDER_DIR
+}
+
+# install_cinderclient() - Collect source and prepare
+function install_cinderclient() {
     git_clone $CINDERCLIENT_REPO $CINDERCLIENT_DIR $CINDERCLIENT_BRANCH
+    setup_develop $CINDERCLIENT_DIR
 }
 
 # apply config.d approach (e.g. Oneiric does not have this)
@@ -431,3 +434,7 @@
 
 # Restore xtrace
 $XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/database b/lib/database
index 79b77a2..cbe886f 100644
--- a/lib/database
+++ b/lib/database
@@ -114,3 +114,7 @@
 
 # Restore xtrace
 $XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/databases/mysql b/lib/databases/mysql
index 0633ab0..30450b1 100644
--- a/lib/databases/mysql
+++ b/lib/databases/mysql
@@ -139,3 +139,7 @@
 
 # Restore xtrace
 $MY_XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/databases/postgresql b/lib/databases/postgresql
index efc206f..b64de2c 100644
--- a/lib/databases/postgresql
+++ b/lib/databases/postgresql
@@ -90,3 +90,7 @@
 
 # Restore xtrace
 $PG_XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/glance b/lib/glance
index edf6982..3376400 100644
--- a/lib/glance
+++ b/lib/glance
@@ -62,15 +62,8 @@
     sudo rm -rf $GLANCE_CACHE_DIR $GLANCE_IMAGE_DIR $GLANCE_AUTH_CACHE_DIR
 }
 
-# configure_glanceclient() - Set config files, create data dirs, etc
-function configure_glanceclient() {
-    setup_develop $GLANCECLIENT_DIR
-}
-
 # configure_glance() - Set config files, create data dirs, etc
 function configure_glance() {
-    setup_develop $GLANCE_DIR
-
     if [[ ! -d $GLANCE_CONF_DIR ]]; then
         sudo mkdir -p $GLANCE_CONF_DIR
     fi
@@ -116,6 +109,15 @@
     iniset_rpc_backend glance $GLANCE_API_CONF DEFAULT
     iniset $GLANCE_API_CONF keystone_authtoken signing_dir $GLANCE_AUTH_CACHE_DIR/api
 
+    # Store the images in swift if enabled.
+    if is_service_enabled s-proxy; then
+        iniset $GLANCE_API_CONF DEFAULT default_store swift
+        iniset $GLANCE_API_CONF DEFAULT swift_store_auth_address $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/
+        iniset $GLANCE_API_CONF DEFAULT swift_store_user $SERVICE_TENANT_NAME:glance
+        iniset $GLANCE_API_CONF DEFAULT swift_store_key $SERVICE_PASSWORD
+        iniset $GLANCE_API_CONF DEFAULT swift_store_create_container_on_put True
+    fi
+
     cp -p $GLANCE_DIR/etc/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI
 
     cp -p $GLANCE_DIR/etc/glance-api-paste.ini $GLANCE_API_PASTE_INI
@@ -171,11 +173,13 @@
 # install_glanceclient() - Collect source and prepare
 function install_glanceclient() {
     git_clone $GLANCECLIENT_REPO $GLANCECLIENT_DIR $GLANCECLIENT_BRANCH
+    setup_develop $GLANCECLIENT_DIR
 }
 
 # install_glance() - Collect source and prepare
 function install_glance() {
     git_clone $GLANCE_REPO $GLANCE_DIR $GLANCE_BRANCH
+    setup_develop $GLANCE_DIR
 }
 
 # start_glance() - Start running processes, including screen
@@ -197,3 +201,7 @@
 
 # Restore xtrace
 $XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
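
With the Swift store setup now living in configure_glance(), enabling the s-proxy service makes the iniset calls above produce a glance-api.conf [DEFAULT] section along these lines (a sketch; the actual values come from the local KEYSTONE_SERVICE_* and SERVICE_* settings, and SERVICE_TENANT_NAME is typically "service"):

    default_store = swift
    swift_store_auth_address = http://127.0.0.1:5000/v2.0/
    swift_store_user = service:glance
    swift_store_key = <SERVICE_PASSWORD>
    swift_store_create_container_on_put = True
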
diff --git a/lib/heat b/lib/heat
index 56d6f39..88535c3 100644
--- a/lib/heat
+++ b/lib/heat
@@ -184,3 +184,7 @@
 
 # Restore xtrace
 $XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/horizon b/lib/horizon
index 9c96b58..b63e1f8 100644
--- a/lib/horizon
+++ b/lib/horizon
@@ -29,6 +29,10 @@
 # Set up default directories
 HORIZON_DIR=$DEST/horizon
 
+# local_settings.py is used to customize Dashboard settings.
+# The example file in the Horizon repo is used by default.
+HORIZON_SETTINGS=${HORIZON_SETTINGS:-$HORIZON_DIR/openstack_dashboard/local/local_settings.py.example}
+
 # Allow overriding the default Apache user and group, default to
 # current user and his default group.
 APACHE_USER=${APACHE_USER:-$USER}
@@ -77,7 +81,7 @@
 
     # ``local_settings.py`` is used to override horizon default settings.
     local_settings=$HORIZON_DIR/openstack_dashboard/local/local_settings.py
-    cp $FILES/horizon_settings.py $local_settings
+    cp $HORIZON_SETTINGS $local_settings
 
     # enable loadbalancer dashboard in case service is enabled
     if is_service_enabled q-lbaas; then
@@ -174,3 +178,7 @@
 
 # Restore xtrace
 $XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
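
Since HORIZON_SETTINGS is now a regular override variable, a deployer can point DevStack at a custom Dashboard settings file instead of the Horizon example settings before lib/horizon is sourced, e.g. in localrc (the path below is illustrative):

    HORIZON_SETTINGS=$HOME/devstack-local/my_local_settings.py
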
diff --git a/lib/keystone b/lib/keystone
index 805cb6f..0fbc7d7 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -75,15 +75,8 @@
     :
 }
 
-# configure_keystoneclient() - Set config files, create data dirs, etc
-function configure_keystoneclient() {
-    setup_develop $KEYSTONECLIENT_DIR
-}
-
 # configure_keystone() - Set config files, create data dirs, etc
 function configure_keystone() {
-    setup_develop $KEYSTONE_DIR
-
     if [[ ! -d $KEYSTONE_CONF_DIR ]]; then
         sudo mkdir -p $KEYSTONE_CONF_DIR
     fi
@@ -305,6 +298,7 @@
 # install_keystoneclient() - Collect source and prepare
 function install_keystoneclient() {
     git_clone $KEYSTONECLIENT_REPO $KEYSTONECLIENT_DIR $KEYSTONECLIENT_BRANCH
+    setup_develop $KEYSTONECLIENT_DIR
 }
 
 # install_keystone() - Collect source and prepare
@@ -314,6 +308,7 @@
         install_ldap
     fi
     git_clone $KEYSTONE_REPO $KEYSTONE_DIR $KEYSTONE_BRANCH
+    setup_develop $KEYSTONE_DIR
 }
 
 # start_keystone() - Start running processes, including screen
@@ -346,3 +341,7 @@
 
 # Restore xtrace
 $XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/ldap b/lib/ldap
index 9d415c5..53f6837 100644
--- a/lib/ldap
+++ b/lib/ldap
@@ -76,3 +76,7 @@
 
 # Restore xtrace
 $XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/nova b/lib/nova
index 4449f81..8d045b5 100644
--- a/lib/nova
+++ b/lib/nova
@@ -156,8 +156,11 @@
         fi
 
         # Logout and delete iscsi sessions
-        sudo iscsiadm --mode node | grep $VOLUME_NAME_PREFIX | cut -d " " -f2 | xargs sudo iscsiadm --mode node --logout || true
-        sudo iscsiadm --mode node | grep $VOLUME_NAME_PREFIX | cut -d " " -f2 | sudo iscsiadm --mode node --op delete || true
+        tgts=$(sudo iscsiadm --mode node | grep $VOLUME_NAME_PREFIX | cut -d ' ' -f2)
+        for target in $tgts; do
+            sudo iscsiadm --mode node -T $target --logout || true
+        done
+        sudo iscsiadm --mode node --op delete || true
 
         # Clean out the instances directory.
         sudo rm -rf $NOVA_INSTANCES_PATH/*
@@ -166,11 +169,6 @@
     sudo rm -rf $NOVA_STATE_PATH $NOVA_AUTH_CACHE_DIR
 }
 
-# configure_novaclient() - Set config files, create data dirs, etc
-function configure_novaclient() {
-    setup_develop $NOVACLIENT_DIR
-}
-
 # configure_nova_rootwrap() - configure Nova's rootwrap
 function configure_nova_rootwrap() {
     # Deploy new rootwrap filters files (owned by root).
@@ -201,8 +199,6 @@
 
 # configure_nova() - Set config files, create data dirs, etc
 function configure_nova() {
-    setup_develop $NOVA_DIR
-
     # Put config files in ``/etc/nova`` for everyone to find
     if [[ ! -d $NOVA_CONF_DIR ]]; then
         sudo mkdir -p $NOVA_CONF_DIR
@@ -594,6 +590,7 @@
 # install_novaclient() - Collect source and prepare
 function install_novaclient() {
     git_clone $NOVACLIENT_REPO $NOVACLIENT_DIR $NOVACLIENT_BRANCH
+    setup_develop $NOVACLIENT_DIR
 }
 
 # install_nova() - Collect source and prepare
@@ -624,6 +621,7 @@
     fi
 
     git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH
+    setup_develop $NOVA_DIR
 }
 
 # start_nova_api() - Start the API process ahead of other things
@@ -660,6 +658,11 @@
     screen_it n-xvnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-xvpvncproxy --config-file $NOVA_CONF"
     screen_it n-spice "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $NOVA_CONF --web $SPICE_DIR"
     screen_it n-cauth "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-consoleauth"
+
+    # Start nova-objectstore only if the swift3 service is not enabled;
+    # Swift will act as the S3 objectstore.
+    is_service_enabled swift3 || \
+        screen_it n-obj "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-objectstore"
 }
 
 # stop_nova() - Stop running processes (non-screen)
@@ -672,3 +675,7 @@
 
 # Restore xtrace
 $XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
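
For context on the iSCSI cleanup rewrite above: `iscsiadm --mode node` prints one record per line in the form `<portal>,<tpgt> <target-iqn>`, so `cut -d ' ' -f2` isolates the target IQN that is then passed to `--logout` one target at a time. Illustrative output:

    $ sudo iscsiadm --mode node
    192.168.1.10:3260,1 iqn.2010-10.org.openstack:volume-00000001
    # -> tgts="iqn.2010-10.org.openstack:volume-00000001"
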
diff --git a/lib/quantum b/lib/quantum
index efdd43d..9ad1538 100644
--- a/lib/quantum
+++ b/lib/quantum
@@ -179,7 +179,7 @@
 # Agent loadbalancer service plugin functions
 # -------------------------------------------
 # Hardcoding for 1 service plugin for now
-source $TOP_DIR/lib/quantum_plugins/agent_loadbalancer
+source $TOP_DIR/lib/quantum_plugins/services/agent_loadbalancer
 
 # Use security group or not
 if has_quantum_plugin_security_group; then
@@ -761,3 +761,7 @@
 
 # Restore xtrace
 $XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/quantum_plugins/nec b/lib/quantum_plugins/nec
new file mode 100644
index 0000000..f61f50b
--- /dev/null
+++ b/lib/quantum_plugins/nec
@@ -0,0 +1,122 @@
+# Quantum NEC OpenFlow plugin
+# ---------------------------
+
+# Save trace setting
+MY_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+# Configuration parameters
+OFC_HOST=${OFC_HOST:-127.0.0.1}
+OFC_PORT=${OFC_PORT:-8888}
+
+OFC_API_HOST=${OFC_API_HOST:-$OFC_HOST}
+OFC_API_PORT=${OFC_API_PORT:-$OFC_PORT}
+OFC_OFP_HOST=${OFC_OFP_HOST:-$OFC_HOST}
+OFC_OFP_PORT=${OFC_OFP_PORT:-6633}
+OFC_DRIVER=${OFC_DRIVER:-trema}
+OFC_RETRY_MAX=${OFC_RETRY_MAX:-0}
+OFC_RETRY_INTERVAL=${OFC_RETRY_INTERVAL:-1}
+
+OVS_BRIDGE=${OVS_BRIDGE:-br-int}
+
+# Main logic
+# ---------------------------
+
+source $TOP_DIR/lib/quantum_plugins/ovs_base
+
+function quantum_plugin_create_nova_conf() {
+    _quantum_ovs_base_configure_nova_vif_driver
+}
+
+function quantum_plugin_install_agent_packages() {
+    # SKIP_OVS_INSTALL is useful when we want to use Open vSwitch whose
+    # version is different from the version provided by the distribution.
+    if [[ "$SKIP_OVS_INSTALL" = "True" ]]; then
+        echo "You need to install Open vSwitch manually."
+        return
+    fi
+    _quantum_ovs_base_install_agent_packages
+}
+
+function quantum_plugin_configure_common() {
+    Q_PLUGIN_CONF_PATH=etc/quantum/plugins/nec
+    Q_PLUGIN_CONF_FILENAME=nec.ini
+    Q_DB_NAME="quantum_nec"
+    Q_PLUGIN_CLASS="quantum.plugins.nec.nec_plugin.NECPluginV2"
+}
+
+function quantum_plugin_configure_debug_command() {
+    _quantum_ovs_base_configure_debug_command
+}
+
+function quantum_plugin_configure_dhcp_agent() {
+    :
+}
+
+function quantum_plugin_configure_l3_agent() {
+    _quantum_ovs_base_configure_l3_agent
+}
+
+function quantum_plugin_configure_plugin_agent() {
+    if [[ "$SKIP_OVS_BRIDGE_SETUP" = "True" ]]; then
+        return
+    fi
+    # Set up integration bridge
+    _quantum_ovs_base_setup_bridge $OVS_BRIDGE
+    sudo ovs-vsctl --no-wait set-controller $OVS_BRIDGE tcp:$OFC_OFP_HOST:$OFC_OFP_PORT
+    # Generate datapath ID from HOST_IP
+    local dpid=$(printf "0x%07d%03d%03d%03d\n" ${HOST_IP//./ })
+    sudo ovs-vsctl --no-wait set Bridge $OVS_BRIDGE other-config:datapath-id=$dpid
+    sudo ovs-vsctl --no-wait set-fail-mode $OVS_BRIDGE secure
+    if [ -n "$OVS_INTERFACE" ]; then
+        sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_BRIDGE $OVS_INTERFACE
+    fi
+    _quantum_setup_ovs_tunnels $OVS_BRIDGE
+    AGENT_BINARY="$QUANTUM_DIR/bin/quantum-nec-agent"
+
+    _quantum_ovs_base_configure_firewall_driver
+}
+
+function quantum_plugin_configure_service() {
+    iniset $QUANTUM_CONF DEFAULT api_extensions_path quantum/plugins/nec/extensions/
+    iniset /$Q_PLUGIN_CONF_FILE OFC host $OFC_API_HOST
+    iniset /$Q_PLUGIN_CONF_FILE OFC port $OFC_API_PORT
+    iniset /$Q_PLUGIN_CONF_FILE OFC driver $OFC_DRIVER
+    iniset /$Q_PLUGIN_CONF_FILE OFC api_retry_max $OFC_RETRY_MAX
+    iniset /$Q_PLUGIN_CONF_FILE OFC api_retry_interval $OFC_RETRY_INTERVAL
+}
+
+function quantum_plugin_setup_interface_driver() {
+    local conf_file=$1
+    iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver
+    iniset $conf_file DEFAULT ovs_use_veth True
+}
+
+# Utility functions
+# ---------------------------
+
+# Setup OVS tunnel manually
+function _quantum_setup_ovs_tunnels() {
+    local bridge=$1
+    local id=0
+    GRE_LOCAL_IP=${GRE_LOCAL_IP:-$HOST_IP}
+    if [ -n "$GRE_REMOTE_IPS" ]; then
+         for ip in ${GRE_REMOTE_IPS//:/ }
+         do
+             if [[ "$ip" == "$GRE_LOCAL_IP" ]]; then
+                 continue
+             fi
+             sudo ovs-vsctl --no-wait add-port $bridge gre$id -- \
+                 set Interface gre$id type=gre options:remote_ip=$ip
+             id=`expr $id + 1`
+         done
+    fi
+}
+
+function has_quantum_plugin_security_group() {
+    # 0 means True here
+    return 0
+}
+
+# Restore xtrace
+$MY_XTRACE
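
The datapath ID derived in quantum_plugin_configure_plugin_agent() simply packs the four octets of HOST_IP into a fixed-width decimal string, for example:

    HOST_IP=192.168.10.1
    printf "0x%07d%03d%03d%03d\n" ${HOST_IP//./ }
    # -> 0x0000192168010001
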
diff --git a/lib/quantum_plugins/agent_loadbalancer b/lib/quantum_plugins/services/agent_loadbalancer
similarity index 60%
rename from lib/quantum_plugins/agent_loadbalancer
rename to lib/quantum_plugins/services/agent_loadbalancer
index 87e7aaa..b6528b0 100644
--- a/lib/quantum_plugins/agent_loadbalancer
+++ b/lib/quantum_plugins/services/agent_loadbalancer
@@ -7,6 +7,7 @@
 
 
 AGENT_LBAAS_BINARY="$QUANTUM_DIR/bin/quantum-lbaas-agent"
+AGENT_LBAAS_PLUGIN=quantum.plugins.services.agent_loadbalancer.plugin.LoadBalancerPlugin
 
 function quantum_agent_lbaas_install_agent_packages() {
     if is_ubuntu || is_fedora; then
@@ -19,9 +20,9 @@
 
 function quantum_agent_lbaas_configure_common() {
     if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then
-        Q_SERVICE_PLUGIN_CLASSES="quantum.plugins.services.agent_loadbalancer.plugin.LoadBalancerPlugin"
+        Q_SERVICE_PLUGIN_CLASSES=$AGENT_LBAAS_PLUGIN
     else
-        Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,quantum.plugins.services.agent_loadbalancer.plugin.LoadBalancerPlugin"
+        Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$AGENT_LBAAS_PLUGIN"
     fi
 }
 
@@ -31,13 +32,9 @@
 
     LBAAS_AGENT_CONF_FILENAME="$LBAAS_AGENT_CONF_PATH/lbaas_agent.ini"
 
-    cp $QUANTUM_DIR/etc/lbaas_agent.ini /$LBAAS_AGENT_CONF_FILENAME
+    cp $QUANTUM_DIR/etc/lbaas_agent.ini $LBAAS_AGENT_CONF_FILENAME
 
-    if [[ $Q_PLUGIN == 'linuxbridge' || $Q_PLUGIN == 'brocade' ]]; then
-        iniset $LBAAS_AGENT_CONF_FILENAME DEFAULT interface_driver "quantum.agent.linux.interface.BridgeInterfaceDriver"
-    else
-        iniset $LBAAS_AGENT_CONF_FILENAME DEFAULT interface_driver "quantum.agent.linux.interface.OVSInterfaceDriver"
-    fi
+    quantum_plugin_setup_interface_driver $LBAAS_AGENT_CONF_FILENAME
 
     if is_fedora; then
         iniset $LBAAS_AGENT_CONF_FILENAME DEFAULT user_group "nobody"
diff --git a/lib/quantum_thirdparty/trema b/lib/quantum_thirdparty/trema
new file mode 100644
index 0000000..09dc46b
--- /dev/null
+++ b/lib/quantum_thirdparty/trema
@@ -0,0 +1,113 @@
+# Trema Sliceable Switch
+# ----------------------
+
+# Trema is a Full-Stack OpenFlow Framework in Ruby and C
+# https://github.com/trema/trema
+#
+# Trema Sliceable Switch is an OpenFlow controller which provides
+# virtual layer-2 network slices.
+# https://github.com/trema/apps/wiki
+
+# Trema Sliceable Switch (OpenFlow Controller)
+TREMA_APPS_REPO=${TREMA_APPS_REPO:-https://github.com/trema/apps.git}
+TREMA_APPS_BRANCH=${TREMA_APPS_BRANCH:-master}
+
+# Save trace setting
+MY_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+TREMA_DIR=${TREMA_DIR:-$DEST/trema}
+TREMA_SS_DIR="$TREMA_DIR/apps/sliceable_switch"
+
+TREMA_DATA_DIR=${TREMA_DATA_DIR:-$DATA_DIR/trema}
+TREMA_SS_ETC_DIR=$TREMA_DATA_DIR/sliceable_switch/etc
+TREMA_SS_DB_DIR=$TREMA_DATA_DIR/sliceable_switch/db
+TREMA_SS_SCRIPT_DIR=$TREMA_DATA_DIR/sliceable_switch/script
+TREMA_TMP_DIR=$TREMA_DATA_DIR/trema
+
+TREMA_LOG_LEVEL=${TREMA_LOG_LEVEL:-info}
+
+TREMA_SS_CONFIG=$TREMA_SS_ETC_DIR/sliceable.conf
+TREMA_SS_APACHE_CONFIG=/etc/apache2/sites-available/sliceable_switch
+
+# configure_trema - Set config files, create data dirs, etc
+function configure_trema() {
+    # prepare dir
+    for d in $TREMA_SS_ETC_DIR $TREMA_SS_DB_DIR $TREMA_SS_SCRIPT_DIR; do
+        sudo mkdir -p $d
+        sudo chown -R `whoami` $d
+    done
+    sudo mkdir -p $TREMA_TMP_DIR
+}
+
+# init_trema - Initialize databases, etc.
+function init_trema() {
+    local _pwd=$(pwd)
+
+    # Initialize databases for Sliceable Switch
+    cd $TREMA_SS_DIR
+    rm -f filter.db slice.db
+    ./create_tables.sh
+    mv filter.db slice.db $TREMA_SS_DB_DIR
+    # Make sure that apache cgi has write access to the databases
+    sudo chown -R www-data.www-data $TREMA_SS_DB_DIR
+    cd $_pwd
+
+    # Setup HTTP Server for sliceable_switch
+    cp $TREMA_SS_DIR/{Slice.pm,Filter.pm,config.cgi} $TREMA_SS_SCRIPT_DIR
+    sed -i -e "s|/home/sliceable_switch/db|$TREMA_SS_DB_DIR|" \
+        $TREMA_SS_SCRIPT_DIR/config.cgi
+
+    sudo cp $TREMA_SS_DIR/apache/sliceable_switch $TREMA_SS_APACHE_CONFIG
+    sudo sed -i -e "s|/home/sliceable_switch/script|$TREMA_SS_SCRIPT_DIR|" \
+        $TREMA_SS_APACHE_CONFIG
+    sudo a2enmod rewrite actions
+    sudo a2ensite sliceable_switch
+
+    cp $TREMA_SS_DIR/sliceable_switch_null.conf $TREMA_SS_CONFIG
+    sed -i -e "s|^\$apps_dir.*$|\$apps_dir = \"$TREMA_DIR/apps\"|" \
+           -e "s|^\$db_dir.*$|\$db_dir = \"$TREMA_SS_DB_DIR\"|" \
+           $TREMA_SS_CONFIG
+}
+
+function gem_install() {
+    [[ "$OFFLINE" = "True" ]] && return
+    [ -n "$RUBYGEMS_CMD" ] || get_gem_command
+
+    local pkg=$1
+    $RUBYGEMS_CMD list | grep "^${pkg} " && return
+    sudo $RUBYGEMS_CMD install $pkg
+}
+
+function get_gem_command() {
+    # Trema requires ruby 1.8, so gem1.8 is checked first
+    RUBYGEMS_CMD=$(which gem1.8 || which gem)
+    if [ -z "$RUBYGEMS_CMD" ]; then
+        echo "Warning: ruby gems command not found."
+    fi
+}
+
+function install_trema() {
+    # Trema
+    gem_install trema
+    # Sliceable Switch
+    git_clone $TREMA_APPS_REPO $TREMA_DIR/apps $TREMA_APPS_BRANCH
+    make -C $TREMA_DIR/apps/topology
+    make -C $TREMA_DIR/apps/flow_manager
+    make -C $TREMA_DIR/apps/sliceable_switch
+}
+
+function start_trema() {
+    # APACHE_NAME is defined in init_horizon (in lib/horizon)
+    restart_service $APACHE_NAME
+
+    sudo LOGGING_LEVEL=$TREMA_LOG_LEVEL TREMA_TMP=$TREMA_TMP_DIR \
+        trema run -d -c $TREMA_SS_CONFIG
+}
+
+function stop_trema() {
+    sudo TREMA_TMP=$TREMA_TMP_DIR trema killall
+}
+
+# Restore xtrace
+$MY_XTRACE
diff --git a/lib/rpc_backend b/lib/rpc_backend
index bbd51f0..d08cb01 100644
--- a/lib/rpc_backend
+++ b/lib/rpc_backend
@@ -159,3 +159,7 @@
 
 # Restore xtrace
 $XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/swift b/lib/swift
index d50b554..eb57477 100644
--- a/lib/swift
+++ b/lib/swift
@@ -79,9 +79,9 @@
 # Port bases used in port number calculation for the service "nodes"
 # The specified port number will be used, the additional ports calculated by
 # base_port + node_num * 10
-OBJECT_PORT_BASE=6013
-CONTAINER_PORT_BASE=6011
-ACCOUNT_PORT_BASE=6012
+OBJECT_PORT_BASE=${OBJECT_PORT_BASE:-6013}
+CONTAINER_PORT_BASE=${CONTAINER_PORT_BASE:-6011}
+ACCOUNT_PORT_BASE=${ACCOUNT_PORT_BASE:-6012}
 
 
 # Entry Points
@@ -106,59 +106,11 @@
     local swift_node_config
     local swift_log_dir
 
-    setup_develop $SWIFT_DIR
-
     # Make sure to kill all swift processes first
     swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true
 
-    # First do a bit of setup by creating the directories and
-    # changing the permissions so we can run it as our user.
-
-    USER_GROUP=$(id -g)
-    sudo mkdir -p ${SWIFT_DATA_DIR}/{drives,cache,run,logs}
-    sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_DIR}
-
-    # Create a loopback disk and format it to XFS.
-    if [[ -e ${SWIFT_DATA_DIR}/drives/images/swift.img ]]; then
-        if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then
-            sudo umount ${SWIFT_DATA_DIR}/drives/sdb1
-            sudo rm -f ${SWIFT_DATA_DIR}/drives/images/swift.img
-        fi
-    fi
-
-    mkdir -p  ${SWIFT_DATA_DIR}/drives/images
-    sudo touch  ${SWIFT_DATA_DIR}/drives/images/swift.img
-    sudo chown $USER: ${SWIFT_DATA_DIR}/drives/images/swift.img
-
-    dd if=/dev/zero of=${SWIFT_DATA_DIR}/drives/images/swift.img \
-        bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE}
-
-    # Make a fresh XFS filesystem
-    mkfs.xfs -f -i size=1024  ${SWIFT_DATA_DIR}/drives/images/swift.img
-
-    # Mount the disk with mount options to make it as efficient as possible
-    mkdir -p ${SWIFT_DATA_DIR}/drives/sdb1
-    if ! egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then
-        sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8  \
-            ${SWIFT_DATA_DIR}/drives/images/swift.img ${SWIFT_DATA_DIR}/drives/sdb1
-    fi
-
-    # Create a link to the above mount and
-    # create all of the directories needed to emulate a few different servers
-    for node_number in ${SWIFT_REPLICAS_SEQ}; do
-        sudo ln -sf ${SWIFT_DATA_DIR}/drives/sdb1/$node_number ${SWIFT_DATA_DIR}/$node_number;
-        drive=${SWIFT_DATA_DIR}/drives/sdb1/${node_number}
-        node=${SWIFT_DATA_DIR}/${node_number}/node
-        node_device=${node}/sdb1
-        [[ -d $node ]] && continue
-        [[ -d $drive ]] && continue
-        sudo install -o ${USER} -g $USER_GROUP -d $drive
-        sudo install -o ${USER} -g $USER_GROUP -d $node_device
-        sudo chown -R $USER: ${node}
-    done
-
-   sudo mkdir -p ${SWIFT_CONF_DIR}/{object,container,account}-server
-   sudo chown -R $USER: ${SWIFT_CONF_DIR}
+    sudo mkdir -p ${SWIFT_CONF_DIR}/{object,container,account}-server
+    sudo chown -R $USER: ${SWIFT_CONF_DIR}
 
     if [[ "$SWIFT_CONF_DIR" != "/etc/swift" ]]; then
         # Some swift tools are hard-coded to use ``/etc/swift`` and are apparently not going to be fixed.
@@ -327,9 +279,55 @@
         tee /etc/rsyslog.d/10-swift.conf
 }
 
-# configure_swiftclient() - Set config files, create data dirs, etc
-function configure_swiftclient() {
-    setup_develop $SWIFTCLIENT_DIR
+# create_swift_disk - Create Swift backing disk
+function create_swift_disk() {
+    local node_number
+
+    # First do a bit of setup by creating the directories and
+    # changing the permissions so we can run it as our user.
+
+    USER_GROUP=$(id -g)
+    sudo mkdir -p ${SWIFT_DATA_DIR}/{drives,cache,run,logs}
+    sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_DIR}
+
+    # Create a loopback disk and format it to XFS.
+    if [[ -e ${SWIFT_DATA_DIR}/drives/images/swift.img ]]; then
+        if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then
+            sudo umount ${SWIFT_DATA_DIR}/drives/sdb1
+            sudo rm -f ${SWIFT_DATA_DIR}/drives/images/swift.img
+        fi
+    fi
+
+    mkdir -p ${SWIFT_DATA_DIR}/drives/images
+    sudo touch ${SWIFT_DATA_DIR}/drives/images/swift.img
+    sudo chown $USER: ${SWIFT_DATA_DIR}/drives/images/swift.img
+
+    dd if=/dev/zero of=${SWIFT_DATA_DIR}/drives/images/swift.img \
+        bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE}
+
+    # Make a fresh XFS filesystem
+    mkfs.xfs -f -i size=1024  ${SWIFT_DATA_DIR}/drives/images/swift.img
+
+    # Mount the disk with mount options to make it as efficient as possible
+    mkdir -p ${SWIFT_DATA_DIR}/drives/sdb1
+    if ! egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then
+        sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8  \
+            ${SWIFT_DATA_DIR}/drives/images/swift.img ${SWIFT_DATA_DIR}/drives/sdb1
+    fi
+
+    # Create a link to the above mount and
+    # create all of the directories needed to emulate a few different servers
+    for node_number in ${SWIFT_REPLICAS_SEQ}; do
+        sudo ln -sf ${SWIFT_DATA_DIR}/drives/sdb1/$node_number ${SWIFT_DATA_DIR}/$node_number;
+        drive=${SWIFT_DATA_DIR}/drives/sdb1/${node_number}
+        node=${SWIFT_DATA_DIR}/${node_number}/node
+        node_device=${node}/sdb1
+        [[ -d $node ]] && continue
+        [[ -d $drive ]] && continue
+        sudo install -o ${USER} -g $USER_GROUP -d $drive
+        sudo install -o ${USER} -g $USER_GROUP -d $node_device
+        sudo chown -R $USER: ${node}
+    done
 }
 
 # init_swift() - Initialize rings
@@ -338,6 +336,9 @@
     # Make sure to kill all swift processes first
     swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true
 
+    # Forcibly re-create the backing filesystem
+    create_swift_disk
+
     # This is where we create three different rings for swift with
     # different object servers binding on different ports.
     pushd ${SWIFT_CONF_DIR} >/dev/null && {
@@ -366,13 +367,14 @@
 
 function install_swift() {
     git_clone $SWIFT_REPO $SWIFT_DIR $SWIFT_BRANCH
+    setup_develop $SWIFT_DIR
 }
 
 function install_swiftclient() {
     git_clone $SWIFTCLIENT_REPO $SWIFTCLIENT_DIR $SWIFTCLIENT_BRANCH
+    setup_develop $SWIFTCLIENT_DIR
 }
 
-
 # start_swift() - Start running processes, including screen
 function start_swift() {
     # (re)start rsyslog
@@ -421,3 +423,7 @@
 
 # Restore xtrace
 $XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
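
Because the port bases are now parameterized, they can be overridden (for instance to avoid clashes when several Swift setups share a host), e.g. in localrc with arbitrary example values:

    OBJECT_PORT_BASE=6213
    CONTAINER_PORT_BASE=6211
    ACCOUNT_PORT_BASE=6212
    # node N then binds to base_port + N * 10, per the comment in lib/swift
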
diff --git a/lib/tempest b/lib/tempest
index 85e643e..c1dc3a3 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -294,3 +294,7 @@
 
 # Restore xtrace
 $XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/template b/lib/template
index 02de5ce..72904fe 100644
--- a/lib/template
+++ b/lib/template
@@ -78,3 +78,7 @@
 
 # Restore xtrace
 $XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/tls b/lib/tls
index 202edef..fb8f4b9 100644
--- a/lib/tls
+++ b/lib/tls
@@ -316,3 +316,7 @@
 
     stud $STUD_PROTO -f $f_host,$f_port -b $b_host,$b_port $DEVSTACK_CERT 2>/dev/null
 }
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/openrc b/openrc
index 3ef44fd..8af2854 100644
--- a/openrc
+++ b/openrc
@@ -27,8 +27,8 @@
 source $RC_DIR/stackrc
 
 # Load the last env variables if available
-if [[ -r $TOP_DIR/.stackenv ]]; then
-    source $TOP_DIR/.stackenv
+if [[ -r $RC_DIR/.stackenv ]]; then
+    source $RC_DIR/.stackenv
 fi
 
 # Get some necessary configuration
diff --git a/stack.sh b/stack.sh
index cfce6be..1010b4e 100755
--- a/stack.sh
+++ b/stack.sh
@@ -564,9 +564,11 @@
 # Grab clients first
 install_keystoneclient
 install_glanceclient
+install_cinderclient
 install_novaclient
 # Check out the client libs that are used most
 git_clone $OPENSTACKCLIENT_REPO $OPENSTACKCLIENT_DIR $OPENSTACKCLIENT_BRANCH
+setup_develop $OPENSTACKCLIENT_DIR
 
 # glance, swift middleware and nova api need keystone middleware
 if is_service_enabled key g-api n-api s-proxy; then
@@ -627,9 +629,6 @@
 echo_summary "Configuring OpenStack projects"
 
 # Set up our checkouts so they are installed in the python path
-configure_keystoneclient
-configure_novaclient
-setup_develop $OPENSTACKCLIENT_DIR
 
 if is_service_enabled key g-api n-api s-proxy; then
     configure_keystone
@@ -637,7 +636,6 @@
 
 if is_service_enabled s-proxy; then
     configure_swift
-    configure_swiftclient
     if is_service_enabled swift3; then
         setup_develop $SWIFT3_DIR
     fi
@@ -647,10 +645,6 @@
     configure_glance
 fi
 
-# Do this _after_ glance is installed to override the old binary
-# TODO(dtroyer): figure out when this is no longer necessary
-configure_glanceclient
-
 if is_service_enabled nova; then
     # First clean up old instances
     cleanup_nova
@@ -826,17 +820,7 @@
 
 if is_service_enabled g-reg; then
     echo_summary "Configuring Glance"
-
     init_glance
-
-    # Store the images in swift if enabled.
-    if is_service_enabled s-proxy; then
-        iniset $GLANCE_API_CONF DEFAULT default_store swift
-        iniset $GLANCE_API_CONF DEFAULT swift_store_auth_address $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/
-        iniset $GLANCE_API_CONF DEFAULT swift_store_user $SERVICE_TENANT_NAME:glance
-        iniset $GLANCE_API_CONF DEFAULT swift_store_key $SERVICE_PASSWORD
-        iniset $GLANCE_API_CONF DEFAULT swift_store_create_container_on_put True
-    fi
 fi
 
 
@@ -1017,7 +1001,10 @@
     iniset $NOVA_CONF DEFAULT s3_affix_tenant "True"
 fi
 
-screen_it zeromq "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-rpc-zmq-receiver"
+if is_service_enabled zeromq; then
+    echo_summary "Starting zermomq receiver"
+    screen_it zeromq "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-rpc-zmq-receiver"
+fi
 
 # Launch the nova-api and wait for it to answer before continuing
 if is_service_enabled n-api; then
@@ -1062,12 +1049,6 @@
     start_ceilometer
 fi
 
-# Starting the nova-objectstore only if swift3 service is not enabled.
-# Swift will act as s3 objectstore.
-is_service_enabled swift3 || \
-    screen_it n-obj "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-objectstore"
-
-
 # Configure and launch heat engine, api and metadata
 if is_service_enabled heat; then
     # Initialize heat, including replacing nova flavors
diff --git a/stackrc b/stackrc
index 5f8b6ef..7c4fa68 100644
--- a/stackrc
+++ b/stackrc
@@ -221,3 +221,7 @@
 
 # Compatibility until it's eradicated from CI
 USE_SCREEN=${SCREEN_DEV:-$USE_SCREEN}
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/tools/xen/functions b/tools/xen/functions
new file mode 100644
index 0000000..5b4a661
--- /dev/null
+++ b/tools/xen/functions
@@ -0,0 +1,55 @@
+#!/bin/bash
+
+function xapi_plugin_location {
+    for PLUGIN_DIR in "/etc/xapi.d/plugins/" "/usr/lib/xcp/plugins/"
+    do
+        if [ -d $PLUGIN_DIR ]
+        then
+            echo $PLUGIN_DIR
+            return 0
+        fi
+    done
+    return 1
+}
+
+function zip_snapshot_location {
+    echo $1 | sed "s:\.git$::;s:$:/zipball/$2:g"
+}
+
+function create_directory_for_kernels {
+    mkdir -p "/boot/guest"
+}
+
+function extract_remote_zipball {
+    local ZIPBALL_URL=$1
+
+    local LOCAL_ZIPBALL=$(mktemp)
+    local EXTRACTED_FILES=$(mktemp -d)
+
+    (
+        wget -nv $ZIPBALL_URL -O $LOCAL_ZIPBALL --no-check-certificate
+        unzip -q -o $LOCAL_ZIPBALL -d $EXTRACTED_FILES
+        rm -f $LOCAL_ZIPBALL
+    ) >&2
+
+    echo "$EXTRACTED_FILES"
+}
+
+function find_xapi_plugins_dir {
+    find $1 -path '*/xapi.d/plugins' -type d -print
+}
+
+function install_xapi_plugins_from_zipball {
+    local XAPI_PLUGIN_DIR
+    local EXTRACTED_FILES
+    local EXTRACTED_PLUGINS_DIR
+
+    XAPI_PLUGIN_DIR=$(xapi_plugin_location)
+
+    EXTRACTED_FILES=$(extract_remote_zipball $1)
+    EXTRACTED_PLUGINS_DIR=$(find_xapi_plugins_dir $EXTRACTED_FILES)
+
+    cp -pr $EXTRACTED_PLUGINS_DIR/* $XAPI_PLUGIN_DIR
+    rm -rf $EXTRACTED_FILES
+    chmod a+x ${XAPI_PLUGIN_DIR}*
+}
diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh
index 0c0e1e2..7c3b839 100755
--- a/tools/xen/install_os_domU.sh
+++ b/tools/xen/install_os_domU.sh
@@ -28,6 +28,9 @@
 # Include onexit commands
 . $THIS_DIR/scripts/on_exit.sh
 
+# xapi functions
+. $THIS_DIR/functions
+
 
 #
 # Get Settings
@@ -43,48 +46,26 @@
   xe "$cmd" --minimal "$@"
 }
 
-
 #
 # Prepare Dom0
 # including installing XenAPI plugins
 #
 
 cd $THIS_DIR
-if [ -f ./master ]
-then
-    rm -rf ./master
-    rm -rf ./nova
-fi
 
-# get nova
-NOVA_ZIPBALL_URL=${NOVA_ZIPBALL_URL:-$(echo $NOVA_REPO | sed "s:\.git$::;s:$:/zipball/$NOVA_BRANCH:g")}
-wget -nv $NOVA_ZIPBALL_URL -O nova-zipball --no-check-certificate
-unzip -q -o nova-zipball  -d ./nova
+# Install plugins
 
-# install xapi plugins
-XAPI_PLUGIN_DIR=/etc/xapi.d/plugins/
-if [ ! -d $XAPI_PLUGIN_DIR ]; then
-    # the following is needed when using xcp-xapi
-    XAPI_PLUGIN_DIR=/usr/lib/xcp/plugins/
-fi
-cp -pr ./nova/*/plugins/xenserver/xenapi/etc/xapi.d/plugins/* $XAPI_PLUGIN_DIR
+## Nova plugins
+NOVA_ZIPBALL_URL=${NOVA_ZIPBALL_URL:-$(zip_snapshot_location $NOVA_REPO $NOVA_BRANCH)}
+install_xapi_plugins_from_zipball $NOVA_ZIPBALL_URL
 
-# Install the netwrap xapi plugin to support agent control of dom0 networking
+## Install the netwrap xapi plugin to support agent control of dom0 networking
 if [[ "$ENABLED_SERVICES" =~ "q-agt" && "$Q_PLUGIN" = "openvswitch" ]]; then
-    if [ -f ./quantum ]; then
-        rm -rf ./quantum
-    fi
-    # get quantum
-    QUANTUM_ZIPBALL_URL=${QUANTUM_ZIPBALL_URL:-$(echo $QUANTUM_REPO | sed "s:\.git$::;s:$:/zipball/$QUANTUM_BRANCH:g")}
-    wget -nv $QUANTUM_ZIPBALL_URL -O quantum-zipball --no-check-certificate
-    unzip -q -o quantum-zipball  -d ./quantum
-    cp -pr ./quantum/*/quantum/plugins/openvswitch/agent/xenapi/etc/xapi.d/plugins/* $XAPI_PLUGIN_DIR
+    QUANTUM_ZIPBALL_URL=${QUANTUM_ZIPBALL_URL:-$(zip_snapshot_location $QUANTUM_REPO $QUANTUM_BRANCH)}
+    install_xapi_plugins_from_zipball $QUANTUM_ZIPBALL_URL
 fi
 
-chmod a+x ${XAPI_PLUGIN_DIR}*
-
-mkdir -p /boot/guest
-
+create_directory_for_kernels
 
 #
 # Configure Networking
diff --git a/tools/xen/mocks b/tools/xen/mocks
new file mode 100644
index 0000000..b006558
--- /dev/null
+++ b/tools/xen/mocks
@@ -0,0 +1,59 @@
+#!/bin/bash
+
+test ! -e "$LIST_OF_ACTIONS" && {
+    echo "Mocking is not set up properly."
+    echo "LIST_OF_ACTIONS should point to an existing file."
+    exit 1
+}
+
+test ! -e "$LIST_OF_DIRECTORIES" && {
+    echo "Mocking is not set up properly."
+    echo "LIST_OF_DIRECTORIES should point to an existing file."
+    exit 1
+}
+
+function mktemp {
+    if test "${1:-}" = "-d";
+    then
+        echo "tempdir"
+    else
+        echo "tempfile"
+    fi
+}
+
+function wget {
+    echo "wget $@" >> $LIST_OF_ACTIONS
+}
+
+function mkdir {
+    if test "${1:-}" = "-p";
+    then
+        echo "$2" >> $LIST_OF_DIRECTORIES
+    fi
+}
+
+function unzip {
+    echo "Random rubbish from unzip"
+    echo "unzip $@" >> $LIST_OF_ACTIONS
+}
+
+function rm {
+    echo "rm $@" >> $LIST_OF_ACTIONS
+}
+
+function [ {
+    if test "${1:-}" = "-d";
+    then
+        echo "[ $@" >> $LIST_OF_ACTIONS
+        for directory in $(cat $LIST_OF_DIRECTORIES)
+        do
+            if test "$directory" = "$2"
+            then
+                return 0
+            fi
+        done
+        return 1
+    fi
+    echo "Mock test does not implement the requested function"
+    exit 1
+}
diff --git a/tools/xen/test_functions.sh b/tools/xen/test_functions.sh
new file mode 100755
index 0000000..6817ec3
--- /dev/null
+++ b/tools/xen/test_functions.sh
@@ -0,0 +1,134 @@
+#!/bin/bash
+
+# Tests for functions.
+#
+# The tests source the mocks file to mock out various functions. The
+# mocking-out always happens in a sub-shell, so it has no impact on
+# the functions defined here.
+
+# To run the tests, please run:
+#
+# ./test_functions.sh run_tests
+#
+# To only print out the discovered test functions, run:
+#
+# ./test_functions.sh
+
+. functions
+
+# Setup
+function before_each_test {
+    LIST_OF_DIRECTORIES=$(mktemp)
+    truncate -s 0 $LIST_OF_DIRECTORIES
+
+    LIST_OF_ACTIONS=$(mktemp)
+    truncate -s 0 $LIST_OF_ACTIONS
+}
+
+# Teardown
+function after_each_test {
+    rm -f $LIST_OF_DIRECTORIES
+    rm -f $LIST_OF_ACTIONS
+}
+
+# Helpers
+function given_directory_exists {
+    echo "$1" >> $LIST_OF_DIRECTORIES
+}
+
+function assert_directory_exists {
+    grep "$1" $LIST_OF_DIRECTORIES
+}
+
+function assert_previous_command_failed {
+    [ "$?" != "0" ] || exit 1
+}
+
+# Tests
+function test_plugin_directory_on_xenserver {
+    given_directory_exists "/etc/xapi.d/plugins/"
+
+    PLUGDIR=$(. mocks && xapi_plugin_location)
+
+    [ "/etc/xapi.d/plugins/" = "$PLUGDIR" ]
+}
+
+function test_plugin_directory_on_xcp {
+    given_directory_exists "/usr/lib/xcp/plugins/"
+
+    PLUGDIR=$(. mocks && xapi_plugin_location)
+
+    [ "/usr/lib/xcp/plugins/" = "$PLUGDIR" ]
+}
+
+function test_no_plugin_directory_found {
+    set +e
+
+    local IGNORE
+    IGNORE=$(. mocks && xapi_plugin_location)
+
+    assert_previous_command_failed
+
+    grep "[ -d /etc/xapi.d/plugins/ ]" $LIST_OF_ACTIONS
+    grep "[ -d /usr/lib/xcp/plugins/ ]" $LIST_OF_ACTIONS
+}
+
+function test_zip_snapshot_location {
+    diff \
+    <(zip_snapshot_location "https://github.com/openstack/nova.git" "master") \
+    <(echo "https://github.com/openstack/nova/zipball/master")
+}
+
+function test_create_directory_for_kernels {
+    (. mocks && create_directory_for_kernels)
+
+    assert_directory_exists "/boot/guest"
+}
+
+function test_extract_remote_zipball {
+    local RESULT=$(. mocks && extract_remote_zipball "someurl")
+
+    diff <(cat $LIST_OF_ACTIONS) - << EOF
+wget -nv someurl -O tempfile --no-check-certificate
+unzip -q -o tempfile -d tempdir
+rm -f tempfile
+EOF
+
+    [ "$RESULT" = "tempdir" ]
+}
+
+function test_find_nova_plugins {
+    local tmpdir=$(mktemp -d)
+
+    mkdir -p "$tmpdir/blah/blah/u/xapi.d/plugins"
+
+    [ "$tmpdir/blah/blah/u/xapi.d/plugins" = $(find_xapi_plugins_dir $tmpdir) ]
+
+    rm -rf $tmpdir
+}
+
+# Test runner
+[ "$1" = "" ] && {
+    grep -e "^function *test_" $0 | cut -d" " -f2
+}
+
+[ "$1" = "run_tests" ] && {
+    for testname in $($0)
+    do
+        echo "$testname"
+        before_each_test
+        (
+            set -eux
+            $testname
+        )
+        if [ "$?" != "0" ]
+        then
+            echo "FAIL"
+            exit 1
+        else
+            echo "PASS"
+        fi
+
+        after_each_test
+    done
+}