Merge "Add unstack.sh"
diff --git a/.gitignore b/.gitignore
index e482090..c8d2560 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,3 +3,4 @@
 *.log
 src
 localrc
+local.sh
diff --git a/AUTHORS b/AUTHORS
index 8645615..820a677 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -17,6 +17,7 @@
 Jay Pipes <jaypipes@gmail.com>
 Jesse Andrews <anotherjesse@gmail.com>
 Johannes Erdfelt <johannes.erdfelt@rackspace.com>
+Josh Kearney <josh@jk0.org>
 Justin Shepherd <galstrom21@gmail.com>
 Ken Pepple <ken.pepple@rabbityard.com>
 Kiall Mac Innes <kiall@managedit.ie>
diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh
new file mode 100755
index 0000000..38fac12
--- /dev/null
+++ b/exercises/aggregates.sh
@@ -0,0 +1,141 @@
+#!/usr/bin/env bash
+
+# **aggregates.sh**
+
+# This script demonstrates how to use host aggregates:
+#  *  Create an Aggregate
+#  *  Updating Aggregate details
+#  *  Testing Aggregate metadata
+#  *  Testing Aggregate delete
+#  *  TODO(johngarbutt) - test adding a host (ideally with two hosts)
+
+echo "**************************************************"
+echo "Begin DevStack Exercise: $0"
+echo "**************************************************"
+
+# This script exits on an error so that errors don't compound and you see
+# only the first error that occurred.
+set -o errexit
+
+# Print the commands being run so that we can see the command that triggers
+# an error.  It is also useful for following along as the install occurs.
+set -o xtrace
+
+
+# Settings
+# ========
+
+# Keep track of the current directory
+EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
+TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
+
+# Import common functions
+source $TOP_DIR/functions
+
+# Import configuration
+source $TOP_DIR/openrc
+
+# Import exercise configuration
+source $TOP_DIR/exerciserc
+
+# run test as the admin user
+_OLD_USERNAME=$OS_USERNAME
+OS_USERNAME=admin
+
+
+# Create an aggregate
+# ===================
+
+AGGREGATE_NAME=test_aggregate_$RANDOM
+AGGREGATE_A_ZONE=nova
+
+exit_if_aggregate_present() {
+    aggregate_name=$1
+
+    if [ `nova aggregate-list | grep -c " $aggregate_name "` == 0 ]; then
+        echo "SUCCESS $aggregate_name not present"
+    else
+        echo "ERROR found aggregate: $aggregate_name"
+        exit -1
+    fi
+}
+
+exit_if_aggregate_present $AGGREGATE_NAME
+
+AGGREGATE_ID=`nova aggregate-create $AGGREGATE_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE_NAME " | get_field 1`
+
+# check aggregate created
+nova aggregate-list | grep -q " $AGGREGATE_NAME " || die "Aggregate $AGGREGATE_NAME not created"
+
+
+# Ensure creating a duplicate fails
+# =================================
+
+if nova aggregate-create $AGGREGATE_NAME $AGGREGATE_A_ZONE; then
+    echo "ERROR could create duplicate aggregate"
+    exit -1
+fi
+
+
+# Test aggregate-update (and aggregate-details)
+# =============================================
+AGGREGATE_NEW_NAME=test_aggregate_$RANDOM
+
+nova aggregate-update $AGGREGATE_ID $AGGREGATE_NEW_NAME
+nova aggregate-details $AGGREGATE_ID | grep $AGGREGATE_NEW_NAME
+nova aggregate-details $AGGREGATE_ID | grep $AGGREGATE_A_ZONE
+
+nova aggregate-update $AGGREGATE_ID $AGGREGATE_NAME $AGGREGATE_A_ZONE
+nova aggregate-details $AGGREGATE_ID | grep $AGGREGATE_NAME
+nova aggregate-details $AGGREGATE_ID | grep $AGGREGATE_A_ZONE
+
+
+# Test aggregate-set-metadata
+# ===========================
+META_DATA_1_KEY=asdf
+META_DATA_2_KEY=foo
+META_DATA_3_KEY=bar
+
+#ensure no metadata is set
+nova aggregate-details $AGGREGATE_ID | grep {}
+
+nova aggregate-set-metadata $AGGREGATE_ID ${META_DATA_1_KEY}=123
+nova aggregate-details $AGGREGATE_ID | grep $META_DATA_1_KEY
+nova aggregate-details $AGGREGATE_ID | grep 123
+
+nova aggregate-set-metadata $AGGREGATE_ID ${META_DATA_2_KEY}=456
+nova aggregate-details $AGGREGATE_ID | grep $META_DATA_1_KEY
+nova aggregate-details $AGGREGATE_ID | grep $META_DATA_2_KEY
+
+nova aggregate-set-metadata $AGGREGATE_ID $META_DATA_2_KEY ${META_DATA_3_KEY}=789
+nova aggregate-details $AGGREGATE_ID | grep $META_DATA_1_KEY
+nova aggregate-details $AGGREGATE_ID | grep $META_DATA_3_KEY
+
+nova aggregate-details $AGGREGATE_ID | grep $META_DATA_2_KEY && die "ERROR metadata was not cleared"
+
+nova aggregate-set-metadata $AGGREGATE_ID $META_DATA_3_KEY $META_DATA_1_KEY
+nova aggregate-details $AGGREGATE_ID | grep {}
+
+
+# Test aggregate-add/remove-host
+# ==============================
+if [ "$VIRT_DRIVER" == "xenserver" ]; then
+    echo "TODO(johngarbutt) add tests for add/remove host from aggregate"
+fi
+
+
+# Test aggregate-delete
+# =====================
+nova aggregate-delete $AGGREGATE_ID
+exit_if_aggregate_present $AGGREGATE_NAME
+
+
+# Test complete
+# =============
+OS_USERNAME=$_OLD_USERNAME
+echo "AGGREGATE TEST PASSED"
+
+set +o xtrace
+echo "**************************************************"
+echo "End DevStack Exercise: $0"
+echo "**************************************************"
diff --git a/files/apts/nova b/files/apts/nova
index f2059ba..66640c5 100644
--- a/files/apts/nova
+++ b/files/apts/nova
@@ -1,5 +1,5 @@
 dnsmasq-base
-dnsmasq-utils # for dhcp_release only available in dist:oneiric
+dnsmasq-utils # for dhcp_release only available in dist:oneiric,precise
 kpartx
 parted
 arping # only available in dist:natty
diff --git a/files/glance-api-paste.ini b/files/glance-api-paste.ini
deleted file mode 100644
index 5cfd22f..0000000
--- a/files/glance-api-paste.ini
+++ /dev/null
@@ -1,39 +0,0 @@
-[pipeline:glance-api]
-#pipeline = versionnegotiation context apiv1app
-# NOTE: use the following pipeline for keystone
-pipeline = versionnegotiation authtoken context apiv1app
-
-# To enable Image Cache Management API replace pipeline with below:
-# pipeline = versionnegotiation context imagecache apiv1app
-# NOTE: use the following pipeline for keystone auth (with caching)
-# pipeline = versionnegotiation authtoken auth-context imagecache apiv1app
-
-[app:apiv1app]
-paste.app_factory = glance.common.wsgi:app_factory
-glance.app_factory = glance.api.v1.router:API
-
-[filter:versionnegotiation]
-paste.filter_factory = glance.common.wsgi:filter_factory
-glance.filter_factory = glance.api.middleware.version_negotiation:VersionNegotiationFilter
-
-[filter:cache]
-paste.filter_factory = glance.common.wsgi:filter_factory
-glance.filter_factory = glance.api.middleware.cache:CacheFilter
-
-[filter:cachemanage]
-paste.filter_factory = glance.common.wsgi:filter_factory
-glance.filter_factory = glance.api.middleware.cache_manage:CacheManageFilter
-
-[filter:context]
-paste.filter_factory = glance.common.wsgi:filter_factory
-glance.filter_factory = glance.common.context:ContextMiddleware
-
-[filter:authtoken]
-paste.filter_factory = keystone.middleware.auth_token:filter_factory
-auth_host = %KEYSTONE_AUTH_HOST%
-auth_port = %KEYSTONE_AUTH_PORT%
-auth_protocol = %KEYSTONE_AUTH_PROTOCOL%
-auth_uri = %KEYSTONE_SERVICE_PROTOCOL%://%KEYSTONE_SERVICE_HOST%:%KEYSTONE_SERVICE_PORT%/
-admin_tenant_name = %SERVICE_TENANT_NAME%
-admin_user = %SERVICE_USERNAME%
-admin_password = %SERVICE_PASSWORD%
diff --git a/files/glance-api.conf b/files/glance-api.conf
deleted file mode 100644
index b4ba098..0000000
--- a/files/glance-api.conf
+++ /dev/null
@@ -1,139 +0,0 @@
-[DEFAULT]
-# Show more verbose log output (sets INFO log level output)
-verbose = True
-
-# Show debugging output in logs (sets DEBUG log level output)
-debug = True
-
-# Which backend store should Glance use by default is not specified
-# in a request to add a new image to Glance? Default: 'file'
-# Available choices are 'file', 'swift', and 's3'
-default_store = file
-
-# Address to bind the API server
-bind_host = 0.0.0.0
-
-# Port the bind the API server to
-bind_port = 9292
-
-# Address to find the registry server
-registry_host = 0.0.0.0
-
-# Port the registry server is listening on
-registry_port = 9191
-
-# Log to this file. Make sure you do not set the same log
-# file for both the API and registry servers!
-#log_file = %DEST%/glance/api.log
-
-# Send logs to syslog (/dev/log) instead of to file specified by `log_file`
-use_syslog = %SYSLOG%
-
-# ============ Notification System Options =====================
-
-# Notifications can be sent when images are create, updated or deleted.
-# There are three methods of sending notifications, logging (via the
-# log_file directive), rabbit (via a rabbitmq queue) or noop (no
-# notifications sent, the default)
-notifier_strategy = noop
-
-# Configuration options if sending notifications via rabbitmq (these are
-# the defaults)
-rabbit_host = localhost
-rabbit_port = 5672
-rabbit_use_ssl = false
-rabbit_userid = guest
-rabbit_password = guest
-rabbit_virtual_host = /
-rabbit_notification_topic = glance_notifications
-
-# ============ Filesystem Store Options ========================
-
-# Directory that the Filesystem backend store
-# writes image data to
-filesystem_store_datadir = %DEST%/glance/images/
-
-# ============ Swift Store Options =============================
-
-# Address where the Swift authentication service lives
-swift_store_auth_address = 127.0.0.1:8080/v1.0/
-
-# User to authenticate against the Swift authentication service
-swift_store_user = jdoe
-
-# Auth key for the user authenticating against the
-# Swift authentication service
-swift_store_key = a86850deb2742ec3cb41518e26aa2d89
-
-# Container within the account that the account should use
-# for storing images in Swift
-swift_store_container = glance
-
-# Do we create the container if it does not exist?
-swift_store_create_container_on_put = False
-
-# What size, in MB, should Glance start chunking image files
-# and do a large object manifest in Swift? By default, this is
-# the maximum object size in Swift, which is 5GB
-swift_store_large_object_size = 5120
-
-# When doing a large object manifest, what size, in MB, should
-# Glance write chunks to Swift? This amount of data is written
-# to a temporary disk buffer during the process of chunking
-# the image file, and the default is 200MB
-swift_store_large_object_chunk_size = 200
-
-# Whether to use ServiceNET to communicate with the Swift storage servers.
-# (If you aren't RACKSPACE, leave this False!)
-#
-# To use ServiceNET for authentication, prefix hostname of
-# `swift_store_auth_address` with 'snet-'.
-# Ex. https://example.com/v1.0/ -> https://snet-example.com/v1.0/
-swift_enable_snet = False
-
-# ============ S3 Store Options =============================
-
-# Address where the S3 authentication service lives
-s3_store_host = 127.0.0.1:8080/v1.0/
-
-# User to authenticate against the S3 authentication service
-s3_store_access_key = <20-char AWS access key>
-
-# Auth key for the user authenticating against the
-# S3 authentication service
-s3_store_secret_key = <40-char AWS secret key>
-
-# Container within the account that the account should use
-# for storing images in S3. Note that S3 has a flat namespace,
-# so you need a unique bucket name for your glance images. An
-# easy way to do this is append your AWS access key to "glance".
-# S3 buckets in AWS *must* be lowercased, so remember to lowercase
-# your AWS access key if you use it in your bucket name below!
-s3_store_bucket = <lowercased 20-char aws access key>glance
-
-# Do we create the bucket if it does not exist?
-s3_store_create_bucket_on_put = False
-
-# ============ Image Cache Options ========================
-
-image_cache_enabled = False
-
-# Directory that the Image Cache writes data to
-# Make sure this is also set in glance-pruner.conf
-image_cache_datadir = /var/lib/glance/image-cache/
-
-# Number of seconds after which we should consider an incomplete image to be
-# stalled and eligible for reaping
-image_cache_stall_timeout = 86400
-
-# ============ Delayed Delete Options =============================
-
-# Turn on/off delayed delete
-delayed_delete = False
-
-# Delayed delete time in seconds
-scrub_time = 43200
-
-# Directory that the scrubber will use to remind itself of what to delete
-# Make sure this is also set in glance-scrubber.conf
-scrubber_datadir = /var/lib/glance/scrubber
diff --git a/files/glance-registry-paste.ini b/files/glance-registry-paste.ini
deleted file mode 100644
index b792aa8..0000000
--- a/files/glance-registry-paste.ini
+++ /dev/null
@@ -1,23 +0,0 @@
-[pipeline:glance-registry]
-#pipeline = context registryapp
-# NOTE: use the following pipeline for keystone
-pipeline = authtoken context registryapp
-
-[app:registryapp]
-paste.app_factory = glance.common.wsgi:app_factory
-glance.app_factory = glance.registry.api.v1:API
-
-[filter:context]
-context_class = glance.registry.context.RequestContext
-paste.filter_factory = glance.common.wsgi:filter_factory
-glance.filter_factory = glance.common.context:ContextMiddleware
-
-[filter:authtoken]
-paste.filter_factory = keystone.middleware.auth_token:filter_factory
-auth_host = %KEYSTONE_AUTH_HOST%
-auth_port = %KEYSTONE_AUTH_PORT%
-auth_protocol = %KEYSTONE_AUTH_PROTOCOL%
-auth_uri = %KEYSTONE_SERVICE_PROTOCOL%://%KEYSTONE_SERVICE_HOST%:%KEYSTONE_SERVICE_PORT%/
-admin_tenant_name = %SERVICE_TENANT_NAME%
-admin_user = %SERVICE_USERNAME%
-admin_password = %SERVICE_PASSWORD%
diff --git a/files/glance-registry.conf b/files/glance-registry.conf
deleted file mode 100644
index 2c32745..0000000
--- a/files/glance-registry.conf
+++ /dev/null
@@ -1,44 +0,0 @@
-[DEFAULT]
-# Show more verbose log output (sets INFO log level output)
-verbose = True
-
-# Show debugging output in logs (sets DEBUG log level output)
-debug = True
-
-# Address to bind the registry server
-bind_host = 0.0.0.0
-
-# Port the bind the registry server to
-bind_port = 9191
-
-# Log to this file. Make sure you do not set the same log
-# file for both the API and registry servers!
-#log_file = %DEST%/glance/registry.log
-
-# Where to store images
-filesystem_store_datadir = %DEST%/glance/images
-
-# Send logs to syslog (/dev/log) instead of to file specified by `log_file`
-use_syslog = %SYSLOG%
-
-# SQLAlchemy connection string for the reference implementation
-# registry server. Any valid SQLAlchemy connection string is fine.
-# See: http://www.sqlalchemy.org/docs/05/reference/sqlalchemy/connections.html#sqlalchemy.create_engine
-sql_connection = %SQL_CONN%
-
-# Period in seconds after which SQLAlchemy should reestablish its connection
-# to the database.
-#
-# MySQL uses a default `wait_timeout` of 8 hours, after which it will drop
-# idle connections. This can result in 'MySQL Gone Away' exceptions. If you
-# notice this, you can lower this value to ensure that SQLAlchemy reconnects
-# before MySQL can drop the connection.
-sql_idle_timeout = 3600
-
-# Limit the api to return `param_limit_max` items in a call to a container. If
-# a larger `limit` query param is provided, it will be reduced to this value.
-api_limit_max = 1000
-
-# If a `limit` query param is not provided in an api request, it will
-# default to `limit_param_default`
-limit_param_default = 25
diff --git a/files/keystone.conf b/files/keystone.conf
deleted file mode 100644
index 1a924ed..0000000
--- a/files/keystone.conf
+++ /dev/null
@@ -1,99 +0,0 @@
-[DEFAULT]
-bind_host = 0.0.0.0
-public_port = 5000
-admin_port = 35357
-admin_token = %SERVICE_TOKEN%
-compute_port = 3000
-verbose = True
-debug = True
-# commented out so devstack logs to stdout
-# log_file = %DEST%/keystone/keystone.log
-
-# ================= Syslog Options ============================
-# Send logs to syslog (/dev/log) instead of to file specified
-# by `log-file`
-use_syslog = False
-
-# Facility to use. If unset defaults to LOG_USER.
-# syslog_log_facility = LOG_LOCAL0
-
-[sql]
-connection = %SQL_CONN%
-idle_timeout = 30
-min_pool_size = 5
-max_pool_size = 10
-pool_timeout = 200
-
-[identity]
-driver = keystone.identity.backends.sql.Identity
-
-[catalog]
-driver = keystone.catalog.backends.templated.TemplatedCatalog
-template_file = %KEYSTONE_DIR%/etc/default_catalog.templates
-
-[token]
-driver = keystone.token.backends.kvs.Token
-
-[policy]
-driver = keystone.policy.backends.rules.Policy
-
-[ec2]
-driver = keystone.contrib.ec2.backends.sql.Ec2
-
-[filter:debug]
-paste.filter_factory = keystone.common.wsgi:Debug.factory
-
-[filter:token_auth]
-paste.filter_factory = keystone.middleware:TokenAuthMiddleware.factory
-
-[filter:admin_token_auth]
-paste.filter_factory = keystone.middleware:AdminTokenAuthMiddleware.factory
-
-[filter:xml_body]
-paste.filter_factory = keystone.middleware:XmlBodyMiddleware.factory
-
-[filter:json_body]
-paste.filter_factory = keystone.middleware:JsonBodyMiddleware.factory
-
-[filter:crud_extension]
-paste.filter_factory = keystone.contrib.admin_crud:CrudExtension.factory
-
-[filter:ec2_extension]
-paste.filter_factory = keystone.contrib.ec2:Ec2Extension.factory
-
-[filter:s3_extension]
-paste.filter_factory = keystone.contrib.s3:S3Extension.factory
-
-[app:public_service]
-paste.app_factory = keystone.service:public_app_factory
-
-[app:admin_service]
-paste.app_factory = keystone.service:admin_app_factory
-
-[pipeline:public_api]
-pipeline = token_auth admin_token_auth xml_body json_body debug ec2_extension public_service
-
-[pipeline:admin_api]
-pipeline = token_auth admin_token_auth xml_body json_body debug ec2_extension s3_extension crud_extension admin_service
-
-[app:public_version_service]
-paste.app_factory = keystone.service:public_version_app_factory
-
-[app:admin_version_service]
-paste.app_factory = keystone.service:admin_version_app_factory
-
-[pipeline:public_version_api]
-pipeline = xml_body public_version_service
-
-[pipeline:admin_version_api]
-pipeline = xml_body admin_version_service
-
-[composite:main]
-use = egg:Paste#urlmap
-/v2.0 = public_api
-/ = public_version_api
-
-[composite:admin]
-use = egg:Paste#urlmap
-/v2.0 = admin_api
-/ = admin_version_api
diff --git a/files/swift/account-server.conf b/files/swift/account-server.conf
index db0f097..763c306 100644
--- a/files/swift/account-server.conf
+++ b/files/swift/account-server.conf
@@ -4,7 +4,7 @@
 bind_port = %BIND_PORT%
 user = %USER%
 log_facility = LOG_LOCAL%LOG_FACILITY%
-swift_dir = %SWIFT_CONFIG_LOCATION%
+swift_dir = %SWIFT_CONFIG_DIR%
 
 [pipeline:main]
 pipeline = account-server
diff --git a/files/swift/container-server.conf b/files/swift/container-server.conf
index bdc3e3a..106dcab 100644
--- a/files/swift/container-server.conf
+++ b/files/swift/container-server.conf
@@ -4,7 +4,7 @@
 bind_port = %BIND_PORT%
 user = %USER%
 log_facility = LOG_LOCAL%LOG_FACILITY%
-swift_dir = %SWIFT_CONFIG_LOCATION%
+swift_dir = %SWIFT_CONFIG_DIR%
 
 [pipeline:main]
 pipeline = container-server
diff --git a/files/swift/object-server.conf b/files/swift/object-server.conf
index 2f888a2..7eea67d 100644
--- a/files/swift/object-server.conf
+++ b/files/swift/object-server.conf
@@ -4,7 +4,7 @@
 bind_port = %BIND_PORT%
 user = %USER%
 log_facility = LOG_LOCAL%LOG_FACILITY%
-swift_dir = %SWIFT_CONFIG_LOCATION%
+swift_dir = %SWIFT_CONFIG_DIR%
 
 [pipeline:main]
 pipeline = object-server
diff --git a/files/swift/proxy-server.conf b/files/swift/proxy-server.conf
index 1627af0..ce5473b 100644
--- a/files/swift/proxy-server.conf
+++ b/files/swift/proxy-server.conf
@@ -1,7 +1,7 @@
 [DEFAULT]
 bind_port = 8080
 user = %USER%
-swift_dir = %SWIFT_CONFIG_LOCATION%
+swift_dir = %SWIFT_CONFIG_DIR%
 workers = 1
 log_name = swift
 log_facility = LOG_LOCAL1
diff --git a/files/swift/rsyncd.conf b/files/swift/rsyncd.conf
index 66215c7..4e0dcbf 100644
--- a/files/swift/rsyncd.conf
+++ b/files/swift/rsyncd.conf
@@ -6,74 +6,74 @@
 
 [account6012]
 max connections = 25
-path = %SWIFT_DATA_LOCATION%/1/node/
+path = %SWIFT_DATA_DIR%/1/node/
 read only = false
 lock file = /var/lock/account6012.lock
 
 [account6022]
 max connections = 25
-path = %SWIFT_DATA_LOCATION%/2/node/
+path = %SWIFT_DATA_DIR%/2/node/
 read only = false
 lock file = /var/lock/account6022.lock
 
 [account6032]
 max connections = 25
-path = %SWIFT_DATA_LOCATION%/3/node/
+path = %SWIFT_DATA_DIR%/3/node/
 read only = false
 lock file = /var/lock/account6032.lock
 
 [account6042]
 max connections = 25
-path = %SWIFT_DATA_LOCATION%/4/node/
+path = %SWIFT_DATA_DIR%/4/node/
 read only = false
 lock file = /var/lock/account6042.lock
 
 
 [container6011]
 max connections = 25
-path = %SWIFT_DATA_LOCATION%/1/node/
+path = %SWIFT_DATA_DIR%/1/node/
 read only = false
 lock file = /var/lock/container6011.lock
 
 [container6021]
 max connections = 25
-path = %SWIFT_DATA_LOCATION%/2/node/
+path = %SWIFT_DATA_DIR%/2/node/
 read only = false
 lock file = /var/lock/container6021.lock
 
 [container6031]
 max connections = 25
-path = %SWIFT_DATA_LOCATION%/3/node/
+path = %SWIFT_DATA_DIR%/3/node/
 read only = false
 lock file = /var/lock/container6031.lock
 
 [container6041]
 max connections = 25
-path = %SWIFT_DATA_LOCATION%/4/node/
+path = %SWIFT_DATA_DIR%/4/node/
 read only = false
 lock file = /var/lock/container6041.lock
 
 
 [object6010]
 max connections = 25
-path = %SWIFT_DATA_LOCATION%/1/node/
+path = %SWIFT_DATA_DIR%/1/node/
 read only = false
 lock file = /var/lock/object6010.lock
 
 [object6020]
 max connections = 25
-path = %SWIFT_DATA_LOCATION%/2/node/
+path = %SWIFT_DATA_DIR%/2/node/
 read only = false
 lock file = /var/lock/object6020.lock
 
 [object6030]
 max connections = 25
-path = %SWIFT_DATA_LOCATION%/3/node/
+path = %SWIFT_DATA_DIR%/3/node/
 read only = false
 lock file = /var/lock/object6030.lock
 
 [object6040]
 max connections = 25
-path = %SWIFT_DATA_LOCATION%/4/node/
+path = %SWIFT_DATA_DIR%/4/node/
 read only = false
 lock file = /var/lock/object6040.lock
diff --git a/functions b/functions
index 75c20d7..5114de1 100644
--- a/functions
+++ b/functions
@@ -1,4 +1,7 @@
 # functions - Common functions used by DevStack components
+#
+# ENABLED_SERVICES is used by is_service_enabled()
+
 
 # Save trace setting
 XTRACE=$(set +o | grep xtrace)
@@ -6,7 +9,7 @@
 
 
 # apt-get wrapper to set arguments correctly
-# apt_get package [package ...]
+# apt_get operation package [package ...]
 function apt_get() {
     [[ "$OFFLINE" = "True" || -z "$@" ]] && return
     local sudo="sudo"
@@ -70,6 +73,71 @@
 }
 
 
+# Determine OS Vendor, Release and Update
+# Tested with OS/X, Ubuntu, RedHat, CentOS, Fedora
+# Returns results in global variables:
+# os_VENDOR - vendor name
+# os_RELEASE - release
+# os_UPDATE - update
+# os_PACKAGE - package type
+# os_CODENAME - vendor's codename for release
+# GetOSVersion
+GetOSVersion() {
+    # Figure out which vendor we are
+    if [[ -n "`which sw_vers 2>/dev/null`" ]]; then
+        # OS/X
+        os_VENDOR=`sw_vers -productName`
+        os_RELEASE=`sw_vers -productVersion`
+        os_UPDATE=${os_RELEASE##*.}
+        os_RELEASE=${os_RELEASE%.*}
+        os_PACKAGE=""
+        if [[ "$os_RELEASE" =~ "10.7" ]]; then
+            os_CODENAME="lion"
+        elif [[ "$os_RELEASE" =~ "10.6" ]]; then
+            os_CODENAME="snow leopard"
+        elif [[ "$os_RELEASE" =~ "10.5" ]]; then
+            os_CODENAME="leopard"
+        elif [[ "$os_RELEASE" =~ "10.4" ]]; then
+            os_CODENAME="tiger"
+        elif [[ "$os_RELEASE" =~ "10.3" ]]; then
+            os_CODENAME="panther"
+        else
+            os_CODENAME=""
+        fi
+    elif [[ -x $(which lsb_release 2>/dev/null) ]]; then
+        os_VENDOR=$(lsb_release -i -s)
+        os_RELEASE=$(lsb_release -r -s)
+        os_UPDATE=""
+        if [[ "Debian,Ubuntu" =~ $os_VENDOR ]]; then
+            os_PACKAGE="deb"
+        else
+            os_PACKAGE="rpm"
+        fi
+        os_CODENAME=$(lsb_release -c -s)
+    elif [[ -r /etc/redhat-release ]]; then
+        # Red Hat Enterprise Linux Server release 5.5 (Tikanga)
+        # CentOS release 5.5 (Final)
+        # CentOS Linux release 6.0 (Final)
+        # Fedora release 16 (Verne)
+        os_CODENAME=""
+        for r in "Red Hat" CentOS Fedora; do
+            os_VENDOR=$r
+            if [[ -n "`grep \"$r\" /etc/redhat-release`" ]]; then
+                ver=`sed -e 's/^.* \(.*\) (\(.*\)).*$/\1\|\2/' /etc/redhat-release`
+                os_CODENAME=${ver#*|}
+                os_RELEASE=${ver%|*}
+                os_UPDATE=${os_RELEASE##*.}
+                os_RELEASE=${os_RELEASE%.*}
+                break
+            fi
+            os_VENDOR=""
+        done
+        os_PACKAGE="rpm"
+    fi
+    export os_VENDOR os_RELEASE os_UPDATE os_PACKAGE os_CODENAME
+}
+
+
 # git clone only if directory doesn't exist already.  Since ``DEST`` might not
 # be owned by the installation user, we create the directory and change the
 # ownership to the proper user.
@@ -115,6 +183,51 @@
 }
 
 
+# Comment an option in an INI file
+# iniset config-file section option
+function inicomment() {
+    local file=$1
+    local section=$2
+    local option=$3
+    sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=.*$\)|#\1|" $file
+}
+
+
+# Get an option from an INI file
+# iniget config-file section option
+function iniget() {
+    local file=$1
+    local section=$2
+    local option=$3
+    local line
+    line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" $file)
+    echo ${line#*=}
+}
+
+
+# Set an option in an INI file
+# iniset config-file section option value
+function iniset() {
+    local file=$1
+    local section=$2
+    local option=$3
+    local value=$4
+    if ! grep -q "^\[$section\]" $file; then
+        # Add section at the end
+        echo -e "\n[$section]" >>$file
+    fi
+    if [[ -z "$(iniget $file $section $option)" ]]; then
+        # Add it
+        sed -i -e "/^\[$section\]/ a\\
+$option = $value
+" $file
+    else
+        # Replace it
+        sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=[ \t]*\).*$|\1$value|" $file
+    fi
+}
+
+
 # is_service_enabled() checks if the service(s) specified as arguments are
 # enabled by the user in **ENABLED_SERVICES**.
 #
@@ -138,6 +251,20 @@
 }
 
 
+# Distro-agnostic package installer
+# install_package package [package ...]
+function install_package() {
+    if [[ -z "$os_PACKAGE" ]]; then
+        GetOSVersion
+    fi
+    if [[ "$os_PACKAGE" = "deb" ]]; then
+        apt_get install "$@"
+    else
+        yum_install "$@"
+    fi
+}
+
+
 # Test if the named environment variable is set and not zero length
 # is_set env-var
 function is_set() {
@@ -153,10 +280,39 @@
 # pip_install package [package ...]
 function pip_install {
     [[ "$OFFLINE" = "True" || -z "$@" ]] && return
+    if [[ -z "$os_PACKAGE" ]]; then
+        GetOSVersion
+    fi
+    if [[ "$os_PACKAGE" = "deb" ]]; then
+        CMD_PIP=/usr/bin/pip
+    else
+        CMD_PIP=/usr/bin/pip-python
+    fi
     sudo PIP_DOWNLOAD_CACHE=/var/cache/pip \
         HTTP_PROXY=$http_proxy \
         HTTPS_PROXY=$https_proxy \
-        pip install --use-mirrors $@
+        $CMD_PIP install --use-mirrors $@
+}
+
+
+# Service wrapper to restart services
+# restart_service service-name
+function restart_service() {
+    sudo /usr/sbin/service $1 restart
+}
+
+
+# Service wrapper to start services
+# start_service service-name
+function start_service() {
+    sudo /usr/sbin/service $1 start
+}
+
+
+# Service wrapper to stop services
+# stop_service service-name
+function stop_service() {
+    sudo /usr/sbin/service $1 stop
 }
 
 
@@ -172,5 +328,17 @@
     echo "$default"
 }
 
+
+# yum wrapper to set arguments correctly
+# yum_install package [package ...]
+function yum_install() {
+    [[ "$OFFLINE" = "True" ]] && return
+    local sudo="sudo"
+    [[ "$(id -u)" = "0" ]] && sudo="env"
+    $sudo http_proxy=$http_proxy https_proxy=$https_proxy \
+        yum install -y "$@"
+}
+
+
 # Restore xtrace
 $XTRACE
diff --git a/samples/local.sh b/samples/local.sh
new file mode 100755
index 0000000..83637f9
--- /dev/null
+++ b/samples/local.sh
@@ -0,0 +1,59 @@
+#!/usr/bin/env bash
+
+# Sample ``local.sh`` for user-configurable tasks to run automatically
+# at the successful conclusion of ``stack.sh``.
+
+# NOTE: Copy this file to the root ``devstack`` directory for it to
+# work properly.
+
+# This is a collection of some of the things we have found to be useful to run
+# after stack.sh to tweak the OpenStack configuration that DevStack produces.
+# These should be considered as samples and are unsupported DevStack code.
+
+# Keep track of the devstack directory
+TOP_DIR=$(cd $(dirname "$0") && pwd)
+
+# Use openrc + stackrc + localrc for settings
+source $TOP_DIR/stackrc
+
+# Destination path for installation ``DEST``
+DEST=${DEST:-/opt/stack}
+
+
+# Import ssh keys
+# ---------------
+
+# Import keys from the current user into the default OpenStack user (usually
+# ``demo``)
+
+# Get OpenStack auth
+source $TOP_DIR/openrc
+
+# Add first keypair found in localhost:$HOME/.ssh
+for i in $HOME/.ssh/id_rsa.pub $HOME/.ssh/id_dsa.pub; do
+    if [[ -f $i ]]; then
+        nova keypair-add --pub_key=$i `hostname`
+        break
+    fi
+done
+
+
+# Create A Flavor
+# ---------------
+
+# Get OpenStack admin auth
+source $TOP_DIR/openrc admin admin
+
+# Name of new flavor
+# set in ``localrc`` with ``DEFAULT_INSTANCE_TYPE=m1.micro``
+MI_NAME=m1.micro
+
+# Create micro flavor if not present
+if [[ -z $(nova flavor-list | grep $MI_NAME) ]]; then
+    nova flavor-create $MI_NAME 6 128 0 1
+fi
+# Other Uses
+# ----------
+
+# Add tcp/22 to default security group
+
diff --git a/samples/localrc b/samples/localrc
new file mode 100644
index 0000000..4fb093d
--- /dev/null
+++ b/samples/localrc
@@ -0,0 +1,77 @@
+# Sample ``localrc`` for user-configurable variables in ``stack.sh``
+
+# NOTE: Copy this file to the root ``devstack`` directory for it to work properly.
+
+# ``localrc`` is a user-maintained settings file that is sourced at the end of
+# ``stackrc``. This gives it the ability to override any variables set in ``stackrc``.
+# Also, most of the settings in ``stack.sh`` are written to only be set if no
+# value has already been set; this lets ``localrc`` effectively override the
+# default values.
+
+# This is a collection of some of the settings we have found to be useful
+# in our DevStack development environments. Additional settings are described
+# in http://devstack.org/localrc.html
+# These should be considered as samples and are unsupported DevStack code.
+
+
+# Minimal Contents
+# ----------------
+
+# While ``stack.sh`` is happy to run without ``localrc``, devlife is better when
+# there are a few minimal variables set:
+
+# If the ``*_PASSWORD`` variables are not set here you will be prompted to enter
+# values for them by ``stack.sh``.
+ADMIN_PASSWORD=nomoresecrete
+MYSQL_PASSWORD=stackdb
+RABBIT_PASSWORD=stackqueue
+SERVICE_PASSWORD=$ADMIN_PASSWORD
+
+# HOST_IP should be set manually for best results.  It is auto-detected during the
+# first run of ``stack.sh`` but often is indeterminate on later runs due to the IP
+# being moved from an Ethernet interface to a bridge on the host. Setting it here
+# also makes it available for ``openrc`` to include when setting ``OS_AUTH_URL``.
+# ``HOST_IP`` is not set by default.
+HOST_IP=w.x.y.z
+
+
+# Set DevStack Install Directory
+# ------------------------------
+
+# The DevStack install directory is set by the ``DEST`` variable. By setting it
+# early in ``localrc`` you can reference it in later variables. The default value
+# is ``/opt/stack``. It can be useful to set it even though it is not changed from
+# the default value.
+DEST=/opt/stack
+
+
+# Using milestone-proposed branches
+# ---------------------------------
+
+# Uncomment these to grab the milestone-proposed branches from the repos:
+#GLANCE_BRANCH=milestone-proposed
+#HORIZON_BRANCH=milestone-proposed
+#KEYSTONE_BRANCH=milestone-proposed
+#KEYSTONECLIENT_BRANCH=milestone-proposed
+#NOVA_BRANCH=milestone-proposed
+#NOVACLIENT_BRANCH=milestone-proposed
+#SWIFT_BRANCH=milestone-proposed
+
+
+# Swift
+# -----
+
+# Swift is now used as the back-end for the S3-like object store. If Nova's
+# objectstore (``n-obj`` in ``ENABLED_SERVICES``) is enabled, it will NOT
+# run if Swift is enabled. Setting the hash value is required and you will
+# be prompted for it if Swift is enabled so just set it to something already:
+SWIFT_HASH=66a3d6b56c1f479c8b4e70ab5c2000f5
+
+# For development purposes the default of 3 replicas is usually not required.
+# Set this to 1 to save some resources:
+SWIFT_REPLICAS=1
+
+# The data for Swift is stored in the source tree by default (``$DEST/swift/data``)
+# and can be moved by setting ``SWIFT_DATA_DIR``. The directory will be created
+# if it does not exist.
+SWIFT_DATA_DIR=$DEST/data
diff --git a/stack.sh b/stack.sh
index 3a7fc5d..8a93608 100755
--- a/stack.sh
+++ b/stack.sh
@@ -1,9 +1,8 @@
 #!/usr/bin/env bash
 
-# **stack.sh** is an opinionated OpenStack developer installation.
-
-# This script installs and configures various combinations of *Glance*,
-# *Horizon*, *Keystone*, *Melange*, *Nova*, *Quantum* and *Swift*
+# ``stack.sh`` is an opinionated OpenStack developer installation.  It
+# installs and configures various combinations of **Glance**, **Horizon**,
+# **Keystone**, **Melange**, **Nova**, **Quantum** and **Swift**
 
 # This script allows you to specify configuration options of what git
 # repositories to use, enabled services, network configuration and various
@@ -12,42 +11,30 @@
 # developer install.
 
 # To keep this script simple we assume you are running on an **Ubuntu 11.10
-# Oneiric** machine.  It should work in a VM or physical server.  Additionally
-# we put the list of *apt* and *pip* dependencies and other configuration files
-# in this repo.  So start by grabbing this script and the dependencies.
+# Oneiric** or **Ubuntu 12.04 Precise** machine.  It should work in a VM or
+# physical server.  Additionally we put the list of ``apt`` and ``pip``
+# dependencies and other configuration files in this repo.  So start by
+# grabbing this script and the dependencies.
 
 # Learn more and get the most recent version at http://devstack.org
 
-
-# Sanity Check
-# ============
-
-# Warn users who aren't on oneiric, but allow them to override check and attempt
-# installation with ``FORCE=yes ./stack``
-DISTRO=$(lsb_release -c -s)
-
-if [[ ! ${DISTRO} =~ (oneiric|precise) ]]; then
-    echo "WARNING: this script has only been tested on oneiric"
-    if [[ "$FORCE" != "yes" ]]; then
-        echo "If you wish to run this script anyway run with FORCE=yes"
-        exit 1
-    fi
-fi
-
-# Keep track of the current devstack directory.
+# Keep track of the devstack directory
 TOP_DIR=$(cd $(dirname "$0") && pwd)
 
 # Import common functions
-. $TOP_DIR/functions
+source $TOP_DIR/functions
 
-# stack.sh keeps the list of **apt** and **pip** dependencies in external
-# files, along with config templates and other useful files.  You can find these
-# in the ``files`` directory (next to this script).  We will reference this
-# directory using the ``FILES`` variable in this script.
-FILES=$TOP_DIR/files
-if [ ! -d $FILES ]; then
-    echo "ERROR: missing devstack/files - did you grab more than just stack.sh?"
-    exit 1
+# Determine what system we are running on.  This provides ``os_VENDOR``,
+# ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME``
+GetOSVersion
+
+# Translate the OS version values into common nomenclature
+if [[ "$os_VENDOR" =~ (Ubuntu) ]]; then
+    # 'Everyone' refers to Ubuntu releases by the code name adjective
+    DISTRO=$os_CODENAME
+else
+    # Catch-all for now is Vendor + Release + Update
+    DISTRO="$os_VENDOR-$os_RELEASE.$os_UPDATE"
 fi
 
 
@@ -72,21 +59,49 @@
 #
 # DevStack distributes ``stackrc`` which contains locations for the OpenStack
 # repositories and branches to configure.  ``stackrc`` sources ``localrc`` to
-# allow you to override those settings and not have your changes overwritten
+# allow you to safely override those settings without being overwritten
 # when updating DevStack.
 
-# We support HTTP and HTTPS proxy servers via the usual environment variables
-# **http_proxy** and **https_proxy**.  They can be set in ``localrc`` if necessary or
-# on the command line::
+# HTTP and HTTPS proxy servers are supported via the usual environment variables
+# ``http_proxy`` and ``https_proxy``.  They can be set in ``localrc`` if necessary
+# or on the command line::
 #
 #     http_proxy=http://proxy.example.com:3128/ ./stack.sh
 
+if [[ ! -r $TOP_DIR/stackrc ]]; then
+    echo "ERROR: missing $TOP_DIR/stackrc - did you grab more than just stack.sh?"
+    exit 1
+fi
 source ./stackrc
 
 # Destination path for installation ``DEST``
 DEST=${DEST:-/opt/stack}
 
-# Check to see if we are already running a stack.sh
+
+# Sanity Check
+# ============
+
+# Warn users who aren't on an explicitly supported distro, but allow them to
+# override check and attempt installation with ``FORCE=yes ./stack``
+if [[ ! ${DISTRO} =~ (oneiric|precise) ]]; then
+    echo "WARNING: this script has only been tested on oneiric and precise"
+    if [[ "$FORCE" != "yes" ]]; then
+        echo "If you wish to run this script anyway run with FORCE=yes"
+        exit 1
+    fi
+fi
+
+# stack.sh keeps the list of ``apt`` and ``pip`` dependencies in external
+# files, along with config templates and other useful files.  You can find these
+# in the ``files`` directory (next to this script).  We will reference this
+# directory using the ``FILES`` variable in this script.
+FILES=$TOP_DIR/files
+if [ ! -d $FILES ]; then
+    echo "ERROR: missing devstack/files - did you grab more than just stack.sh?"
+    exit 1
+fi
+
+# Check to see if we are already running DevStack
 if type -p screen >/dev/null && screen -ls | egrep -q "[0-9].stack"; then
     echo "You are already running a stack.sh session."
     echo "To rejoin this session type 'screen -x stack'."
@@ -107,7 +122,7 @@
 
     # since this script runs as a normal user, we need to give that user
     # ability to run sudo
-    dpkg -l sudo || apt_get update && apt_get install sudo
+    dpkg -l sudo || apt_get update && install_package sudo
 
     if ! getent passwd stack >/dev/null; then
         echo "Creating a user called stack"
@@ -268,6 +283,7 @@
     set -o xtrace
 }
 
+
 # Nova Network Configuration
 # --------------------------
 
@@ -375,13 +391,13 @@
 # TODO: add logging to different location.
 
 # By default the location of swift drives and objects is located inside
-# the swift source directory. SWIFT_DATA_LOCATION variable allow you to redefine
+# the swift source directory. The SWIFT_DATA_DIR variable allows you to redefine
 # this.
-SWIFT_DATA_LOCATION=${SWIFT_DATA_LOCATION:-${SWIFT_DIR}/data}
+SWIFT_DATA_DIR=${SWIFT_DATA_DIR:-${DEST}/data/swift}
 
 # We are going to have the configuration files inside the source
-# directory, change SWIFT_CONFIG_LOCATION if you want to adjust that.
-SWIFT_CONFIG_LOCATION=${SWIFT_CONFIG_LOCATION:-${SWIFT_DIR}/config}
+# directory, change SWIFT_CONFIG_DIR if you want to adjust that.
+SWIFT_CONFIG_DIR=${SWIFT_CONFIG_DIR:-/etc/swift}
 
 # devstack will create a loop-back disk formatted as XFS to store the
 # swift data. By default the disk size is 1 gigabyte. The variable
@@ -590,7 +606,7 @@
 
 # install apt requirements
 apt_get update
-apt_get install $(get_packages $FILES/apts)
+install_package $(get_packages $FILES/apts)
 
 # install python requirements
 pip_install $(get_packages $FILES/pips | sort -u)
@@ -677,7 +693,7 @@
 # ------
 
 if [[ $SYSLOG != "False" ]]; then
-    apt_get install -y rsyslog-relp
+    install_package rsyslog-relp
     if [[ "$SYSLOG_HOST" = "$HOST_IP" ]]; then
         # Configure the master host to receive
         cat <<EOF >/tmp/90-stack-m.conf
@@ -692,7 +708,7 @@
 EOF
         sudo mv /tmp/90-stack-s.conf /etc/rsyslog.d
     fi
-    sudo /usr/sbin/service rsyslog restart
+    restart_service rsyslog
 fi
 
 
@@ -703,7 +719,7 @@
     # Install and start rabbitmq-server
     # the temp file is necessary due to LP: #878600
     tfile=$(mktemp)
-    apt_get install rabbitmq-server > "$tfile" 2>&1
+    install_package rabbitmq-server > "$tfile" 2>&1
     cat "$tfile"
     rm -f "$tfile"
     # change the rabbit password since the default is "guest"
@@ -738,15 +754,64 @@
     fi
 
     # Install and start mysql-server
-    apt_get install mysql-server
+    install_package mysql-server
     # Update the DB to give user ‘$MYSQL_USER’@’%’ full control of the all databases:
     sudo mysql -uroot -p$MYSQL_PASSWORD -h127.0.0.1 -e "GRANT ALL PRIVILEGES ON *.* TO '$MYSQL_USER'@'%' identified by '$MYSQL_PASSWORD';"
 
     # Edit /etc/mysql/my.cnf to change ‘bind-address’ from localhost (127.0.0.1) to any (0.0.0.0) and restart the mysql service:
     sudo sed -i 's/127.0.0.1/0.0.0.0/g' /etc/mysql/my.cnf
-    sudo service mysql restart
+    restart_service mysql
 fi
 
+if [ -z "$SCREEN_HARDSTATUS" ]; then
+    SCREEN_HARDSTATUS='%{= .} %-Lw%{= .}%> %n%f %t*%{= .}%+Lw%< %-=%{g}(%{d}%H/%l%{g})'
+fi
+
+# Our screenrc file builder
+function screen_rc {
+    SCREENRC=$TOP_DIR/stack-screenrc
+    if [[ ! -e $SCREENRC ]]; then
+        # Name the screen session
+        echo "sessionname stack" > $SCREENRC
+        # Set a reasonable statusbar
+        echo "hardstatus alwayslastline '$SCREEN_HARDSTATUS'" >> $SCREENRC
+        echo "screen -t stack bash" >> $SCREENRC
+    fi
+    # If this service doesn't already exist in the screenrc file
+    if ! grep $1 $SCREENRC 2>&1 > /dev/null; then
+        NL=`echo -ne '\015'`
+        echo "screen -t $1 bash" >> $SCREENRC
+        echo "stuff \"$2$NL\"" >> $SCREENRC
+    fi
+}
+
+# Our screen helper to launch a service in a hidden named screen
+function screen_it {
+    NL=`echo -ne '\015'`
+    if is_service_enabled $1; then
+        # Append the service to the screen rc file
+        screen_rc "$1" "$2"
+
+        screen -S stack -X screen -t $1
+        # sleep to allow bash to be ready to receive the command - we are
+        # creating a new window in screen and then sending characters, so if
+        # bash isn't running by the time we send the command, nothing happens
+        sleep 1.5
+
+        if [[ -n ${SCREEN_LOGDIR} ]]; then
+            screen -S stack -p $1 -X logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log
+            screen -S stack -p $1 -X log on
+            ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log
+        fi
+        screen -S stack -p $1 -X stuff "$2$NL"
+    fi
+}
+
+# create a new named screen to run processes in
+screen -d -m -S stack -t stack -s /bin/bash
+sleep 1
+# set a reasonable statusbar
+screen -r stack -X hardstatus alwayslastline "$SCREEN_HARDSTATUS"
 
 # Horizon
 # -------
@@ -756,7 +821,7 @@
 if is_service_enabled horizon; then
 
     # Install apache2, which is NOPRIME'd
-    apt_get install apache2 libapache2-mod-wsgi
+    install_package apache2 libapache2-mod-wsgi
 
 
     # Remove stale session database.
@@ -781,7 +846,7 @@
         s,%GROUP%,$APACHE_GROUP,g;
         s,%HORIZON_DIR%,$HORIZON_DIR,g;
     " -i /etc/apache2/sites-enabled/000-default
-    sudo service apache2 restart
+    restart_service apache2
 fi
 
 
@@ -803,49 +868,128 @@
 
     # (re)create glance database
     mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS glance;'
-    mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE glance;'
-
-    function glance_config {
-        sudo sed -e "
-            s,%KEYSTONE_API_PORT%,$KEYSTONE_API_PORT,g;
-            s,%KEYSTONE_AUTH_HOST%,$KEYSTONE_AUTH_HOST,g;
-            s,%KEYSTONE_AUTH_PORT%,$KEYSTONE_AUTH_PORT,g;
-            s,%KEYSTONE_AUTH_PROTOCOL%,$KEYSTONE_AUTH_PROTOCOL,g;
-            s,%KEYSTONE_SERVICE_HOST%,$KEYSTONE_SERVICE_HOST,g;
-            s,%KEYSTONE_SERVICE_PORT%,$KEYSTONE_SERVICE_PORT,g;
-            s,%KEYSTONE_SERVICE_PROTOCOL%,$KEYSTONE_SERVICE_PROTOCOL,g;
-            s,%SQL_CONN%,$BASE_SQL_CONN/glance,g;
-            s,%SERVICE_TENANT_NAME%,$SERVICE_TENANT_NAME,g;
-            s,%SERVICE_USERNAME%,glance,g;
-            s,%SERVICE_PASSWORD%,$SERVICE_PASSWORD,g;
-            s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g;
-            s,%DEST%,$DEST,g;
-            s,%SYSLOG%,$SYSLOG,g;
-        " -i $1
-    }
+    mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE glance CHARACTER SET utf8;'
 
     # Copy over our glance configurations and update them
     GLANCE_REGISTRY_CONF=$GLANCE_CONF_DIR/glance-registry.conf
-    cp $FILES/glance-registry.conf $GLANCE_REGISTRY_CONF
-    glance_config $GLANCE_REGISTRY_CONF
+    cp $GLANCE_DIR/etc/glance-registry.conf $GLANCE_REGISTRY_CONF
+    iniset $GLANCE_REGISTRY_CONF DEFAULT debug True
+    inicomment $GLANCE_REGISTRY_CONF DEFAULT log_file
+    iniset $GLANCE_REGISTRY_CONF DEFAULT sql_connection $BASE_SQL_CONN/glance?charset=utf8
+    iniset $GLANCE_REGISTRY_CONF DEFAULT use_syslog $SYSLOG
+    iniset $GLANCE_REGISTRY_CONF paste_deploy flavor keystone
 
-    if [[ -e $FILES/glance-registry-paste.ini ]]; then
-        GLANCE_REGISTRY_PASTE_INI=$GLANCE_CONF_DIR/glance-registry-paste.ini
-        cp $FILES/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI
-        glance_config $GLANCE_REGISTRY_PASTE_INI
-    fi
+    GLANCE_REGISTRY_PASTE_INI=$GLANCE_CONF_DIR/glance-registry-paste.ini
+    cp $GLANCE_DIR/etc/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI
+    iniset $GLANCE_REGISTRY_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST
+    iniset $GLANCE_REGISTRY_PASTE_INI filter:authtoken auth_port $KEYSTONE_AUTH_PORT
+    iniset $GLANCE_REGISTRY_PASTE_INI filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
+    iniset $GLANCE_REGISTRY_PASTE_INI filter:authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/
+    iniset $GLANCE_REGISTRY_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME
+    iniset $GLANCE_REGISTRY_PASTE_INI filter:authtoken admin_user glance
+    iniset $GLANCE_REGISTRY_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD
 
     GLANCE_API_CONF=$GLANCE_CONF_DIR/glance-api.conf
-    cp $FILES/glance-api.conf $GLANCE_API_CONF
-    glance_config $GLANCE_API_CONF
+    cp $GLANCE_DIR/etc/glance-api.conf $GLANCE_API_CONF
+    iniset $GLANCE_API_CONF DEFAULT debug True
+    inicomment $GLANCE_API_CONF DEFAULT log_file
+    iniset $GLANCE_API_CONF DEFAULT use_syslog $SYSLOG
+    iniset $GLANCE_API_CONF DEFAULT filesystem_store_datadir $GLANCE_IMAGE_DIR/
+    iniset $GLANCE_API_CONF paste_deploy flavor keystone
 
-    if [[ -e $FILES/glance-api-paste.ini ]]; then
-        GLANCE_API_PASTE_INI=$GLANCE_CONF_DIR/glance-api-paste.ini
-        cp $FILES/glance-api-paste.ini $GLANCE_API_PASTE_INI
-        glance_config $GLANCE_API_PASTE_INI
-    fi
+    GLANCE_API_PASTE_INI=$GLANCE_CONF_DIR/glance-api-paste.ini
+    cp $GLANCE_DIR/etc/glance-api-paste.ini $GLANCE_API_PASTE_INI
+    iniset $GLANCE_API_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST
+    iniset $GLANCE_API_PASTE_INI filter:authtoken auth_port $KEYSTONE_AUTH_PORT
+    iniset $GLANCE_API_PASTE_INI filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
+    iniset $GLANCE_API_PASTE_INI filter:authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/
+    iniset $GLANCE_API_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME
+    iniset $GLANCE_API_PASTE_INI filter:authtoken admin_user glance
+    iniset $GLANCE_API_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD
 fi
 
+# Quantum
+# -------
+
+# Quantum service
+if is_service_enabled q-svc; then
+    QUANTUM_CONF_DIR=/etc/quantum
+    if [[ ! -d $QUANTUM_CONF_DIR ]]; then
+        sudo mkdir -p $QUANTUM_CONF_DIR
+    fi
+    sudo chown `whoami` $QUANTUM_CONF_DIR
+    if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
+        # Install deps
+        # FIXME add to files/apts/quantum, but don't install if not needed!
+        kernel_version=`cat /proc/version | cut -d " " -f3`
+        install_package openvswitch-switch openvswitch-datapath-dkms linux-headers-$kernel_version
+        # Create database for the plugin/agent
+        if is_service_enabled mysql; then
+            mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS ovs_quantum;'
+            mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE IF NOT EXISTS ovs_quantum CHARACTER SET utf8;'
+        else
+            echo "mysql must be enabled in order to use the $Q_PLUGIN Quantum plugin."
+            exit 1
+        fi
+        QUANTUM_PLUGIN_INI_FILE=$QUANTUM_CONF_DIR/plugins.ini
+        # must remove this file from existing location, otherwise Quantum will prefer it
+        if [[ -e $QUANTUM_DIR/etc/plugins.ini ]]; then
+            sudo mv $QUANTUM_DIR/etc/plugins.ini $QUANTUM_PLUGIN_INI_FILE
+        fi
+        # Make sure we're using the openvswitch plugin
+        sudo sed -i -e "s/^provider =.*$/provider = quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPlugin/g" $QUANTUM_PLUGIN_INI_FILE
+    fi
+    if [[ -e $QUANTUM_DIR/etc/quantum.conf ]]; then
+        sudo mv $QUANTUM_DIR/etc/quantum.conf $QUANTUM_CONF_DIR/quantum.conf
+    fi
+    screen_it q-svc "cd $QUANTUM_DIR && PYTHONPATH=.:$QUANTUM_CLIENT_DIR:$PYTHONPATH python $QUANTUM_DIR/bin/quantum-server $QUANTUM_CONF_DIR/quantum.conf"
+fi
+
+# Quantum agent (for compute nodes)
+if is_service_enabled q-agt; then
+    if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
+        # Set up integration bridge
+        OVS_BRIDGE=${OVS_BRIDGE:-br-int}
+        sudo ovs-vsctl --no-wait -- --if-exists del-br $OVS_BRIDGE
+        sudo ovs-vsctl --no-wait add-br $OVS_BRIDGE
+        sudo ovs-vsctl --no-wait br-set-external-id $OVS_BRIDGE bridge-id br-int
+
+        # Start up the quantum <-> openvswitch agent
+        QUANTUM_OVS_CONF_DIR=$QUANTUM_CONF_DIR/plugins/openvswitch
+        mkdir -p $QUANTUM_OVS_CONF_DIR
+        QUANTUM_OVS_CONFIG_FILE=$QUANTUM_OVS_CONF_DIR/ovs_quantum_plugin.ini
+        if [[ -e $QUANTUM_DIR/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini ]]; then
+            sudo mv $QUANTUM_DIR/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini $QUANTUM_OVS_CONFIG_FILE
+        fi
+        sudo sed -i -e "s/^sql_connection =.*$/sql_connection = mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/ovs_quantum?charset=utf8/g" $QUANTUM_OVS_CONFIG_FILE
+        screen_it q-agt "sleep 4; sudo python $QUANTUM_DIR/quantum/plugins/openvswitch/agent/ovs_quantum_agent.py $QUANTUM_OVS_CONFIG_FILE -v"
+    fi
+
+fi
+
+# Melange service
+if is_service_enabled m-svc; then
+    if is_service_enabled mysql; then
+        mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS melange;'
+        mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE melange CHARACTER SET utf8;'
+    else
+        echo "mysql must be enabled in order to use the $Q_PLUGIN Quantum plugin."
+        exit 1
+    fi
+    MELANGE_CONFIG_FILE=$MELANGE_DIR/etc/melange/melange.conf
+    cp $MELANGE_CONFIG_FILE.sample $MELANGE_CONFIG_FILE
+    sed -i -e "s/^sql_connection =.*$/sql_connection = mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/melange?charset=utf8/g" $MELANGE_CONFIG_FILE
+    cd $MELANGE_DIR && PYTHONPATH=.:$PYTHONPATH python $MELANGE_DIR/bin/melange-manage --config-file=$MELANGE_CONFIG_FILE db_sync
+    screen_it m-svc "cd $MELANGE_DIR && PYTHONPATH=.:$PYTHONPATH python $MELANGE_DIR/bin/melange-server --config-file=$MELANGE_CONFIG_FILE"
+    echo "Waiting for melange to start..."
+    if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://127.0.0.1:9898; do sleep 1; done"; then
+      echo "melange-server did not start"
+      exit 1
+    fi
+    melange mac_address_range create cidr=$M_MAC_RANGE
+fi
+
+
 
 # Nova
 # ----
@@ -900,7 +1044,7 @@
 
     # Virtualization Configuration
     # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-    apt_get install libvirt-bin
+    install_package libvirt-bin
 
     # Force IP forwarding on, just on case
     sudo sysctl -w net.ipv4.ip_forward=1
@@ -924,7 +1068,7 @@
     # to simulate multiple systems.
     if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then
         if [[ "$DISTRO" > natty ]]; then
-            apt_get install cgroup-lite
+            install_package cgroup-lite
         else
             cgline="none /cgroup cgroup cpuacct,memory,devices,cpu,freezer,blkio 0 0"
             sudo mkdir -p /cgroup
@@ -943,7 +1087,7 @@
     # libvirt detects various settings on startup, as we potentially changed
     # the system configuration (modules, filesystems), we need to restart
     # libvirt to detect those changes.
-    sudo /etc/init.d/libvirt-bin restart
+    restart_service libvirt-bin
 
 
     # Instance Storage
@@ -994,45 +1138,45 @@
 # Storage Service
 if is_service_enabled swift; then
     # Install memcached for swift.
-    apt_get install memcached
+    install_package memcached
 
     # We first do a bit of setup by creating the directories and
     # changing the permissions so we can run it as our user.
 
     USER_GROUP=$(id -g)
-    sudo mkdir -p ${SWIFT_DATA_LOCATION}/drives
-    sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_LOCATION}
+    sudo mkdir -p ${SWIFT_DATA_DIR}/drives
+    sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_DIR}
 
     # We then create a loopback disk and format it to XFS.
     # TODO: Reset disks on new pass.
-    if [[ ! -e ${SWIFT_DATA_LOCATION}/drives/images/swift.img ]]; then
-        mkdir -p  ${SWIFT_DATA_LOCATION}/drives/images
-        sudo touch  ${SWIFT_DATA_LOCATION}/drives/images/swift.img
-        sudo chown $USER: ${SWIFT_DATA_LOCATION}/drives/images/swift.img
+    if [[ ! -e ${SWIFT_DATA_DIR}/drives/images/swift.img ]]; then
+        mkdir -p  ${SWIFT_DATA_DIR}/drives/images
+        sudo touch  ${SWIFT_DATA_DIR}/drives/images/swift.img
+        sudo chown $USER: ${SWIFT_DATA_DIR}/drives/images/swift.img
 
-        dd if=/dev/zero of=${SWIFT_DATA_LOCATION}/drives/images/swift.img \
+        dd if=/dev/zero of=${SWIFT_DATA_DIR}/drives/images/swift.img \
             bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE}
-        mkfs.xfs -f -i size=1024  ${SWIFT_DATA_LOCATION}/drives/images/swift.img
+        mkfs.xfs -f -i size=1024  ${SWIFT_DATA_DIR}/drives/images/swift.img
     fi
 
     # After the drive being created we mount the disk with a few mount
     # options to make it most efficient as possible for swift.
-    mkdir -p ${SWIFT_DATA_LOCATION}/drives/sdb1
-    if ! egrep -q ${SWIFT_DATA_LOCATION}/drives/sdb1 /proc/mounts; then
+    mkdir -p ${SWIFT_DATA_DIR}/drives/sdb1
+    if ! egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then
         sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8  \
-            ${SWIFT_DATA_LOCATION}/drives/images/swift.img ${SWIFT_DATA_LOCATION}/drives/sdb1
+            ${SWIFT_DATA_DIR}/drives/images/swift.img ${SWIFT_DATA_DIR}/drives/sdb1
     fi
 
     # We then create link to that mounted location so swift would know
     # where to go.
     for x in $(seq ${SWIFT_REPLICAS}); do
-        sudo ln -sf ${SWIFT_DATA_LOCATION}/drives/sdb1/$x ${SWIFT_DATA_LOCATION}/$x; done
+        sudo ln -sf ${SWIFT_DATA_DIR}/drives/sdb1/$x ${SWIFT_DATA_DIR}/$x; done
 
     # We now have to emulate a few different servers into one we
     # create all the directories needed for swift
     for x in $(seq ${SWIFT_REPLICAS}); do
-            drive=${SWIFT_DATA_LOCATION}/drives/sdb1/${x}
-            node=${SWIFT_DATA_LOCATION}/${x}/node
+            drive=${SWIFT_DATA_DIR}/drives/sdb1/${x}
+            node=${SWIFT_DATA_DIR}/${x}/node
             node_device=${node}/sdb1
             [[ -d $node ]] && continue
             [[ -d $drive ]] && continue
@@ -1041,17 +1185,23 @@
             sudo chown -R $USER: ${node}
     done
 
-   sudo mkdir -p ${SWIFT_CONFIG_LOCATION}/{object,container,account}-server /var/run/swift
-   sudo chown -R $USER: ${SWIFT_CONFIG_LOCATION} /var/run/swift
+   sudo mkdir -p ${SWIFT_CONFIG_DIR}/{object,container,account}-server /var/run/swift
+   sudo chown -R $USER: ${SWIFT_CONFIG_DIR} /var/run/swift
 
-   # swift-init has a bug using /etc/swift until bug #885595 is fixed
-   # we have to create a link
-   sudo ln -sf ${SWIFT_CONFIG_LOCATION} /etc/swift
+    if [[ "$SWIFT_CONFIG_DIR" != "/etc/swift" ]]; then
+        # Some swift tools are hard-coded to use /etc/swift and are apparently not going to be fixed.
+        # Create a symlink if the config dir is moved
+        sudo ln -sf ${SWIFT_CONFIG_DIR} /etc/swift
+    fi
 
-   # Swift use rsync to syncronize between all the different
-   # partitions (which make more sense when you have a multi-node
-   # setup) we configure it with our version of rsync.
-   sed -e "s/%GROUP%/${USER_GROUP}/;s/%USER%/$USER/;s,%SWIFT_DATA_LOCATION%,$SWIFT_DATA_LOCATION," $FILES/swift/rsyncd.conf | sudo tee /etc/rsyncd.conf
+    # Swift uses rsync to synchronize between all the different
+    # partitions (which makes more sense when you have a multi-node
+    # setup); we configure it with our version of rsync.
+    sed -e "
+        s/%GROUP%/${USER_GROUP}/;
+        s/%USER%/$USER/;
+        s,%SWIFT_DATA_DIR%,$SWIFT_DATA_DIR,;
+    " $FILES/swift/rsyncd.conf | sudo tee /etc/rsyncd.conf
    sudo sed -i '/^RSYNC_ENABLE=false/ { s/false/true/ }' /etc/default/rsync
 
    # By default Swift will be installed with the tempauth middleware
@@ -1066,7 +1216,7 @@
    # We do the install of the proxy-server and swift configuration
    # replacing a few directives to match our configuration.
    sed -e "
-       s,%SWIFT_CONFIG_LOCATION%,${SWIFT_CONFIG_LOCATION},g;
+       s,%SWIFT_CONFIG_DIR%,${SWIFT_CONFIG_DIR},g;
        s,%USER%,$USER,g;
        s,%SERVICE_TENANT_NAME%,$SERVICE_TENANT_NAME,g;
        s,%SERVICE_USERNAME%,swift,g;
@@ -1081,35 +1231,40 @@
        s,%KEYSTONE_AUTH_PROTOCOL%,${KEYSTONE_AUTH_PROTOCOL},g;
        s/%AUTH_SERVER%/${swift_auth_server}/g;
     " $FILES/swift/proxy-server.conf | \
-       sudo tee  ${SWIFT_CONFIG_LOCATION}/proxy-server.conf
+       sudo tee ${SWIFT_CONFIG_DIR}/proxy-server.conf
 
-   sed -e "s/%SWIFT_HASH%/$SWIFT_HASH/" $FILES/swift/swift.conf > ${SWIFT_CONFIG_LOCATION}/swift.conf
+    sed -e "s/%SWIFT_HASH%/$SWIFT_HASH/" $FILES/swift/swift.conf > ${SWIFT_CONFIG_DIR}/swift.conf
 
-   # We need to generate a object/account/proxy configuration
-   # emulating 4 nodes on different ports we have a little function
-   # that help us doing that.
-   function generate_swift_configuration() {
-       local server_type=$1
-       local bind_port=$2
-       local log_facility=$3
-       local node_number
+    # We need to generate an object/account/proxy configuration
+    # emulating 4 nodes on different ports; we have a little function
+    # that helps us do that.
+    function generate_swift_configuration() {
+        local server_type=$1
+        local bind_port=$2
+        local log_facility=$3
+        local node_number
 
-       for node_number in $(seq ${SWIFT_REPLICAS}); do
-           node_path=${SWIFT_DATA_LOCATION}/${node_number}
-           sed -e "s,%SWIFT_CONFIG_LOCATION%,${SWIFT_CONFIG_LOCATION},;s,%USER%,$USER,;s,%NODE_PATH%,${node_path},;s,%BIND_PORT%,${bind_port},;s,%LOG_FACILITY%,${log_facility}," \
-               $FILES/swift/${server_type}-server.conf > ${SWIFT_CONFIG_LOCATION}/${server_type}-server/${node_number}.conf
-           bind_port=$(( ${bind_port} + 10 ))
-           log_facility=$(( ${log_facility} + 1 ))
-       done
-   }
-   generate_swift_configuration object 6010 2
-   generate_swift_configuration container 6011 2
-   generate_swift_configuration account 6012 2
+        for node_number in $(seq ${SWIFT_REPLICAS}); do
+            node_path=${SWIFT_DATA_DIR}/${node_number}
+            sed -e "
+                s,%SWIFT_CONFIG_DIR%,${SWIFT_CONFIG_DIR},;
+                s,%USER%,$USER,;
+                s,%NODE_PATH%,${node_path},;
+                s,%BIND_PORT%,${bind_port},;
+                s,%LOG_FACILITY%,${log_facility},
+            " $FILES/swift/${server_type}-server.conf > ${SWIFT_CONFIG_DIR}/${server_type}-server/${node_number}.conf
+            bind_port=$(( ${bind_port} + 10 ))
+            log_facility=$(( ${log_facility} + 1 ))
+        done
+    }
+    generate_swift_configuration object 6010 2
+    generate_swift_configuration container 6011 2
+    generate_swift_configuration account 6012 2
 
 
    # We have some specific configuration for swift for rsyslog. See
    # the file /etc/rsyslog.d/10-swift.conf for more info.
-   swift_log_dir=${SWIFT_DATA_LOCATION}/logs
+   swift_log_dir=${SWIFT_DATA_DIR}/logs
    rm -rf ${swift_log_dir}
    mkdir -p ${swift_log_dir}/hourly
    sudo chown -R syslog:adm ${swift_log_dir}
@@ -1119,7 +1274,7 @@
 
    # This is where we create three different rings for swift with
    # different object servers binding on different ports.
-   pushd ${SWIFT_CONFIG_LOCATION} >/dev/null && {
+   pushd ${SWIFT_CONFIG_DIR} >/dev/null && {
 
        rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz
 
@@ -1178,7 +1333,7 @@
     # By default, the backing file is 2G in size, and is stored in /opt/stack.
 
     # install the package
-    apt_get install tgt
+    install_package tgt
 
     if ! sudo vgs $VOLUME_GROUP; then
         VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DEST/nova-volumes-backing-file}
@@ -1262,7 +1417,7 @@
 if [ -n "$FLAT_INTERFACE" ]; then
     add_nova_opt "flat_interface=$FLAT_INTERFACE"
 fi
-add_nova_opt "sql_connection=$BASE_SQL_CONN/nova"
+add_nova_opt "sql_connection=$BASE_SQL_CONN/nova?charset=utf8"
 add_nova_opt "libvirt_type=$LIBVIRT_TYPE"
 add_nova_opt "instance_name_template=${INSTANCE_NAME_PREFIX}%08x"
 # All nova-compute workers need to know the vnc configuration options
@@ -1362,52 +1517,6 @@
 # so send the start command by forcing text into the window.
 # Only run the services specified in ``ENABLED_SERVICES``
 
-# Our screenrc file builder
-function screen_rc {
-    SCREENRC=$TOP_DIR/stack-screenrc
-    if [[ ! -e $SCREENRC ]]; then
-        # Name the screen session
-        echo "sessionname stack" > $SCREENRC
-        # Set a reasonable statusbar
-        echo 'hardstatus alwayslastline "%-Lw%{= BW}%50>%n%f* %t%{-}%+Lw%< %= %H"' >> $SCREENRC
-        echo "screen -t stack bash" >> $SCREENRC
-    fi
-    # If this service doesn't already exist in the screenrc file
-    if ! grep $1 $SCREENRC 2>&1 > /dev/null; then
-        NL=`echo -ne '\015'`
-        echo "screen -t $1 bash" >> $SCREENRC
-        echo "stuff \"$2$NL\"" >> $SCREENRC
-    fi
-}
-
-# Our screen helper to launch a service in a hidden named screen
-function screen_it {
-    NL=`echo -ne '\015'`
-    if is_service_enabled $1; then
-        # Append the service to the screen rc file
-        screen_rc "$1" "$2"
-
-        screen -S stack -X screen -t $1
-        # sleep to allow bash to be ready to be send the command - we are
-        # creating a new window in screen and then sends characters, so if
-        # bash isn't running by the time we send the command, nothing happens
-        sleep 1.5
-
-        if [[ -n ${SCREEN_LOGDIR} ]]; then
-            screen -S stack -p $1 -X logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log
-            screen -S stack -p $1 -X log on
-            ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log
-        fi
-        screen -S stack -p $1 -X stuff "$2$NL"
-    fi
-}
-
-# create a new named screen to run processes in
-screen -d -m -S stack -t stack -s /bin/bash
-sleep 1
-# set a reasonable statusbar
-screen -r stack -X hardstatus alwayslastline "%-Lw%{= BW}%50>%n%f* %t%{-}%+Lw%< %= %H"
-
 # launch the glance registry service
 if is_service_enabled g-reg; then
     screen_it g-reg "cd $GLANCE_DIR; bin/glance-registry --config-file=$GLANCE_CONF_DIR/glance-registry.conf"
@@ -1426,18 +1535,44 @@
 if is_service_enabled key; then
     # (re)create keystone database
     mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS keystone;'
-    mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE keystone;'
+    mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE keystone CHARACTER SET utf8;'
 
-    # Configure keystone.conf
-    KEYSTONE_CONF=$KEYSTONE_DIR/etc/keystone.conf
-    cp $FILES/keystone.conf $KEYSTONE_CONF
-    sudo sed -e "s,%SQL_CONN%,$BASE_SQL_CONN/keystone,g" -i $KEYSTONE_CONF
-    sudo sed -e "s,%DEST%,$DEST,g" -i $KEYSTONE_CONF
-    sudo sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $KEYSTONE_CONF
-    sudo sed -e "s,%KEYSTONE_DIR%,$KEYSTONE_DIR,g" -i $KEYSTONE_CONF
+    KEYSTONE_CONF_DIR=${KEYSTONE_CONF_DIR:-/etc/keystone}
+    KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf
+    KEYSTONE_CATALOG=$KEYSTONE_CONF_DIR/default_catalog.templates
 
-    KEYSTONE_CATALOG=$KEYSTONE_DIR/etc/default_catalog.templates
-    cp $FILES/default_catalog.templates $KEYSTONE_CATALOG
+    if [[ ! -d $KEYSTONE_CONF_DIR ]]; then
+        sudo mkdir -p $KEYSTONE_CONF_DIR
+        sudo chown `whoami` $KEYSTONE_CONF_DIR
+    fi
+
+    if [[ "$KEYSTONE_CONF_DIR" != "$KEYSTONE_DIR/etc" ]]; then
+        # FIXME(dtroyer): etc/keystone.conf causes trouble if the config files
+        #                 are located anywhere else (say, /etc/keystone).
+        #                 LP 966670 fixes this in keystone, we fix it
+        #                 here until the bug fix is committed.
+        if [[ -r $KEYSTONE_DIR/etc/keystone.conf ]]; then
+            # Get the sample config file out of the way
+            mv $KEYSTONE_DIR/etc/keystone.conf $KEYSTONE_DIR/etc/keystone.conf.sample
+        fi
+        cp -p $KEYSTONE_DIR/etc/keystone.conf.sample $KEYSTONE_CONF
+        cp -p $KEYSTONE_DIR/etc/policy.json $KEYSTONE_CONF_DIR
+    fi
+    cp -p $FILES/default_catalog.templates $KEYSTONE_CATALOG
+
+    # Rewrite stock keystone.conf:
+    iniset $KEYSTONE_CONF DEFAULT admin_token "$SERVICE_TOKEN"
+    iniset $KEYSTONE_CONF sql connection "$BASE_SQL_CONN/keystone?charset=utf8"
+    iniset $KEYSTONE_CONF catalog template_file "$KEYSTONE_CATALOG"
+    iniset $KEYSTONE_CONF ec2 driver "keystone.contrib.ec2.backends.sql.Ec2"
+    # Configure keystone.conf to use templates
+    iniset $KEYSTONE_CONF catalog driver "keystone.catalog.backends.templated.TemplatedCatalog"
+    iniset $KEYSTONE_CONF catalog template_file "$KEYSTONE_CATALOG"
+    sed -e "
+        /^pipeline.*ec2_extension crud_/s|ec2_extension crud_extension|ec2_extension s3_extension crud_extension|;
+    " -i $KEYSTONE_CONF
+    # Append the S3 bits
+    iniset $KEYSTONE_CONF filter:s3_extension paste.filter_factory "keystone.contrib.s3:S3Extension.factory"
 
     # Add swift endpoints to service catalog if swift is enabled
     if is_service_enabled swift; then
@@ -1455,34 +1590,32 @@
         echo "catalog.RegionOne.network.name = Quantum Service" >> $KEYSTONE_CATALOG
     fi
 
-    sudo sed -e "s,%SERVICE_HOST%,$SERVICE_HOST,g" -i $KEYSTONE_CATALOG
+    sudo sed -e "
+        s,%SERVICE_HOST%,$SERVICE_HOST,g;
+        s,%S3_SERVICE_PORT%,$S3_SERVICE_PORT,g;
+    " -i $KEYSTONE_CATALOG
 
-    sudo sed -e "s,%S3_SERVICE_PORT%,$S3_SERVICE_PORT,g" -i $KEYSTONE_CATALOG
-
+    # Set up logging
+    LOGGING_ROOT="devel"
     if [ "$SYSLOG" != "False" ]; then
-        cp $KEYSTONE_DIR/etc/logging.conf.sample $KEYSTONE_DIR/etc/logging.conf
-        sed -i -e '/^handlers=devel$/s/=devel/=production/' \
-            $KEYSTONE_DIR/etc/logging.conf
-        sed -i -e "/^log_file/s/log_file/\#log_file/" \
-            $KEYSTONE_DIR/etc/keystone.conf
-        KEYSTONE_LOG_CONFIG="--log-config $KEYSTONE_DIR/etc/logging.conf"
+        LOGGING_ROOT="$LOGGING_ROOT,production"
     fi
-fi
+    KEYSTONE_LOG_CONFIG="--log-config $KEYSTONE_CONF_DIR/logging.conf"
+    cp $KEYSTONE_DIR/etc/logging.conf.sample $KEYSTONE_CONF_DIR/logging.conf
+    iniset $KEYSTONE_CONF_DIR/logging.conf logger_root level "DEBUG"
+    iniset $KEYSTONE_CONF_DIR/logging.conf logger_root handlers "$LOGGING_ROOT"
 
-# launch the keystone and wait for it to answer before continuing
-if is_service_enabled key; then
+    # initialize keystone database
+    $KEYSTONE_DIR/bin/keystone-manage db_sync
+
+    # launch keystone and wait for it to answer before continuing
     screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d --debug"
     echo "Waiting for keystone to start..."
-    if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_API_PORT/v2.0/; do sleep 1; done"; then
+    if ! timeout $SERVICE_TIMEOUT sh -c "while http_proxy= wget -O- $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_API_PORT/v2.0/ 2>&1 | grep -q 'refused'; do sleep 1; done"; then
       echo "keystone did not start"
       exit 1
     fi
 
-    # initialize keystone with default users/endpoints
-    pushd $KEYSTONE_DIR
-    $KEYSTONE_DIR/bin/keystone-manage db_sync
-    popd
-
     # keystone_data.sh creates services, admin and demo users, and roles.
     SERVICE_ENDPOINT=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0
     ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES \
@@ -1509,79 +1642,11 @@
     fi
 fi
 
-# Quantum service
-if is_service_enabled q-svc; then
-    QUANTUM_CONF_DIR=/etc/quantum
-    if [[ ! -d $QUANTUM_CONF_DIR ]]; then
-        sudo mkdir -p $QUANTUM_CONF_DIR
-    fi
-    sudo chown `whoami` $QUANTUM_CONF_DIR
-    if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
-        # Install deps
-        # FIXME add to files/apts/quantum, but don't install if not needed!
-        apt_get install openvswitch-switch openvswitch-datapath-dkms
-        # Create database for the plugin/agent
-        if is_service_enabled mysql; then
-            mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS ovs_quantum;'
-            mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE IF NOT EXISTS ovs_quantum;'
-        else
-            echo "mysql must be enabled in order to use the $Q_PLUGIN Quantum plugin."
-            exit 1
-        fi
-        QUANTUM_PLUGIN_INI_FILE=$QUANTUM_CONF_DIR/plugins.ini
-        sudo cp $QUANTUM_DIR/etc/plugins.ini $QUANTUM_PLUGIN_INI_FILE
-        # Make sure we're using the openvswitch plugin
-        sudo sed -i -e "s/^provider =.*$/provider = quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPlugin/g" $QUANTUM_PLUGIN_INI_FILE
-    fi
-   sudo cp $QUANTUM_DIR/etc/quantum.conf $QUANTUM_CONF_DIR/quantum.conf
-   screen_it q-svc "cd $QUANTUM_DIR && PYTHONPATH=.:$QUANTUM_CLIENT_DIR:$PYTHONPATH python $QUANTUM_DIR/bin/quantum-server $QUANTUM_CONF_DIR/quantum.conf"
-fi
-
-# Quantum agent (for compute nodes)
-if is_service_enabled q-agt; then
-    if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
-        # Set up integration bridge
-        OVS_BRIDGE=${OVS_BRIDGE:-br-int}
-        sudo ovs-vsctl --no-wait -- --if-exists del-br $OVS_BRIDGE
-        sudo ovs-vsctl --no-wait add-br $OVS_BRIDGE
-        sudo ovs-vsctl --no-wait br-set-external-id $OVS_BRIDGE bridge-id br-int
-
-       # Start up the quantum <-> openvswitch agent
-       QUANTUM_OVS_CONFIG_FILE=$QUANTUM_CONF_DIR/ovs_quantum_plugin.ini
-       sudo cp $QUANTUM_DIR/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini $QUANTUM_OVS_CONFIG_FILE
-       sudo sed -i -e "s/^sql_connection =.*$/sql_connection = mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/ovs_quantum/g" $QUANTUM_OVS_CONFIG_FILE
-       screen_it q-agt "sleep 4; sudo python $QUANTUM_DIR/quantum/plugins/openvswitch/agent/ovs_quantum_agent.py $QUANTUM_OVS_CONFIG_FILE -v"
-    fi
-
-fi
-
-# Melange service
-if is_service_enabled m-svc; then
-    if is_service_enabled mysql; then
-        mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS melange;'
-        mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE melange;'
-    else
-        echo "mysql must be enabled in order to use the $Q_PLUGIN Quantum plugin."
-        exit 1
-    fi
-    MELANGE_CONFIG_FILE=$MELANGE_DIR/etc/melange/melange.conf
-    cp $MELANGE_CONFIG_FILE.sample $MELANGE_CONFIG_FILE
-    sed -i -e "s/^sql_connection =.*$/sql_connection = mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/melange/g" $MELANGE_CONFIG_FILE
-    cd $MELANGE_DIR && PYTHONPATH=.:$PYTHONPATH python $MELANGE_DIR/bin/melange-manage --config-file=$MELANGE_CONFIG_FILE db_sync
-    screen_it m-svc "cd $MELANGE_DIR && PYTHONPATH=.:$PYTHONPATH python $MELANGE_DIR/bin/melange-server --config-file=$MELANGE_CONFIG_FILE"
-    echo "Waiting for melange to start..."
-    if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://127.0.0.1:9898; do sleep 1; done"; then
-      echo "melange-server did not start"
-      exit 1
-    fi
-    melange mac_address_range create cidr=$M_MAC_RANGE
-fi
-
 # If we're using Quantum (i.e. q-svc is enabled), network creation has to
 # happen after we've started the Quantum service.
 if is_service_enabled mysql && is_service_enabled nova; then
     # create a small network
-    $NOVA_DIR/bin/nova-manage network create private $FIXED_RANGE 1 $FIXED_NETWORK_SIZE
+    $NOVA_DIR/bin/nova-manage network create private $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS
 
     # create some floating ips
     $NOVA_DIR/bin/nova-manage floating create $FLOATING_RANGE
@@ -1605,7 +1670,7 @@
 screen_it n-xvnc "cd $NOVA_DIR && ./bin/nova-xvpvncproxy --config-file $NOVA_CONF_DIR/$NOVA_CONF"
 screen_it n-cauth "cd $NOVA_DIR && ./bin/nova-consoleauth"
 screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/apache2/error.log"
-screen_it swift "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONFIG_LOCATION}/proxy-server.conf -v"
+screen_it swift "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONFIG_DIR}/proxy-server.conf -v"
 
 # Starting the nova-objectstore only if swift service is not enabled.
 # Swift will act as s3 objectstore.
@@ -1637,17 +1702,8 @@
     TOKEN=`curl -s -d  "{\"auth\":{\"passwordCredentials\": {\"username\": \"$ADMIN_USER\", \"password\": \"$ADMIN_PASSWORD\"}, \"tenantName\": \"$ADMIN_TENANT\"}}" -H "Content-type: application/json" http://$HOST_IP:5000/v2.0/tokens | python -c "import sys; import json; tok = json.loads(sys.stdin.read()); print tok['access']['token']['id'];"`
 
     # Option to upload legacy ami-tty, which works with xenserver
-    if [ $UPLOAD_LEGACY_TTY ]; then
-        if [ ! -f $FILES/tty.tgz ]; then
-            wget -c http://images.ansolabs.com/tty.tgz -O $FILES/tty.tgz
-        fi
-
-        tar -zxf $FILES/tty.tgz -C $FILES/images
-        RVAL=`glance add --silent-upload -A $TOKEN name="tty-kernel" is_public=true container_format=aki disk_format=aki < $FILES/images/aki-tty/image`
-        KERNEL_ID=`echo $RVAL | cut -d":" -f2 | tr -d " "`
-        RVAL=`glance add --silent-upload -A $TOKEN name="tty-ramdisk" is_public=true container_format=ari disk_format=ari < $FILES/images/ari-tty/image`
-        RAMDISK_ID=`echo $RVAL | cut -d":" -f2 | tr -d " "`
-        glance add -A $TOKEN name="tty" is_public=true container_format=ami disk_format=ami kernel_id=$KERNEL_ID ramdisk_id=$RAMDISK_ID < $FILES/images/ami-tty/image
+    if [[ -n "$UPLOAD_LEGACY_TTY" ]]; then
+        IMAGE_URLS="${IMAGE_URLS:+${IMAGE_URLS},}http://images.ansolabs.com/tty.tgz"
     fi
 
     for image_url in ${IMAGE_URLS//,/ }; do
@@ -1659,6 +1715,8 @@
 
         KERNEL=""
         RAMDISK=""
+        DISK_FORMAT=""
+        CONTAINER_FORMAT=""
         case "$IMAGE_FNAME" in
             *.tar.gz|*.tgz)
                 # Extract ami and aki files
@@ -1669,43 +1727,68 @@
                 rm -Rf "$xdir";
                 mkdir "$xdir"
                 tar -zxf $FILES/$IMAGE_FNAME -C "$xdir"
-                KERNEL=$(for f in "$xdir/"*-vmlinuz*; do
+                KERNEL=$(for f in "$xdir/"*-vmlinuz* "$xdir/"aki-*/image; do
                          [ -f "$f" ] && echo "$f" && break; done; true)
-                RAMDISK=$(for f in "$xdir/"*-initrd*; do
+                RAMDISK=$(for f in "$xdir/"*-initrd* "$xdir/"ari-*/image; do
                          [ -f "$f" ] && echo "$f" && break; done; true)
-                IMAGE=$(for f in "$xdir/"*.img; do
+                IMAGE=$(for f in "$xdir/"*.img "$xdir/"ami-*/image; do
                          [ -f "$f" ] && echo "$f" && break; done; true)
-                [ -n "$IMAGE_NAME" ]
-                IMAGE_NAME=$(basename "$IMAGE" ".img")
+                if [[ -z "$IMAGE_NAME" ]]; then
+                    IMAGE_NAME=$(basename "$IMAGE" ".img")
+                fi
                 ;;
             *.img)
                 IMAGE="$FILES/$IMAGE_FNAME";
                 IMAGE_NAME=$(basename "$IMAGE" ".img")
+                DISK_FORMAT=raw
+                CONTAINER_FORMAT=bare
                 ;;
             *.img.gz)
                 IMAGE="$FILES/${IMAGE_FNAME}"
                 IMAGE_NAME=$(basename "$IMAGE" ".img.gz")
+                DISK_FORMAT=raw
+                CONTAINER_FORMAT=bare
+                ;;
+            *.qcow2)
+                IMAGE="$FILES/${IMAGE_FNAME}"
+                IMAGE_NAME=$(basename "$IMAGE" ".qcow2")
+                DISK_FORMAT=qcow2
+                CONTAINER_FORMAT=bare
                 ;;
             *) echo "Do not know what to do with $IMAGE_FNAME"; false;;
         esac
 
-        # Use glance client to add the kernel the root filesystem.
-        # We parse the results of the first upload to get the glance ID of the
-        # kernel for use when uploading the root filesystem.
-        KERNEL_ID=""; RAMDISK_ID="";
-        if [ -n "$KERNEL" ]; then
-            RVAL=`glance add --silent-upload -A $TOKEN name="$IMAGE_NAME-kernel" is_public=true container_format=aki disk_format=aki < "$KERNEL"`
-            KERNEL_ID=`echo $RVAL | cut -d":" -f2 | tr -d " "`
+        if [ "$CONTAINER_FORMAT" = "bare" ]; then
+            glance add --silent-upload -A $TOKEN name="$IMAGE_NAME" is_public=true container_format=$CONTAINER_FORMAT disk_format=$DISK_FORMAT < <(zcat --force "${IMAGE}")
+        else
+            # Use glance client to add the kernel the root filesystem.
+            # We parse the results of the first upload to get the glance ID of the
+            # kernel for use when uploading the root filesystem.
+            KERNEL_ID=""; RAMDISK_ID="";
+            if [ -n "$KERNEL" ]; then
+                RVAL=`glance add --silent-upload -A $TOKEN name="$IMAGE_NAME-kernel" is_public=true container_format=aki disk_format=aki < "$KERNEL"`
+                KERNEL_ID=`echo $RVAL | cut -d":" -f2 | tr -d " "`
+            fi
+            if [ -n "$RAMDISK" ]; then
+                RVAL=`glance add --silent-upload -A $TOKEN name="$IMAGE_NAME-ramdisk" is_public=true container_format=ari disk_format=ari < "$RAMDISK"`
+                RAMDISK_ID=`echo $RVAL | cut -d":" -f2 | tr -d " "`
+            fi
+            glance add -A $TOKEN name="${IMAGE_NAME%.img}" is_public=true container_format=ami disk_format=ami ${KERNEL_ID:+kernel_id=$KERNEL_ID} ${RAMDISK_ID:+ramdisk_id=$RAMDISK_ID} < <(zcat --force "${IMAGE}")
         fi
-        if [ -n "$RAMDISK" ]; then
-            RVAL=`glance add --silent-upload -A $TOKEN name="$IMAGE_NAME-ramdisk" is_public=true container_format=ari disk_format=ari < "$RAMDISK"`
-            RAMDISK_ID=`echo $RVAL | cut -d":" -f2 | tr -d " "`
-        fi
-        glance add -A $TOKEN name="${IMAGE_NAME%.img}" is_public=true container_format=ami disk_format=ami ${KERNEL_ID:+kernel_id=$KERNEL_ID} ${RAMDISK_ID:+ramdisk_id=$RAMDISK_ID} < <(zcat --force "${IMAGE}")
     done
 fi
 
 
+# Run local script
+# ================
+
+# Run ``local.sh`` if it exists to perform user-managed tasks
+if [[ -x $TOP_DIR/local.sh ]]; then
+    echo "Running user script $TOP_DIR/local.sh"
+    $TOP_DIR/local.sh
+fi
+
+
 # Fin
 # ===
 
diff --git a/tests/functions.sh b/tests/functions.sh
index 69e8c0a..e7fbe0c 100755
--- a/tests/functions.sh
+++ b/tests/functions.sh
@@ -37,3 +37,107 @@
     echo "die_if_not_set [X='' false] Failed"
 fi
 
+
+echo "Testing INI functions"
+
+cat >test.ini <<EOF
+[default]
+# comment an option
+#log_file=./log.conf
+log_file=/etc/log.conf
+handlers=do not disturb
+
+[aaa]
+# the commented option should not change
+#handlers=cc,dd
+handlers = aa, bb
+
+[bbb]
+handlers=ee,ff
+EOF
+
+# Test with spaces
+
+VAL=$(iniget test.ini aaa handlers)
+if [[ "$VAL" == "aa, bb" ]]; then
+    echo "OK: $VAL"
+else
+    echo "iniget failed: $VAL"
+fi
+
+iniset test.ini aaa handlers "11, 22"
+
+VAL=$(iniget test.ini aaa handlers)
+if [[ "$VAL" == "11, 22" ]]; then
+    echo "OK: $VAL"
+else
+    echo "iniget failed: $VAL"
+fi
+
+
+# Test without spaces, end of file
+
+VAL=$(iniget test.ini bbb handlers)
+if [[ "$VAL" == "ee,ff" ]]; then
+    echo "OK: $VAL"
+else
+    echo "iniget failed: $VAL"
+fi
+
+iniset test.ini bbb handlers "33,44"
+
+VAL=$(iniget test.ini bbb handlers)
+if [[ "$VAL" == "33,44" ]]; then
+    echo "OK: $VAL"
+else
+    echo "iniget failed: $VAL"
+fi
+
+
+# Test section not exist
+
+VAL=$(iniget test.ini zzz handlers)
+if [[ -z "$VAL" ]]; then
+    echo "OK: zzz not present"
+else
+    echo "iniget failed: $VAL"
+fi
+
+iniset test.ini zzz handlers "999"
+
+VAL=$(iniget test.ini zzz handlers)
+if [[ -n "$VAL" ]]; then
+    echo "OK: zzz present"
+else
+    echo "iniget failed: $VAL"
+fi
+
+
+# Test option not exist
+
+VAL=$(iniget test.ini aaa debug)
+if [[ -z "$VAL" ]]; then
+    echo "OK aaa.debug not present"
+else
+    echo "iniget failed: $VAL"
+fi
+
+iniset test.ini aaa debug "999"
+
+VAL=$(iniget test.ini aaa debug)
+if [[ -n "$VAL" ]]; then
+    echo "OK aaa.debug present"
+else
+    echo "iniget failed: $VAL"
+fi
+
+# Test comments
+
+inicomment test.ini aaa handlers
+
+VAL=$(iniget test.ini aaa handlers)
+if [[ -z "$VAL" ]]; then
+    echo "OK"
+else
+    echo "inicomment failed: $VAL"
+fi
diff --git a/tools/configure_tempest.sh b/tools/configure_tempest.sh
index 9b25b7e..01849ad 100755
--- a/tools/configure_tempest.sh
+++ b/tools/configure_tempest.sh
@@ -125,22 +125,75 @@
     IMAGE_UUID=$(echo $IMAGE_UUID)
 fi
 
-# Create tempest.conf from tempest.conf.sample
+# Create tempest.conf from tempest.conf.tpl
 
 if [[ ! -r $TEMPEST_CONF ]]; then
-    cp $TEMPEST_CONF.sample $TEMPEST_CONF
+    cp $TEMPEST_CONF.tpl $TEMPEST_CONF
 fi
 
+IDENTITY_USE_SSL=${IDENTITY_USE_SSL:-False}
+IDENTITY_PORT=${IDENTITY_PORT:-5000}
+IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-v2.0} # Note: need v for now...
+# TODO(jaypipes): This is dumb and needs to be removed
+# from the Tempest configuration file entirely...
+IDENTITY_PATH=${IDENTITY_PATH:-tokens}
+IDENTITY_STRATEGY=${IDENTITY_STRATEGY:-keystone}
+
+# We use regular, non-admin users in Tempest for the USERNAME
+# substitutions and use ADMIN_USERNAME et al for the admin stuff.
+# OS_USERNAME et al. should be defined in openrc.
+OS_USERNAME=${OS_USERNAME:-demo}
+OS_TENANT_NAME=${OS_TENANT_NAME:-demo}
+OS_PASSWORD=${OS_PASSWORD:-secrete}
+
+# TODO(jaypipes): Support multiple regular user accounts instead
+# of using the same regular user account for the alternate user...
+ALT_USERNAME=$OS_USERNAME
+ALT_PASSWORD=$OS_PASSWORD
+ALT_TENANT_NAME=$OS_TENANT_NAME
+
+# TODO(jaypipes): Support multiple images instead of plopping
+# the IMAGE_UUID into both the image_ref and image_ref_alt slots
+IMAGE_UUID_ALT=$IMAGE_UUID
+
+# TODO(jaypipes): Support configurable flavor refs here...
+FLAVOR_REF=1
+FLAVOR_REF_ALT=2
+
+ADMIN_USERNAME=${ADMIN_USERNAME:-admin}
+ADMIN_PASSWORD=${ADMIN_PASSWORD:-secrete}
+ADMIN_TENANT_NAME=${ADMIN_TENANT:-admin}
+
+# Do any of the following need to be configurable?
+COMPUTE_CATALOG_TYPE=compute
+COMPUTE_CREATE_IMAGE_ENABLED=True
+COMPUTE_RESIZE_AVAILABLE=True
+COMPUTE_LOG_LEVEL=ERROR
+
 sed -e "
-    /^api_key=/s|=.*\$|=$ADMIN_PASSWORD|;
-    /^auth_url=/s|=.*\$|=${OS_AUTH_URL%/}/|;
-    /^host=/s|=.*\$|=$HOST_IP|;
-    /^image_ref=/s|=.*\$|=$IMAGE_UUID|;
-    /^password=/s|=.*\$|=$ADMIN_PASSWORD|;
-    /^tenant=/s|=.*\$|=$TENANT|;
-    /^tenant_name=/s|=.*\$|=$TENANT|;
-    /^user=/s|=.*\$|=$USERNAME|;
-    /^username=/s|=.*\$|=$USERNAME|;
+    s,%IDENTITY_USE_SSL%,$IDENTITY_USE_SSL,g;
+    s,%IDENTITY_HOST%,$HOST_IP,g;
+    s,%IDENTITY_PORT%,$IDENTITY_PORT,g;
+    s,%IDENTITY_API_VERSION%,$IDENTITY_API_VERSION,g;
+    s,%IDENTITY_PATH%,$IDENTITY_PATH,g;
+    s,%IDENTITY_STRATEGY%,$IDENTITY_STRATEGY,g;
+    s,%USERNAME%,$OS_USERNAME,g;
+    s,%PASSWORD%,$OS_PASSWORD,g;
+    s,%TENANT_NAME%,$OS_TENANT_NAME,g;
+    s,%ALT_USERNAME%,$ALT_USERNAME,g;
+    s,%ALT_PASSWORD%,$ALT_PASSWORD,g;
+    s,%ALT_TENANT_NAME%,$ALT_TENANT_NAME,g;
+    s,%COMPUTE_CATALOG_TYPE%,$COMPUTE_CATALOG_TYPE,g;
+    s,%COMPUTE_CREATE_IMAGE_ENABLED%,$COMPUTE_CREATE_IMAGE_ENABLED,g;
+    s,%COMPUTE_RESIZE_AVAILABLE%,$COMPUTE_RESIZE_AVAILABLE,g;
+    s,%COMPUTE_LOG_LEVEL%,$COMPUTE_LOG_LEVEL,g;
+    s,%IMAGE_ID%,$IMAGE_UUID,g;
+    s,%IMAGE_ID_ALT%,$IMAGE_UUID_ALT,g;
+    s,%FLAVOR_REF%,$FLAVOR_REF,g;
+    s,%FLAVOR_REF_ALT%,$FLAVOR_REF_ALT,g;
+    s,%ADMIN_USERNAME%,$ADMIN_USERNAME,g;
+    s,%ADMIN_PASSWORD%,$ADMIN_PASSWORD,g;
+    s,%ADMIN_TENANT_NAME%,$ADMIN_TENANT_NAME,g;
 " -i $TEMPEST_CONF
 
 # Create config.ini