Merge "README updates"
diff --git a/AUTHORS b/AUTHORS
index 561826c..a3a4b6b 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -23,3 +23,4 @@
 Tres Henry <tres@treshenry.net>
 Vishvananda Ishaya <vishvananda@gmail.com>
 Yun Mao <yunmao@gmail.com>
+Zhongyue Luo <lzyeval@gmail.com>
diff --git a/README.md b/README.md
index 0fb8581..34eb45f 100644
--- a/README.md
+++ b/README.md
@@ -47,3 +47,13 @@
 # Customizing
 
 You can override environment variables used in stack.sh by creating file name 'localrc'.  It is likely that you will need to do this to tweak your networking configuration should you need to access your cloud from a different host.
+
+# Swift
+
+Swift is not installed by default; you need to add the **swift** keyword to the ENABLED_SERVICES variable to get it installed.
+
+If you have keystone enabled, Swift will authenticate against it; make sure to use the keystone URL to authenticate against.
+
+At this time Swift is not started in a screen session but as a daemon; you need to use the **swift-init** CLI to manage the swift daemons.
+
+By default Swift will configure 3 replicas (and one spare), which could be IO intensive on a small VM; if you only want to do some quick testing of the API, you can choose to have just one replica by customizing the variable SWIFT_REPLICAS in your localrc.
diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh
index 233313e..b559965 100755
--- a/exercises/floating_ips.sh
+++ b/exercises/floating_ips.sh
@@ -195,8 +195,11 @@
 # shutdown the server
 nova delete $VM_UUID
 
+# make sure the VM shuts down within a reasonable time
+if ! timeout $TERMINATE_TIMEOUT sh -c "while nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then
+    echo "server didn't shut down!"
+    exit 1
+fi
+
 # Delete a secgroup
 nova secgroup-delete $SECGROUP
-
-# FIXME: validate shutdown within 5 seconds
-# (nova show $NAME returns 1 or status != ACTIVE)?
diff --git a/exercises/swift.sh b/exercises/swift.sh
index f7be099..95443df 100755
--- a/exercises/swift.sh
+++ b/exercises/swift.sh
@@ -24,17 +24,17 @@
 # =============
 
 # Check if we have to swift via keystone
-swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD stat
+swift stat
 
 # We start by creating a test container
-swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD post testcontainer
+swift post testcontainer
 
 # add some files into it.
-swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD upload testcontainer /etc/issue
+swift upload testcontainer /etc/issue
 
 # list them
-swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD list testcontainer
+swift list testcontainer
 
 # And we may want to delete them now that we have tested that
 # everything works.
-swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD delete testcontainer
+swift delete testcontainer
diff --git a/exercises/volumes.sh b/exercises/volumes.sh
index 1fcc034..622fb18 100755
--- a/exercises/volumes.sh
+++ b/exercises/volumes.sh
@@ -55,18 +55,33 @@
 # determinine instance type
 # -------------------------
 
+# Helper function to grab a numbered field from python novaclient cli result
+# Fields are numbered starting with 1
+# Reverse syntax is supported: -1 is the last field, -2 is second to last, etc.
+function get_field () {
+    while read data
+    do
+        if [ "$1" -lt 0 ]; then
+            field="(\$(NF$1))"
+        else
+            field="\$$(($1 + 1))"
+        fi
+        echo "$data" | awk -F'[ \t]*\\|[ \t]*' "{print $field}"
+    done
+}
+
 # List of instance types:
 nova flavor-list
 
-INSTANCE_TYPE=`nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | cut -d"|" -f2`
+INSTANCE_TYPE=`nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1`
 if [[ -z "$INSTANCE_TYPE" ]]; then
     # grab the first flavor in the list to launch if default doesn't exist
-   INSTANCE_TYPE=`nova flavor-list | head -n 4 | tail -n 1 | cut -d"|" -f2`
+   INSTANCE_TYPE=`nova flavor-list | head -n 4 | tail -n 1 | get_field 1`
 fi
 
 NAME="myserver"
 
-VM_UUID=`nova boot --flavor $INSTANCE_TYPE --image $IMAGE $NAME --security_groups=$SECGROUP | grep ' id ' | cut -d"|" -f3 | sed 's/ //g'`
+VM_UUID=`nova boot --flavor $INSTANCE_TYPE --image $IMAGE $NAME --security_groups=$SECGROUP | grep ' id ' | get_field 2`
 
 # Testing
 # =======
@@ -85,7 +100,7 @@
 fi
 
 # get the IP of the server
-IP=`nova show $VM_UUID | grep "private network" | cut -d"|" -f3`
+IP=`nova show $VM_UUID | grep "private network" | get_field 2`
 
 # for single node deployments, we can ping private ips
 MULTI_HOST=${MULTI_HOST:-0}
@@ -108,7 +123,7 @@
 VOL_NAME="myvol-$(openssl rand -hex 4)"
 
 # Verify it doesn't exist
-if [[ -n "`nova volume-list | grep $VOL_NAME | head -1 | cut -d'|' -f3 | sed 's/ //g'`" ]]; then
+if [[ -n "`nova volume-list | grep $VOL_NAME | head -1 | get_field 2`" ]]; then
     echo "Volume $VOL_NAME already exists"
     exit 1
 fi
@@ -121,7 +136,7 @@
 fi
 
 # Get volume ID
-VOL_ID=`nova volume-list | grep $VOL_NAME | head -1 | cut -d'|' -f2 | sed 's/ //g'`
+VOL_ID=`nova volume-list | grep $VOL_NAME | head -1 | get_field 1`
 
 # Attach to server
 DEVICE=/dev/vdb
@@ -131,7 +146,7 @@
     exit 1
 fi
 
-VOL_ATTACH=`nova volume-list | grep $VOL_NAME | head -1 | cut -d'|' -f6 | sed 's/ //g'`
+VOL_ATTACH=`nova volume-list | grep $VOL_NAME | head -1 | get_field -1`
 if [[ "$VOL_ATTACH" != $VM_UUID ]]; then
     echo "Volume not attached to correct instance"
     exit 1
diff --git a/files/000-default.template b/files/000-default.template
index d97f365..f499ea0 100644
--- a/files/000-default.template
+++ b/files/000-default.template
@@ -1,12 +1,12 @@
 <VirtualHost *:80>
-    WSGIScriptAlias / %HORIZON_DIR%/openstack-dashboard/dashboard/wsgi/django.wsgi
+    WSGIScriptAlias / %HORIZON_DIR%/openstack_dashboard/wsgi/django.wsgi
     WSGIDaemonProcess horizon user=%USER% group=%GROUP% processes=3 threads=10
     SetEnv APACHE_RUN_USER %USER%
     SetEnv APACHE_RUN_GROUP %GROUP%
     WSGIProcessGroup horizon
 
     DocumentRoot %HORIZON_DIR%/.blackhole/
-    Alias /media %HORIZON_DIR%/openstack-dashboard/dashboard/static
+    Alias /media %HORIZON_DIR%/openstack_dashboard/static
     Alias /vpn /opt/stack/vpn
 
     <Directory />
diff --git a/files/glance-api-paste.ini b/files/glance-api-paste.ini
index b8832ad..583b70a 100644
--- a/files/glance-api-paste.ini
+++ b/files/glance-api-paste.ini
@@ -30,6 +30,7 @@
 
 [filter:authtoken]
 paste.filter_factory = keystone.middleware.auth_token:filter_factory
+# FIXME(dtroyer): remove these service_* entries after auth_token is updated
 service_host = %KEYSTONE_SERVICE_HOST%
 service_port = %KEYSTONE_SERVICE_PORT%
 service_protocol = %KEYSTONE_SERVICE_PROTOCOL%
@@ -37,7 +38,11 @@
 auth_port = %KEYSTONE_AUTH_PORT%
 auth_protocol = %KEYSTONE_AUTH_PROTOCOL%
 auth_uri = %KEYSTONE_SERVICE_PROTOCOL%://%KEYSTONE_SERVICE_HOST%:%KEYSTONE_SERVICE_PORT%/
+# FIXME(dtroyer): remove admin_token after auth_token is updated
 admin_token = %SERVICE_TOKEN%
+admin_tenant_name = %SERVICE_TENANT_NAME%
+admin_user = %SERVICE_USERNAME%
+admin_password = %SERVICE_PASSWORD%
 
 [filter:auth-context]
 paste.filter_factory = glance.common.wsgi:filter_factory
diff --git a/files/glance-registry-paste.ini b/files/glance-registry-paste.ini
index f4130ec..fe460d9 100644
--- a/files/glance-registry-paste.ini
+++ b/files/glance-registry-paste.ini
@@ -14,6 +14,7 @@
 
 [filter:authtoken]
 paste.filter_factory = keystone.middleware.auth_token:filter_factory
+# FIXME(dtroyer): remove these service_* entries after auth_token is updated
 service_host = %KEYSTONE_SERVICE_HOST%
 service_port = %KEYSTONE_SERVICE_PORT%
 service_protocol = %KEYSTONE_SERVICE_PROTOCOL%
@@ -21,7 +22,11 @@
 auth_port = %KEYSTONE_AUTH_PORT%
 auth_protocol = %KEYSTONE_AUTH_PROTOCOL%
 auth_uri = %KEYSTONE_SERVICE_PROTOCOL%://%KEYSTONE_SERVICE_HOST%:%KEYSTONE_SERVICE_PORT%/
+# FIXME(dtroyer): remove admin_token after auth_token is updated
 admin_token = %SERVICE_TOKEN%
+admin_tenant_name = %SERVICE_TENANT_NAME%
+admin_user = %SERVICE_USERNAME%
+admin_password = %SERVICE_PASSWORD%
 
 [filter:auth-context]
 context_class = glance.registry.context.RequestContext
diff --git a/files/horizon_settings.py b/files/horizon_settings.py
index bbff08f..2d1d1f8 100644
--- a/files/horizon_settings.py
+++ b/files/horizon_settings.py
@@ -37,7 +37,7 @@
 HORIZON_CONFIG = {
     'dashboards': ('nova', 'syspanel', 'settings',),
     'default_dashboard': 'nova',
-    'user_home': 'dashboard.views.user_home',
+    'user_home': 'openstack_dashboard.views.user_home',
 }
 
 OPENSTACK_HOST = "127.0.0.1"
@@ -98,6 +98,3 @@
 #            }
 #        }
 #}
-
-# How much ram on each compute host?
-COMPUTE_HOST_RAM_GB = 16
diff --git a/files/keystone.conf b/files/keystone.conf
index b26e70d..76c618a 100644
--- a/files/keystone.conf
+++ b/files/keystone.conf
@@ -57,6 +57,9 @@
 [filter:ec2_extension]
 paste.filter_factory = keystone.contrib.ec2:Ec2Extension.factory
 
+[filter:s3_extension]
+paste.filter_factory = keystone.contrib.s3:S3Extension.factory
+
 [app:public_service]
 paste.app_factory = keystone.service:public_app_factory
 
@@ -64,7 +67,7 @@
 paste.app_factory = keystone.service:admin_app_factory
 
 [pipeline:public_api]
-pipeline = token_auth admin_token_auth json_body debug ec2_extension public_service
+pipeline = token_auth admin_token_auth json_body debug ec2_extension s3_extension public_service
 
 [pipeline:admin_api]
 pipeline = token_auth admin_token_auth json_body debug ec2_extension crud_extension admin_service
diff --git a/files/keystone_data.sh b/files/keystone_data.sh
index 3f4841f..e292811 100755
--- a/files/keystone_data.sh
+++ b/files/keystone_data.sh
@@ -17,6 +17,7 @@
 fi
 
 ADMIN_TENANT=`get_id keystone tenant-create --name=admin`
+SERVICE_TENANT=`get_id keystone tenant-create --name=$SERVICE_TENANT_NAME`
 DEMO_TENANT=`get_id keystone tenant-create --name=demo`
 INVIS_TENANT=`get_id keystone tenant-create --name=invisible_to_admin`
 
@@ -73,6 +74,14 @@
                                  --name=nova \
                                  --type=compute \
                                  --description="Nova Compute Service"
+NOVA_USER=`get_id keystone user-create \
+                                 --name=nova \
+                                 --pass="$SERVICE_PASSWORD" \
+                                 --tenant_id $SERVICE_TENANT \
+                                 --email=nova@example.com`
+keystone user-role-add --tenant_id $SERVICE_TENANT \
+                                 --user $NOVA_USER \
+                                 --role $ADMIN_ROLE
 
 keystone service-create \
                                  --name=ec2 \
@@ -83,6 +92,14 @@
                                  --name=glance \
                                  --type=image \
                                  --description="Glance Image Service"
+GLANCE_USER=`get_id keystone user-create \
+                                 --name=glance \
+                                 --pass="$SERVICE_PASSWORD" \
+                                 --tenant_id $SERVICE_TENANT \
+                                 --email=glance@example.com`
+keystone user-role-add --tenant_id $SERVICE_TENANT \
+                                 --user $GLANCE_USER \
+                                 --role $ADMIN_ROLE
 
 keystone service-create \
                                  --name=keystone \
@@ -101,12 +118,28 @@
                                  --name=swift \
                                  --type="object-store" \
                                  --description="Swift Service"
+    SWIFT_USER=`get_id keystone user-create \
+                                 --name=swift \
+                                 --pass="$SERVICE_PASSWORD" \
+                                 --tenant_id $SERVICE_TENANT \
+                                 --email=swift@example.com`
+    keystone user-role-add --tenant_id $SERVICE_TENANT \
+                                 --user $SWIFT_USER \
+                                 --role $ADMIN_ROLE
 fi
 if [[ "$ENABLED_SERVICES" =~ "quantum" ]]; then
     keystone service-create \
                                  --name=quantum \
                                  --type=network \
                                  --description="Quantum Service"
+    QUANTUM_USER=`get_id keystone user-create \
+                                 --name=quantum \
+                                 --pass="$SERVICE_PASSWORD" \
+                                 --tenant_id $SERVICE_TENANT \
+                                 --email=quantum@example.com`
+    keystone user-role-add --tenant_id $SERVICE_TENANT \
+                                 --user $QUANTUM_USER \
+                                 --role $ADMIN_ROLE
 fi
 
 # create ec2 creds and parse the secret and access key returned
diff --git a/files/pips/keystone b/files/pips/keystone
index fef9f8b..09636e4 100644
--- a/files/pips/keystone
+++ b/files/pips/keystone
@@ -1,2 +1 @@
 PassLib
-pycli
diff --git a/files/swift/proxy-server.conf b/files/swift/proxy-server.conf
index 3ef0276..e80c1d5 100644
--- a/files/swift/proxy-server.conf
+++ b/files/swift/proxy-server.conf
@@ -8,7 +8,7 @@
 log_level = DEBUG
 
 [pipeline:main]
-pipeline = healthcheck cache %AUTH_SERVER% proxy-server
+pipeline = healthcheck cache swift3 %AUTH_SERVER% proxy-server
 
 [app:proxy-server]
 use = egg:swift#proxy
@@ -16,10 +16,37 @@
 account_autocreate = true
 
 [filter:keystone]
-use = egg:swiftkeystone2#keystone2
-keystone_admin_token = %SERVICE_TOKEN%
-keystone_url = http://localhost:35357/v2.0
-keystone_swift_operator_roles = Member,admin
+paste.filter_factory = keystone.middleware.swift_auth:filter_factory
+operator_roles = Member,admin
+
+[filter:s3token]
+paste.filter_factory = keystone.middleware.s3_token:filter_factory
+service_port = %KEYSTONE_SERVICE_PORT%
+service_host = %KEYSTONE_SERVICE_HOST%
+auth_port = %KEYSTONE_AUTH_PORT%
+auth_host = %KEYSTONE_AUTH_HOST%
+auth_protocol = %KEYSTONE_AUTH_PROTOCOL%
+auth_token = %SERVICE_TOKEN%
+admin_token = %SERVICE_TOKEN%
+
+[filter:tokenauth]
+paste.filter_factory = keystone.middleware.auth_token:filter_factory
+# FIXME(dtroyer): remove these service_* entries after auth_token is updated
+service_port = %KEYSTONE_SERVICE_PORT%
+service_host = %KEYSTONE_SERVICE_HOST%
+auth_port = %KEYSTONE_AUTH_PORT%
+auth_host = %KEYSTONE_AUTH_HOST%
+auth_protocol = %KEYSTONE_AUTH_PROTOCOL%
+auth_token = %SERVICE_TOKEN%
+# FIXME(dtroyer): remove admin_token after auth_token is updated
+admin_token = %SERVICE_TOKEN%
+admin_tenant_name = %SERVICE_TENANT_NAME%
+admin_user = %SERVICE_USERNAME%
+admin_password = %SERVICE_PASSWORD%
+cache = swift.cache
+
+[filter:swift3]
+use = egg:swift#swift3
 
 [filter:tempauth]
 use = egg:swift#tempauth
diff --git a/files/swift/swift-remakerings b/files/swift/swift-remakerings
deleted file mode 100755
index c65353c..0000000
--- a/files/swift/swift-remakerings
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/bash
-
-cd %SWIFT_CONFIG_LOCATION%
-
-rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz
-
-swift-ring-builder object.builder create %SWIFT_PARTITION_POWER_SIZE% 3 1
-swift-ring-builder object.builder add z1-127.0.0.1:6010/sdb1 1
-swift-ring-builder object.builder add z2-127.0.0.1:6020/sdb2 1
-swift-ring-builder object.builder add z3-127.0.0.1:6030/sdb3 1
-swift-ring-builder object.builder add z4-127.0.0.1:6040/sdb4 1
-swift-ring-builder object.builder rebalance
-
-swift-ring-builder container.builder create %SWIFT_PARTITION_POWER_SIZE% 3 1
-swift-ring-builder container.builder add z1-127.0.0.1:6011/sdb1 1
-swift-ring-builder container.builder add z2-127.0.0.1:6021/sdb2 1
-swift-ring-builder container.builder add z3-127.0.0.1:6031/sdb3 1
-swift-ring-builder container.builder add z4-127.0.0.1:6041/sdb4 1
-swift-ring-builder container.builder rebalance
-
-swift-ring-builder account.builder create %SWIFT_PARTITION_POWER_SIZE% 3 1
-swift-ring-builder account.builder add z1-127.0.0.1:6012/sdb1 1
-swift-ring-builder account.builder add z2-127.0.0.1:6022/sdb2 1
-swift-ring-builder account.builder add z3-127.0.0.1:6032/sdb3 1
-swift-ring-builder account.builder add z4-127.0.0.1:6042/sdb4 1
-swift-ring-builder account.builder rebalance
diff --git a/files/swift/swift-startmain b/files/swift/swift-startmain
deleted file mode 100755
index 69efebd..0000000
--- a/files/swift/swift-startmain
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/bash
-
-swift-init all restart
diff --git a/openrc b/openrc
index d742ced..9b3d7ba 100644
--- a/openrc
+++ b/openrc
@@ -62,7 +62,7 @@
 export EC2_SECRET_KEY=${DEMO_SECRET}
 
 # Euca2ools Certificate stuff for uploading bundles
-# You can get your certs using ./tools/get_certs.sh
+# See exercises/bundle.sh to see how to get certs using nova cli
 NOVARC=$(readlink -f "${BASH_SOURCE:-${0}}" 2>/dev/null) ||
         NOVARC=$(python -c 'import os,sys; print os.path.abspath(os.path.realpath(sys.argv[1]))' "${BASH_SOURCE:-${0}}")
 NOVA_KEY_DIR=${NOVARC%/*}
diff --git a/stack.sh b/stack.sh
index c744853..9242182 100755
--- a/stack.sh
+++ b/stack.sh
@@ -162,7 +162,6 @@
 KEYSTONECLIENT_DIR=$DEST/python-keystoneclient
 NOVNC_DIR=$DEST/noVNC
 SWIFT_DIR=$DEST/swift
-SWIFT_KEYSTONE_DIR=$DEST/swift-keystone2
 QUANTUM_DIR=$DEST/quantum
 QUANTUM_CLIENT_DIR=$DEST/python-quantumclient
 MELANGE_DIR=$DEST/melange
@@ -403,6 +402,12 @@
 # By default we define 9 for the partition count (which mean 512).
 SWIFT_PARTITION_POWER_SIZE=${SWIFT_PARTITION_POWER_SIZE:-9}
 
+# This variable allows you to configure how many replicas you want to be
+# configured for your Swift cluster.  By default the three replicas would need a
+# bit of IO and Memory on a VM you may want to lower that to 1 if you want to do
+# only some quick testing.
+SWIFT_REPLICAS=${SWIFT_REPLICAS:-3}
+
 # We only ask for Swift Hash if we have enabled swift service.
 if is_service_enabled swift; then
     # SWIFT_HASH is a random unique string for a swift cluster that
@@ -416,10 +421,16 @@
 # Service Token - Openstack components need to have an admin token
 # to validate user tokens.
 read_password SERVICE_TOKEN "ENTER A SERVICE_TOKEN TO USE FOR THE SERVICE ADMIN TOKEN."
+# Services authenticate to Identity with servicename/SERVICE_PASSWORD
+read_password SERVICE_PASSWORD "ENTER A SERVICE_PASSWORD TO USE FOR THE SERVICE AUTHENTICATION."
 # Horizon currently truncates usernames and passwords at 20 characters
 read_password ADMIN_PASSWORD "ENTER A PASSWORD TO USE FOR HORIZON AND KEYSTONE (20 CHARS OR LESS)."
 
+# Set the tenant for service accounts in Keystone
+SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service}
+
 # Set Keystone interface configuration
+KEYSTONE_API_PORT=${KEYSTONE_API_PORT:-5000}
 KEYSTONE_AUTH_HOST=${KEYSTONE_AUTH_HOST:-$SERVICE_HOST}
 KEYSTONE_AUTH_PORT=${KEYSTONE_AUTH_PORT:-35357}
 KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-http}
@@ -564,8 +575,6 @@
 if is_service_enabled swift; then
     # storage service
     git_clone $SWIFT_REPO $SWIFT_DIR $SWIFT_BRANCH
-    # swift + keystone middleware
-    git_clone $SWIFT_KEYSTONE_REPO $SWIFT_KEYSTONE_DIR $SWIFT_KEYSTONE_BRANCH
 fi
 if is_service_enabled g-api n-api; then
     # image catalog service
@@ -609,15 +618,13 @@
 fi
 if is_service_enabled swift; then
     cd $SWIFT_DIR; sudo python setup.py develop
-    cd $SWIFT_KEYSTONE_DIR; sudo python setup.py develop
 fi
 if is_service_enabled g-api n-api; then
     cd $GLANCE_DIR; sudo python setup.py develop
 fi
 cd $NOVA_DIR; sudo python setup.py develop
 if is_service_enabled horizon; then
-    cd $HORIZON_DIR/horizon; sudo python setup.py develop
-    cd $HORIZON_DIR/openstack-dashboard; sudo python setup.py develop
+    cd $HORIZON_DIR; sudo python setup.py develop
 fi
 if is_service_enabled q-svc; then
     cd $QUANTUM_DIR; sudo python setup.py develop
@@ -716,14 +723,14 @@
     apt_get install apache2 libapache2-mod-wsgi
 
     # Link to quantum client directory.
-    rm -fr ${HORIZON_DIR}/openstack-dashboard/quantum
-    ln -s ${QUANTUM_CLIENT_DIR}/quantum ${HORIZON_DIR}/openstack-dashboard/quantum
+    rm -fr ${HORIZON_DIR}/openstack_dashboard/quantum
+    ln -s ${QUANTUM_CLIENT_DIR}/quantum ${HORIZON_DIR}/openstack_dashboard/quantum
 
     # Remove stale session database.
-    rm -f $HORIZON_DIR/openstack-dashboard/local/dashboard_openstack.sqlite3
+    rm -f $HORIZON_DIR/openstack_dashboard/local/dashboard_openstack.sqlite3
 
     # ``local_settings.py`` is used to override horizon default settings.
-    local_settings=$HORIZON_DIR/openstack-dashboard/local/local_settings.py
+    local_settings=$HORIZON_DIR/openstack_dashboard/local/local_settings.py
     cp $FILES/horizon_settings.py $local_settings
 
     # Enable quantum in dashboard, if requested
@@ -733,7 +740,7 @@
 
     # Initialize the horizon database (it stores sessions and notices shown to
     # users).  The user system is external (keystone).
-    cd $HORIZON_DIR/openstack-dashboard
+    cd $HORIZON_DIR
     python manage.py syncdb
 
     # create an empty directory that apache uses as docroot
@@ -767,6 +774,7 @@
 
     function glance_config {
         sudo sed -e "
+            s,%KEYSTONE_API_PORT%,$KEYSTONE_API_PORT,g;
             s,%KEYSTONE_AUTH_HOST%,$KEYSTONE_AUTH_HOST,g;
             s,%KEYSTONE_AUTH_PORT%,$KEYSTONE_AUTH_PORT,g;
             s,%KEYSTONE_AUTH_PROTOCOL%,$KEYSTONE_AUTH_PROTOCOL,g;
@@ -774,6 +782,9 @@
             s,%KEYSTONE_SERVICE_PORT%,$KEYSTONE_SERVICE_PORT,g;
             s,%KEYSTONE_SERVICE_PROTOCOL%,$KEYSTONE_SERVICE_PROTOCOL,g;
             s,%SQL_CONN%,$BASE_SQL_CONN/glance,g;
+            s,%SERVICE_TENANT_NAME%,$SERVICE_TENANT_NAME,g;
+            s,%SERVICE_USERNAME%,glance,g;
+            s,%SERVICE_PASSWORD%,$SERVICE_PASSWORD,g;
             s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g;
             s,%DEST%,$DEST,g;
             s,%SYSLOG%,$SYSLOG,g;
@@ -824,7 +835,14 @@
     cp $NOVA_DIR/etc/nova/api-paste.ini $NOVA_CONF
 
     # Then we add our own service token to the configuration
-    sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $NOVA_CONF/api-paste.ini
+    sed -e "
+        /^admin_token/i admin_tenant_name = $SERVICE_TENANT_NAME
+        /admin_tenant_name/s/^.*$/admin_tenant_name = $SERVICE_TENANT_NAME/;
+        /admin_user/s/^.*$/admin_user = nova/;
+        /admin_password/s/^.*$/admin_password = $SERVICE_PASSWORD/;
+        s,%SERVICE_TENANT_NAME%,$SERVICE_TENANT_NAME,g;
+        s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g;
+    " -i $NOVA_CONF/api-paste.ini
 
     # Finally, we change the pipelines in nova to use keystone
     function replace_pipeline() {
@@ -832,8 +850,10 @@
     }
     replace_pipeline "ec2cloud" "ec2faultwrap logrequest totoken authtoken keystonecontext cloudrequest authorizer validator ec2executor"
     replace_pipeline "ec2admin" "ec2faultwrap logrequest totoken authtoken keystonecontext adminrequest authorizer ec2executor"
-    replace_pipeline "openstack_compute_api_v2" "faultwrap authtoken keystonecontext ratelimit osapi_compute_app_v2"
-    replace_pipeline "openstack_volume_api_v1" "faultwrap authtoken keystonecontext ratelimit osapi_volume_app_v1"
+    # allow people to turn off rate limiting for testing, like when using tempest, by setting OSAPI_RATE_LIMIT=" "
+    OSAPI_RATE_LIMIT=${OSAPI_RATE_LIMIT:-"ratelimit"}
+    replace_pipeline "openstack_compute_api_v2" "faultwrap authtoken keystonecontext $OSAPI_RATE_LIMIT osapi_compute_app_v2"
+    replace_pipeline "openstack_volume_api_v1" "faultwrap authtoken keystonecontext $OSAPI_RATE_LIMIT osapi_volume_app_v1"
 fi
 
 # Helper to clean iptables rules
@@ -968,21 +988,24 @@
 
     # We then create link to that mounted location so swift would know
     # where to go.
-    for x in {1..4}; do sudo ln -sf ${SWIFT_DATA_LOCATION}/drives/sdb1/$x ${SWIFT_DATA_LOCATION}/$x; done
+    for x in $(seq ${SWIFT_REPLICAS}); do
+        sudo ln -sf ${SWIFT_DATA_LOCATION}/drives/sdb1/$x ${SWIFT_DATA_LOCATION}/$x; done
 
     # We now have to emulate a few different servers into one we
     # create all the directories needed for swift
-    tmpd=""
-    for d in ${SWIFT_DATA_LOCATION}/drives/sdb1/{1..4} \
-        ${SWIFT_CONFIG_LOCATION}/{object,container,account}-server \
-        ${SWIFT_DATA_LOCATION}/{1..4}/node/sdb1 /var/run/swift; do
-        [[ -d $d ]] && continue
-        sudo install -o ${USER} -g $USER_GROUP -d $d
+    for x in $(seq ${SWIFT_REPLICAS}); do
+            drive=${SWIFT_DATA_LOCATION}/drives/sdb1/${x}
+            node=${SWIFT_DATA_LOCATION}/${x}/node
+            node_device=${node}/sdb1
+            [[ -d $node ]] && continue
+            [[ -d $drive ]] && continue
+            sudo install -o ${USER} -g $USER_GROUP -d $drive
+            sudo install -o ${USER} -g $USER_GROUP -d $node_device
+            sudo chown -R $USER: ${node}
     done
 
-   # We do want to make sure this is all owned by our user.
-   sudo chown -R $USER: ${SWIFT_DATA_LOCATION}/{1..4}/node
-   sudo chown -R $USER: ${SWIFT_CONFIG_LOCATION}
+   sudo mkdir -p ${SWIFT_CONFIG_LOCATION}/{object,container,account}-server /var/run/swift
+   sudo chown -R $USER: ${SWIFT_CONFIG_LOCATION} /var/run/swift
 
    # swift-init has a bug using /etc/swift until bug #885595 is fixed
    # we have to create a link
@@ -998,19 +1021,29 @@
    # which has some default username and password if you have
    # configured keystone it will checkout the directory.
    if is_service_enabled key; then
-       swift_auth_server=keystone
-
-       # We install the memcache server as this is will be used by the
-       # middleware to cache the tokens auths for a long this is needed.
-       apt_get install memcached
+       swift_auth_server="s3token tokenauth keystone"
    else
        swift_auth_server=tempauth
    fi
 
    # We do the install of the proxy-server and swift configuration
    # replacing a few directives to match our configuration.
-   sed "s,%SWIFT_CONFIG_LOCATION%,${SWIFT_CONFIG_LOCATION},;s/%USER%/$USER/;s/%SERVICE_TOKEN%/${SERVICE_TOKEN}/;s/%AUTH_SERVER%/${swift_auth_server}/" \
-       $FILES/swift/proxy-server.conf|sudo tee  ${SWIFT_CONFIG_LOCATION}/proxy-server.conf
+   sed -e "
+       s,%SWIFT_CONFIG_LOCATION%,${SWIFT_CONFIG_LOCATION},g;
+       s,%USER%,$USER,g;
+       s,%SERVICE_TENANT_NAME%,$SERVICE_TENANT_NAME,g;
+       s,%SERVICE_USERNAME%,swift,g;
+       s,%SERVICE_PASSWORD%,$SERVICE_PASSWORD,g;
+       s,%SERVICE_TOKEN%,${SERVICE_TOKEN},g;
+       s,%KEYSTONE_SERVICE_PORT%,${KEYSTONE_SERVICE_PORT},g;
+       s,%KEYSTONE_SERVICE_HOST%,${KEYSTONE_SERVICE_HOST},g;
+       s,%KEYSTONE_API_PORT%,${KEYSTONE_API_PORT},g;
+       s,%KEYSTONE_AUTH_HOST%,${KEYSTONE_AUTH_HOST},g;
+       s,%KEYSTONE_AUTH_PORT%,${KEYSTONE_AUTH_PORT},g;
+       s,%KEYSTONE_AUTH_PROTOCOL%,${KEYSTONE_AUTH_PROTOCOL},g;
+       s/%AUTH_SERVER%/${swift_auth_server}/g;
+    " $FILES/swift/proxy-server.conf | \
+       sudo tee  ${SWIFT_CONFIG_LOCATION}/proxy-server.conf
 
    sed -e "s/%SWIFT_HASH%/$SWIFT_HASH/" $FILES/swift/swift.conf > ${SWIFT_CONFIG_LOCATION}/swift.conf
 
@@ -1023,7 +1056,7 @@
        local log_facility=$3
        local node_number
 
-       for node_number in {1..4}; do
+       for node_number in $(seq ${SWIFT_REPLICAS}); do
            node_path=${SWIFT_DATA_LOCATION}/${node_number}
            sed -e "s,%SWIFT_CONFIG_LOCATION%,${SWIFT_CONFIG_LOCATION},;s,%USER%,$USER,;s,%NODE_PATH%,${node_path},;s,%BIND_PORT%,${bind_port},;s,%LOG_FACILITY%,${log_facility}," \
                $FILES/swift/${server_type}-server.conf > ${SWIFT_CONFIG_LOCATION}/${server_type}-server/${node_number}.conf
@@ -1046,29 +1079,48 @@
        tee /etc/rsyslog.d/10-swift.conf
    sudo restart rsyslog
 
-   # We create two helper scripts :
-   #
-   # - swift-remakerings
-   #   Allow to recreate rings from scratch.
-   # - swift-startmain
-   #   Restart your full cluster.
-   #
-   sed -e "s,%SWIFT_CONFIG_LOCATION%,${SWIFT_CONFIG_LOCATION},;s/%SWIFT_PARTITION_POWER_SIZE%/$SWIFT_PARTITION_POWER_SIZE/" $FILES/swift/swift-remakerings | \
-       sudo tee /usr/local/bin/swift-remakerings
-   sudo install -m755 $FILES/swift/swift-startmain /usr/local/bin/
+   # This is where we create three different rings for swift with
+   # different object servers binding on different ports.
+   pushd ${SWIFT_CONFIG_LOCATION} >/dev/null && {
+
+       rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz
+
+       port_number=6010
+       swift-ring-builder object.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1
+       for x in $(seq ${SWIFT_REPLICAS}); do
+           swift-ring-builder object.builder add z${x}-127.0.0.1:${port_number}/sdb1 1
+           port_number=$[port_number + 10]
+       done
+       swift-ring-builder object.builder rebalance
+
+       port_number=6011
+       swift-ring-builder container.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1
+       for x in $(seq ${SWIFT_REPLICAS}); do
+           swift-ring-builder container.builder add z${x}-127.0.0.1:${port_number}/sdb1 1
+           port_number=$[port_number + 10]
+       done
+       swift-ring-builder container.builder rebalance
+
+       port_number=6012
+       swift-ring-builder account.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1
+       for x in $(seq ${SWIFT_REPLICAS}); do
+           swift-ring-builder account.builder add z${x}-127.0.0.1:${port_number}/sdb1 1
+           port_number=$[port_number + 10]
+       done
+       swift-ring-builder account.builder rebalance
+
+   } && popd >/dev/null
+
    sudo chmod +x /usr/local/bin/swift-*
 
    # We then can start rsync.
    sudo /etc/init.d/rsync restart || :
 
-   # Create our ring for the object/container/account.
-   /usr/local/bin/swift-remakerings
+   # TODO: Bring some services in foreground.
+   # Launch all services.
+   swift-init all start
 
-   # And now we launch swift-startmain to get our cluster running
-   # ready to be tested.
-   /usr/local/bin/swift-startmain || :
-
-   unset s swift_hash swift_auth_server tmpd
+   unset s swift_hash swift_auth_server
 fi
 
 # Volume Service
@@ -1359,7 +1411,7 @@
 if is_service_enabled key; then
     screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d --debug"
     echo "Waiting for keystone to start..."
-    if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/; do sleep 1; done"; then
+    if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_API_PORT/v2.0/; do sleep 1; done"; then
       echo "keystone did not start"
       exit 1
     fi
@@ -1371,7 +1423,8 @@
 
     # keystone_data.sh creates services, admin and demo users, and roles.
     SERVICE_ENDPOINT=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0
-    ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES bash $FILES/keystone_data.sh
+    ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES \
+        bash $FILES/keystone_data.sh
 fi
 
 
@@ -1600,7 +1653,7 @@
 
 # If keystone is present, you can point nova cli to this server
 if is_service_enabled key; then
-    echo "keystone is serving at $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/"
+    echo "keystone is serving at $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_API_PORT/v2.0/"
     echo "examples on using novaclient command line is in exercise.sh"
     echo "the default users are: admin and demo"
     echo "the password: $ADMIN_PASSWORD"
diff --git a/tools/jenkins/jenkins_home/print_summary.py b/tools/jenkins/jenkins_home/print_summary.py
index 1d71a4a..ea943e1 100755
--- a/tools/jenkins/jenkins_home/print_summary.py
+++ b/tools/jenkins/jenkins_home/print_summary.py
@@ -5,8 +5,8 @@
 
 
 def print_usage():
-    print "Usage: %s [jenkins_url (eg. http://50.56.12.202:8080/)]"\
-          % sys.argv[0]
+    print ("Usage: %s [jenkins_url (eg. http://50.56.12.202:8080/)]"
+           % sys.argv[0])
     sys.exit()