Merge "Reuse existing libvirt setup functions for Ironic"
diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh
index dff8e7a..d756685 100755
--- a/exercises/boot_from_volume.sh
+++ b/exercises/boot_from_volume.sh
@@ -44,6 +44,9 @@
 # the exercise is skipped
 is_service_enabled cinder || exit 55
 
+# Ironic does not support boot from volume.
+[ "$VIRT_DRIVER" == "ironic" ] && exit 55
+
 # Instance type to create
 DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
 
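For context, exit code 55 is the convention the exercise runner uses to mark an exercise as skipped rather than failed; a minimal sketch of that convention (illustrative, not the literal exercise.sh logic):

    ./boot_from_volume.sh
    rc=$?
    if [[ $rc -eq 55 ]]; then
        echo "SKIP boot_from_volume"
    elif [[ $rc -ne 0 ]]; then
        echo "FAIL boot_from_volume"
    fi
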
diff --git a/exercises/euca.sh b/exercises/euca.sh
index 3768b56..f9c4752 100755
--- a/exercises/euca.sh
+++ b/exercises/euca.sh
@@ -36,6 +36,9 @@
 # Import exercise configuration
 source $TOP_DIR/exerciserc
 
+# Import project functions
+source $TOP_DIR/lib/neutron
+
 # If nova api is not enabled we exit with exitcode 55 so that
 # the exercise is skipped
 is_service_enabled n-api || exit 55
@@ -82,7 +85,7 @@
 
 # Volumes
 # -------
-if is_service_enabled c-vol && ! is_service_enabled n-cell; then
+if is_service_enabled c-vol && ! is_service_enabled n-cell && [ "$VIRT_DRIVER" != "ironic" ]; then
     VOLUME_ZONE=`euca-describe-availability-zones | head -n1 | cut -f2`
     die_if_not_set $LINENO VOLUME_ZONE "Failure to find zone for volume"
 
diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh
index 1416d4d..7e90e5a 100755
--- a/exercises/floating_ips.sh
+++ b/exercises/floating_ips.sh
@@ -180,7 +180,7 @@
 fi
 
 # FIXME (anthony): make xs support security groups
-if [ "$VIRT_DRIVER" != "xenserver" -a "$VIRT_DRIVER" != "openvz" ]; then
+if [ "$VIRT_DRIVER" != "ironic" -a "$VIRT_DRIVER" != "xenserver" -a "$VIRT_DRIVER" != "openvz" ]; then
     # Test that we aren't able to ping our floating ip within ASSOCIATE_TIMEOUT seconds
     ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT Fail
 fi
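As the list of drivers without security-group support grows, the chained -a tests get harder to read; an equivalent form (illustrative sketch only, not part of the change):

    case "$VIRT_DRIVER" in
        ironic|xenserver|openvz)
            # security groups unsupported; skip the negative ping check
            ;;
        *)
            ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT Fail
            ;;
    esac
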
diff --git a/exercises/volumes.sh b/exercises/volumes.sh
index 0d556df..1dff6a4 100755
--- a/exercises/volumes.sh
+++ b/exercises/volumes.sh
@@ -41,6 +41,9 @@
 # exercise is skipped.
 is_service_enabled cinder || exit 55
 
+# Ironic does not currently support volume attachment.
+[ "$VIRT_DRIVER" == "ironic" ] && exit 55
+
 # Instance type to create
 DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
 
diff --git a/extras.d/80-tempest.sh b/extras.d/80-tempest.sh
index 0186e36..74f4c60 100644
--- a/extras.d/80-tempest.sh
+++ b/extras.d/80-tempest.sh
@@ -9,7 +9,7 @@
         install_tempest
     elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
         # Tempest config must come after layer 2 services are running
-        :
+        create_tempest_accounts
     elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
         echo_summary "Initializing Tempest"
         configure_tempest
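extras.d hooks are sourced once per phase with the phase names as arguments, which is why the dispatch keys on $1 and $2; create_tempest_accounts runs at post-config because keystone is answering API calls by then, while configure_tempest waits for the extra phase when all services are up. Roughly how stack.sh drives the hooks (sketch, not the literal code):

    for i in $TOP_DIR/extras.d/*.sh; do
        [[ -r $i ]] && source $i stack post-config
    done
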
diff --git a/files/keystone_data.sh b/files/keystone_data.sh
deleted file mode 100755
index fc1e813..0000000
--- a/files/keystone_data.sh
+++ /dev/null
@@ -1,146 +0,0 @@
-#!/bin/bash
-#
-# Initial data for Keystone using python-keystoneclient
-#
-# Tenant               User         Roles
-# ------------------------------------------------------------------
-# service              glance       service
-# service              glance-swift ResellerAdmin
-# service              heat         service        # if enabled
-# service              ceilometer   admin          # if enabled
-# Tempest Only:
-# alt_demo             alt_demo     Member
-#
-# Variables set before calling this script:
-# SERVICE_TOKEN - aka admin_token in keystone.conf
-# SERVICE_ENDPOINT - local Keystone admin endpoint
-# SERVICE_TENANT_NAME - name of tenant containing service accounts
-# SERVICE_HOST - host used for endpoint creation
-# ENABLED_SERVICES - stack.sh's list of services to start
-# DEVSTACK_DIR - Top-level DevStack directory
-# KEYSTONE_CATALOG_BACKEND - used to determine service catalog creation
-
-# Defaults
-# --------
-
-ADMIN_PASSWORD=${ADMIN_PASSWORD:-secrete}
-SERVICE_PASSWORD=${SERVICE_PASSWORD:-$ADMIN_PASSWORD}
-export SERVICE_TOKEN=$SERVICE_TOKEN
-export SERVICE_ENDPOINT=$SERVICE_ENDPOINT
-SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service}
-
-# Roles
-# -----
-
-# The ResellerAdmin role is used by Nova and Ceilometer so we need to keep it.
-# The admin role in swift allows a user to act as an admin for their tenant,
-# but ResellerAdmin is needed for a user to act as any tenant. The name of this
-# role is also configurable in swift-proxy.conf
-keystone role-create --name=ResellerAdmin
-# Service role, so service users do not have to be admins
-keystone role-create --name=service
-
-
-# Services
-# --------
-
-if [[ "$ENABLED_SERVICES" =~ "n-api" ]] && [[ "$ENABLED_SERVICES" =~ "s-proxy" || "$ENABLED_SERVICES" =~ "swift" ]]; then
-    # Nova needs ResellerAdmin role to download images when accessing
-    # swift through the s3 api.
-    keystone user-role-add \
-        --tenant $SERVICE_TENANT_NAME \
-        --user nova \
-        --role ResellerAdmin
-fi
-
-# Glance
-if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then
-    keystone user-create \
-        --name=glance \
-        --pass="$SERVICE_PASSWORD" \
-        --tenant $SERVICE_TENANT_NAME \
-        --email=glance@example.com
-    keystone user-role-add \
-        --tenant $SERVICE_TENANT_NAME \
-        --user glance \
-        --role service
-    # required for swift access
-    if [[ "$ENABLED_SERVICES" =~ "s-proxy" ]]; then
-        keystone user-create \
-            --name=glance-swift \
-            --pass="$SERVICE_PASSWORD" \
-            --tenant $SERVICE_TENANT_NAME \
-            --email=glance-swift@example.com
-        keystone user-role-add \
-            --tenant $SERVICE_TENANT_NAME \
-            --user glance-swift \
-            --role ResellerAdmin
-    fi
-    if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-        keystone service-create \
-            --name=glance \
-            --type=image \
-            --description="Glance Image Service"
-        keystone endpoint-create \
-            --region RegionOne \
-            --service glance \
-            --publicurl "http://$SERVICE_HOST:9292" \
-            --adminurl "http://$SERVICE_HOST:9292" \
-            --internalurl "http://$SERVICE_HOST:9292"
-    fi
-fi
-
-# Ceilometer
-if [[ "$ENABLED_SERVICES" =~ "ceilometer" ]] && [[ "$ENABLED_SERVICES" =~ "s-proxy" || "$ENABLED_SERVICES" =~ "swift" ]]; then
-    # Ceilometer needs ResellerAdmin role to access swift account stats.
-    keystone user-role-add --tenant $SERVICE_TENANT_NAME \
-        --user ceilometer \
-        --role ResellerAdmin
-fi
-
-# EC2
-if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
-    if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-        keystone service-create \
-            --name=ec2 \
-            --type=ec2 \
-            --description="EC2 Compatibility Layer"
-        keystone endpoint-create \
-            --region RegionOne \
-            --service ec2 \
-            --publicurl "http://$SERVICE_HOST:8773/services/Cloud" \
-            --adminurl "http://$SERVICE_HOST:8773/services/Admin" \
-            --internalurl "http://$SERVICE_HOST:8773/services/Cloud"
-    fi
-fi
-
-# S3
-if [[ "$ENABLED_SERVICES" =~ "n-obj" || "$ENABLED_SERVICES" =~ "swift3" ]]; then
-    if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-        keystone service-create \
-            --name=s3 \
-            --type=s3 \
-            --description="S3"
-        keystone endpoint-create \
-            --region RegionOne \
-            --service s3 \
-            --publicurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" \
-            --adminurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" \
-            --internalurl "http://$SERVICE_HOST:$S3_SERVICE_PORT"
-    fi
-fi
-
-if [[ "$ENABLED_SERVICES" =~ "tempest" ]]; then
-    # Tempest has some tests that validate various authorization checks
-    # between two regular users in separate tenants
-    keystone tenant-create \
-        --name=alt_demo
-    keystone user-create \
-        --name=alt_demo \
-        --pass="$ADMIN_PASSWORD" \
-        --email=alt_demo@example.com
-    keystone user-role-add \
-        --tenant alt_demo \
-        --user alt_demo \
-        --role Member
-fi
diff --git a/functions b/functions
index e0d2b01..17c6e77 100644
--- a/functions
+++ b/functions
@@ -273,7 +273,7 @@
     esac
 
     if is_arch "ppc64"; then
-        IMG_PROPERTY="--property hw_disk_bus=scsi --property hw_cdrom_bus=scsi"
+        IMG_PROPERTY="--property hw_cdrom_bus=scsi"
     fi
 
     if [ "$CONTAINER_FORMAT" = "bare" ]; then
diff --git a/functions-common b/functions-common
index c6fd5c7..e6caaa3 100644
--- a/functions-common
+++ b/functions-common
@@ -824,6 +824,10 @@
             if [[ ! $file_to_parse =~ neutron ]]; then
                 file_to_parse="${file_to_parse} neutron"
             fi
+        elif [[ $service == ir-* ]]; then
+            if [[ ! $file_to_parse =~ ironic ]]; then
+                file_to_parse="${file_to_parse} ironic"
+            fi
         fi
     done
 
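The new branch mirrors the existing neutron handling: any service name matching the ir-* glob (ir-api, ir-cond) causes the ironic settings file to be merged as well. The bash pattern match in play (illustrative):

    service=ir-cond
    [[ $service == ir-* ]] && echo "parse the ironic config too"
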
diff --git a/lib/ceilometer b/lib/ceilometer
index 692ce1d..5030b3c 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -69,6 +69,11 @@
 
 # create_ceilometer_accounts() - Set up common required ceilometer accounts
 
+# Project              User         Roles
+# ------------------------------------------------------------------
+# SERVICE_TENANT_NAME  ceilometer   admin
+# SERVICE_TENANT_NAME  ceilometer   ResellerAdmin (if Swift is enabled)
+
 create_ceilometer_accounts() {
 
     SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
@@ -99,6 +104,13 @@
                 --adminurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" \
                 --internalurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/"
         fi
+        if is_service_enabled swift; then
+            # Ceilometer needs ResellerAdmin role to access swift account stats.
+            openstack role add \
+                --project $SERVICE_TENANT_NAME \
+                --user ceilometer \
+                ResellerAdmin
+        fi
     fi
 }
 
diff --git a/lib/databases/mysql b/lib/databases/mysql
index f5ee3c0..7a0145a 100644
--- a/lib/databases/mysql
+++ b/lib/databases/mysql
@@ -23,6 +23,7 @@
         stop_service $MYSQL
         apt_get purge -y mysql*
         sudo rm -rf /var/lib/mysql
+        sudo rm -rf /etc/mysql
         return
     elif is_fedora; then
         if [[ $DISTRO =~ (rhel7) ]]; then
diff --git a/lib/glance b/lib/glance
index 8a4c21b..51e4399 100644
--- a/lib/glance
+++ b/lib/glance
@@ -159,6 +159,49 @@
     cp -p $GLANCE_DIR/etc/schema-image.json $GLANCE_SCHEMA_JSON
 }
 
+# create_glance_accounts() - Set up common required glance accounts
+
+# Project              User         Roles
+# ------------------------------------------------------------------
+# SERVICE_TENANT_NAME  glance       service
+# SERVICE_TENANT_NAME  glance-swift ResellerAdmin (if Swift is enabled)
+
+function create_glance_accounts {
+    if is_service_enabled g-api; then
+        openstack user create \
+            --password "$SERVICE_PASSWORD" \
+            --project $SERVICE_TENANT_NAME \
+            glance
+        openstack role add \
+            --project $SERVICE_TENANT_NAME \
+            --user glance \
+            service
+        # required for swift access
+        if is_service_enabled s-proxy; then
+            openstack user create \
+                --password "$SERVICE_PASSWORD" \
+                --project $SERVICE_TENANT_NAME \
+                glance-swift
+            openstack role add \
+                --project $SERVICE_TENANT_NAME \
+                --user glance-swift \
+                ResellerAdmin
+        fi
+        if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+            openstack service create \
+                --type image \
+                --description "Glance Image Service" \
+                glance
+            openstack endpoint create \
+                --region RegionOne \
+                --publicurl "http://$GLANCE_HOSTPORT" \
+                --adminurl "http://$GLANCE_HOSTPORT" \
+                --internalurl "http://$GLANCE_HOSTPORT" \
+                glance
+        fi
+    fi
+}
+
 # create_glance_cache_dir() - Part of the init_glance() process
 function create_glance_cache_dir {
     # Create cache dir
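Once stack.sh has run, the accounts created above can be sanity-checked with the same client (illustrative; exact output format varies by client version):

    openstack user list       # should include glance, plus glance-swift when Swift is enabled
    openstack service list    # should include the image service when the sql catalog backend is used
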
diff --git a/lib/ironic b/lib/ironic
index 9b4f668..e5dc117 100644
--- a/lib/ironic
+++ b/lib/ironic
@@ -52,7 +52,11 @@
 IRONIC_VM_SSH_ADDRESS=${IRONIC_VM_SSH_ADDRESS:-$HOST_IP}
 IRONIC_VM_COUNT=${IRONIC_VM_COUNT:-1}
 IRONIC_VM_SPECS_CPU=${IRONIC_VM_SPECS_CPU:-1}
-IRONIC_VM_SPECS_RAM=${IRONIC_VM_SPECS_RAM:-256}
+# NOTE(agordeev): neither the ubuntu nor the fedora deploy image boots with 256MB
+#                 of RAM: the system halts with a kernel panic while unpacking the
+#                 initramfs. Ubuntu needs at least 384MB and fedora requires 448MB,
+#                 so default to 512MB to satisfy both.
+IRONIC_VM_SPECS_RAM=${IRONIC_VM_SPECS_RAM:-512}
 IRONIC_VM_SPECS_DISK=${IRONIC_VM_SPECS_DISK:-10}
 IRONIC_VM_EMULATOR=${IRONIC_VM_EMULATOR:-/usr/bin/qemu-system-x86_64}
 IRONIC_VM_NETWORK_BRIDGE=${IRONIC_VM_NETWORK_BRIDGE:-brbm}
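All of these IRONIC_VM_* knobs keep their ${VAR:-default} form, so the new 512MB default can still be overridden per deployment, e.g. in localrc (illustrative values):

    IRONIC_VM_COUNT=3
    IRONIC_VM_SPECS_RAM=1024
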
diff --git a/lib/keystone b/lib/keystone
index c6856c9..b31cc57 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -266,9 +266,11 @@
 
 # Tenant               User       Roles
 # ------------------------------------------------------------------
-# service              --         --
-# --                   --         Member
 # admin                admin      admin
+# service              --         --
+# --                   --         service
+# --                   --         ResellerAdmin
+# --                   --         Member
 # demo                 admin      admin
 # demo                 demo       Member, anotherrole
 # invisible_to_admin   demo       Member
@@ -294,10 +296,17 @@
         --project $ADMIN_TENANT \
         --user $ADMIN_USER
 
-    # service
-    SERVICE_TENANT=$(openstack project create \
-        $SERVICE_TENANT_NAME \
-        | grep " id " | get_field 2)
+    # Create service project/role
+    openstack project create $SERVICE_TENANT_NAME
+
+    # Service role, so service users do not have to be admins
+    openstack role create service
+
+    # The ResellerAdmin role is used by Nova and Ceilometer so we need to keep it.
+    # The admin role in swift allows a user to act as an admin for their tenant,
+    # but ResellerAdmin is needed for a user to act as any tenant. The name of this
+    # role is also configurable in swift-proxy.conf
+    openstack role create ResellerAdmin
 
     # The Member role is used by Horizon and Swift so we need to keep it:
     MEMBER_ROLE=$(openstack role create \
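Note that the old code captured the new project's id into SERVICE_TENANT, while the replacement only creates the project: the migrated account helpers refer to it by name via --project $SERVICE_TENANT_NAME. Where an id is still needed, it is re-derived locally, as lib/ceilometer does:

    SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
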
diff --git a/lib/neutron_plugins/bigswitch_floodlight b/lib/neutron_plugins/bigswitch_floodlight
index 4cb0da8..b1b77d7 100644
--- a/lib/neutron_plugins/bigswitch_floodlight
+++ b/lib/neutron_plugins/bigswitch_floodlight
@@ -38,7 +38,12 @@
 }
 
 function neutron_plugin_configure_plugin_agent {
-    :
+    # Set up integration bridge
+    _neutron_ovs_base_setup_bridge $OVS_BRIDGE
+    iniset /$Q_PLUGIN_CONF_FILE restproxyagent integration_bridge $OVS_BRIDGE
+    AGENT_BINARY="$NEUTRON_DIR/neutron/plugins/bigswitch/agent/restproxy_agent.py"
+
+    _neutron_ovs_base_configure_firewall_driver
 }
 
 function neutron_plugin_configure_service {
@@ -61,7 +66,7 @@
 
 function has_neutron_plugin_security_group {
     # 1 means False here
-    return 1
+    return 0
 }
 
 function neutron_plugin_check_adv_test_requirements {
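Since shell functions report booleans through their exit status, flipping the return value from 1 to 0 makes the plugin advertise security-group support to its callers (illustrative caller, for shape only):

    if has_neutron_plugin_security_group; then
        echo "security groups handled by the plugin agent"
    fi
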
diff --git a/lib/nova b/lib/nova
index b01d107..8240813 100644
--- a/lib/nova
+++ b/lib/nova
@@ -316,9 +316,10 @@
 
 # create_nova_accounts() - Set up common required nova accounts
 
-# Tenant               User       Roles
+# Project              User         Roles
 # ------------------------------------------------------------------
-# service              nova       admin, [ResellerAdmin (swift only)]
+# SERVICE_TENANT_NAME  nova         admin
+# SERVICE_TENANT_NAME  nova         ResellerAdmin (if Swift is enabled)
 
 # Migrated from keystone_data.sh
 create_nova_accounts() {
@@ -363,6 +364,48 @@
                 --internalurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v3"
         fi
     fi
+
+    if is_service_enabled n-api; then
+        # Swift
+        if is_service_enabled swift; then
+            # Nova needs ResellerAdmin role to download images when accessing
+            # swift through the s3 api.
+            openstack role add \
+                --project $SERVICE_TENANT_NAME \
+                --user nova \
+                ResellerAdmin
+        fi
+
+        # EC2
+        if [[ "$KEYSTONE_CATALOG_BACKEND" = "sql" ]]; then
+            openstack service create \
+                --type ec2 \
+                --description "EC2 Compatibility Layer" \
+                ec2
+            openstack endpoint create \
+                --region RegionOne \
+                --publicurl "http://$SERVICE_HOST:8773/services/Cloud" \
+                --adminurl "http://$SERVICE_HOST:8773/services/Admin" \
+                --internalurl "http://$SERVICE_HOST:8773/services/Cloud" \
+                ec2
+        fi
+    fi
+
+    # S3
+    if is_service_enabled n-obj swift3; then
+        if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+            openstack service create \
+                --type s3 \
+                --description "S3" \
+                s3
+            openstack endpoint create \
+                --region RegionOne \
+                --publicurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" \
+                --adminurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" \
+                --internalurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" \
+                s3
+        fi
+    fi
 }
 
 # create_nova_conf() - Create a new nova.conf file
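A detail worth noting above: is_service_enabled succeeds when any of its arguments is enabled, so the S3 block fires for either the nova objectstore (n-obj) or swift3 (illustrative):

    if is_service_enabled n-obj swift3; then
        echo "at least one S3 endpoint provider is enabled"
    fi
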
diff --git a/lib/opendaylight b/lib/opendaylight
index ca81c20..1022e2c 100644
--- a/lib/opendaylight
+++ b/lib/opendaylight
@@ -134,7 +134,7 @@
     # The flags to ODL have the following meaning:
     #   -of13: runs ODL using OpenFlow 1.3 protocol support.
     #   -virt ovsdb: Runs ODL in "virtualization" mode with OVSDB support
-    screen_it odl-server "cd $ODL_DIR/opendaylight && JAVE_HOME=$JHOME ./run.sh $ODL_ARGS -of13 -virt ovsdb"
+    screen_it odl-server "cd $ODL_DIR/opendaylight && JAVA_HOME=$JHOME ./run.sh $ODL_ARGS -of13 -virt ovsdb"
 
     # Sleep a bit to let OpenDaylight finish starting up
     sleep $ODL_BOOT_WAIT
diff --git a/lib/sahara b/lib/sahara
index 4cb04ec..7b592b0 100644
--- a/lib/sahara
+++ b/lib/sahara
@@ -46,10 +46,6 @@
 # Tell Tempest this project is present
 TEMPEST_SERVICES+=,sahara
 
-# For backward compatibility with current tests in Tempest
-TEMPEST_SERVICES+=,savanna
-
-
 # Functions
 # ---------
 
@@ -106,8 +102,7 @@
     sudo chown $STACK_USER $SAHARA_CONF_DIR
 
     # Copy over sahara configuration file and configure common parameters.
-    # TODO(slukjanov): rename when sahara internals will be updated
-    cp $SAHARA_DIR/etc/savanna/savanna.conf.sample $SAHARA_CONF_FILE
+    cp $SAHARA_DIR/etc/sahara/sahara.conf.sample $SAHARA_CONF_FILE
 
     # Create auth cache dir
     sudo mkdir -p $SAHARA_AUTH_CACHE_DIR
diff --git a/lib/swift b/lib/swift
index b655440..26ee7d6 100644
--- a/lib/swift
+++ b/lib/swift
@@ -334,11 +334,12 @@
     iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server node_timeout 120
     iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server conn_timeout 20
 
-    # Configure Ceilometer
-    if is_service_enabled ceilometer; then
-        iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer use "egg:ceilometer#swift"
-        SWIFT_EXTRAS_MIDDLEWARE_LAST="${SWIFT_EXTRAS_MIDDLEWARE_LAST} ceilometer"
-    fi
+    # Skipped due to bug 1294789
+    ## Configure Ceilometer
+    #if is_service_enabled ceilometer; then
+    #    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer use "egg:ceilometer#swift"
+    #    SWIFT_EXTRAS_MIDDLEWARE_LAST="${SWIFT_EXTRAS_MIDDLEWARE_LAST} ceilometer"
+    #fi
 
     # Restrict the length of auth tokens in the swift proxy-server logs.
     iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:proxy-logging reveal_sensitive_prefix ${SWIFT_LOG_TOKEN_LENGTH}
@@ -454,6 +455,9 @@
     sudo chown -R ${STACK_USER}:adm ${swift_log_dir}
     sed "s,%SWIFT_LOGDIR%,${swift_log_dir}," $FILES/swift/rsyslog.conf | sudo \
         tee /etc/rsyslog.d/10-swift.conf
+    # restart syslog to take the changes
+    sudo killall -HUP rsyslogd
+
     if is_apache_enabled_service swift; then
         _config_swift_apache_wsgi
     fi
@@ -627,8 +631,6 @@
 
 # start_swift() - Start running processes, including screen
 function start_swift {
-    # (re)start rsyslog
-    restart_service rsyslog
     # (re)start memcached to make sure we have a clean memcache.
     restart_service memcached
 
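With the HUP delivered right after the drop-in is written, rsyslogd re-reads /etc/rsyslog.d without a full restart, which makes the unconditional restart in start_swift redundant. A quick way to confirm the routing took effect, assuming the rsyslog.conf template matches on the swift program-name prefix (illustrative):

    logger -t swift-test "hello"           # push a tagged message through syslog
    grep -r swift-test ${swift_log_dir}/   # it should land in the swift log directory
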
diff --git a/lib/tempest b/lib/tempest
index c74f00d..b164455 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -293,6 +293,9 @@
     iniset $TEMPEST_CONFIG compute use_block_migration_for_live_migration ${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False}
     iniset $TEMPEST_CONFIG compute ssh_connect_method $ssh_connect_method
 
+    # Compute Features
+    iniset $TEMPEST_CONFIG compute-feature-enabled resize True
+
     # Compute admin
     iniset $TEMPEST_CONFIG "compute-admin" username $ADMIN_USERNAME
     iniset $TEMPEST_CONFIG "compute-admin" password "$password"
@@ -310,6 +313,9 @@
     iniset $TEMPEST_CONFIG boto ec2_url "http://$SERVICE_HOST:8773/services/Cloud"
     iniset $TEMPEST_CONFIG boto s3_url "http://$SERVICE_HOST:${S3_SERVICE_PORT:-3333}"
     iniset $TEMPEST_CONFIG boto s3_materials_path "$BOTO_MATERIALS_PATH"
+    iniset $TEMPEST_CONFIG boto ari_manifest cirros-0.3.1-x86_64-initrd.manifest.xml
+    iniset $TEMPEST_CONFIG boto ami_manifest cirros-0.3.1-x86_64-blank.img.manifest.xml
+    iniset $TEMPEST_CONFIG boto aki_manifest cirros-0.3.1-x86_64-vmlinuz.manifest.xml
     iniset $TEMPEST_CONFIG boto instance_type "$boto_instance_type"
     iniset $TEMPEST_CONFIG boto http_socket_timeout 30
     iniset $TEMPEST_CONFIG boto ssh_user ${DEFAULT_INSTANCE_USER:-cirros}
@@ -369,6 +375,30 @@
     $errexit
 }
 
+# create_tempest_accounts() - Set up common required tempest accounts
+
+# Project              User         Roles
+# ------------------------------------------------------------------
+# alt_demo             alt_demo     Member
+
+# Migrated from keystone_data.sh
+function create_tempest_accounts {
+    if is_service_enabled tempest; then
+        # Tempest has some tests that validate various authorization checks
+        # between two regular users in separate tenants
+        openstack project create \
+            alt_demo
+        openstack user create \
+            --project alt_demo \
+            --password "$ADMIN_PASSWORD" \
+            alt_demo
+        openstack role add \
+            --project alt_demo \
+            --user alt_demo \
+            Member
+    fi
+}
+
 # install_tempest() - Collect source and prepare
 function install_tempest {
     git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH
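The new boto iniset calls should leave tempest.conf with a section roughly like the following (sketch; host and port placeholders depend on SERVICE_HOST and S3_SERVICE_PORT):

    [boto]
    ec2_url = http://<SERVICE_HOST>:8773/services/Cloud
    s3_url = http://<SERVICE_HOST>:<S3_SERVICE_PORT>
    ari_manifest = cirros-0.3.1-x86_64-initrd.manifest.xml
    ami_manifest = cirros-0.3.1-x86_64-blank.img.manifest.xml
    aki_manifest = cirros-0.3.1-x86_64-vmlinuz.manifest.xml
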
diff --git a/stack.sh b/stack.sh
index 7fa4d37..6bf5bd7 100755
--- a/stack.sh
+++ b/stack.sh
@@ -195,6 +195,7 @@
 # Some binaries might be under /sbin or /usr/sbin, so make sure sudo will
 # see them by forcing PATH
 echo "Defaults:$STACK_USER secure_path=/sbin:/usr/sbin:/usr/bin:/bin:/usr/local/sbin:/usr/local/bin" >> $TEMPFILE
+echo "Defaults:$STACK_USER !requiretty" >> $TEMPFILE
 chmod 0440 $TEMPFILE
 sudo chown root:root $TEMPFILE
 sudo mv $TEMPFILE /etc/sudoers.d/50_stack_sh
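The !requiretty default matters on distributions (notably RHEL/CentOS) that ship with requiretty enabled, where sudo would otherwise fail from the non-interactive Ironic helper scripts. The finished drop-in then reads roughly as follows (sketch, assuming STACK_USER=stack):

    # /etc/sudoers.d/50_stack_sh
    stack ALL=(root) NOPASSWD:ALL
    Defaults:stack secure_path=/sbin:/usr/sbin:/usr/bin:/bin:/usr/local/sbin:/usr/local/bin
    Defaults:stack !requiretty
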
@@ -908,14 +909,13 @@
         SERVICE_ENDPOINT=http://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT_INT/v2.0
     fi
 
-    # Do the keystone-specific bits from keystone_data.sh
-    export OS_SERVICE_TOKEN=$SERVICE_TOKEN
-    export OS_SERVICE_ENDPOINT=$SERVICE_ENDPOINT
-    # Add temporarily to make openstackclient work
+    # Setup OpenStackclient token-flow auth
     export OS_TOKEN=$SERVICE_TOKEN
     export OS_URL=$SERVICE_ENDPOINT
+
     create_keystone_accounts
     create_nova_accounts
+    create_glance_accounts
     create_cinder_accounts
     create_neutron_accounts
 
@@ -923,7 +923,7 @@
         create_ceilometer_accounts
     fi
 
-    if is_service_enabled swift || is_service_enabled s-proxy; then
+    if is_service_enabled swift; then
         create_swift_accounts
     fi
 
@@ -931,20 +931,14 @@
         create_heat_accounts
     fi
 
-    # ``keystone_data.sh`` creates services, admin and demo users, and roles.
-    ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD \
-    SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT SERVICE_HOST=$SERVICE_HOST \
-    S3_SERVICE_PORT=$S3_SERVICE_PORT KEYSTONE_CATALOG_BACKEND=$KEYSTONE_CATALOG_BACKEND \
-    DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES \
-        bash -x $FILES/keystone_data.sh
-
-    # Set up auth creds now that keystone is bootstrapped
+    # Begone token-flow auth
     unset OS_TOKEN OS_URL
+
+    # Set up password-flow auth creds now that keystone is bootstrapped
     export OS_AUTH_URL=$SERVICE_ENDPOINT
     export OS_TENANT_NAME=admin
     export OS_USERNAME=admin
     export OS_PASSWORD=$ADMIN_PASSWORD
-    unset OS_SERVICE_TOKEN OS_SERVICE_ENDPOINT
 fi
 
 
@@ -1135,15 +1129,9 @@
 
 # Create an access key and secret key for nova ec2 register image
 if is_service_enabled key && is_service_enabled swift3 && is_service_enabled nova; then
-    NOVA_USER_ID=$(keystone user-list | grep ' nova ' | get_field 1)
-    die_if_not_set $LINENO NOVA_USER_ID "Failure retrieving NOVA_USER_ID for nova"
-    NOVA_TENANT_ID=$(keystone tenant-list | grep " $SERVICE_TENANT_NAME " | get_field 1)
-    die_if_not_set $LINENO NOVA_TENANT_ID "Failure retrieving NOVA_TENANT_ID for $SERVICE_TENANT_NAME"
-    CREDS=$(keystone ec2-credentials-create --user-id $NOVA_USER_ID --tenant-id $NOVA_TENANT_ID)
-    ACCESS_KEY=$(echo "$CREDS" | awk '/ access / { print $4 }')
-    SECRET_KEY=$(echo "$CREDS" | awk '/ secret / { print $4 }')
-    iniset $NOVA_CONF DEFAULT s3_access_key "$ACCESS_KEY"
-    iniset $NOVA_CONF DEFAULT s3_secret_key "$SECRET_KEY"
+    eval $(openstack ec2 credentials create --user nova --project $SERVICE_TENANT_NAME -f shell -c access -c secret)
+    iniset $NOVA_CONF DEFAULT s3_access_key "$access"
+    iniset $NOVA_CONF DEFAULT s3_secret_key "$secret"
     iniset $NOVA_CONF DEFAULT s3_affix_tenant "True"
 fi
 
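The replacement leans on the client's shell output formatter: with -f shell -c access -c secret the command prints the selected fields as variable assignments, which eval then defines as $access and $secret in the current shell. The output looks roughly like this (placeholder values):

    access="<access-key>"
    secret="<secret-key>"
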
diff --git a/tools/ironic/scripts/cleanup-nodes b/tools/ironic/scripts/cleanup-nodes
index dc5a19d..adeca5c 100755
--- a/tools/ironic/scripts/cleanup-nodes
+++ b/tools/ironic/scripts/cleanup-nodes
@@ -8,10 +8,13 @@
 set -exu
 
 LIBVIRT_STORAGE_POOL=${LIBVIRT_STORAGE_POOL:-"default"}
+LIBVIRT_CONNECT_URI=${LIBVIRT_CONNECT_URI:-"qemu:///system"}
 
 VM_COUNT=$1
 NETWORK_BRIDGE=$2
 
+export VIRSH_DEFAULT_CONNECT_URI=$LIBVIRT_CONNECT_URI
+
 for (( idx=0; idx<$VM_COUNT; idx++ )); do
     NAME="baremetal${NETWORK_BRIDGE}_${idx}"
     VOL_NAME="baremetal${NETWORK_BRIDGE}-${idx}.qcow2"
diff --git a/tools/ironic/scripts/create-nodes b/tools/ironic/scripts/create-nodes
index 3232b50..d81113a 100755
--- a/tools/ironic/scripts/create-nodes
+++ b/tools/ironic/scripts/create-nodes
@@ -27,6 +27,9 @@
 
 LIBVIRT_NIC_DRIVER=${LIBVIRT_NIC_DRIVER:-"e1000"}
 LIBVIRT_STORAGE_POOL=${LIBVIRT_STORAGE_POOL:-"default"}
+LIBVIRT_CONNECT_URI=${LIBVIRT_CONNECT_URI:-"qemu:///system"}
+
+export VIRSH_DEFAULT_CONNECT_URI=$LIBVIRT_CONNECT_URI
 
 if ! virsh pool-list --all | grep -q $LIBVIRT_STORAGE_POOL; then
     virsh pool-define-as --name $LIBVIRT_STORAGE_POOL dir --target /var/lib/libvirt/images >&2
diff --git a/tools/ironic/scripts/setup-network b/tools/ironic/scripts/setup-network
index 8c3ea90..e326bf8 100755
--- a/tools/ironic/scripts/setup-network
+++ b/tools/ironic/scripts/setup-network
@@ -7,11 +7,15 @@
 
 set -exu
 
+LIBVIRT_CONNECT_URI=${LIBVIRT_CONNECT_URI:-"qemu:///system"}
+
 # Keep track of the devstack directory
 TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
 BRIDGE_SUFFIX=${1:-''}
 BRIDGE_NAME=brbm$BRIDGE_SUFFIX
 
+export VIRSH_DEFAULT_CONNECT_URI="$LIBVIRT_CONNECT_URI"
+
 # Only add bridge if missing
 (sudo ovs-vsctl list-br | grep ${BRIDGE_NAME}$) || sudo ovs-vsctl add-br ${BRIDGE_NAME}
 
diff --git a/tools/jenkins/jenkins_home/print_summary.py b/tools/jenkins/jenkins_home/print_summary.py
index ee3790f..8be500b 100755
--- a/tools/jenkins/jenkins_home/print_summary.py
+++ b/tools/jenkins/jenkins_home/print_summary.py
@@ -18,8 +18,8 @@
 
 
 def print_usage():
-    print ("Usage: %s [jenkins_url (eg. http://50.56.12.202:8080/)]"
-           % sys.argv[0])
+    print("Usage: %s [jenkins_url (eg. http://50.56.12.202:8080/)]"
+          % sys.argv[0])
     sys.exit()