Merge "D) Reorganize top of stack.sh"
diff --git a/AUTHORS b/AUTHORS
index 8645615..820a677 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -17,6 +17,7 @@
Jay Pipes <jaypipes@gmail.com>
Jesse Andrews <anotherjesse@gmail.com>
Johannes Erdfelt <johannes.erdfelt@rackspace.com>
+Josh Kearney <josh@jk0.org>
Justin Shepherd <galstrom21@gmail.com>
Ken Pepple <ken.pepple@rabbityard.com>
Kiall Mac Innes <kiall@managedit.ie>
diff --git a/files/glance-api-paste.ini b/files/glance-api-paste.ini
deleted file mode 100644
index 5cfd22f..0000000
--- a/files/glance-api-paste.ini
+++ /dev/null
@@ -1,39 +0,0 @@
-[pipeline:glance-api]
-#pipeline = versionnegotiation context apiv1app
-# NOTE: use the following pipeline for keystone
-pipeline = versionnegotiation authtoken context apiv1app
-
-# To enable Image Cache Management API replace pipeline with below:
-# pipeline = versionnegotiation context imagecache apiv1app
-# NOTE: use the following pipeline for keystone auth (with caching)
-# pipeline = versionnegotiation authtoken auth-context imagecache apiv1app
-
-[app:apiv1app]
-paste.app_factory = glance.common.wsgi:app_factory
-glance.app_factory = glance.api.v1.router:API
-
-[filter:versionnegotiation]
-paste.filter_factory = glance.common.wsgi:filter_factory
-glance.filter_factory = glance.api.middleware.version_negotiation:VersionNegotiationFilter
-
-[filter:cache]
-paste.filter_factory = glance.common.wsgi:filter_factory
-glance.filter_factory = glance.api.middleware.cache:CacheFilter
-
-[filter:cachemanage]
-paste.filter_factory = glance.common.wsgi:filter_factory
-glance.filter_factory = glance.api.middleware.cache_manage:CacheManageFilter
-
-[filter:context]
-paste.filter_factory = glance.common.wsgi:filter_factory
-glance.filter_factory = glance.common.context:ContextMiddleware
-
-[filter:authtoken]
-paste.filter_factory = keystone.middleware.auth_token:filter_factory
-auth_host = %KEYSTONE_AUTH_HOST%
-auth_port = %KEYSTONE_AUTH_PORT%
-auth_protocol = %KEYSTONE_AUTH_PROTOCOL%
-auth_uri = %KEYSTONE_SERVICE_PROTOCOL%://%KEYSTONE_SERVICE_HOST%:%KEYSTONE_SERVICE_PORT%/
-admin_tenant_name = %SERVICE_TENANT_NAME%
-admin_user = %SERVICE_USERNAME%
-admin_password = %SERVICE_PASSWORD%
diff --git a/files/glance-api.conf b/files/glance-api.conf
deleted file mode 100644
index b4ba098..0000000
--- a/files/glance-api.conf
+++ /dev/null
@@ -1,139 +0,0 @@
-[DEFAULT]
-# Show more verbose log output (sets INFO log level output)
-verbose = True
-
-# Show debugging output in logs (sets DEBUG log level output)
-debug = True
-
-# Which backend store should Glance use by default is not specified
-# in a request to add a new image to Glance? Default: 'file'
-# Available choices are 'file', 'swift', and 's3'
-default_store = file
-
-# Address to bind the API server
-bind_host = 0.0.0.0
-
-# Port the bind the API server to
-bind_port = 9292
-
-# Address to find the registry server
-registry_host = 0.0.0.0
-
-# Port the registry server is listening on
-registry_port = 9191
-
-# Log to this file. Make sure you do not set the same log
-# file for both the API and registry servers!
-#log_file = %DEST%/glance/api.log
-
-# Send logs to syslog (/dev/log) instead of to file specified by `log_file`
-use_syslog = %SYSLOG%
-
-# ============ Notification System Options =====================
-
-# Notifications can be sent when images are create, updated or deleted.
-# There are three methods of sending notifications, logging (via the
-# log_file directive), rabbit (via a rabbitmq queue) or noop (no
-# notifications sent, the default)
-notifier_strategy = noop
-
-# Configuration options if sending notifications via rabbitmq (these are
-# the defaults)
-rabbit_host = localhost
-rabbit_port = 5672
-rabbit_use_ssl = false
-rabbit_userid = guest
-rabbit_password = guest
-rabbit_virtual_host = /
-rabbit_notification_topic = glance_notifications
-
-# ============ Filesystem Store Options ========================
-
-# Directory that the Filesystem backend store
-# writes image data to
-filesystem_store_datadir = %DEST%/glance/images/
-
-# ============ Swift Store Options =============================
-
-# Address where the Swift authentication service lives
-swift_store_auth_address = 127.0.0.1:8080/v1.0/
-
-# User to authenticate against the Swift authentication service
-swift_store_user = jdoe
-
-# Auth key for the user authenticating against the
-# Swift authentication service
-swift_store_key = a86850deb2742ec3cb41518e26aa2d89
-
-# Container within the account that the account should use
-# for storing images in Swift
-swift_store_container = glance
-
-# Do we create the container if it does not exist?
-swift_store_create_container_on_put = False
-
-# What size, in MB, should Glance start chunking image files
-# and do a large object manifest in Swift? By default, this is
-# the maximum object size in Swift, which is 5GB
-swift_store_large_object_size = 5120
-
-# When doing a large object manifest, what size, in MB, should
-# Glance write chunks to Swift? This amount of data is written
-# to a temporary disk buffer during the process of chunking
-# the image file, and the default is 200MB
-swift_store_large_object_chunk_size = 200
-
-# Whether to use ServiceNET to communicate with the Swift storage servers.
-# (If you aren't RACKSPACE, leave this False!)
-#
-# To use ServiceNET for authentication, prefix hostname of
-# `swift_store_auth_address` with 'snet-'.
-# Ex. https://example.com/v1.0/ -> https://snet-example.com/v1.0/
-swift_enable_snet = False
-
-# ============ S3 Store Options =============================
-
-# Address where the S3 authentication service lives
-s3_store_host = 127.0.0.1:8080/v1.0/
-
-# User to authenticate against the S3 authentication service
-s3_store_access_key = <20-char AWS access key>
-
-# Auth key for the user authenticating against the
-# S3 authentication service
-s3_store_secret_key = <40-char AWS secret key>
-
-# Container within the account that the account should use
-# for storing images in S3. Note that S3 has a flat namespace,
-# so you need a unique bucket name for your glance images. An
-# easy way to do this is append your AWS access key to "glance".
-# S3 buckets in AWS *must* be lowercased, so remember to lowercase
-# your AWS access key if you use it in your bucket name below!
-s3_store_bucket = <lowercased 20-char aws access key>glance
-
-# Do we create the bucket if it does not exist?
-s3_store_create_bucket_on_put = False
-
-# ============ Image Cache Options ========================
-
-image_cache_enabled = False
-
-# Directory that the Image Cache writes data to
-# Make sure this is also set in glance-pruner.conf
-image_cache_datadir = /var/lib/glance/image-cache/
-
-# Number of seconds after which we should consider an incomplete image to be
-# stalled and eligible for reaping
-image_cache_stall_timeout = 86400
-
-# ============ Delayed Delete Options =============================
-
-# Turn on/off delayed delete
-delayed_delete = False
-
-# Delayed delete time in seconds
-scrub_time = 43200
-
-# Directory that the scrubber will use to remind itself of what to delete
-# Make sure this is also set in glance-scrubber.conf
-scrubber_datadir = /var/lib/glance/scrubber
diff --git a/files/glance-registry-paste.ini b/files/glance-registry-paste.ini
deleted file mode 100644
index b792aa8..0000000
--- a/files/glance-registry-paste.ini
+++ /dev/null
@@ -1,23 +0,0 @@
-[pipeline:glance-registry]
-#pipeline = context registryapp
-# NOTE: use the following pipeline for keystone
-pipeline = authtoken context registryapp
-
-[app:registryapp]
-paste.app_factory = glance.common.wsgi:app_factory
-glance.app_factory = glance.registry.api.v1:API
-
-[filter:context]
-context_class = glance.registry.context.RequestContext
-paste.filter_factory = glance.common.wsgi:filter_factory
-glance.filter_factory = glance.common.context:ContextMiddleware
-
-[filter:authtoken]
-paste.filter_factory = keystone.middleware.auth_token:filter_factory
-auth_host = %KEYSTONE_AUTH_HOST%
-auth_port = %KEYSTONE_AUTH_PORT%
-auth_protocol = %KEYSTONE_AUTH_PROTOCOL%
-auth_uri = %KEYSTONE_SERVICE_PROTOCOL%://%KEYSTONE_SERVICE_HOST%:%KEYSTONE_SERVICE_PORT%/
-admin_tenant_name = %SERVICE_TENANT_NAME%
-admin_user = %SERVICE_USERNAME%
-admin_password = %SERVICE_PASSWORD%
diff --git a/files/glance-registry.conf b/files/glance-registry.conf
deleted file mode 100644
index 2c32745..0000000
--- a/files/glance-registry.conf
+++ /dev/null
@@ -1,44 +0,0 @@
-[DEFAULT]
-# Show more verbose log output (sets INFO log level output)
-verbose = True
-
-# Show debugging output in logs (sets DEBUG log level output)
-debug = True
-
-# Address to bind the registry server
-bind_host = 0.0.0.0
-
-# Port the bind the registry server to
-bind_port = 9191
-
-# Log to this file. Make sure you do not set the same log
-# file for both the API and registry servers!
-#log_file = %DEST%/glance/registry.log
-
-# Where to store images
-filesystem_store_datadir = %DEST%/glance/images
-
-# Send logs to syslog (/dev/log) instead of to file specified by `log_file`
-use_syslog = %SYSLOG%
-
-# SQLAlchemy connection string for the reference implementation
-# registry server. Any valid SQLAlchemy connection string is fine.
-# See: http://www.sqlalchemy.org/docs/05/reference/sqlalchemy/connections.html#sqlalchemy.create_engine
-sql_connection = %SQL_CONN%
-
-# Period in seconds after which SQLAlchemy should reestablish its connection
-# to the database.
-#
-# MySQL uses a default `wait_timeout` of 8 hours, after which it will drop
-# idle connections. This can result in 'MySQL Gone Away' exceptions. If you
-# notice this, you can lower this value to ensure that SQLAlchemy reconnects
-# before MySQL can drop the connection.
-sql_idle_timeout = 3600
-
-# Limit the api to return `param_limit_max` items in a call to a container. If
-# a larger `limit` query param is provided, it will be reduced to this value.
-api_limit_max = 1000
-
-# If a `limit` query param is not provided in an api request, it will
-# default to `limit_param_default`
-limit_param_default = 25
diff --git a/files/keystone.conf b/files/keystone.conf
deleted file mode 100644
index 1a924ed..0000000
--- a/files/keystone.conf
+++ /dev/null
@@ -1,99 +0,0 @@
-[DEFAULT]
-bind_host = 0.0.0.0
-public_port = 5000
-admin_port = 35357
-admin_token = %SERVICE_TOKEN%
-compute_port = 3000
-verbose = True
-debug = True
-# commented out so devstack logs to stdout
-# log_file = %DEST%/keystone/keystone.log
-
-# ================= Syslog Options ============================
-# Send logs to syslog (/dev/log) instead of to file specified
-# by `log-file`
-use_syslog = False
-
-# Facility to use. If unset defaults to LOG_USER.
-# syslog_log_facility = LOG_LOCAL0
-
-[sql]
-connection = %SQL_CONN%
-idle_timeout = 30
-min_pool_size = 5
-max_pool_size = 10
-pool_timeout = 200
-
-[identity]
-driver = keystone.identity.backends.sql.Identity
-
-[catalog]
-driver = keystone.catalog.backends.templated.TemplatedCatalog
-template_file = %KEYSTONE_DIR%/etc/default_catalog.templates
-
-[token]
-driver = keystone.token.backends.kvs.Token
-
-[policy]
-driver = keystone.policy.backends.rules.Policy
-
-[ec2]
-driver = keystone.contrib.ec2.backends.sql.Ec2
-
-[filter:debug]
-paste.filter_factory = keystone.common.wsgi:Debug.factory
-
-[filter:token_auth]
-paste.filter_factory = keystone.middleware:TokenAuthMiddleware.factory
-
-[filter:admin_token_auth]
-paste.filter_factory = keystone.middleware:AdminTokenAuthMiddleware.factory
-
-[filter:xml_body]
-paste.filter_factory = keystone.middleware:XmlBodyMiddleware.factory
-
-[filter:json_body]
-paste.filter_factory = keystone.middleware:JsonBodyMiddleware.factory
-
-[filter:crud_extension]
-paste.filter_factory = keystone.contrib.admin_crud:CrudExtension.factory
-
-[filter:ec2_extension]
-paste.filter_factory = keystone.contrib.ec2:Ec2Extension.factory
-
-[filter:s3_extension]
-paste.filter_factory = keystone.contrib.s3:S3Extension.factory
-
-[app:public_service]
-paste.app_factory = keystone.service:public_app_factory
-
-[app:admin_service]
-paste.app_factory = keystone.service:admin_app_factory
-
-[pipeline:public_api]
-pipeline = token_auth admin_token_auth xml_body json_body debug ec2_extension public_service
-
-[pipeline:admin_api]
-pipeline = token_auth admin_token_auth xml_body json_body debug ec2_extension s3_extension crud_extension admin_service
-
-[app:public_version_service]
-paste.app_factory = keystone.service:public_version_app_factory
-
-[app:admin_version_service]
-paste.app_factory = keystone.service:admin_version_app_factory
-
-[pipeline:public_version_api]
-pipeline = xml_body public_version_service
-
-[pipeline:admin_version_api]
-pipeline = xml_body admin_version_service
-
-[composite:main]
-use = egg:Paste#urlmap
-/v2.0 = public_api
-/ = public_version_api
-
-[composite:admin]
-use = egg:Paste#urlmap
-/v2.0 = admin_api
-/ = admin_version_api
diff --git a/files/swift/account-server.conf b/files/swift/account-server.conf
index db0f097..763c306 100644
--- a/files/swift/account-server.conf
+++ b/files/swift/account-server.conf
@@ -4,7 +4,7 @@
bind_port = %BIND_PORT%
user = %USER%
log_facility = LOG_LOCAL%LOG_FACILITY%
-swift_dir = %SWIFT_CONFIG_LOCATION%
+swift_dir = %SWIFT_CONFIG_DIR%
[pipeline:main]
pipeline = account-server
diff --git a/files/swift/container-server.conf b/files/swift/container-server.conf
index bdc3e3a..106dcab 100644
--- a/files/swift/container-server.conf
+++ b/files/swift/container-server.conf
@@ -4,7 +4,7 @@
bind_port = %BIND_PORT%
user = %USER%
log_facility = LOG_LOCAL%LOG_FACILITY%
-swift_dir = %SWIFT_CONFIG_LOCATION%
+swift_dir = %SWIFT_CONFIG_DIR%
[pipeline:main]
pipeline = container-server
diff --git a/files/swift/object-server.conf b/files/swift/object-server.conf
index 2f888a2..7eea67d 100644
--- a/files/swift/object-server.conf
+++ b/files/swift/object-server.conf
@@ -4,7 +4,7 @@
bind_port = %BIND_PORT%
user = %USER%
log_facility = LOG_LOCAL%LOG_FACILITY%
-swift_dir = %SWIFT_CONFIG_LOCATION%
+swift_dir = %SWIFT_CONFIG_DIR%
[pipeline:main]
pipeline = object-server
diff --git a/files/swift/proxy-server.conf b/files/swift/proxy-server.conf
index 1627af0..ce5473b 100644
--- a/files/swift/proxy-server.conf
+++ b/files/swift/proxy-server.conf
@@ -1,7 +1,7 @@
[DEFAULT]
bind_port = 8080
user = %USER%
-swift_dir = %SWIFT_CONFIG_LOCATION%
+swift_dir = %SWIFT_CONFIG_DIR%
workers = 1
log_name = swift
log_facility = LOG_LOCAL1
diff --git a/files/swift/rsyncd.conf b/files/swift/rsyncd.conf
index 66215c7..4e0dcbf 100644
--- a/files/swift/rsyncd.conf
+++ b/files/swift/rsyncd.conf
@@ -6,74 +6,74 @@
[account6012]
max connections = 25
-path = %SWIFT_DATA_LOCATION%/1/node/
+path = %SWIFT_DATA_DIR%/1/node/
read only = false
lock file = /var/lock/account6012.lock
[account6022]
max connections = 25
-path = %SWIFT_DATA_LOCATION%/2/node/
+path = %SWIFT_DATA_DIR%/2/node/
read only = false
lock file = /var/lock/account6022.lock
[account6032]
max connections = 25
-path = %SWIFT_DATA_LOCATION%/3/node/
+path = %SWIFT_DATA_DIR%/3/node/
read only = false
lock file = /var/lock/account6032.lock
[account6042]
max connections = 25
-path = %SWIFT_DATA_LOCATION%/4/node/
+path = %SWIFT_DATA_DIR%/4/node/
read only = false
lock file = /var/lock/account6042.lock
[container6011]
max connections = 25
-path = %SWIFT_DATA_LOCATION%/1/node/
+path = %SWIFT_DATA_DIR%/1/node/
read only = false
lock file = /var/lock/container6011.lock
[container6021]
max connections = 25
-path = %SWIFT_DATA_LOCATION%/2/node/
+path = %SWIFT_DATA_DIR%/2/node/
read only = false
lock file = /var/lock/container6021.lock
[container6031]
max connections = 25
-path = %SWIFT_DATA_LOCATION%/3/node/
+path = %SWIFT_DATA_DIR%/3/node/
read only = false
lock file = /var/lock/container6031.lock
[container6041]
max connections = 25
-path = %SWIFT_DATA_LOCATION%/4/node/
+path = %SWIFT_DATA_DIR%/4/node/
read only = false
lock file = /var/lock/container6041.lock
[object6010]
max connections = 25
-path = %SWIFT_DATA_LOCATION%/1/node/
+path = %SWIFT_DATA_DIR%/1/node/
read only = false
lock file = /var/lock/object6010.lock
[object6020]
max connections = 25
-path = %SWIFT_DATA_LOCATION%/2/node/
+path = %SWIFT_DATA_DIR%/2/node/
read only = false
lock file = /var/lock/object6020.lock
[object6030]
max connections = 25
-path = %SWIFT_DATA_LOCATION%/3/node/
+path = %SWIFT_DATA_DIR%/3/node/
read only = false
lock file = /var/lock/object6030.lock
[object6040]
max connections = 25
-path = %SWIFT_DATA_LOCATION%/4/node/
+path = %SWIFT_DATA_DIR%/4/node/
read only = false
lock file = /var/lock/object6040.lock
diff --git a/functions b/functions
index ecfda05..5114de1 100644
--- a/functions
+++ b/functions
@@ -184,7 +184,7 @@
# Comment an option in an INI file
-# optset config-file section option
+# iniset config-file section option
function inicomment() {
local file=$1
local section=$2
@@ -194,7 +194,7 @@
# Get an option from an INI file
-# optget config-file section option
+# iniget config-file section option
function iniget() {
local file=$1
local section=$2
@@ -206,16 +206,25 @@
# Set an option in an INI file
-# This is NOT a complete option setter, it assumes that the section and
-# option already exist in the INI file. If the section does not exist,
-# nothing happens.
-# optset config-file section option value
+# iniset config-file section option value
function iniset() {
local file=$1
local section=$2
local option=$3
local value=$4
- sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=[ \t]*\).*$|\1$value|" $file
+ if ! grep -q "^\[$section\]" $file; then
+ # Add section at the end
+ echo -e "\n[$section]" >>$file
+ fi
+ if [[ -z "$(iniget $file $section $option)" ]]; then
+ # Add it
+ sed -i -e "/^\[$section\]/ a\\
+$option = $value
+" $file
+ else
+ # Replace it
+ sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=[ \t]*\).*$|\1$value|" $file
+ fi
}
diff --git a/stack.sh b/stack.sh
index 4fee133..8a93608 100755
--- a/stack.sh
+++ b/stack.sh
@@ -391,13 +391,13 @@
# TODO: add logging to different location.
# By default the location of swift drives and objects is located inside
-# the swift source directory. SWIFT_DATA_LOCATION variable allow you to redefine
+# the swift source directory. The SWIFT_DATA_DIR variable allows you to redefine
# this.
-SWIFT_DATA_LOCATION=${SWIFT_DATA_LOCATION:-${SWIFT_DIR}/data}
+SWIFT_DATA_DIR=${SWIFT_DATA_DIR:-${DEST}/data/swift}
# We are going to have the configuration files inside the source
-# directory, change SWIFT_CONFIG_LOCATION if you want to adjust that.
-SWIFT_CONFIG_LOCATION=${SWIFT_CONFIG_LOCATION:-${SWIFT_DIR}/config}
+# directory, change SWIFT_CONFIG_DIR if you want to adjust that.
+SWIFT_CONFIG_DIR=${SWIFT_CONFIG_DIR:-/etc/swift}
# devstack will create a loop-back disk formatted as XFS to store the
# swift data. By default the disk size is 1 gigabyte. The variable
@@ -763,6 +763,10 @@
restart_service mysql
fi
+if [ -z "$SCREEN_HARDSTATUS" ]; then
+ SCREEN_HARDSTATUS='%{= .} %-Lw%{= .}%> %n%f %t*%{= .}%+Lw%< %-=%{g}(%{d}%H/%l%{g})'
+fi
+
# Our screenrc file builder
function screen_rc {
SCREENRC=$TOP_DIR/stack-screenrc
@@ -770,7 +774,7 @@
# Name the screen session
echo "sessionname stack" > $SCREENRC
# Set a reasonable statusbar
- echo 'hardstatus alwayslastline "%-Lw%{= BW}%50>%n%f* %t%{-}%+Lw%< %= %H"' >> $SCREENRC
+ echo "hardstatus alwayslastline '$SCREEN_HARDSTATUS'" >> $SCREENRC
echo "screen -t stack bash" >> $SCREENRC
fi
# If this service doesn't already exist in the screenrc file
@@ -807,7 +811,7 @@
screen -d -m -S stack -t stack -s /bin/bash
sleep 1
# set a reasonable statusbar
-screen -r stack -X hardstatus alwayslastline "%-Lw%{= BW}%50>%n%f* %t%{-}%+Lw%< %= %H"
+screen -r stack -X hardstatus alwayslastline "$SCREEN_HARDSTATUS"
# Horizon
# -------
@@ -866,45 +870,42 @@
mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS glance;'
mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE glance CHARACTER SET utf8;'
- function glance_config {
- sudo sed -e "
- s,%KEYSTONE_API_PORT%,$KEYSTONE_API_PORT,g;
- s,%KEYSTONE_AUTH_HOST%,$KEYSTONE_AUTH_HOST,g;
- s,%KEYSTONE_AUTH_PORT%,$KEYSTONE_AUTH_PORT,g;
- s,%KEYSTONE_AUTH_PROTOCOL%,$KEYSTONE_AUTH_PROTOCOL,g;
- s,%KEYSTONE_SERVICE_HOST%,$KEYSTONE_SERVICE_HOST,g;
- s,%KEYSTONE_SERVICE_PORT%,$KEYSTONE_SERVICE_PORT,g;
- s,%KEYSTONE_SERVICE_PROTOCOL%,$KEYSTONE_SERVICE_PROTOCOL,g;
- s,%SQL_CONN%,$BASE_SQL_CONN/glance?charset=utf8,g;
- s,%SERVICE_TENANT_NAME%,$SERVICE_TENANT_NAME,g;
- s,%SERVICE_USERNAME%,glance,g;
- s,%SERVICE_PASSWORD%,$SERVICE_PASSWORD,g;
- s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g;
- s,%DEST%,$DEST,g;
- s,%SYSLOG%,$SYSLOG,g;
- " -i $1
- }
-
# Copy over our glance configurations and update them
GLANCE_REGISTRY_CONF=$GLANCE_CONF_DIR/glance-registry.conf
- cp $FILES/glance-registry.conf $GLANCE_REGISTRY_CONF
- glance_config $GLANCE_REGISTRY_CONF
+ cp $GLANCE_DIR/etc/glance-registry.conf $GLANCE_REGISTRY_CONF
+ iniset $GLANCE_REGISTRY_CONF DEFAULT debug True
+ inicomment $GLANCE_REGISTRY_CONF DEFAULT log_file
+ iniset $GLANCE_REGISTRY_CONF DEFAULT sql_connection $BASE_SQL_CONN/glance?charset=utf8
+ iniset $GLANCE_REGISTRY_CONF DEFAULT use_syslog $SYSLOG
+ iniset $GLANCE_REGISTRY_CONF paste_deploy flavor keystone
- if [[ -e $FILES/glance-registry-paste.ini ]]; then
- GLANCE_REGISTRY_PASTE_INI=$GLANCE_CONF_DIR/glance-registry-paste.ini
- cp $FILES/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI
- glance_config $GLANCE_REGISTRY_PASTE_INI
- fi
+ GLANCE_REGISTRY_PASTE_INI=$GLANCE_CONF_DIR/glance-registry-paste.ini
+ cp $GLANCE_DIR/etc/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI
+ iniset $GLANCE_REGISTRY_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST
+ iniset $GLANCE_REGISTRY_PASTE_INI filter:authtoken auth_port $KEYSTONE_AUTH_PORT
+ iniset $GLANCE_REGISTRY_PASTE_INI filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
+ iniset $GLANCE_REGISTRY_PASTE_INI filter:authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/
+ iniset $GLANCE_REGISTRY_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME
+ iniset $GLANCE_REGISTRY_PASTE_INI filter:authtoken admin_user glance
+ iniset $GLANCE_REGISTRY_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD
GLANCE_API_CONF=$GLANCE_CONF_DIR/glance-api.conf
- cp $FILES/glance-api.conf $GLANCE_API_CONF
- glance_config $GLANCE_API_CONF
+ cp $GLANCE_DIR/etc/glance-api.conf $GLANCE_API_CONF
+ iniset $GLANCE_API_CONF DEFAULT debug True
+ inicomment $GLANCE_API_CONF DEFAULT log_file
+ iniset $GLANCE_API_CONF DEFAULT use_syslog $SYSLOG
+ iniset $GLANCE_API_CONF DEFAULT filesystem_store_datadir $GLANCE_IMAGE_DIR/
+ iniset $GLANCE_API_CONF paste_deploy flavor keystone
- if [[ -e $FILES/glance-api-paste.ini ]]; then
- GLANCE_API_PASTE_INI=$GLANCE_CONF_DIR/glance-api-paste.ini
- cp $FILES/glance-api-paste.ini $GLANCE_API_PASTE_INI
- glance_config $GLANCE_API_PASTE_INI
- fi
+ GLANCE_API_PASTE_INI=$GLANCE_CONF_DIR/glance-api-paste.ini
+ cp $GLANCE_DIR/etc/glance-api-paste.ini $GLANCE_API_PASTE_INI
+ iniset $GLANCE_API_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST
+ iniset $GLANCE_API_PASTE_INI filter:authtoken auth_port $KEYSTONE_AUTH_PORT
+ iniset $GLANCE_API_PASTE_INI filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
+ iniset $GLANCE_API_PASTE_INI filter:authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/
+ iniset $GLANCE_API_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME
+ iniset $GLANCE_API_PASTE_INI filter:authtoken admin_user glance
+ iniset $GLANCE_API_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD
fi
# Quantum
@@ -1143,39 +1144,39 @@
# changing the permissions so we can run it as our user.
USER_GROUP=$(id -g)
- sudo mkdir -p ${SWIFT_DATA_LOCATION}/drives
- sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_LOCATION}
+ sudo mkdir -p ${SWIFT_DATA_DIR}/drives
+ sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_DIR}
# We then create a loopback disk and format it to XFS.
# TODO: Reset disks on new pass.
- if [[ ! -e ${SWIFT_DATA_LOCATION}/drives/images/swift.img ]]; then
- mkdir -p ${SWIFT_DATA_LOCATION}/drives/images
- sudo touch ${SWIFT_DATA_LOCATION}/drives/images/swift.img
- sudo chown $USER: ${SWIFT_DATA_LOCATION}/drives/images/swift.img
+ if [[ ! -e ${SWIFT_DATA_DIR}/drives/images/swift.img ]]; then
+ mkdir -p ${SWIFT_DATA_DIR}/drives/images
+ sudo touch ${SWIFT_DATA_DIR}/drives/images/swift.img
+ sudo chown $USER: ${SWIFT_DATA_DIR}/drives/images/swift.img
- dd if=/dev/zero of=${SWIFT_DATA_LOCATION}/drives/images/swift.img \
+ dd if=/dev/zero of=${SWIFT_DATA_DIR}/drives/images/swift.img \
bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE}
- mkfs.xfs -f -i size=1024 ${SWIFT_DATA_LOCATION}/drives/images/swift.img
+ mkfs.xfs -f -i size=1024 ${SWIFT_DATA_DIR}/drives/images/swift.img
fi
# After the drive being created we mount the disk with a few mount
# options to make it most efficient as possible for swift.
- mkdir -p ${SWIFT_DATA_LOCATION}/drives/sdb1
- if ! egrep -q ${SWIFT_DATA_LOCATION}/drives/sdb1 /proc/mounts; then
+ mkdir -p ${SWIFT_DATA_DIR}/drives/sdb1
+ if ! egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then
sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8 \
- ${SWIFT_DATA_LOCATION}/drives/images/swift.img ${SWIFT_DATA_LOCATION}/drives/sdb1
+ ${SWIFT_DATA_DIR}/drives/images/swift.img ${SWIFT_DATA_DIR}/drives/sdb1
fi
# We then create link to that mounted location so swift would know
# where to go.
for x in $(seq ${SWIFT_REPLICAS}); do
- sudo ln -sf ${SWIFT_DATA_LOCATION}/drives/sdb1/$x ${SWIFT_DATA_LOCATION}/$x; done
+ sudo ln -sf ${SWIFT_DATA_DIR}/drives/sdb1/$x ${SWIFT_DATA_DIR}/$x; done
# We now have to emulate a few different servers into one we
# create all the directories needed for swift
for x in $(seq ${SWIFT_REPLICAS}); do
- drive=${SWIFT_DATA_LOCATION}/drives/sdb1/${x}
- node=${SWIFT_DATA_LOCATION}/${x}/node
+ drive=${SWIFT_DATA_DIR}/drives/sdb1/${x}
+ node=${SWIFT_DATA_DIR}/${x}/node
node_device=${node}/sdb1
[[ -d $node ]] && continue
[[ -d $drive ]] && continue
@@ -1184,17 +1185,23 @@
sudo chown -R $USER: ${node}
done
- sudo mkdir -p ${SWIFT_CONFIG_LOCATION}/{object,container,account}-server /var/run/swift
- sudo chown -R $USER: ${SWIFT_CONFIG_LOCATION} /var/run/swift
+ sudo mkdir -p ${SWIFT_CONFIG_DIR}/{object,container,account}-server /var/run/swift
+ sudo chown -R $USER: ${SWIFT_CONFIG_DIR} /var/run/swift
- # swift-init has a bug using /etc/swift until bug #885595 is fixed
- # we have to create a link
- sudo ln -sf ${SWIFT_CONFIG_LOCATION} /etc/swift
+ if [[ "$SWIFT_CONFIG_DIR" != "/etc/swift" ]]; then
+    # Some swift tools are hard-coded to use /etc/swift and are apparently not going to be fixed.
+ # Create a symlink if the config dir is moved
+ sudo ln -sf ${SWIFT_CONFIG_DIR} /etc/swift
+ fi
- # Swift use rsync to syncronize between all the different
- # partitions (which make more sense when you have a multi-node
- # setup) we configure it with our version of rsync.
- sed -e "s/%GROUP%/${USER_GROUP}/;s/%USER%/$USER/;s,%SWIFT_DATA_LOCATION%,$SWIFT_DATA_LOCATION," $FILES/swift/rsyncd.conf | sudo tee /etc/rsyncd.conf
+    # Swift uses rsync to synchronize between all the different
+    # partitions (which makes more sense when you have a multi-node
+    # setup); we configure it with our version of rsync.
+ sed -e "
+ s/%GROUP%/${USER_GROUP}/;
+ s/%USER%/$USER/;
+ s,%SWIFT_DATA_DIR%,$SWIFT_DATA_DIR,;
+ " $FILES/swift/rsyncd.conf | sudo tee /etc/rsyncd.conf
sudo sed -i '/^RSYNC_ENABLE=false/ { s/false/true/ }' /etc/default/rsync
# By default Swift will be installed with the tempauth middleware
@@ -1209,7 +1216,7 @@
# We do the install of the proxy-server and swift configuration
# replacing a few directives to match our configuration.
sed -e "
- s,%SWIFT_CONFIG_LOCATION%,${SWIFT_CONFIG_LOCATION},g;
+ s,%SWIFT_CONFIG_DIR%,${SWIFT_CONFIG_DIR},g;
s,%USER%,$USER,g;
s,%SERVICE_TENANT_NAME%,$SERVICE_TENANT_NAME,g;
s,%SERVICE_USERNAME%,swift,g;
@@ -1224,35 +1231,40 @@
s,%KEYSTONE_AUTH_PROTOCOL%,${KEYSTONE_AUTH_PROTOCOL},g;
s/%AUTH_SERVER%/${swift_auth_server}/g;
" $FILES/swift/proxy-server.conf | \
- sudo tee ${SWIFT_CONFIG_LOCATION}/proxy-server.conf
+ sudo tee ${SWIFT_CONFIG_DIR}/proxy-server.conf
- sed -e "s/%SWIFT_HASH%/$SWIFT_HASH/" $FILES/swift/swift.conf > ${SWIFT_CONFIG_LOCATION}/swift.conf
+ sed -e "s/%SWIFT_HASH%/$SWIFT_HASH/" $FILES/swift/swift.conf > ${SWIFT_CONFIG_DIR}/swift.conf
- # We need to generate a object/account/proxy configuration
- # emulating 4 nodes on different ports we have a little function
- # that help us doing that.
- function generate_swift_configuration() {
- local server_type=$1
- local bind_port=$2
- local log_facility=$3
- local node_number
+    # We need to generate an object/account/proxy configuration
+    # emulating 4 nodes on different ports; we have a little function
+    # that helps us do that.
+ function generate_swift_configuration() {
+ local server_type=$1
+ local bind_port=$2
+ local log_facility=$3
+ local node_number
- for node_number in $(seq ${SWIFT_REPLICAS}); do
- node_path=${SWIFT_DATA_LOCATION}/${node_number}
- sed -e "s,%SWIFT_CONFIG_LOCATION%,${SWIFT_CONFIG_LOCATION},;s,%USER%,$USER,;s,%NODE_PATH%,${node_path},;s,%BIND_PORT%,${bind_port},;s,%LOG_FACILITY%,${log_facility}," \
- $FILES/swift/${server_type}-server.conf > ${SWIFT_CONFIG_LOCATION}/${server_type}-server/${node_number}.conf
- bind_port=$(( ${bind_port} + 10 ))
- log_facility=$(( ${log_facility} + 1 ))
- done
- }
- generate_swift_configuration object 6010 2
- generate_swift_configuration container 6011 2
- generate_swift_configuration account 6012 2
+ for node_number in $(seq ${SWIFT_REPLICAS}); do
+ node_path=${SWIFT_DATA_DIR}/${node_number}
+ sed -e "
+ s,%SWIFT_CONFIG_DIR%,${SWIFT_CONFIG_DIR},;
+ s,%USER%,$USER,;
+ s,%NODE_PATH%,${node_path},;
+ s,%BIND_PORT%,${bind_port},;
+ s,%LOG_FACILITY%,${log_facility},
+ " $FILES/swift/${server_type}-server.conf > ${SWIFT_CONFIG_DIR}/${server_type}-server/${node_number}.conf
+ bind_port=$(( ${bind_port} + 10 ))
+ log_facility=$(( ${log_facility} + 1 ))
+ done
+ }
+ generate_swift_configuration object 6010 2
+ generate_swift_configuration container 6011 2
+ generate_swift_configuration account 6012 2
# We have some specific configuration for swift for rsyslog. See
# the file /etc/rsyslog.d/10-swift.conf for more info.
- swift_log_dir=${SWIFT_DATA_LOCATION}/logs
+ swift_log_dir=${SWIFT_DATA_DIR}/logs
rm -rf ${swift_log_dir}
mkdir -p ${swift_log_dir}/hourly
sudo chown -R syslog:adm ${swift_log_dir}
@@ -1262,7 +1274,7 @@
# This is where we create three different rings for swift with
# different object servers binding on different ports.
- pushd ${SWIFT_CONFIG_LOCATION} >/dev/null && {
+ pushd ${SWIFT_CONFIG_DIR} >/dev/null && {
rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz
@@ -1525,16 +1537,42 @@
mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS keystone;'
mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE keystone CHARACTER SET utf8;'
- # Configure keystone.conf
- KEYSTONE_CONF=$KEYSTONE_DIR/etc/keystone.conf
- cp $FILES/keystone.conf $KEYSTONE_CONF
- sudo sed -e "s,%SQL_CONN%,$BASE_SQL_CONN/keystone?charset=utf8,g" -i $KEYSTONE_CONF
- sudo sed -e "s,%DEST%,$DEST,g" -i $KEYSTONE_CONF
- sudo sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $KEYSTONE_CONF
- sudo sed -e "s,%KEYSTONE_DIR%,$KEYSTONE_DIR,g" -i $KEYSTONE_CONF
+ KEYSTONE_CONF_DIR=${KEYSTONE_CONF_DIR:-/etc/keystone}
+ KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf
+ KEYSTONE_CATALOG=$KEYSTONE_CONF_DIR/default_catalog.templates
- KEYSTONE_CATALOG=$KEYSTONE_DIR/etc/default_catalog.templates
- cp $FILES/default_catalog.templates $KEYSTONE_CATALOG
+ if [[ ! -d $KEYSTONE_CONF_DIR ]]; then
+ sudo mkdir -p $KEYSTONE_CONF_DIR
+ sudo chown `whoami` $KEYSTONE_CONF_DIR
+ fi
+
+ if [[ "$KEYSTONE_CONF_DIR" != "$KEYSTONE_DIR/etc" ]]; then
+ # FIXME(dtroyer): etc/keystone.conf causes trouble if the config files
+ # are located anywhere else (say, /etc/keystone).
+ # LP 966670 fixes this in keystone, we fix it
+ # here until the bug fix is committed.
+ if [[ -r $KEYSTONE_DIR/etc/keystone.conf ]]; then
+ # Get the sample config file out of the way
+ mv $KEYSTONE_DIR/etc/keystone.conf $KEYSTONE_DIR/etc/keystone.conf.sample
+ fi
+ cp -p $KEYSTONE_DIR/etc/keystone.conf.sample $KEYSTONE_CONF
+ cp -p $KEYSTONE_DIR/etc/policy.json $KEYSTONE_CONF_DIR
+ fi
+ cp -p $FILES/default_catalog.templates $KEYSTONE_CATALOG
+
+ # Rewrite stock keystone.conf:
+ iniset $KEYSTONE_CONF DEFAULT admin_token "$SERVICE_TOKEN"
+ iniset $KEYSTONE_CONF sql connection "$BASE_SQL_CONN/keystone?charset=utf8"
+ iniset $KEYSTONE_CONF catalog template_file "$KEYSTONE_CATALOG"
+ iniset $KEYSTONE_CONF ec2 driver "keystone.contrib.ec2.backends.sql.Ec2"
+ # Configure keystone.conf to use templates
+ iniset $KEYSTONE_CONF catalog driver "keystone.catalog.backends.templated.TemplatedCatalog"
+ iniset $KEYSTONE_CONF catalog template_file "$KEYSTONE_CATALOG"
+ sed -e "
+ /^pipeline.*ec2_extension crud_/s|ec2_extension crud_extension|ec2_extension s3_extension crud_extension|;
+ " -i $KEYSTONE_CONF
+ # Append the S3 bits
+ iniset $KEYSTONE_CONF filter:s3_extension paste.filter_factory "keystone.contrib.s3:S3Extension.factory"
# Add swift endpoints to service catalog if swift is enabled
if is_service_enabled swift; then
@@ -1552,34 +1590,32 @@
echo "catalog.RegionOne.network.name = Quantum Service" >> $KEYSTONE_CATALOG
fi
- sudo sed -e "s,%SERVICE_HOST%,$SERVICE_HOST,g" -i $KEYSTONE_CATALOG
+ sudo sed -e "
+ s,%SERVICE_HOST%,$SERVICE_HOST,g;
+ s,%S3_SERVICE_PORT%,$S3_SERVICE_PORT,g;
+ " -i $KEYSTONE_CATALOG
- sudo sed -e "s,%S3_SERVICE_PORT%,$S3_SERVICE_PORT,g" -i $KEYSTONE_CATALOG
-
+ # Set up logging
+ LOGGING_ROOT="devel"
if [ "$SYSLOG" != "False" ]; then
- cp $KEYSTONE_DIR/etc/logging.conf.sample $KEYSTONE_DIR/etc/logging.conf
- sed -i -e '/^handlers=devel$/s/=devel/=production/' \
- $KEYSTONE_DIR/etc/logging.conf
- sed -i -e "/^log_file/s/log_file/\#log_file/" \
- $KEYSTONE_DIR/etc/keystone.conf
- KEYSTONE_LOG_CONFIG="--log-config $KEYSTONE_DIR/etc/logging.conf"
+ LOGGING_ROOT="$LOGGING_ROOT,production"
fi
-fi
+ KEYSTONE_LOG_CONFIG="--log-config $KEYSTONE_CONF_DIR/logging.conf"
+ cp $KEYSTONE_DIR/etc/logging.conf.sample $KEYSTONE_CONF_DIR/logging.conf
+ iniset $KEYSTONE_CONF_DIR/logging.conf logger_root level "DEBUG"
+ iniset $KEYSTONE_CONF_DIR/logging.conf logger_root handlers "$LOGGING_ROOT"
-# launch the keystone and wait for it to answer before continuing
-if is_service_enabled key; then
+ # initialize keystone database
+ $KEYSTONE_DIR/bin/keystone-manage db_sync
+
+ # launch keystone and wait for it to answer before continuing
screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d --debug"
echo "Waiting for keystone to start..."
- if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_API_PORT/v2.0/; do sleep 1; done"; then
+ if ! timeout $SERVICE_TIMEOUT sh -c "while http_proxy= wget -O- $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_API_PORT/v2.0/ 2>&1 | grep -q 'refused'; do sleep 1; done"; then
echo "keystone did not start"
exit 1
fi
- # initialize keystone with default users/endpoints
- pushd $KEYSTONE_DIR
- $KEYSTONE_DIR/bin/keystone-manage db_sync
- popd
-
# keystone_data.sh creates services, admin and demo users, and roles.
SERVICE_ENDPOINT=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0
ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES \
@@ -1634,7 +1670,7 @@
screen_it n-xvnc "cd $NOVA_DIR && ./bin/nova-xvpvncproxy --config-file $NOVA_CONF_DIR/$NOVA_CONF"
screen_it n-cauth "cd $NOVA_DIR && ./bin/nova-consoleauth"
screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/apache2/error.log"
-screen_it swift "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONFIG_LOCATION}/proxy-server.conf -v"
+screen_it swift "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONFIG_DIR}/proxy-server.conf -v"
# Starting the nova-objectstore only if swift service is not enabled.
# Swift will act as s3 objectstore.
diff --git a/tests/functions.sh b/tests/functions.sh
index 931cde8..e7fbe0c 100755
--- a/tests/functions.sh
+++ b/tests/functions.sh
@@ -98,7 +98,7 @@
VAL=$(iniget test.ini zzz handlers)
if [[ -z "$VAL" ]]; then
- echo "OK"
+ echo "OK: zzz not present"
else
echo "iniget failed: $VAL"
fi
@@ -106,13 +106,31 @@
iniset test.ini zzz handlers "999"
VAL=$(iniget test.ini zzz handlers)
-if [[ -z "$VAL" ]]; then
- echo "OK"
+if [[ -n "$VAL" ]]; then
+ echo "OK: zzz present"
else
echo "iniget failed: $VAL"
fi
+# Test option not exist
+
+VAL=$(iniget test.ini aaa debug)
+if [[ -z "$VAL" ]]; then
+ echo "OK aaa.debug not present"
+else
+ echo "iniget failed: $VAL"
+fi
+
+iniset test.ini aaa debug "999"
+
+VAL=$(iniget test.ini aaa debug)
+if [[ -n "$VAL" ]]; then
+ echo "OK aaa.debug present"
+else
+ echo "iniget failed: $VAL"
+fi
+
# Test comments
inicomment test.ini aaa handlers
diff --git a/tools/configure_tempest.sh b/tools/configure_tempest.sh
index 9b25b7e..01849ad 100755
--- a/tools/configure_tempest.sh
+++ b/tools/configure_tempest.sh
@@ -125,22 +125,75 @@
IMAGE_UUID=$(echo $IMAGE_UUID)
fi
-# Create tempest.conf from tempest.conf.sample
+# Create tempest.conf from tempest.conf.tpl
if [[ ! -r $TEMPEST_CONF ]]; then
- cp $TEMPEST_CONF.sample $TEMPEST_CONF
+ cp $TEMPEST_CONF.tpl $TEMPEST_CONF
fi
+IDENTITY_USE_SSL=${IDENTITY_USE_SSL:-False}
+IDENTITY_PORT=${IDENTITY_PORT:-5000}
+IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-v2.0} # Note: need v for now...
+# TODO(jaypipes): This is dumb and needs to be removed
+# from the Tempest configuration file entirely...
+IDENTITY_PATH=${IDENTITY_PATH:-tokens}
+IDENTITY_STRATEGY=${IDENTITY_STRATEGY:-keystone}
+
+# We use regular, non-admin users in Tempest for the USERNAME
+# substitutions and use ADMIN_USERNAME et al for the admin stuff.
+# OS_USERNAME et all should be defined in openrc.
+OS_USERNAME=${OS_USERNAME:-demo}
+OS_TENANT_NAME=${OS_TENANT_NAME:-demo}
+OS_PASSWORD=${OS_PASSWORD:-secrete}
+
+# TODO(jaypipes): Support multiple regular user accounts instead
+# of using the same regular user account for the alternate user...
+ALT_USERNAME=$OS_USERNAME
+ALT_PASSWORD=$OS_PASSWORD
+ALT_TENANT_NAME=$OS_TENANT_NAME
+
+# TODO(jaypipes): Support multiple images instead of plopping
+# the IMAGE_UUID into both the image_ref and image_ref_alt slots
+IMAGE_UUID_ALT=$IMAGE_UUID
+
+# TODO(jaypipes): Support configurable flavor refs here...
+FLAVOR_REF=1
+FLAVOR_REF_ALT=2
+
+ADMIN_USERNAME=${ADMIN_USERNAME:-admin}
+ADMIN_PASSWORD=${ADMIN_PASSWORD:-secrete}
+ADMIN_TENANT_NAME=${ADMIN_TENANT:-admin}
+
+# Do any of the following need to be configurable?
+COMPUTE_CATALOG_TYPE=compute
+COMPUTE_CREATE_IMAGE_ENABLED=True
+COMPUTE_RESIZE_AVAILABLE=True
+COMPUTE_LOG_LEVEL=ERROR
+
sed -e "
- /^api_key=/s|=.*\$|=$ADMIN_PASSWORD|;
- /^auth_url=/s|=.*\$|=${OS_AUTH_URL%/}/|;
- /^host=/s|=.*\$|=$HOST_IP|;
- /^image_ref=/s|=.*\$|=$IMAGE_UUID|;
- /^password=/s|=.*\$|=$ADMIN_PASSWORD|;
- /^tenant=/s|=.*\$|=$TENANT|;
- /^tenant_name=/s|=.*\$|=$TENANT|;
- /^user=/s|=.*\$|=$USERNAME|;
- /^username=/s|=.*\$|=$USERNAME|;
+ s,%IDENTITY_USE_SSL%,$IDENTITY_USE_SSL,g;
+ s,%IDENTITY_HOST%,$HOST_IP,g;
+ s,%IDENTITY_PORT%,$IDENTITY_PORT,g;
+ s,%IDENTITY_API_VERSION%,$IDENTITY_API_VERSION,g;
+ s,%IDENTITY_PATH%,$IDENTITY_PATH,g;
+ s,%IDENTITY_STRATEGY%,$IDENTITY_STRATEGY,g;
+ s,%USERNAME%,$OS_USERNAME,g;
+ s,%PASSWORD%,$OS_PASSWORD,g;
+ s,%TENANT_NAME%,$OS_TENANT_NAME,g;
+ s,%ALT_USERNAME%,$ALT_USERNAME,g;
+ s,%ALT_PASSWORD%,$ALT_PASSWORD,g;
+ s,%ALT_TENANT_NAME%,$ALT_TENANT_NAME,g;
+ s,%COMPUTE_CATALOG_TYPE%,$COMPUTE_CATALOG_TYPE,g;
+ s,%COMPUTE_CREATE_IMAGE_ENABLED%,$COMPUTE_CREATE_IMAGE_ENABLED,g;
+ s,%COMPUTE_RESIZE_AVAILABLE%,$COMPUTE_RESIZE_AVAILABLE,g;
+ s,%COMPUTE_LOG_LEVEL%,$COMPUTE_LOG_LEVEL,g;
+ s,%IMAGE_ID%,$IMAGE_UUID,g;
+ s,%IMAGE_ID_ALT%,$IMAGE_UUID_ALT,g;
+ s,%FLAVOR_REF%,$FLAVOR_REF,g;
+ s,%FLAVOR_REF_ALT%,$FLAVOR_REF_ALT,g;
+ s,%ADMIN_USERNAME%,$ADMIN_USERNAME,g;
+ s,%ADMIN_PASSWORD%,$ADMIN_PASSWORD,g;
+ s,%ADMIN_TENANT_NAME%,$ADMIN_TENANT_NAME,g;
" -i $TEMPEST_CONF
# Create config.ini