Merge "Fixes for heat keystone registration."
diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh
index 8a4f9c1..adc3393 100755
--- a/exercises/aggregates.sh
+++ b/exercises/aggregates.sh
@@ -125,16 +125,16 @@
if [ "$VIRT_DRIVER" == "xenserver" ]; then
echo "TODO(johngarbutt) add tests for add/remove host from pool aggregate"
fi
-HOST=`nova host-list | grep compute | get_field 1`
+FIRST_HOST=`nova host-list | grep compute | get_field 1 | head -1`
# Make sure can add two aggregates to same host
-nova aggregate-add-host $AGGREGATE_ID $HOST
-nova aggregate-add-host $AGGREGATE2_ID $HOST
-if nova aggregate-add-host $AGGREGATE2_ID $HOST; then
+nova aggregate-add-host $AGGREGATE_ID $FIRST_HOST
+nova aggregate-add-host $AGGREGATE2_ID $FIRST_HOST
+if nova aggregate-add-host $AGGREGATE2_ID $FIRST_HOST; then
echo "ERROR could add duplicate host to single aggregate"
exit -1
fi
-nova aggregate-remove-host $AGGREGATE2_ID $HOST
-nova aggregate-remove-host $AGGREGATE_ID $HOST
+nova aggregate-remove-host $AGGREGATE2_ID $FIRST_HOST
+nova aggregate-remove-host $AGGREGATE_ID $FIRST_HOST
# Test aggregate-delete
# =====================
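
The FIRST_HOST pipeline above depends on get_field from DevStack's functions file. Assuming get_field extracts the Nth |-delimited column of the CLI table output (an assumption, not something this patch defines), a minimal stand-in for the whole pipeline would be:

    # hypothetical helper, for illustration only
    first_compute_host() {
        nova host-list | grep compute | awk -F'|' '{ gsub(/^ +| +$/, "", $2); print $2 }' | head -1
    }
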
diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh
index 7fe81ba..c967e39 100755
--- a/exercises/boot_from_volume.sh
+++ b/exercises/boot_from_volume.sh
@@ -49,6 +49,10 @@
# Default user
DEFAULT_INSTANCE_USER=${DEFAULT_INSTANCE_USER:-cirros}
+# Security group name
+SECGROUP=${SECGROUP:-boot_secgroup}
+
+
# Launching servers
# =================
@@ -72,7 +76,6 @@
fi
# Configure Security Groups
-SECGROUP=${SECGROUP:-test_secgroup}
nova secgroup-delete $SECGROUP || true
nova secgroup-create $SECGROUP "$SECGROUP description"
nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0
@@ -246,8 +249,8 @@
die "Failure deleting instance $INSTANCE_NAME"
# Wait for termination
-if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $INSTANCE_NAME; do sleep 1; done"; then
- echo "server didn't terminate!"
+if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then
+ echo "Server $NAME not deleted"
exit 1
fi
@@ -256,8 +259,7 @@
die "Failure deleting floating IP $FLOATING_IP"
# Delete a secgroup
-nova secgroup-delete $SECGROUP || \
- die "Failure deleting security group $SECGROUP"
+nova secgroup-delete $SECGROUP || die "Failure deleting security group $SECGROUP"
set +o xtrace
echo "*********************************************************************"
diff --git a/exercises/euca.sh b/exercises/euca.sh
index 9f7aed1..fb052dd 100755
--- a/exercises/euca.sh
+++ b/exercises/euca.sh
@@ -43,6 +43,9 @@
# Boot this image, use first AMI-format image if unset
DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami}
+# Security group name
+SECGROUP=${SECGROUP:-euca_secgroup}
+
# Launching a server
# ==================
@@ -50,9 +53,6 @@
# Find a machine image to boot
IMAGE=`euca-describe-images | grep machine | grep ${DEFAULT_IMAGE_NAME} | cut -f2 | head -n1`
-# Define secgroup
-SECGROUP=euca_secgroup
-
# Add a secgroup
if ! euca-describe-groups | grep -q $SECGROUP; then
euca-add-group -d "$SECGROUP description" $SECGROUP
@@ -119,14 +119,13 @@
die "Failure terminating instance $INSTANCE"
# Assure it has terminated within a reasonable time
-if ! timeout $TERMINATE_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | grep -q running; do sleep 1; done"; then
+if ! timeout $TERMINATE_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | grep -q $INSTANCE; do sleep 1; done"; then
echo "server didn't terminate within $TERMINATE_TIMEOUT seconds"
exit 1
fi
# Delete group
-euca-delete-group $SECGROUP || \
- die "Failure deleting security group $SECGROUP"
+euca-delete-group $SECGROUP || die "Failure deleting security group $SECGROUP"
set +o xtrace
echo "*********************************************************************"
diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh
index 02259c0..77f020e 100755
--- a/exercises/floating_ips.sh
+++ b/exercises/floating_ips.sh
@@ -200,12 +200,12 @@
# Delete second floating IP
nova floating-ip-delete $TEST_FLOATING_IP || die "Failure deleting floating IP $TEST_FLOATING_IP"
-# shutdown the server
+# Shutdown the server
nova delete $VM_UUID || die "Failure deleting instance $NAME"
-# make sure the VM shuts down within a reasonable time
-if ! timeout $TERMINATE_TIMEOUT sh -c "while nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then
- echo "server didn't shut down!"
+# Wait for termination
+if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then
+ echo "Server $NAME not deleted"
exit 1
fi
diff --git a/exercises/volumes.sh b/exercises/volumes.sh
index 0f25355..5db10d3 100755
--- a/exercises/volumes.sh
+++ b/exercises/volumes.sh
@@ -43,6 +43,9 @@
# Boot this image, use first AMi image if unset
DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami}
+# Security group name
+SECGROUP=${SECGROUP:-vol_secgroup}
+
# Launching a server
# ==================
@@ -62,6 +65,25 @@
# Grab the id of the image to launch
IMAGE=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
+# Security Groups
+# ---------------
+
+# List of secgroups:
+nova secgroup-list
+
+# Create a secgroup
+if ! nova secgroup-list | grep -q $SECGROUP; then
+ nova secgroup-create $SECGROUP "$SECGROUP description"
+ if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list | grep -q $SECGROUP; do sleep 1; done"; then
+ echo "Security group not created"
+ exit 1
+ fi
+fi
+
+# Configure Security Group Rules
+nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0
+nova secgroup-add-rule $SECGROUP tcp 22 22 0.0.0.0/0
+
# determinine instance type
# -------------------------
@@ -171,8 +193,17 @@
exit 1
fi
-# shutdown the server
-nova delete $NAME || die "Failure deleting instance $NAME"
+# Shutdown the server
+nova delete $VM_UUID || die "Failure deleting instance $NAME"
+
+# Wait for termination
+if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then
+ echo "Server $NAME not deleted"
+ exit 1
+fi
+
+# Delete a secgroup
+nova secgroup-delete $SECGROUP || die "Failure deleting security group $SECGROUP"
set +o xtrace
echo "*********************************************************************"
diff --git a/functions b/functions
index af154b0..fa7c805 100644
--- a/functions
+++ b/functions
@@ -585,6 +585,52 @@
}
+# Helper to launch a service in a named screen
+# screen_it service "command-line"
+function screen_it {
+ NL=`echo -ne '\015'`
+ SCREEN_NAME=${SCREEN_NAME:-stack}
+ if is_service_enabled $1; then
+ # Append the service to the screen rc file
+ screen_rc "$1" "$2"
+
+ screen -S $SCREEN_NAME -X screen -t $1
+ # sleep to allow bash to be ready to receive the command - we are
+ # creating a new window in screen and then sending characters, so if
+ # bash isn't running by the time we send the command, nothing happens
+ sleep 1.5
+
+ if [[ -n ${SCREEN_LOGDIR} ]]; then
+ screen -S $SCREEN_NAME -p $1 -X logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log
+ screen -S $SCREEN_NAME -p $1 -X log on
+ ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log
+ fi
+ screen -S $SCREEN_NAME -p $1 -X stuff "$2$NL"
+ fi
+}
+
+
+# Screen rc file builder
+# screen_rc service "command-line"
+function screen_rc {
+ SCREEN_NAME=${SCREEN_NAME:-stack}
+ SCREENRC=$TOP_DIR/$SCREEN_NAME-screenrc
+ if [[ ! -e $SCREENRC ]]; then
+ # Name the screen session
+ echo "sessionname $SCREEN_NAME" > $SCREENRC
+ # Set a reasonable statusbar
+ echo "hardstatus alwayslastline '$SCREEN_HARDSTATUS'" >> $SCREENRC
+ echo "screen -t shell bash" >> $SCREENRC
+ fi
+ # If this service doesn't already exist in the screenrc file
+ if ! grep $1 $SCREENRC 2>&1 > /dev/null; then
+ NL=`echo -ne '\015'`
+ echo "screen -t $1 bash" >> $SCREENRC
+ echo "stuff \"$2$NL\"" >> $SCREENRC
+ fi
+}
+
+
# ``pip install`` the dependencies of the package before ``setup.py develop``
# so pip and not distutils processes the dependency chain
# Uses globals ``TRACK_DEPENDES``, ``*_proxy`
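
A usage sketch for the relocated helpers: the service name and command below are examples, the expanded paths assume the default DEST of /opt/stack, and screen_it only acts if a screen session named $SCREEN_NAME already exists and the service is enabled.

    screen_it n-api "cd $NOVA_DIR && $NOVA_DIR/bin/nova-api"

    # screen_rc also appends roughly the following to $TOP_DIR/$SCREEN_NAME-screenrc:
    #   screen -t n-api bash
    #   stuff "cd /opt/stack/nova && /opt/stack/nova/bin/nova-api^M"
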
diff --git a/lib/n-vol b/lib/n-vol
new file mode 100644
index 0000000..30be0cd
--- /dev/null
+++ b/lib/n-vol
@@ -0,0 +1,118 @@
+# lib/n-vol
+# Install and start Nova volume service
+
+# Dependencies:
+# - functions
+# - KEYSTONE_AUTH_* must be defined
+# - SERVICE_{TENANT_NAME|PASSWORD} must be defined
+
+# stack.sh
+# ---------
+# install_nvol
+# configure_nvol
+# init_nvol
+# start_nvol
+# stop_nvol
+# cleanup_nvol
+
+# Print the commands being run so that we can see the command that triggers
+# an error. It is also useful for following along as the install occurs.
+set -o xtrace
+
+
+# Defaults
+# --------
+
+# Name of the LVM volume group to use/create for iscsi volumes
+VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes}
+VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-}
+
+
+# cleanup_nvol() - Remove residual data files, anything left over from previous
+# runs that a clean run would need to clean up
+function cleanup_nvol() {
+ # kill instances (nova)
+ # delete image files (glance)
+ # This function intentionally left blank
+ :
+}
+
+# configure_nvol() - Set config files, create data dirs, etc
+function configure_nvol() {
+ # sudo python setup.py deploy
+ # iniset $XXX_CONF ...
+ # This function intentionally left blank
+ :
+}
+
+# init_nvol() - Initialize databases, etc.
+function init_nvol() {
+ # Configure a default volume group called ``stack-volumes`` for the volume
+ # service if it does not yet exist. If you don't wish to use a file backed
+ # volume group, create your own volume group called ``stack-volumes`` before
+ # invoking ``stack.sh``.
+ #
+ # By default, the backing file is 5G in size, and is stored in ``/opt/stack/data``.
+
+ if ! sudo vgs $VOLUME_GROUP; then
+ VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DATA_DIR/${VOLUME_GROUP}-backing-file}
+ # Only create if the file doesn't already exist
+ [[ -f $VOLUME_BACKING_FILE ]] || truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE
+ DEV=`sudo losetup -f --show $VOLUME_BACKING_FILE`
+ # Only create if the loopback device doesn't contain $VOLUME_GROUP
+ if ! sudo vgs $VOLUME_GROUP; then sudo vgcreate $VOLUME_GROUP $DEV; fi
+ fi
+
+ mkdir -p $NOVA_DIR/volumes
+
+ if sudo vgs $VOLUME_GROUP; then
+ if [[ "$os_PACKAGE" = "rpm" ]]; then
+ # RPM doesn't start the service
+ start_service tgtd
+ fi
+
+ # Remove nova iscsi targets
+ sudo tgtadm --op show --mode target | grep $VOLUME_NAME_PREFIX | grep Target | cut -f3 -d ' ' | sudo xargs -n1 tgt-admin --delete || true
+ # Clean out existing volumes
+ for lv in `sudo lvs --noheadings -o lv_name $VOLUME_GROUP`; do
+ # ``VOLUME_NAME_PREFIX`` prefixes the LVs we want
+ if [[ "${lv#$VOLUME_NAME_PREFIX}" != "$lv" ]]; then
+ sudo lvremove -f $VOLUME_GROUP/$lv
+ fi
+ done
+ fi
+}
+
+# install_nvol() - Collect source and prepare
+function install_nvol() {
+ # git clone xxx
+ # Install is handled when installing Nova
+ :
+}
+
+# start_nvol() - Start running processes, including screen
+function start_nvol() {
+ # Setup the tgt configuration file
+ if [[ ! -f /etc/tgt/conf.d/nova.conf ]]; then
+ sudo mkdir -p /etc/tgt/conf.d
+ echo "include $NOVA_DIR/volumes/*" | sudo tee /etc/tgt/conf.d/nova.conf
+ fi
+
+ if [[ "$os_PACKAGE" = "deb" ]]; then
+ # tgt in oneiric doesn't restart properly if tgtd isn't running
+ # do it in two steps
+ sudo stop tgt || true
+ sudo start tgt
+ else
+ restart_service tgtd
+ fi
+
+ screen_it n-vol "cd $NOVA_DIR && $NOVA_DIR/bin/nova-volume"
+}
+
+# stop_nvol() - Stop running processes (non-screen)
+function stop_nvol() {
+ # FIXME(dtroyer): stop only the n-vol screen window?
+
+ stop_service tgt
+}
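
For orientation, the calling sequence stack.sh uses with this patch; only init_nvol and start_nvol are wired up below, and the remaining entry points listed in the file header are placeholders kept for consistency with the other lib/* files:

    source $TOP_DIR/lib/n-vol
    if is_service_enabled n-vol; then
        init_nvol     # backing file, volume group, iscsi target/LV cleanup
        start_nvol    # tgt config, then screen_it n-vol ...
    fi
    # teardown (stop_nvol, cleanup_nvol) is not called anywhere in this patch
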
diff --git a/stack.sh b/stack.sh
index fd6e9ee..d51a135 100755
--- a/stack.sh
+++ b/stack.sh
@@ -134,8 +134,9 @@
exit 1
fi
+SCREEN_NAME=${SCREEN_NAME:-stack}
# Check to see if we are already running DevStack
-if type -p screen >/dev/null && screen -ls | egrep -q "[0-9].stack"; then
+if type -p screen >/dev/null && screen -ls | egrep -q "[0-9].$SCREEN_NAME"; then
echo "You are already running a stack.sh session."
echo "To rejoin this session type 'screen -x stack'."
echo "To destroy this session, type './unstack.sh'."
@@ -266,6 +267,7 @@
# Get project function libraries
source $TOP_DIR/lib/cinder
+source $TOP_DIR/lib/n-vol
source $TOP_DIR/lib/ceilometer
source $TOP_DIR/lib/heat
@@ -976,51 +978,11 @@
SCREEN_HARDSTATUS='%{= .} %-Lw%{= .}%> %n%f %t*%{= .}%+Lw%< %-=%{g}(%{d}%H/%l%{g})'
fi
-# Our screenrc file builder
-function screen_rc {
- SCREENRC=$TOP_DIR/stack-screenrc
- if [[ ! -e $SCREENRC ]]; then
- # Name the screen session
- echo "sessionname stack" > $SCREENRC
- # Set a reasonable statusbar
- echo "hardstatus alwayslastline '$SCREEN_HARDSTATUS'" >> $SCREENRC
- echo "screen -t stack bash" >> $SCREENRC
- fi
- # If this service doesn't already exist in the screenrc file
- if ! grep $1 $SCREENRC 2>&1 > /dev/null; then
- NL=`echo -ne '\015'`
- echo "screen -t $1 bash" >> $SCREENRC
- echo "stuff \"$2$NL\"" >> $SCREENRC
- fi
-}
-
-# Our screen helper to launch a service in a hidden named screen
-function screen_it {
- NL=`echo -ne '\015'`
- if is_service_enabled $1; then
- # Append the service to the screen rc file
- screen_rc "$1" "$2"
-
- screen -S stack -X screen -t $1
- # sleep to allow bash to be ready to be send the command - we are
- # creating a new window in screen and then sends characters, so if
- # bash isn't running by the time we send the command, nothing happens
- sleep 1.5
-
- if [[ -n ${SCREEN_LOGDIR} ]]; then
- screen -S stack -p $1 -X logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log
- screen -S stack -p $1 -X log on
- ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log
- fi
- screen -S stack -p $1 -X stuff "$2$NL"
- fi
-}
-
# Create a new named screen to run processes in
-screen -d -m -S stack -t stack -s /bin/bash
+screen -d -m -S $SCREEN_NAME -t shell -s /bin/bash
sleep 1
# Set a reasonable statusbar
-screen -r stack -X hardstatus alwayslastline "$SCREEN_HARDSTATUS"
+screen -r $SCREEN_NAME -X hardstatus alwayslastline "$SCREEN_HARDSTATUS"
# Horizon
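
With the session name parameterized above, rejoining a non-default session looks like the following (the name is an example); note the rejoin hint printed near the top of stack.sh still hard-codes 'screen -x stack'.

    SCREEN_NAME=devstack-test ./stack.sh
    screen -x devstack-test
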
@@ -1083,13 +1045,17 @@
sudo mkdir -p $GLANCE_CONF_DIR
fi
sudo chown `whoami` $GLANCE_CONF_DIR
+
GLANCE_IMAGE_DIR=$DEST/glance/images
# Delete existing images
rm -rf $GLANCE_IMAGE_DIR
-
- # Use local glance directories
mkdir -p $GLANCE_IMAGE_DIR
+ GLANCE_CACHE_DIR=$DEST/glance/cache
+ # Delete the existing image cache
+ rm -rf $GLANCE_CACHE_DIR
+ mkdir -p $GLANCE_CACHE_DIR
+
# (re)create glance database
mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS glance;'
mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE glance CHARACTER SET utf8;'
@@ -1117,7 +1083,8 @@
iniset $GLANCE_API_CONF DEFAULT sql_connection $BASE_SQL_CONN/glance?charset=utf8
iniset $GLANCE_API_CONF DEFAULT use_syslog $SYSLOG
iniset $GLANCE_API_CONF DEFAULT filesystem_store_datadir $GLANCE_IMAGE_DIR/
- iniset $GLANCE_API_CONF paste_deploy flavor keystone
+ iniset $GLANCE_API_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/
+ iniset $GLANCE_API_CONF paste_deploy flavor keystone+cachemanagement
iniset $GLANCE_API_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST
iniset $GLANCE_API_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT
iniset $GLANCE_API_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
@@ -1141,6 +1108,23 @@
GLANCE_API_PASTE_INI=$GLANCE_CONF_DIR/glance-api-paste.ini
cp $GLANCE_DIR/etc/glance-api-paste.ini $GLANCE_API_PASTE_INI
+ GLANCE_CACHE_CONF=$GLANCE_CONF_DIR/glance-cache.conf
+ cp $GLANCE_DIR/etc/glance-cache.conf $GLANCE_CACHE_CONF
+ iniset $GLANCE_CACHE_CONF DEFAULT debug True
+ inicomment $GLANCE_CACHE_CONF DEFAULT log_file
+ iniset $GLANCE_CACHE_CONF DEFAULT use_syslog $SYSLOG
+ iniset $GLANCE_CACHE_CONF DEFAULT filesystem_store_datadir $GLANCE_IMAGE_DIR/
+ iniset $GLANCE_CACHE_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/
+ iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_url
+ iniset $GLANCE_CACHE_CONF DEFAULT auth_url $KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0
+ iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_tenant_name
+ iniset $GLANCE_CACHE_CONF DEFAULT admin_tenant_name $SERVICE_TENANT_NAME
+ iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_user
+ iniset $GLANCE_CACHE_CONF DEFAULT admin_user glance
+ iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_password
+ iniset $GLANCE_CACHE_CONF DEFAULT admin_password $SERVICE_PASSWORD
+
+
GLANCE_POLICY_JSON=$GLANCE_CONF_DIR/policy.json
cp $GLANCE_DIR/etc/policy.json $GLANCE_POLICY_JSON
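
For readers following along, roughly what the edited glance-cache.conf ends up containing under stock DevStack defaults; the paths, endpoint, tenant, and password below are illustrative placeholders, not values taken from this patch:

    # illustrative result only -- actual values come from the environment
    [DEFAULT]
    debug = True
    use_syslog = False
    filesystem_store_datadir = /opt/stack/glance/images/
    image_cache_dir = /opt/stack/glance/cache/
    auth_url = http://127.0.0.1:35357/v2.0
    admin_tenant_name = service
    admin_user = glance
    admin_password = <SERVICE_PASSWORD>
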
@@ -1782,57 +1766,7 @@
if is_service_enabled cinder; then
init_cinder
elif is_service_enabled n-vol; then
- # Configure a default volume group called '`stack-volumes`' for the volume
- # service if it does not yet exist. If you don't wish to use a file backed
- # volume group, create your own volume group called ``stack-volumes`` before
- # invoking ``stack.sh``.
- #
- # By default, the backing file is 5G in size, and is stored in ``/opt/stack/data``.
-
- if ! sudo vgs $VOLUME_GROUP; then
- VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DATA_DIR/${VOLUME_GROUP}-backing-file}
- # Only create if the file doesn't already exists
- [[ -f $VOLUME_BACKING_FILE ]] || truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE
- DEV=`sudo losetup -f --show $VOLUME_BACKING_FILE`
- # Only create if the loopback device doesn't contain $VOLUME_GROUP
- if ! sudo vgs $VOLUME_GROUP; then sudo vgcreate $VOLUME_GROUP $DEV; fi
- fi
-
- if sudo vgs $VOLUME_GROUP; then
- if [[ "$os_PACKAGE" = "rpm" ]]; then
- # RPM doesn't start the service
- start_service tgtd
- fi
-
- # Setup tgtd configuration files
- mkdir -p $NOVA_DIR/volumes
-
- # Remove nova iscsi targets
- sudo tgtadm --op show --mode target | grep $VOLUME_NAME_PREFIX | grep Target | cut -f3 -d ' ' | sudo xargs -n1 tgt-admin --delete || true
- # Clean out existing volumes
- for lv in `sudo lvs --noheadings -o lv_name $VOLUME_GROUP`; do
- # ``VOLUME_NAME_PREFIX`` prefixes the LVs we want
- if [[ "${lv#$VOLUME_NAME_PREFIX}" != "$lv" ]]; then
- sudo lvremove -f $VOLUME_GROUP/$lv
- fi
- done
- fi
-
- if [[ "$os_PACKAGE" = "deb" ]]; then
-
- # Setup the tgt configuration file
- if [[ ! -f /etc/tgt/conf.d/nova.conf ]]; then
- sudo mkdir -p /etc/tgt/conf.d
- echo "include $NOVA_DIR/volumes/*" | sudo tee /etc/tgt/conf.d/nova.conf
- fi
-
- # tgt in oneiric doesn't restart properly if tgtd isn't running
- # do it in two steps
- sudo stop tgt || true
- sudo start tgt
- else
- restart_service tgtd
- fi
+ init_nvol
fi
# Support entry points installation of console scripts
@@ -2208,12 +2142,14 @@
# ``screen_it`` checks ``is_service_enabled``, it is not needed here
screen_it n-cpu "cd $NOVA_DIR && sg libvirtd $NOVA_BIN_DIR/nova-compute"
screen_it n-crt "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cert"
-screen_it n-vol "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-volume"
screen_it n-net "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-network"
screen_it n-sch "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-scheduler"
screen_it n-novnc "cd $NOVNC_DIR && ./utils/nova-novncproxy --config-file $NOVA_CONF_DIR/$NOVA_CONF --web ."
screen_it n-xvnc "cd $NOVA_DIR && ./bin/nova-xvpvncproxy --config-file $NOVA_CONF_DIR/$NOVA_CONF"
screen_it n-cauth "cd $NOVA_DIR && ./bin/nova-consoleauth"
+if is_service_enabled n-vol; then
+ start_nvol
+fi
if is_service_enabled cinder; then
start_cinder
fi
diff --git a/tools/configure_tempest.sh b/tools/configure_tempest.sh
index 0da5597..2df0315 100755
--- a/tools/configure_tempest.sh
+++ b/tools/configure_tempest.sh
@@ -162,6 +162,7 @@
COMPUTE_CATALOG_TYPE=compute
COMPUTE_CREATE_IMAGE_ENABLED=True
COMPUTE_ALLOW_TENANT_ISOLATION=True
+COMPUTE_ALLOW_TENANT_REUSE=True
COMPUTE_RESIZE_AVAILABLE=False
COMPUTE_CHANGE_PASSWORD_AVAILABLE=False # not supported with QEMU...
COMPUTE_LOG_LEVEL=ERROR
@@ -216,6 +217,7 @@
s,%ALT_TENANT_NAME%,$ALT_TENANT_NAME,g;
s,%COMPUTE_CATALOG_TYPE%,$COMPUTE_CATALOG_TYPE,g;
s,%COMPUTE_ALLOW_TENANT_ISOLATION%,$COMPUTE_ALLOW_TENANT_ISOLATION,g;
+ s,%COMPUTE_ALLOW_TENANT_REUSE%,$COMPUTE_ALLOW_TENANT_REUSE,g;
s,%COMPUTE_CREATE_IMAGE_ENABLED%,$COMPUTE_CREATE_IMAGE_ENABLED,g;
s,%COMPUTE_RESIZE_AVAILABLE%,$COMPUTE_RESIZE_AVAILABLE,g;
s,%COMPUTE_CHANGE_PASSWORD_AVAILABLE%,$COMPUTE_CHANGE_PASSWORD_AVAILABLE,g;
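
The new %COMPUTE_ALLOW_TENANT_REUSE% line follows the template-substitution pattern used for every other tempest setting; a minimal standalone reproduction (the config key in the echoed string is a stand-in for illustration):

    COMPUTE_ALLOW_TENANT_REUSE=True
    echo "allow_tenant_reuse = %COMPUTE_ALLOW_TENANT_REUSE%" | \
        sed -e "s,%COMPUTE_ALLOW_TENANT_REUSE%,$COMPUTE_ALLOW_TENANT_REUSE,g"
    # prints: allow_tenant_reuse = True
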
diff --git a/unstack.sh b/unstack.sh
index e73cc2d..30ee512 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -67,6 +67,11 @@
if is_service_enabled mysql; then
stop_service mysql
fi
+
+ # Stop rabbitmq-server
+ if is_service_enabled rabbit; then
+ stop_service rabbitmq-server
+ fi
fi
# Quantum dhcp agent runs dnsmasq
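
stop_service is defined in DevStack's functions file, not in this diff; assuming it delegates to the distro's init tooling, the new block is roughly equivalent to:

    if is_service_enabled rabbit; then
        sudo service rabbitmq-server stop
    fi
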