Merge "Set flavor id for Ironic's baremetal flavor"
diff --git a/MAINTAINERS.rst b/MAINTAINERS.rst
index bdd9e78..1e915c7 100644
--- a/MAINTAINERS.rst
+++ b/MAINTAINERS.rst
@@ -55,6 +55,11 @@
* Sergey Lukjanov <slukjanov@mirantis.com>
+Swift
+~~~~~
+
+* Chmouel Boudjnah <chmouel@enovance.com>
+
SUSE
~~~~
diff --git a/docs/source/faq.html b/docs/source/faq.html
index bfac1dc..2c74a66 100644
--- a/docs/source/faq.html
+++ b/docs/source/faq.html
@@ -73,7 +73,7 @@
<dd>A: DevStack is optimized for documentation & developers. As some of us use <a href="https://github.com/dellcloudedge/crowbar">Crowbar</a> for production deployments, we hope developers documenting how they setup systems for new features supports projects like Crowbar.</dd>
<dt>Q: I'd like to help!</dt>
- <dd>A: That isn't a question, but please do! The source for DevStack is <a href="http://github.com/openstack-dev/devstack">github</a> and bug reports go to <a href="http://bugs.launchpad.net/devstack/">LaunchPad</a>. Contributions follow the usual process as described in the <a href="http://wiki.openstack.org/HowToContribute">OpenStack wiki</a>. DevStack is not a core project but a gating project and therefore an official OpenStack project. This site is housed in the CloudBuilder's <a href="http://github.com/cloudbuilders/devstack">github</a> in the gh-pages branch.</dd>
+ <dd>A: That isn't a question, but please do! The source for DevStack is <a href="http://github.com/openstack-dev/devstack">github</a> and bug reports go to <a href="http://bugs.launchpad.net/devstack/">LaunchPad</a>. Contributions follow the usual process as described in the <a href="http://wiki.openstack.org/HowToContribute">OpenStack wiki</a> even though DevStack is not an official OpenStack project. This site is housed in the CloudBuilder's <a href="http://github.com/cloudbuilders/devstack">github</a> in the gh-pages branch.</dd>
<dt>Q: Why not use packages?</dt>
<dd>A: Unlike packages, DevStack leaves your cloud ready to develop - checkouts of the code and services running in screen. However, many people are doing the hard work of packaging and recipes for production deployments. We hope this script serves as a way to communicate configuration changes between developers and packagers.</dd>
@@ -85,7 +85,7 @@
<dd>A: Fedora and CentOS/RHEL are supported via rpm dependency files and specific checks in <code>stack.sh</code>. Support will follow the pattern set with the Ubuntu testing, i.e. only a single release of the distro will receive regular testing, others will be handled on a best-effort basis.</dd>
<dt>Q: Are there any differences between Ubuntu and Fedora support?</dt>
- <dd>A: LXC support is not complete on Fedora; Neutron is not fully supported prior to Fedora 18 due lack of OpenVSwitch packages.</dd>
+ <dd>A: Neutron is not fully supported prior to Fedora 18 due to the lack of OpenVSwitch packages.</dd>
<dt>Q: How about RHEL 6?</dt>
<dd>A: RHEL 6 has Python 2.6 and many old modules packaged and is a challenge to support. There are a number of specific RHEL6 work-arounds in <code>stack.sh</code> to handle this. But the testing on py26 is valuable so we do it...</dd>
diff --git a/extras.d/70-sahara.sh b/extras.d/70-sahara.sh
index 1a67c29..2a34999 100644
--- a/extras.d/70-sahara.sh
+++ b/extras.d/70-sahara.sh
@@ -4,7 +4,6 @@
if [[ "$1" == "source" ]]; then
# Initial source
source $TOP_DIR/lib/sahara
- source $TOP_DIR/lib/sahara-dashboard
elif [[ "$1" == "stack" && "$2" == "install" ]]; then
echo_summary "Installing sahara"
install_sahara
diff --git a/files/apache-keystone.template b/files/apache-keystone.template
index e7b2157..1bdb84c 100644
--- a/files/apache-keystone.template
+++ b/files/apache-keystone.template
@@ -8,7 +8,7 @@
WSGIApplicationGroup %{GLOBAL}
%ERRORLOGFORMAT%
ErrorLog /var/log/%APACHE_NAME%/keystone.log
- CustomLog /var/log/%APACHE_NAME%/access.log combined
+ CustomLog /var/log/%APACHE_NAME%/keystone_access.log combined
</VirtualHost>
<VirtualHost *:%ADMINPORT%>
@@ -18,7 +18,7 @@
WSGIApplicationGroup %{GLOBAL}
%ERRORLOGFORMAT%
ErrorLog /var/log/%APACHE_NAME%/keystone.log
- CustomLog /var/log/%APACHE_NAME%/access.log combined
+ CustomLog /var/log/%APACHE_NAME%/keystone_access.log combined
</VirtualHost>
# Workaround for missing path on RHEL6, see
diff --git a/files/apts/ironic b/files/apts/ironic
index 8674d9f..283d1b2 100644
--- a/files/apts/ironic
+++ b/files/apts/ironic
@@ -1,3 +1,4 @@
+docker.io
ipmitool
iptables
ipxe
diff --git a/files/rpms/ironic b/files/rpms/ironic
index 959ac3c..e646f3a 100644
--- a/files/rpms/ironic
+++ b/files/rpms/ironic
@@ -1,3 +1,4 @@
+docker-io
ipmitool
iptables
ipxe-bootimgs
diff --git a/functions-common b/functions-common
index c096664..bf9447c 100644
--- a/functions-common
+++ b/functions-common
@@ -695,6 +695,13 @@
echo $host_ip
}
+# Generates hex string from ``size`` byte of pseudo random data
+# generate_hex_string size
+function generate_hex_string {
+ local size=$1
+ hexdump -n "$size" -v -e '/1 "%02x"' /dev/urandom
+}
+
# Grab a numbered field from python prettytable output
# Fields are numbered starting with 1
# Reverse syntax is supported: -1 is the last field, -2 is second to last, etc.
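As an aside (not part of the patch itself), the new ``generate_hex_string`` helper emits two hex characters per byte requested, which is what the later hunks rely on when they replace their ad-hoc key generation:

    # 16 bytes -> 32 hex chars, used below for heat's auth_encryption_key
    generate_hex_string 16
    # 32 bytes -> 64 hex chars, same length as the old nova keymgr fixed_key loop
    generate_hex_string 32
    # 10 bytes -> 20 hex chars, same length as the old password fallback in stack.sh
    generate_hex_string 10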
@@ -1128,8 +1135,8 @@
# fork. It includes the dirty work of closing extra filehandles and preparing log
# files to produce the same logs as screen_it(). The log filename is derived
# from the service name and global-and-now-misnamed ``SCREEN_LOGDIR``
-# Uses globals ``CURRENT_LOG_TIME``, ``SCREEN_LOGDIR``
-# _run_process service "command-line"
+# Uses globals ``CURRENT_LOG_TIME``, ``SCREEN_LOGDIR``, ``SCREEN_NAME``, ``SERVICE_DIR``
+# _run_process service "command-line"
function _run_process {
local service=$1
local command="$2"
@@ -1148,8 +1155,12 @@
export PYTHONUNBUFFERED=1
fi
- exec /bin/bash -c "$command"
- die "$service exec failure: $command"
+ # Run under ``setsid`` to force the process to become a session and group leader.
+ # The pid saved can be used with pkill -g to get the entire process group.
+ setsid $command & echo $! >$SERVICE_DIR/$SCREEN_NAME/$1.pid
+
+ # Just silently exit this process
+ exit 0
}
# Helper to remove the ``*.failure`` files under ``$SERVICE_DIR/$SCREEN_NAME``.
@@ -1177,61 +1188,63 @@
return $exitcode
}
-# run_process() launches a child process that closes all file descriptors and
-# then exec's the passed in command. This is meant to duplicate the semantics
-# of screen_it() without screen. PIDs are written to
-# ``$SERVICE_DIR/$SCREEN_NAME/$service.pid``
+# Run a single service under screen or directly
+# If the command includes shell metacharacters (;<>*) it must be run using a shell
# run_process service "command-line"
function run_process {
local service=$1
local command="$2"
- # Spawn the child process
- _run_process "$service" "$command" &
- echo $!
+ if is_service_enabled $service; then
+ if [[ "$USE_SCREEN" = "True" ]]; then
+ screen_service "$service" "$command"
+ else
+ # Spawn directly without screen
+ _run_process "$service" "$command" &
+ fi
+ fi
}
# Helper to launch a service in a named screen
# Uses globals ``CURRENT_LOG_TIME``, ``SCREEN_NAME``, ``SCREEN_LOGDIR``,
# ``SERVICE_DIR``, ``USE_SCREEN``
-# screen_it service "command-line"
-function screen_it {
+# screen_service service "command-line"
+# Run a command in a shell in a screen window
+function screen_service {
+ local service=$1
+ local command="$2"
+
SCREEN_NAME=${SCREEN_NAME:-stack}
SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
USE_SCREEN=$(trueorfalse True $USE_SCREEN)
- if is_service_enabled $1; then
+ if is_service_enabled $service; then
# Append the service to the screen rc file
- screen_rc "$1" "$2"
+ screen_rc "$service" "$command"
- if [[ "$USE_SCREEN" = "True" ]]; then
- screen -S $SCREEN_NAME -X screen -t $1
+ screen -S $SCREEN_NAME -X screen -t $service
- if [[ -n ${SCREEN_LOGDIR} ]]; then
- screen -S $SCREEN_NAME -p $1 -X logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log
- screen -S $SCREEN_NAME -p $1 -X log on
- ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log
- fi
-
- # sleep to allow bash to be ready to be send the command - we are
- # creating a new window in screen and then sends characters, so if
- # bash isn't running by the time we send the command, nothing happens
- sleep 3
-
- NL=`echo -ne '\015'`
- # This fun command does the following:
- # - the passed server command is backgrounded
- # - the pid of the background process is saved in the usual place
- # - the server process is brought back to the foreground
- # - if the server process exits prematurely the fg command errors
- # and a message is written to stdout and the service failure file
- # The pid saved can be used in screen_stop() as a process group
- # id to kill off all child processes
- screen -S $SCREEN_NAME -p $1 -X stuff "$2 & echo \$! >$SERVICE_DIR/$SCREEN_NAME/$1.pid; fg || echo \"$1 failed to start\" | tee \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\"$NL"
- else
- # Spawn directly without screen
- run_process "$1" "$2" >$SERVICE_DIR/$SCREEN_NAME/$1.pid
+ if [[ -n ${SCREEN_LOGDIR} ]]; then
+ screen -S $SCREEN_NAME -p $service -X logfile ${SCREEN_LOGDIR}/screen-${service}.${CURRENT_LOG_TIME}.log
+ screen -S $SCREEN_NAME -p $service -X log on
+ ln -sf ${SCREEN_LOGDIR}/screen-${service}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${service}.log
fi
+
+ # sleep to allow bash to be ready to receive the command - we are
+ # creating a new window in screen and then sending characters, so if
+ # bash isn't running by the time we send the command, nothing happens
+ sleep 3
+
+ NL=`echo -ne '\015'`
+ # This fun command does the following:
+ # - the passed server command is backgrounded
+ # - the pid of the background process is saved in the usual place
+ # - the server process is brought back to the foreground
+ # - if the server process exits prematurely the fg command errors
+ # and a message is written to stdout and the service failure file
+ # The pid saved can be used in screen_stop() as a process group
+ # id to kill off all child processes
+ screen -S $SCREEN_NAME -p $service -X stuff "$command & echo \$! >$SERVICE_DIR/$SCREEN_NAME/${service}.pid; fg || echo \"$service failed to start\" | tee \"$SERVICE_DIR/$SCREEN_NAME/${service}.failure\"$NL"
fi
}
@@ -1269,20 +1282,40 @@
# that did not leave a PID behind
# Uses globals ``SCREEN_NAME``, ``SERVICE_DIR``, ``USE_SCREEN``
# screen_stop_service service
-function screen_stop {
+function screen_stop_service {
+ local service=$1
+
SCREEN_NAME=${SCREEN_NAME:-stack}
SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
USE_SCREEN=$(trueorfalse True $USE_SCREEN)
- if is_service_enabled $1; then
+ if is_service_enabled $service; then
+ # Clean up the screen window
+ screen -S $SCREEN_NAME -p $service -X kill
+ fi
+}
+
+# Stop a service process
+# If a PID is available use it, kill the whole process group via TERM
+# If screen is being used kill the screen window; this will catch processes
+# that did not leave a PID behind
+# Uses globals ``SERVICE_DIR``, ``USE_SCREEN``
+# stop_process service
+function stop_process {
+ local service=$1
+
+ SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
+ USE_SCREEN=$(trueorfalse True $USE_SCREEN)
+
+ if is_service_enabled $service; then
# Kill via pid if we have one available
- if [[ -r $SERVICE_DIR/$SCREEN_NAME/$1.pid ]]; then
- pkill -TERM -P -$(cat $SERVICE_DIR/$SCREEN_NAME/$1.pid)
- rm $SERVICE_DIR/$SCREEN_NAME/$1.pid
+ if [[ -r $SERVICE_DIR/$SCREEN_NAME/$service.pid ]]; then
+ pkill -g $(cat $SERVICE_DIR/$SCREEN_NAME/$service.pid)
+ rm $SERVICE_DIR/$SCREEN_NAME/$service.pid
fi
if [[ "$USE_SCREEN" = "True" ]]; then
# Clean up the screen window
- screen -S $SCREEN_NAME -p $1 -X kill
+ screen_stop_service $service
fi
fi
}
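A minimal sketch of how the reworked start/stop pair fits together with ``USE_SCREEN=False`` (the service name and command are borrowed from the cinder conversion later in this patch; it assumes c-api is enabled and the usual globals such as ``SERVICE_DIR`` and ``SCREEN_NAME`` are set):

    # run_process -> _run_process: the command is launched under setsid and the
    # pid of the new process-group leader lands in $SERVICE_DIR/$SCREEN_NAME/c-api.pid
    run_process c-api "$CINDER_BIN_DIR/cinder-api --config-file $CINDER_CONF"

    # stop_process reads that pid and kills the whole process group via pkill -g,
    # catching any children the service forked
    stop_process c-api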
@@ -1318,6 +1351,80 @@
}
+# Deprecated Functions
+# --------------------
+
+# _old_run_process() is designed to be backgrounded by old_run_process() to simulate a
+# fork. It includes the dirty work of closing extra filehandles and preparing log
+# files to produce the same logs as screen_it(). The log filename is derived
+# from the service name and global-and-now-misnamed ``SCREEN_LOGDIR``
+# Uses globals ``CURRENT_LOG_TIME``, ``SCREEN_LOGDIR``, ``SCREEN_NAME``, ``SERVICE_DIR``
+# _old_run_process service "command-line"
+function _old_run_process {
+ local service=$1
+ local command="$2"
+
+ # Undo logging redirections and close the extra descriptors
+ exec 1>&3
+ exec 2>&3
+ exec 3>&-
+ exec 6>&-
+
+ if [[ -n ${SCREEN_LOGDIR} ]]; then
+ exec 1>&${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log 2>&1
+ ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log
+
+ # TODO(dtroyer): Hack to get stdout from the Python interpreter for the logs.
+ export PYTHONUNBUFFERED=1
+ fi
+
+ exec /bin/bash -c "$command"
+ die "$service exec failure: $command"
+}
+
+# old_run_process() launches a child process that closes all file descriptors and
+# then exec's the passed in command. This is meant to duplicate the semantics
+# of screen_it() without screen. PIDs are written to
+# ``$SERVICE_DIR/$SCREEN_NAME/$service.pid`` by the spawned child process.
+# old_run_process service "command-line"
+function old_run_process {
+ local service=$1
+ local command="$2"
+
+ # Spawn the child process
+ _old_run_process "$service" "$command" &
+ echo $!
+}
+
+# Compatibility for existing start_XXXX() functions
+# Uses global ``USE_SCREEN``
+# screen_it service "command-line"
+function screen_it {
+ if is_service_enabled $1; then
+ # Append the service to the screen rc file
+ screen_rc "$1" "$2"
+
+ if [[ "$USE_SCREEN" = "True" ]]; then
+ screen_service "$1" "$2"
+ else
+ # Spawn directly without screen
+ old_run_process "$1" "$2" >$SERVICE_DIR/$SCREEN_NAME/$1.pid
+ fi
+ fi
+}
+
+# Compatibility for existing stop_XXXX() functions
+# Stop a service in screen
+# If a PID is available use it, kill the whole process group via TERM
+# If screen is being used kill the screen window; this will catch processes
+# that did not leave a PID behind
+# screen_stop service
+function screen_stop {
+ # Clean up the screen window
+ stop_process $1
+}
+
+
# Python Functions
# ================
@@ -1393,6 +1500,19 @@
$cmd_pip install --build=${pip_build_tmp} \
$pip_mirror_opt $@ \
&& $sudo_pip rm -rf ${pip_build_tmp}
+
+ if [[ "$INSTALL_TESTONLY_PACKAGES" == "True" ]]; then
+ local test_req="$@/test-requirements.txt"
+ if [[ -e "$test_req" ]]; then
+ $sudo_pip PIP_DOWNLOAD_CACHE=${PIP_DOWNLOAD_CACHE:-/var/cache/pip} \
+ http_proxy=$http_proxy \
+ https_proxy=$https_proxy \
+ no_proxy=$no_proxy \
+ $cmd_pip install --build=${pip_build_tmp} \
+ $pip_mirror_opt -r $test_req \
+ && $sudo_pip rm -rf ${pip_build_tmp}
+ fi
+ fi
}
# this should be used if you want to install globally, all libraries should
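The new ``INSTALL_TESTONLY_PACKAGES`` branch is opt-in; a hedged localrc sketch (only the variable name comes from the hunk above, the rest is ordinary DevStack configuration):

    # localrc
    INSTALL_TESTONLY_PACKAGES=True
    # When set, pip_install also pulls in each project's test-requirements.txt
    # right after its runtime requirements.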
@@ -1596,6 +1716,7 @@
[[ ${service} == "trove" && ${ENABLED_SERVICES} =~ "tr-" ]] && enabled=0
[[ ${service} == "swift" && ${ENABLED_SERVICES} =~ "s-" ]] && enabled=0
[[ ${service} == s-* && ${ENABLED_SERVICES} =~ "swift" ]] && enabled=0
+ [[ ${service} == key-* && ${ENABLED_SERVICES} =~ "key" ]] && enabled=0
done
$xtrace
return $enabled
diff --git a/lib/cinder b/lib/cinder
index ce13b86..0426dbe 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -247,8 +247,8 @@
if type configure_cinder_backend_${be_type} >/dev/null 2>&1; then
configure_cinder_backend_${be_type} ${be_name}
fi
- if [[ -z "$default_type" ]]; then
- default_name=$be_type
+ if [[ -z "$default_name" ]]; then
+ default_name=$be_name
fi
enabled_backends+=$be_name,
done
@@ -431,15 +431,15 @@
sudo tgtadm --mode system --op update --name debug --value on
fi
- screen_it c-api "cd $CINDER_DIR && $CINDER_BIN_DIR/cinder-api --config-file $CINDER_CONF"
+ run_process c-api "$CINDER_BIN_DIR/cinder-api --config-file $CINDER_CONF"
echo "Waiting for Cinder API to start..."
if ! wait_for_service $SERVICE_TIMEOUT $CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT; then
die $LINENO "c-api did not start"
fi
- screen_it c-sch "cd $CINDER_DIR && $CINDER_BIN_DIR/cinder-scheduler --config-file $CINDER_CONF"
- screen_it c-bak "cd $CINDER_DIR && $CINDER_BIN_DIR/cinder-backup --config-file $CINDER_CONF"
- screen_it c-vol "cd $CINDER_DIR && $CINDER_BIN_DIR/cinder-volume --config-file $CINDER_CONF"
+ run_process c-sch "$CINDER_BIN_DIR/cinder-scheduler --config-file $CINDER_CONF"
+ run_process c-bak "$CINDER_BIN_DIR/cinder-backup --config-file $CINDER_CONF"
+ run_process c-vol "$CINDER_BIN_DIR/cinder-volume --config-file $CINDER_CONF"
# NOTE(jdg): For cinder, startup order matters. To ensure that report_capabilities is received
# by the scheduler start the cinder-volume service last (or restart it) after the scheduler
diff --git a/lib/dstat b/lib/dstat
new file mode 100644
index 0000000..a2c522c
--- /dev/null
+++ b/lib/dstat
@@ -0,0 +1,41 @@
+# lib/dstat
+# Functions to start and stop dstat
+
+# Dependencies:
+#
+# - ``functions`` file
+
+# ``stack.sh`` calls the entry points in this order:
+#
+# - start_dstat
+# - stop_dstat
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Defaults
+# --------
+# for DSTAT logging
+DSTAT_FILE=${DSTAT_FILE:-"dstat.txt"}
+
+
+# start_dstat() - Start running processes, including screen
+function start_dstat {
+ # A better kind of sysstat, with the top process per time slice
+ DSTAT_OPTS="-tcmndrylp --top-cpu-adv"
+ if [[ -n ${SCREEN_LOGDIR} ]]; then
+ screen_it dstat "cd $TOP_DIR; dstat $DSTAT_OPTS | tee $SCREEN_LOGDIR/$DSTAT_FILE"
+ else
+ screen_it dstat "dstat $DSTAT_OPTS"
+ fi
+}
+
+# stop_dstat() - Stop the dstat process
+function stop_dstat {
+ screen_stop dstat
+}
+
+# Restore xtrace
+$XTRACE
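The extracted ``lib/dstat`` keeps the old behaviour: when ``SCREEN_LOGDIR`` is set, output is teed into ``$SCREEN_LOGDIR/$DSTAT_FILE`` as well as the screen window. A short localrc sketch (the log path is only an example):

    # localrc
    SCREEN_LOGDIR=$DEST/logs
    DSTAT_FILE=dstat.txt    # default shown; override to rename the capture file

``stack.sh`` now calls ``start_dstat`` where the inline block used to live, and ``unstack.sh`` calls ``stop_dstat`` (see the hunks further down).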
diff --git a/lib/glance b/lib/glance
index 1dea6cf..d9c4a20 100644
--- a/lib/glance
+++ b/lib/glance
@@ -34,6 +34,7 @@
GLANCE_AUTH_CACHE_DIR=${GLANCE_AUTH_CACHE_DIR:-/var/cache/glance}
GLANCE_CONF_DIR=${GLANCE_CONF_DIR:-/etc/glance}
+GLANCE_METADEF_DIR=$GLANCE_CONF_DIR/metadefs
GLANCE_REGISTRY_CONF=$GLANCE_CONF_DIR/glance-registry.conf
GLANCE_API_CONF=$GLANCE_CONF_DIR/glance-api.conf
GLANCE_REGISTRY_PASTE_INI=$GLANCE_CONF_DIR/glance-registry-paste.ini
@@ -81,6 +82,11 @@
fi
sudo chown $STACK_USER $GLANCE_CONF_DIR
+ if [[ ! -d $GLANCE_METADEF_DIR ]]; then
+ sudo mkdir -p $GLANCE_METADEF_DIR
+ fi
+ sudo chown $STACK_USER $GLANCE_METADEF_DIR
+
# Copy over our glance configurations and update them
cp $GLANCE_DIR/etc/glance-registry.conf $GLANCE_REGISTRY_CONF
iniset $GLANCE_REGISTRY_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
@@ -106,7 +112,6 @@
inicomment $GLANCE_API_CONF DEFAULT log_file
iniset $GLANCE_API_CONF DEFAULT sql_connection $dburl
iniset $GLANCE_API_CONF DEFAULT use_syslog $SYSLOG
- iniset $GLANCE_API_CONF DEFAULT filesystem_store_datadir $GLANCE_IMAGE_DIR/
iniset $GLANCE_API_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/
iniset $GLANCE_API_CONF paste_deploy flavor keystone+cachemanagement
iniset $GLANCE_API_CONF keystone_authtoken identity_uri $KEYSTONE_AUTH_URI
@@ -125,6 +130,13 @@
iniset $GLANCE_API_CONF DEFAULT disk_formats "ami,ari,aki,vhd,raw,iso"
fi
+ # Store specific configs
+ iniset $GLANCE_API_CONF DEFAULT filesystem_store_datadir $GLANCE_IMAGE_DIR/
+
+ # NOTE(flaper87): Until Glance is fully migrated, set these configs in both
+ # sections.
+ iniset $GLANCE_API_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/
+
# Store the images in swift if enabled.
if is_service_enabled s-proxy; then
iniset $GLANCE_API_CONF DEFAULT default_store swift
@@ -134,6 +146,15 @@
iniset $GLANCE_API_CONF DEFAULT swift_store_create_container_on_put True
iniset $GLANCE_API_CONF DEFAULT known_stores "glance.store.filesystem.Store, glance.store.http.Store, glance.store.swift.Store"
+
+ # NOTE(flaper87): Until Glance is fully migrated, set these configs in both
+ # sections.
+ iniset $GLANCE_API_CONF glance_store default_store swift
+ iniset $GLANCE_API_CONF glance_store swift_store_auth_address $KEYSTONE_SERVICE_URI/v2.0/
+ iniset $GLANCE_API_CONF glance_store swift_store_user $SERVICE_TENANT_NAME:glance-swift
+ iniset $GLANCE_API_CONF glance_store swift_store_key $SERVICE_PASSWORD
+ iniset $GLANCE_API_CONF glance_store swift_store_create_container_on_put True
+ iniset $GLANCE_API_CONF glance_store stores "file, http, swift"
fi
cp -p $GLANCE_DIR/etc/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI
@@ -144,7 +165,6 @@
iniset $GLANCE_CACHE_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
inicomment $GLANCE_CACHE_CONF DEFAULT log_file
iniset $GLANCE_CACHE_CONF DEFAULT use_syslog $SYSLOG
- iniset $GLANCE_CACHE_CONF DEFAULT filesystem_store_datadir $GLANCE_IMAGE_DIR/
iniset $GLANCE_CACHE_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/
iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_url
iniset $GLANCE_CACHE_CONF DEFAULT auth_url $KEYSTONE_AUTH_URI/v2.0
@@ -155,8 +175,16 @@
iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_password
iniset $GLANCE_CACHE_CONF DEFAULT admin_password $SERVICE_PASSWORD
+ # Store specific confs
+ # NOTE(flaper87): Until Glance is fully migrated, set these configs in both
+ # sections.
+ iniset $GLANCE_CACHE_CONF DEFAULT filesystem_store_datadir $GLANCE_IMAGE_DIR/
+ iniset $GLANCE_CACHE_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/
+
cp -p $GLANCE_DIR/etc/policy.json $GLANCE_POLICY_JSON
cp -p $GLANCE_DIR/etc/schema-image.json $GLANCE_SCHEMA_JSON
+
+ cp -p $GLANCE_DIR/etc/metadefs/*.json $GLANCE_METADEF_DIR
}
# create_glance_accounts() - Set up common required glance accounts
@@ -221,6 +249,9 @@
# Migrate glance database
$GLANCE_BIN_DIR/glance-manage db_sync
+ # Load metadata definitions
+ $GLANCE_BIN_DIR/glance-manage db_load_metadefs
+
create_glance_cache_dir
}
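Because the store options are now written to both ``[DEFAULT]`` and the new ``[glance_store]`` section, one quick way to confirm the duplication after a run is DevStack's ``iniget`` helper (a sketch; the path assumes the default ``GLANCE_CONF_DIR``):

    # Both lookups should return $GLANCE_IMAGE_DIR until Glance finishes the
    # migration to glance_store
    iniget /etc/glance/glance-api.conf DEFAULT filesystem_store_datadir
    iniget /etc/glance/glance-api.conf glance_store filesystem_store_datadir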
diff --git a/lib/heat b/lib/heat
index bd99d6b..14094a9 100644
--- a/lib/heat
+++ b/lib/heat
@@ -98,7 +98,7 @@
iniset $HEAT_CONF DEFAULT heat_waitcondition_server_url http://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1/waitcondition
iniset $HEAT_CONF DEFAULT heat_watch_server_url http://$HEAT_API_CW_HOST:$HEAT_API_CW_PORT
iniset $HEAT_CONF database connection `database_connection_url heat`
- iniset $HEAT_CONF DEFAULT auth_encryption_key `hexdump -n 16 -v -e '/1 "%02x"' /dev/urandom`
+ iniset $HEAT_CONF DEFAULT auth_encryption_key $(generate_hex_string 16)
iniset $HEAT_CONF DEFAULT region_name_for_services "$REGION_NAME"
diff --git a/lib/ironic b/lib/ironic
index 56dcd8c..2fad0b5 100644
--- a/lib/ironic
+++ b/lib/ironic
@@ -29,6 +29,7 @@
# Set up default directories
IRONIC_DIR=$DEST/ironic
+IRONIC_PYTHON_AGENT_DIR=$DEST/ironic-python-agent
IRONIC_DATA_DIR=$DATA_DIR/ironic
IRONIC_STATE_PATH=/var/lib/ironic
IRONICCLIENT_DIR=$DEST/python-ironicclient
@@ -74,7 +75,8 @@
IRONIC_DEPLOY_KERNEL=${IRONIC_DEPLOY_KERNEL:-}
IRONIC_DEPLOY_ELEMENT=${IRONIC_DEPLOY_ELEMENT:-deploy-ironic}
-IRONIC_AGENT_TARBALL=${IRONIC_AGENT_TARBALL:-http://tarballs.openstack.org/ironic-python-agent/coreos/ipa-coreos.tar.gz}
+IRONIC_AGENT_KERNEL_URL=${IRONIC_AGENT_KERNEL_URL:-http://tarballs.openstack.org/ironic-python-agent/coreos/files/coreos_production_pxe.vmlinuz}
+IRONIC_AGENT_RAMDISK_URL=${IRONIC_AGENT_RAMDISK_URL:-http://tarballs.openstack.org/ironic-python-agent/coreos/files/coreos_production_pxe-oem.cpio.gz}
# Which deploy driver to use - valid choices right now
# are 'pxe_ssh' and 'agent_ssh'.
@@ -562,6 +564,19 @@
ironic_ssh_check $IRONIC_SSH_KEY_DIR/$IRONIC_SSH_KEY_FILENAME $IRONIC_VM_SSH_ADDRESS $IRONIC_VM_SSH_PORT $IRONIC_SSH_USERNAME 10
}
+function build_ipa_coreos_ramdisk {
+ echo "Building ironic-python-agent deploy ramdisk"
+ local kernel_path=$1
+ local ramdisk_path=$2
+ git_clone $IRONIC_PYTHON_AGENT_REPO $IRONIC_PYTHON_AGENT_DIR $IRONIC_PYTHON_AGENT_BRANCH
+ cd $IRONIC_PYTHON_AGENT_DIR
+ imagebuild/coreos/build_coreos_image.sh
+ cp imagebuild/coreos/UPLOAD/coreos_production_pxe_image-oem.cpio.gz $ramdisk_path
+ cp imagebuild/coreos/UPLOAD/coreos_production_pxe.vmlinuz $kernel_path
+ sudo rm -rf UPLOAD
+ cd -
+}
+
# build deploy kernel+ramdisk, then upload them to glance
# this function sets ``IRONIC_DEPLOY_KERNEL_ID``, ``IRONIC_DEPLOY_RAMDISK_ID``
function upload_baremetal_ironic_deploy {
@@ -586,8 +601,8 @@
if [ "$IRONIC_BUILD_DEPLOY_RAMDISK" = "True" ]; then
# we can build them only if we're not offline
if [ "$OFFLINE" != "True" ]; then
- if [ "$IRONIC_DEPLOY_RAMDISK" == "agent_ssh" ]; then
- die $LINENO "Ironic-python-agent build is not yet supported"
+ if [ "$IRONIC_DEPLOY_DRIVER" == "agent_ssh" ]; then
+ build_ipa_coreos_ramdisk $IRONIC_DEPLOY_KERNEL_PATH $IRONIC_DEPLOY_RAMDISK_PATH
else
ramdisk-image-create $IRONIC_DEPLOY_FLAVOR \
-o $TOP_DIR/files/ir-deploy
@@ -598,12 +613,8 @@
else
if [ "$IRONIC_DEPLOY_DRIVER" == "agent_ssh" ]; then
# download the agent image tarball
- wget "$IRONIC_AGENT_TARBALL" -O ironic_agent_tarball.tar.gz
- tar zxfv ironic_agent_tarball.tar.gz
- mv UPLOAD/coreos_production_pxe.vmlinuz $IRONIC_DEPLOY_KERNEL_PATH
- mv UPLOAD/coreos_production_pxe_image-oem.cpio.gz $IRONIC_DEPLOY_RAMDISK_PATH
- rm -rf UPLOAD
- rm ironic_agent_tarball.tar.gz
+ wget "$IRONIC_AGENT_KERNEL_URL" -O $IRONIC_DEPLOY_KERNEL_PATH
+ wget "$IRONIC_AGENT_RAMDISK_URL" -O $IRONIC_DEPLOY_RAMDISK_PATH
else
die $LINENO "Deploy kernel+ramdisk files don't exist and their building was disabled explicitly by IRONIC_BUILD_DEPLOY_RAMDISK"
fi
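A hedged localrc sketch of the two agent-driver paths the ironic hunks above allow:

    # Either download the prebuilt CoreOS kernel/ramdisk from the default URLs above...
    IRONIC_DEPLOY_DRIVER=agent_ssh
    IRONIC_BUILD_DEPLOY_RAMDISK=False

    # ...or build them locally via ironic-python-agent's imagebuild/coreos tooling
    # (requires OFFLINE not being True)
    IRONIC_DEPLOY_DRIVER=agent_ssh
    IRONIC_BUILD_DEPLOY_RAMDISK=True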
diff --git a/lib/keystone b/lib/keystone
index 42acd50..da5cf89 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -237,11 +237,11 @@
iniset $KEYSTONE_CONF ec2 driver "keystone.contrib.ec2.backends.sql.Ec2"
if [[ "$KEYSTONE_TOKEN_BACKEND" = "sql" ]]; then
- iniset $KEYSTONE_CONF token driver keystone.token.backends.sql.Token
+ iniset $KEYSTONE_CONF token driver keystone.token.persistence.backends.sql.Token
elif [[ "$KEYSTONE_TOKEN_BACKEND" = "memcache" ]]; then
- iniset $KEYSTONE_CONF token driver keystone.token.backends.memcache.Token
+ iniset $KEYSTONE_CONF token driver keystone.token.persistence.backends.memcache.Token
else
- iniset $KEYSTONE_CONF token driver keystone.token.backends.kvs.Token
+ iniset $KEYSTONE_CONF token driver keystone.token.persistence.backends.kvs.Token
fi
if [[ "$KEYSTONE_CATALOG_BACKEND" = "sql" ]]; then
@@ -357,9 +357,8 @@
# The Member role is used by Horizon and Swift so we need to keep it:
local member_role=$(get_or_create_role "Member")
- # ANOTHER_ROLE demonstrates that an arbitrary role may be created and used
+ # another_role demonstrates that an arbitrary role may be created and used
# TODO(sleepsonthefloor): show how this can be used for rbac in the future!
-
local another_role=$(get_or_create_role "anotherrole")
# invisible tenant - admin can't see this one
@@ -476,6 +475,7 @@
if [ "$KEYSTONE_USE_MOD_WSGI" == "True" ]; then
restart_apache_server
screen_it key "cd $KEYSTONE_DIR && sudo tail -f /var/log/$APACHE_NAME/keystone.log"
+ screen_it key-access "sudo tail -f /var/log/$APACHE_NAME/keystone_access.log"
else
# Start Keystone in a screen window
screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF --debug"
diff --git a/lib/oslo b/lib/oslo
index 025815c..e5fa37e 100644
--- a/lib/oslo
+++ b/lib/oslo
@@ -22,10 +22,15 @@
# --------
CLIFF_DIR=$DEST/cliff
OSLOCFG_DIR=$DEST/oslo.config
+OSLOCON_DIR=$DEST/oslo.concurrency
OSLODB_DIR=$DEST/oslo.db
OSLOI18N_DIR=$DEST/oslo.i18n
+OSLOLOG_DIR=$DEST/oslo.log
+OSLOMID_DIR=$DEST/oslo.middleware
OSLOMSG_DIR=$DEST/oslo.messaging
OSLORWRAP_DIR=$DEST/oslo.rootwrap
+OSLOSERIALIZATION_DIR=$DEST/oslo.serialization
+OSLOUTILS_DIR=$DEST/oslo.utils
OSLOVMWARE_DIR=$DEST/oslo.vmware
PYCADF_DIR=$DEST/pycadf
STEVEDORE_DIR=$DEST/stevedore
@@ -45,9 +50,24 @@
git_clone $OSLOI18N_REPO $OSLOI18N_DIR $OSLOI18N_BRANCH
setup_install $OSLOI18N_DIR
+ git_clone $OSLOUTILS_REPO $OSLOUTILS_DIR $OSLOUTILS_BRANCH
+ setup_install $OSLOUTILS_DIR
+
+ git_clone $OSLOSERIALIZATION_REPO $OSLOSERIALIZATION_DIR $OSLOSERIALIZATION_BRANCH
+ setup_install $OSLOSERIALIZATION_DIR
+
git_clone $OSLOCFG_REPO $OSLOCFG_DIR $OSLOCFG_BRANCH
setup_install $OSLOCFG_DIR
+ git_clone $OSLOCON_REPO $OSLOCON_DIR $OSLOCON_BRANCH
+ setup_install $OSLOCON_DIR
+
+ git_clone $OSLOLOG_REPO $OSLOLOG_DIR $OSLOLOG_BRANCH
+ setup_install $OSLOLOG_DIR
+
+ git_clone $OSLOMID_REPO $OSLOMID_DIR $OSLOMID_BRANCH
+ setup_install $OSLOMID_DIR
+
git_clone $OSLOMSG_REPO $OSLOMSG_DIR $OSLOMSG_BRANCH
setup_install $OSLOMSG_DIR
diff --git a/lib/swift b/lib/swift
index 6b96348..b050b57 100644
--- a/lib/swift
+++ b/lib/swift
@@ -556,6 +556,7 @@
local service_tenant=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
local admin_role=$(openstack role list | awk "/ admin / { print \$2 }")
+ local another_role=$(openstack role list | awk "/ anotherrole / { print \$2 }")
local swift_user=$(get_or_create_user "swift" \
"$SERVICE_PASSWORD" $service_tenant)
@@ -582,7 +583,7 @@
local swift_user_test3=$(get_or_create_user swiftusertest3 $swiftusertest3_password \
"$swift_tenant_test1" "test3@example.com")
die_if_not_set $LINENO swift_user_test3 "Failure creating swift_user_test3"
- get_or_add_user_role $ANOTHER_ROLE $swift_user_test3 $swift_tenant_test1
+ get_or_add_user_role $another_role $swift_user_test3 $swift_tenant_test1
local swift_tenant_test2=$(get_or_create_project swifttenanttest2)
die_if_not_set $LINENO swift_tenant_test2 "Failure creating swift_tenant_test2"
diff --git a/stack.sh b/stack.sh
index 1661b36..7bfd072 100755
--- a/stack.sh
+++ b/stack.sh
@@ -69,20 +69,11 @@
echo "You are running this script as root."
echo "Cut it out."
echo "Really."
- echo "If you need an account to run DevStack, do this (as root, heh) to create $STACK_USER:"
+ echo "If you need an account to run DevStack, do this (as root, heh) to create a non-root account:"
echo "$TOP_DIR/tools/create-stack-user.sh"
exit 1
fi
-# Check to see if we are already running DevStack
-# Note that this may fail if USE_SCREEN=False
-if type -p screen >/dev/null && screen -ls | egrep -q "[0-9].$SCREEN_NAME"; then
- echo "You are already running a stack.sh session."
- echo "To rejoin this session type 'screen -x stack'."
- echo "To destroy this session, type './unstack.sh'."
- exit 1
-fi
-
# Prepare the environment
# -----------------------
@@ -130,6 +121,7 @@
done
fi
+
# ``stack.sh`` is customizable by setting environment variables. Override a
# default setting via export::
#
@@ -158,6 +150,15 @@
fi
source $TOP_DIR/stackrc
+# Check to see if we are already running DevStack
+# Note that this may fail if USE_SCREEN=False
+if type -p screen > /dev/null && screen -ls | egrep -q "[0-9]\.$SCREEN_NAME"; then
+ echo "You are already running a stack.sh session."
+ echo "To rejoin this session type 'screen -x stack'."
+ echo "To destroy this session, type './unstack.sh'."
+ exit 1
+fi
+
# Local Settings
# --------------
@@ -236,7 +237,7 @@
if [[ is_fedora && ( $DISTRO == "rhel6" || $DISTRO == "rhel7" ) ]]; then
# RHEL requires EPEL for many Open Stack dependencies
if [[ $DISTRO == "rhel7" ]]; then
- EPEL_RPM=${RHEL7_EPEL_RPM:-"http://dl.fedoraproject.org/pub/epel/beta/7/x86_64/epel-release-7-0.2.noarch.rpm"}
+ EPEL_RPM=${RHEL7_EPEL_RPM:-"http://dl.fedoraproject.org/pub/epel/beta/7/x86_64/epel-release-7-1.noarch.rpm"}
elif [[ $DISTRO == "rhel6" ]]; then
EPEL_RPM=${RHEL6_EPEL_RPM:-"http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm"}
fi
@@ -323,9 +324,6 @@
SYSLOG_HOST=${SYSLOG_HOST:-$HOST_IP}
SYSLOG_PORT=${SYSLOG_PORT:-516}
-# for DSTAT logging
-DSTAT_FILE=${DSTAT_FILE:-"dstat.txt"}
-
# Use color for logging output (only available if syslog is not used)
LOG_COLOR=`trueorfalse True $LOG_COLOR`
@@ -369,6 +367,7 @@
source $TOP_DIR/lib/neutron
source $TOP_DIR/lib/baremetal
source $TOP_DIR/lib/ldap
+source $TOP_DIR/lib/dstat
# Extras Source
# --------------
@@ -426,7 +425,7 @@
echo "Invalid chars in password. Try again:"
done
if [ ! $pw ]; then
- pw=$(cat /dev/urandom | tr -cd 'a-f0-9' | head -c 20)
+ pw=$(generate_hex_string 10)
fi
eval "$var=$pw"
echo "$var=$pw" >> $localrc
@@ -948,12 +947,7 @@
# -------
# A better kind of sysstat, with the top process per time slice
-DSTAT_OPTS="-tcmndrylp --top-cpu-adv"
-if [[ -n ${SCREEN_LOGDIR} ]]; then
- screen_it dstat "cd $TOP_DIR; dstat $DSTAT_OPTS | tee $SCREEN_LOGDIR/$DSTAT_FILE"
-else
- screen_it dstat "dstat $DSTAT_OPTS"
-fi
+start_dstat
# Start Services
# ==============
@@ -1211,11 +1205,7 @@
# Create a randomized default value for the keymgr's fixed_key
if is_service_enabled nova; then
- FIXED_KEY=""
- for i in $(seq 1 64); do
- FIXED_KEY+=$(echo "obase=16; $(($RANDOM % 16))" | bc);
- done;
- iniset $NOVA_CONF keymgr fixed_key "$FIXED_KEY"
+ iniset $NOVA_CONF keymgr fixed_key $(generate_hex_string 32)
fi
if is_service_enabled zeromq; then
diff --git a/stackrc b/stackrc
index ad7da6c..e071132 100644
--- a/stackrc
+++ b/stackrc
@@ -172,9 +172,11 @@
HORIZONAUTH_REPO=${HORIZONAUTH_REPO:-${GIT_BASE}/openstack/django_openstack_auth.git}
HORIZONAUTH_BRANCH=${HORIZONAUTH_BRANCH:-master}
-# baremetal provisionint service
+# baremetal provisioning service
IRONIC_REPO=${IRONIC_REPO:-${GIT_BASE}/openstack/ironic.git}
IRONIC_BRANCH=${IRONIC_BRANCH:-master}
+IRONIC_PYTHON_AGENT_REPO=${IRONIC_PYTHON_AGENT_REPO:-${GIT_BASE}/openstack/ironic-python-agent.git}
+IRONIC_PYTHON_AGENT_BRANCH=${IRONIC_PYTHON_AGENT_BRANCH:-master}
# ironic client
IRONICCLIENT_REPO=${IRONICCLIENT_REPO:-${GIT_BASE}/openstack/python-ironicclient.git}
@@ -220,6 +222,10 @@
CLIFF_REPO=${CLIFF_REPO:-${GIT_BASE}/openstack/cliff.git}
CLIFF_BRANCH=${CLIFF_BRANCH:-master}
+# oslo.concurrency
+OSLOCON_REPO=${OSLOCON_REPO:-${GIT_BASE}/openstack/oslo.concurrency.git}
+OSLOCON_BRANCH=${OSLOCON_BRANCH:-master}
+
# oslo.config
OSLOCFG_REPO=${OSLOCFG_REPO:-${GIT_BASE}/openstack/oslo.config.git}
OSLOCFG_BRANCH=${OSLOCFG_BRANCH:-master}
@@ -232,14 +238,30 @@
OSLOI18N_REPO=${OSLOI18N_REPO:-${GIT_BASE}/openstack/oslo.i18n.git}
OSLOI18N_BRANCH=${OSLOI18N_BRANCH:-master}
+# oslo.log
+OSLOLOG_REPO=${OSLOLOG_REPO:-${GIT_BASE}/openstack/oslo.log.git}
+OSLOLOG_BRANCH=${OSLOLOG_BRANCH:-master}
+
# oslo.messaging
OSLOMSG_REPO=${OSLOMSG_REPO:-${GIT_BASE}/openstack/oslo.messaging.git}
OSLOMSG_BRANCH=${OSLOMSG_BRANCH:-master}
+# oslo.middleware
+OSLOMID_REPO=${OSLOMID_REPO:-${GIT_BASE}/openstack/oslo.middleware.git}
+OSLOMID_BRANCH=${OSLOMID_BRANCH:-master}
+
# oslo.rootwrap
OSLORWRAP_REPO=${OSLORWRAP_REPO:-${GIT_BASE}/openstack/oslo.rootwrap.git}
OSLORWRAP_BRANCH=${OSLORWRAP_BRANCH:-master}
+# oslo.serialization
+OSLOSERIALIZATION_REPO=${OSLOSERIALIZATION_REPO:-${GIT_BASE}/openstack/oslo.serialization.git}
+OSLOSERIALIZATION_BRANCH=${OSLOSERIALIZATION_BRANCH:-master}
+
+# oslo.utils
+OSLOUTILS_REPO=${OSLOUTILS_REPO:-${GIT_BASE}/openstack/oslo.utils.git}
+OSLOUTILS_BRANCH=${OSLOUTILS_BRANCH:-master}
+
# oslo.vmware
OSLOVMWARE_REPO=${OSLOVMWARE_REPO:-${GIT_BASE}/openstack/oslo.vmware.git}
OSLOVMWARE_BRANCH=${OSLOVMWARE_BRANCH:-master}
diff --git a/tests/fake-service.sh b/tests/fake-service.sh
new file mode 100755
index 0000000..d4b9b56
--- /dev/null
+++ b/tests/fake-service.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+# fake-service.sh - a fake service for start/stop testing
+# $1 - sleep time
+
+SLEEP_TIME=${1:-3}
+
+LOG=/tmp/fake-service.log
+TIMESTAMP_FORMAT=${TIMESTAMP_FORMAT:-"%F-%H%M%S"}
+
+# duplicate output
+exec 1> >(tee -a ${LOG})
+
+echo ""
+echo "Starting fake-service for ${SLEEP_TIME}"
+while true; do
+ echo "$(date +${TIMESTAMP_FORMAT}) [$$]"
+ sleep ${SLEEP_TIME}
+done
+
diff --git a/tests/run-process.sh b/tests/run-process.sh
new file mode 100755
index 0000000..cdffc3a
--- /dev/null
+++ b/tests/run-process.sh
@@ -0,0 +1,109 @@
+#!/bin/bash
+# tests/run-process.sh - Test DevStack screen_it() and screen_stop()
+#
+# run-process.sh start|stop|status
+#
+# Set USE_SCREEN to change the default
+#
+# This script emulates the basic exec environment in ``stack.sh`` to test
+# the process spawn and kill operations.
+
+if [[ -z $1 ]]; then
+ echo "$0 start|stop"
+ exit 1
+fi
+
+TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
+source $TOP_DIR/functions
+
+USE_SCREEN=${USE_SCREEN:-False}
+
+ENABLED_SERVICES=fake-service
+
+SERVICE_DIR=/tmp
+SCREEN_NAME=test
+SCREEN_LOGDIR=${SERVICE_DIR}/${SCREEN_NAME}
+
+
+# Kill background processes on exit
+trap clean EXIT
+clean() {
+ local r=$?
+ jobs -p
+ kill >/dev/null 2>&1 $(jobs -p)
+ exit $r
+}
+
+
+# Exit on any errors so that errors don't compound
+trap failed ERR
+failed() {
+ local r=$?
+ jobs -p
+ kill >/dev/null 2>&1 $(jobs -p)
+ set +o xtrace
+ [ -n "$LOGFILE" ] && echo "${0##*/} failed: full log in $LOGFILE"
+ exit $r
+}
+
+function status {
+ if [[ -r $SERVICE_DIR/$SCREEN_NAME/fake-service.pid ]]; then
+ pstree -pg $(cat $SERVICE_DIR/$SCREEN_NAME/fake-service.pid)
+ fi
+ ps -ef | grep fake
+}
+
+function setup_screen {
+if [[ ! -d $SERVICE_DIR/$SCREEN_NAME ]]; then
+ rm -rf $SERVICE_DIR/$SCREEN_NAME
+ mkdir -p $SERVICE_DIR/$SCREEN_NAME
+fi
+
+if [[ "$USE_SCREEN" == "True" ]]; then
+ # Create a new named screen to run processes in
+ screen -d -m -S $SCREEN_NAME -t shell -s /bin/bash
+ sleep 1
+
+ # Set a reasonable status bar
+ if [ -z "$SCREEN_HARDSTATUS" ]; then
+ SCREEN_HARDSTATUS='%{= .} %-Lw%{= .}%> %n%f %t*%{= .}%+Lw%< %-=%{g}(%{d}%H/%l%{g})'
+ fi
+ screen -r $SCREEN_NAME -X hardstatus alwayslastline "$SCREEN_HARDSTATUS"
+fi
+
+# Clear screen rc file
+SCREENRC=$TOP_DIR/tests/$SCREEN_NAME-screenrc
+if [[ -e $SCREENRC ]]; then
+ echo -n > $SCREENRC
+fi
+}
+
+# Mimic logging
+ # Set up output redirection without log files
+ # Copy stdout to fd 3
+ exec 3>&1
+ if [[ "$VERBOSE" != "True" ]]; then
+ # Throw away stdout and stderr
+ #exec 1>/dev/null 2>&1
+ :
+ fi
+ # Always send summary fd to original stdout
+ exec 6>&3
+
+
+if [[ "$1" == "start" ]]; then
+ echo "Start service"
+ setup_screen
+ screen_it fake-service "$TOP_DIR/tests/fake-service.sh"
+ sleep 1
+ status
+elif [[ "$1" == "stop" ]]; then
+ echo "Stop service"
+ screen_stop fake-service
+ status
+elif [[ "$1" == "status" ]]; then
+ status
+else
+ echo "Unknown command"
+ exit 1
+fi
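Typical invocations of the new harness (a sketch; note ``USE_SCREEN`` defaults to ``False`` here, unlike in ``stack.sh``):

    # Non-screen path; the pid file ends up in /tmp/test/fake-service.pid
    ./tests/run-process.sh start
    ./tests/run-process.sh status
    ./tests/run-process.sh stop

    # Screen path: the same commands run inside a screen session named "test"
    USE_SCREEN=True ./tests/run-process.sh start
    USE_SCREEN=True ./tests/run-process.sh stop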
diff --git a/tools/xen/functions b/tools/xen/functions
index ab0be84..4317796 100644
--- a/tools/xen/functions
+++ b/tools/xen/functions
@@ -93,7 +93,7 @@
echo $pbd_path
}
-function find_ip_by_name() {
+function find_ip_by_name {
local guest_name="$1"
local interface="$2"
@@ -121,7 +121,7 @@
done
}
-function _vm_uuid() {
+function _vm_uuid {
local vm_name_label
vm_name_label="$1"
@@ -129,14 +129,14 @@
xe vm-list name-label="$vm_name_label" --minimal
}
-function _create_new_network() {
+function _create_new_network {
local name_label
name_label=$1
xe network-create name-label="$name_label"
}
-function _multiple_networks_with_name() {
+function _multiple_networks_with_name {
local name_label
name_label=$1
@@ -144,21 +144,21 @@
xe network-list name-label="$name_label" --minimal | grep -q ","
}
-function _network_exists() {
+function _network_exists {
local name_label
name_label=$1
! [ -z "$(xe network-list name-label="$name_label" --minimal)" ]
}
-function _bridge_exists() {
+function _bridge_exists {
local bridge
bridge=$1
! [ -z "$(xe network-list bridge="$bridge" --minimal)" ]
}
-function _network_uuid() {
+function _network_uuid {
local bridge_or_net_name
bridge_or_net_name=$1
@@ -169,7 +169,7 @@
fi
}
-function add_interface() {
+function add_interface {
local vm_name_label
local bridge_or_network_name
@@ -185,7 +185,7 @@
xe vif-create network-uuid=$net vm-uuid=$vm device=$device_number
}
-function setup_network() {
+function setup_network {
local bridge_or_net_name
bridge_or_net_name=$1
@@ -204,7 +204,7 @@
fi
}
-function bridge_for() {
+function bridge_for {
local bridge_or_net_name
bridge_or_net_name=$1
@@ -215,29 +215,28 @@
fi
}
-function xenapi_ip_on() {
+function xenapi_ip_on {
local bridge_or_net_name
bridge_or_net_name=$1
ifconfig $(bridge_for "$bridge_or_net_name") | grep "inet addr" | cut -d ":" -f2 | sed "s/ .*//"
}
-function xenapi_is_listening_on() {
+function xenapi_is_listening_on {
local bridge_or_net_name
bridge_or_net_name=$1
! [ -z $(xenapi_ip_on "$bridge_or_net_name") ]
}
-function parameter_is_specified() {
+function parameter_is_specified {
local parameter_name
parameter_name=$1
compgen -v | grep "$parameter_name"
}
-function append_kernel_cmdline()
-{
+function append_kernel_cmdline {
local vm_name_label
local kernel_args
@@ -252,8 +251,7 @@
xe vm-param-set PV-args="$pv_args $kernel_args" uuid=$vm
}
-function destroy_all_vifs_of()
-{
+function destroy_all_vifs_of {
local vm_name_label
vm_name_label="$1"
@@ -268,11 +266,11 @@
unset IFS
}
-function have_multiple_hosts() {
+function have_multiple_hosts {
xe host-list --minimal | grep -q ","
}
-function attach_network() {
+function attach_network {
local bridge_or_net_name
bridge_or_net_name="$1"
@@ -286,7 +284,7 @@
xe network-attach uuid=$net host-uuid=$host
}
-function set_vm_memory() {
+function set_vm_memory {
local vm_name_label
local memory
@@ -305,7 +303,7 @@
uuid=$vm
}
-function max_vcpus() {
+function max_vcpus {
local vm_name_label
vm_name_label="$1"
@@ -337,7 +335,7 @@
xe vm-param-set uuid=$vm VCPUs-at-startup=$cpu_count
}
-function get_domid() {
+function get_domid {
local vm_name_label
vm_name_label="$1"
diff --git a/unstack.sh b/unstack.sh
index fe5fc77..0457ef2 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -162,6 +162,8 @@
cleanup_trove
fi
+stop_dstat
+
# Clean up the remainder of the screen processes
SCREEN=$(which screen)
if [[ -n "$SCREEN" ]]; then