Merge "Move setup_develop() to common"
diff --git a/clean.sh b/clean.sh
index b2a9405..e121e4f 100755
--- a/clean.sh
+++ b/clean.sh
@@ -119,4 +119,10 @@
fi
# Clean up files
-rm -f $TOP_DIR/.stackenv
+
+FILES_TO_CLEAN=".localrc.auto docs-files docs/ shocco/ stack-screenrc test*.conf* test.ini*"
+FILES_TO_CLEAN+=" .stackenv .prereqs"
+
+for file in $FILES_TO_CLEAN; do
+ rm -f $TOP_DIR/$file
+done
diff --git a/extras.d/70-savanna.sh b/extras.d/70-savanna.sh
index 6bbe113..edc1376 100644
--- a/extras.d/70-savanna.sh
+++ b/extras.d/70-savanna.sh
@@ -8,6 +8,7 @@
elif [[ "$1" == "stack" && "$2" == "install" ]]; then
echo_summary "Installing Savanna"
install_savanna
+ cleanup_savanna
if is_service_enabled horizon; then
install_savanna_dashboard
fi
@@ -29,4 +30,8 @@
cleanup_savanna_dashboard
fi
fi
+
+ if [[ "$1" == "clean" ]]; then
+ cleanup_savanna
+ fi
fi
diff --git a/files/apts/glance b/files/apts/glance
index 22787bc..6dc878e 100644
--- a/files/apts/glance
+++ b/files/apts/glance
@@ -1,5 +1,5 @@
gcc
-libffi-dev # testonly
+libffi-dev
libmysqlclient-dev # testonly
libpq-dev # testonly
libssl-dev # testonly
diff --git a/files/apts/n-cpu b/files/apts/n-cpu
index b287107..a82304d 100644
--- a/files/apts/n-cpu
+++ b/files/apts/n-cpu
@@ -5,4 +5,4 @@
genisoimage
sysfsutils
sg3-utils
-python-guestfs
+python-guestfs # NOPRIME
diff --git a/files/apts/sysstat b/files/apts/sysstat
deleted file mode 100644
index ea0c342..0000000
--- a/files/apts/sysstat
+++ /dev/null
@@ -1 +0,0 @@
-sysstat
diff --git a/files/rpms-suse/sysstat b/files/rpms-suse/sysstat
deleted file mode 100644
index ea0c342..0000000
--- a/files/rpms-suse/sysstat
+++ /dev/null
@@ -1 +0,0 @@
-sysstat
diff --git a/files/rpms/glance b/files/rpms/glance
index 785ce25..25c5d39 100644
--- a/files/rpms/glance
+++ b/files/rpms/glance
@@ -1,5 +1,5 @@
gcc
-libffi-devel # testonly
+libffi-devel
libxml2-devel # testonly
libxslt-devel # testonly
mysql-devel # testonly
diff --git a/files/rpms/n-cpu b/files/rpms/n-cpu
index e4fdaf4..32b1546 100644
--- a/files/rpms/n-cpu
+++ b/files/rpms/n-cpu
@@ -4,4 +4,4 @@
genisoimage
sysfsutils
sg3_utils
-python-libguestfs
+python-libguestfs # NOPRIME
diff --git a/files/rpms/sysstat b/files/rpms/sysstat
deleted file mode 100644
index ea0c342..0000000
--- a/files/rpms/sysstat
+++ /dev/null
@@ -1 +0,0 @@
-sysstat
diff --git a/functions-common b/functions-common
index d6f71b4..8e6b2b1 100644
--- a/functions-common
+++ b/functions-common
@@ -39,59 +39,76 @@
# Append a new option in an ini file without replacing the old value
# iniadd config-file section option value1 value2 value3 ...
function iniadd() {
+ local xtrace=$(set +o | grep xtrace)
+ set +o xtrace
local file=$1
local section=$2
local option=$3
shift 3
local values="$(iniget_multiline $file $section $option) $@"
iniset_multiline $file $section $option $values
+ $xtrace
}
# Comment an option in an INI file
# inicomment config-file section option
function inicomment() {
+ local xtrace=$(set +o | grep xtrace)
+ set +o xtrace
local file=$1
local section=$2
local option=$3
sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=.*$\)|#\1|" "$file"
+ $xtrace
}
# Get an option from an INI file
# iniget config-file section option
function iniget() {
+ local xtrace=$(set +o | grep xtrace)
+ set +o xtrace
local file=$1
local section=$2
local option=$3
local line
line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file")
echo ${line#*=}
+ $xtrace
}
# Get a multiple line option from an INI file
# iniget_multiline config-file section option
function iniget_multiline() {
+ local xtrace=$(set +o | grep xtrace)
+ set +o xtrace
local file=$1
local section=$2
local option=$3
local values
values=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { s/^$option[ \t]*=[ \t]*//gp; }" "$file")
echo ${values}
+ $xtrace
}
# Determinate is the given option present in the INI file
# ini_has_option config-file section option
function ini_has_option() {
+ local xtrace=$(set +o | grep xtrace)
+ set +o xtrace
local file=$1
local section=$2
local option=$3
local line
line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file")
+ $xtrace
[ -n "$line" ]
}
# Set an option in an INI file
# iniset config-file section option value
function iniset() {
+ local xtrace=$(set +o | grep xtrace)
+ set +o xtrace
local file=$1
local section=$2
local option=$3
@@ -113,11 +130,14 @@
# Replace it
sed -i -e '/^\['${section}'\]/,/^\[.*\]/ s'${sep}'^\('${option}'[ \t]*=[ \t]*\).*$'${sep}'\1'"${value}"${sep} "$file"
fi
+ $xtrace
}
# Set a multiple line option in an INI file
# iniset_multiline config-file section option value1 value2 valu3 ...
function iniset_multiline() {
+ local xtrace=$(set +o | grep xtrace)
+ set +o xtrace
local file=$1
local section=$2
local option=$3
@@ -142,15 +162,19 @@
$option = $v
" "$file"
done
+ $xtrace
}
# Uncomment an option in an INI file
# iniuncomment config-file section option
function iniuncomment() {
+ local xtrace=$(set +o | grep xtrace)
+ set +o xtrace
local file=$1
local section=$2
local option=$3
sed -i -e "/^\[$section\]/,/^\[.*\]/ s|[^ \t]*#[ \t]*\($option[ \t]*=.*$\)|\1|" "$file"
+ $xtrace
}
# Normalize config values to True or False
@@ -158,6 +182,8 @@
# Accepts as True: 1 yes Yes YES true True TRUE
# VAR=$(trueorfalse default-value test-value)
function trueorfalse() {
+ local xtrace=$(set +o | grep xtrace)
+ set +o xtrace
local default=$1
local testval=$2
@@ -165,6 +191,7 @@
[[ "0 no No NO false False FALSE" =~ "$testval" ]] && { echo "False"; return; }
[[ "1 yes Yes YES true True TRUE" =~ "$testval" ]] && { echo "True"; return; }
echo "$default"
+ $xtrace
}
@@ -195,6 +222,8 @@
fi
backtrace 2
err $line "$*"
+ # Give buffers a second to flush
+ sleep 1
exit $exitcode
}
@@ -498,16 +527,16 @@
if [[ ! -d $GIT_DEST ]]; then
[[ "$ERROR_ON_CLONE" = "True" ]] && \
die $LINENO "Cloning not allowed in this configuration"
- git clone $GIT_REMOTE $GIT_DEST
+ git_timed clone $GIT_REMOTE $GIT_DEST
fi
cd $GIT_DEST
- git fetch $GIT_REMOTE $GIT_REF && git checkout FETCH_HEAD
+ git_timed fetch $GIT_REMOTE $GIT_REF && git checkout FETCH_HEAD
else
# do a full clone only if the directory doesn't exist
if [[ ! -d $GIT_DEST ]]; then
[[ "$ERROR_ON_CLONE" = "True" ]] && \
die $LINENO "Cloning not allowed in this configuration"
- git clone $GIT_REMOTE $GIT_DEST
+ git_timed clone $GIT_REMOTE $GIT_DEST
cd $GIT_DEST
# This checkout syntax works for both branches and tags
git checkout $GIT_REF
@@ -516,7 +545,7 @@
cd $GIT_DEST
# set the url to pull from and fetch
git remote set-url origin $GIT_REMOTE
- git fetch origin
+ git_timed fetch origin
# remove the existing ignored files (like pyc) as they cause breakage
# (due to the py files having older timestamps than our pyc, so python
# thinks the pyc files are correct using them)
@@ -541,6 +570,37 @@
git show --oneline | head -1
}
+# git can sometimes get itself infinitely stuck with transient network
+# errors or other issues with the remote end. This wraps git in a
+# timeout/retry loop and is intended to watch over non-local git
+# processes that might hang. GIT_TIMEOUT, if set, is passed directly
+# to timeout(1); otherwise the default value of 0 maintains the status
+# quo of waiting forever.
+# usage: git_timed <git-command>
+function git_timed() {
+ local count=0
+ local timeout=0
+
+ if [[ -n "${GIT_TIMEOUT}" ]]; then
+ timeout=${GIT_TIMEOUT}
+ fi
+
+ until timeout -s SIGINT ${timeout} git "$@"; do
+ # 124 is timeout(1)'s special return code when it reached the
+ # timeout; otherwise assume fatal failure
+ if [[ $? -ne 124 ]]; then
+ die $LINENO "git call failed: [git $@]"
+ fi
+
+ count=$(($count + 1))
+ warn "timeout ${count} for git call: [git $@]"
+ if [ $count -eq 3 ]; then
+ die $LINENO "Maximum of 3 git retries reached"
+ fi
+ sleep 5
+ done
+}
+
# git update using reference as a branch.
# git_update_branch ref
function git_update_branch() {
@@ -571,7 +631,7 @@
git tag -d $GIT_TAG
# fetching given tag only
- git fetch origin tag $GIT_TAG
+ git_timed fetch origin tag $GIT_TAG
git checkout -f $GIT_TAG
}
@@ -675,9 +735,14 @@
# Uses globals ``OFFLINE``, ``*_proxy``
# apt_get operation package [package ...]
function apt_get() {
+ local xtrace=$(set +o | grep xtrace)
+ set +o xtrace
+
[[ "$OFFLINE" = "True" || -z "$@" ]] && return
local sudo="sudo"
[[ "$(id -u)" = "0" ]] && sudo="env"
+
+ $xtrace
$sudo DEBIAN_FRONTEND=noninteractive \
http_proxy=$http_proxy https_proxy=$https_proxy \
no_proxy=$no_proxy \
@@ -695,6 +760,8 @@
# - ``# dist:DISTRO`` or ``dist:DISTRO1,DISTRO2`` limits the selection
# of the package to the distros listed. The distro names are case insensitive.
function get_packages() {
+ local xtrace=$(set +o | grep xtrace)
+ set +o xtrace
local services=$@
local package_dir=$(_get_package_dir)
local file_to_parse
@@ -706,6 +773,7 @@
fi
if [[ -z "$DISTRO" ]]; then
GetDistro
+ echo "Found Distro $DISTRO"
fi
for service in ${services//,/ }; do
# Allow individual services to specify dependencies
@@ -797,23 +865,30 @@
done
IFS=$OIFS
done
+ $xtrace
}
# Distro-agnostic package installer
# install_package package [package ...]
function install_package() {
+ local xtrace=$(set +o | grep xtrace)
+ set +o xtrace
if is_ubuntu; then
# if there are transient errors pulling the updates, that's fine. It may
# be secondary repositories that we don't really care about.
[[ "$NO_UPDATE_REPOS" = "True" ]] || apt_get update || /bin/true
NO_UPDATE_REPOS=True
+ $xtrace
apt_get install "$@"
elif is_fedora; then
+ $xtrace
yum_install "$@"
elif is_suse; then
+ $xtrace
zypper_install "$@"
else
+ $xtrace
exit_distro_not_supported "installing packages"
fi
}
@@ -1092,7 +1167,13 @@
# ``TRACK_DEPENDS``, ``*_proxy``
# pip_install package [package ...]
function pip_install {
- [[ "$OFFLINE" = "True" || -z "$@" ]] && return
+ local xtrace=$(set +o | grep xtrace)
+ set +o xtrace
+ if [[ "$OFFLINE" = "True" || -z "$@" ]]; then
+ $xtrace
+ return
+ fi
+
if [[ -z "$os_PACKAGE" ]]; then
GetOSVersion
fi
@@ -1121,6 +1202,7 @@
# this problem. See https://github.com/pypa/pip/issues/709
local pip_build_tmp=$(mktemp --tmpdir -d pip-build.XXXXX)
+ $xtrace
$SUDO_PIP PIP_DOWNLOAD_CACHE=${PIP_DOWNLOAD_CACHE:-/var/cache/pip} \
HTTP_PROXY=$http_proxy \
HTTPS_PROXY=$https_proxy \
@@ -1287,32 +1369,36 @@
# Uses global ``ENABLED_SERVICES``
# is_service_enabled service [service ...]
function is_service_enabled() {
+ local xtrace=$(set +o | grep xtrace)
+ set +o xtrace
+ local enabled=1
services=$@
for service in ${services}; do
- [[ ,${ENABLED_SERVICES}, =~ ,${service}, ]] && return 0
+ [[ ,${ENABLED_SERVICES}, =~ ,${service}, ]] && enabled=0
# Look for top-level 'enabled' function for this service
if type is_${service}_enabled >/dev/null 2>&1; then
# A function exists for this service, use it
is_${service}_enabled
- return $?
+ enabled=$?
fi
# TODO(dtroyer): Remove these legacy special-cases after the is_XXX_enabled()
# are implemented
- [[ ${service} == n-cell-* && ${ENABLED_SERVICES} =~ "n-cell" ]] && return 0
- [[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && return 0
- [[ ${service} == "cinder" && ${ENABLED_SERVICES} =~ "c-" ]] && return 0
- [[ ${service} == "ceilometer" && ${ENABLED_SERVICES} =~ "ceilometer-" ]] && return 0
- [[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && return 0
- [[ ${service} == "ironic" && ${ENABLED_SERVICES} =~ "ir-" ]] && return 0
- [[ ${service} == "neutron" && ${ENABLED_SERVICES} =~ "q-" ]] && return 0
- [[ ${service} == "trove" && ${ENABLED_SERVICES} =~ "tr-" ]] && return 0
- [[ ${service} == "swift" && ${ENABLED_SERVICES} =~ "s-" ]] && return 0
- [[ ${service} == s-* && ${ENABLED_SERVICES} =~ "swift" ]] && return 0
+ [[ ${service} == n-cell-* && ${ENABLED_SERVICES} =~ "n-cell" ]] && enabled=0
+ [[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && enabled=0
+ [[ ${service} == "cinder" && ${ENABLED_SERVICES} =~ "c-" ]] && enabled=0
+ [[ ${service} == "ceilometer" && ${ENABLED_SERVICES} =~ "ceilometer-" ]] && enabled=0
+ [[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && enabled=0
+ [[ ${service} == "ironic" && ${ENABLED_SERVICES} =~ "ir-" ]] && enabled=0
+ [[ ${service} == "neutron" && ${ENABLED_SERVICES} =~ "q-" ]] && enabled=0
+ [[ ${service} == "trove" && ${ENABLED_SERVICES} =~ "tr-" ]] && enabled=0
+ [[ ${service} == "swift" && ${ENABLED_SERVICES} =~ "s-" ]] && enabled=0
+ [[ ${service} == s-* && ${ENABLED_SERVICES} =~ "swift" ]] && enabled=0
done
- return 1
+ $xtrace
+ return $enabled
}
# Toggle enable/disable_service for services that must run exclusive of each other
@@ -1338,6 +1424,8 @@
# Only run the command if the target file (the last arg) is not on an
# NFS filesystem.
function _safe_permission_operation() {
+ local xtrace=$(set +o | grep xtrace)
+ set +o xtrace
local args=( $@ )
local last
local sudo_cmd
@@ -1351,6 +1439,7 @@
fi
if is_nfs_directory "$dir_to_check" ; then
+ $xtrace
return 0
fi
@@ -1360,6 +1449,7 @@
sudo_cmd="sudo"
fi
+ $xtrace
$sudo_cmd $@
}
diff --git a/lib/cinder b/lib/cinder
index c8c90c0..e8f30b6 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -496,8 +496,12 @@
sudo stop tgt || true
sudo start tgt
elif is_fedora; then
- # bypass redirection to systemctl during restart
- sudo /sbin/service --skip-redirect tgtd restart
+ if [[ $DISTRO =~ (rhel6) ]]; then
+ sudo /sbin/service tgtd restart
+ else
+ # bypass redirection to systemctl during restart
+ sudo /sbin/service --skip-redirect tgtd restart
+ fi
elif is_suse; then
restart_service tgtd
else
diff --git a/lib/databases/mysql b/lib/databases/mysql
index 31e7163..f5ee3c0 100644
--- a/lib/databases/mysql
+++ b/lib/databases/mysql
@@ -21,7 +21,7 @@
if is_ubuntu; then
# Get ruthless with mysql
stop_service $MYSQL
- sudo aptitude purge -y ~nmysql-server
+ apt_get purge -y mysql*
sudo rm -rf /var/lib/mysql
return
elif is_fedora; then
diff --git a/lib/databases/postgresql b/lib/databases/postgresql
index c459feb..96a5947 100644
--- a/lib/databases/postgresql
+++ b/lib/databases/postgresql
@@ -21,7 +21,7 @@
stop_service postgresql
if is_ubuntu; then
# Get ruthless with mysql
- sudo aptitude purge -y ~npostgresql
+ apt_get purge -y postgresql*
return
elif is_fedora; then
uninstall_package postgresql-server
diff --git a/lib/heat b/lib/heat
index af10fa6..972c35c 100644
--- a/lib/heat
+++ b/lib/heat
@@ -207,6 +207,16 @@
--description "Owns users and projects created by heat" \
| grep ' id ' | get_field 2)
iniset $HEAT_CONF DEFAULT stack_user_domain ${D_ID}
+
+ openstack --os-token $OS_TOKEN --os-url=$KS_ENDPOINT_V3 \
+ --os-identity-api-version=3 user create --password $SERVICE_PASSWORD \
+ --domain $D_ID heat_domain_admin \
+ --description "Manages users and projects created by heat"
+ openstack --os-token $OS_TOKEN --os-url=$KS_ENDPOINT_V3 \
+ --os-identity-api-version=3 role add \
+ --user heat_domain_admin --domain ${D_ID} admin
+ iniset $HEAT_CONF DEFAULT stack_domain_admin heat_domain_admin
+ iniset $HEAT_CONF DEFAULT stack_domain_admin_password $SERVICE_PASSWORD
}
# Restore xtrace
diff --git a/lib/keystone b/lib/keystone
index cebb4d3..73af1d3 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -424,7 +424,7 @@
fi
echo "Waiting for keystone to start..."
- if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl --noproxy '*' -s $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$service_port/v$IDENTITY_API_VERSION/ >/dev/null; do sleep 1; done"; then
+ if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl --noproxy '*' -k -s $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$service_port/v$IDENTITY_API_VERSION/ >/dev/null; do sleep 1; done"; then
die $LINENO "keystone did not start"
fi
diff --git a/lib/neutron_plugins/embrane b/lib/neutron_plugins/embrane
index 4206a20..325e939 100644
--- a/lib/neutron_plugins/embrane
+++ b/lib/neutron_plugins/embrane
@@ -37,4 +37,4 @@
}
# Restore xtrace
-$MY_XTRACE
\ No newline at end of file
+$MY_XTRACE
diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt
index 415244f..a550600 100644
--- a/lib/nova_plugins/hypervisor-libvirt
+++ b/lib/nova_plugins/hypervisor-libvirt
@@ -58,40 +58,40 @@
if is_fedora || is_suse; then
if is_fedora && [[ $DISTRO =~ (rhel6) || "$os_RELEASE" -le "17" ]]; then
- sudo bash -c "cat <<EOF >/etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla
+ cat <<EOF | sudo tee /etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla
[libvirt Management Access]
Identity=unix-group:$LIBVIRT_GROUP
Action=org.libvirt.unix.manage
ResultAny=yes
ResultInactive=yes
ResultActive=yes
-EOF"
+EOF
elif is_suse && [[ $os_RELEASE = 12.2 || "$os_VENDOR" = "SUSE LINUX" ]]; then
# openSUSE < 12.3 or SLE
# Work around the fact that polkit-default-privs overrules pklas
# with 'unix-group:$group'.
- sudo bash -c "cat <<EOF >/etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla
+ cat <<EOF | sudo tee /etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla
[libvirt Management Access]
Identity=unix-user:$STACK_USER
Action=org.libvirt.unix.manage
ResultAny=yes
ResultInactive=yes
ResultActive=yes
-EOF"
+EOF
else
# Starting with fedora 18 and opensuse-12.3 enable stack-user to
# virsh -c qemu:///system by creating a policy-kit rule for
# stack-user using the new Javascript syntax
rules_dir=/etc/polkit-1/rules.d
sudo mkdir -p $rules_dir
- sudo bash -c "cat <<EOF > $rules_dir/50-libvirt-$STACK_USER.rules
+ cat <<EOF | sudo tee $rules_dir/50-libvirt-$STACK_USER.rules
polkit.addRule(function(action, subject) {
if (action.id == 'org.libvirt.unix.manage' &&
subject.user == '"$STACK_USER"') {
return polkit.Result.YES;
}
});
-EOF"
+EOF
unset rules_dir
fi
fi
@@ -140,10 +140,12 @@
install_package kvm
install_package libvirt-bin
install_package python-libvirt
+ install_package python-guestfs
elif is_fedora || is_suse; then
install_package kvm
install_package libvirt
install_package libvirt-python
+ install_package python-libguestfs
fi
# Install and configure **LXC** if specified. LXC is another approach to
diff --git a/lib/oslo b/lib/oslo
index b089842..516ce1c 100644
--- a/lib/oslo
+++ b/lib/oslo
@@ -24,6 +24,7 @@
OSLOCFG_DIR=$DEST/oslo.config
OSLOMSG_DIR=$DEST/oslo.messaging
OSLORWRAP_DIR=$DEST/oslo.rootwrap
+OSLOVMWARE_DIR=$DEST/oslo.vmware
PYCADF_DIR=$DEST/pycadf
STEVEDORE_DIR=$DEST/stevedore
TASKFLOW_DIR=$DEST/taskflow
@@ -49,6 +50,9 @@
git_clone $OSLORWRAP_REPO $OSLORWRAP_DIR $OSLORWRAP_BRANCH
setup_develop $OSLORWRAP_DIR
+ git_clone $OSLOVMWARE_REPO $OSLOVMWARE_DIR $OSLOVMWARE_BRANCH
+ setup_develop $OSLOVMWARE_DIR
+
git_clone $PYCADF_REPO $PYCADF_DIR $PYCADF_BRANCH
setup_develop $PYCADF_DIR
diff --git a/lib/rpc_backend b/lib/rpc_backend
index 3651bc0..34f576f 100644
--- a/lib/rpc_backend
+++ b/lib/rpc_backend
@@ -67,7 +67,7 @@
sudo killall epmd || sudo killall -9 epmd
if is_ubuntu; then
# And the Erlang runtime too
- sudo aptitude purge -y ~nerlang
+ apt_get purge -y erlang*
fi
elif is_service_enabled qpid; then
if is_fedora; then
diff --git a/lib/savanna b/lib/savanna
index 43c5e38..954f0e7 100644
--- a/lib/savanna
+++ b/lib/savanna
@@ -10,6 +10,7 @@
# configure_savanna
# start_savanna
# stop_savanna
+# cleanup_savanna
# Save trace setting
XTRACE=$(set +o | grep xtrace)
@@ -33,6 +34,8 @@
SAVANNA_SERVICE_PORT=${SAVANNA_SERVICE_PORT:-8386}
SAVANNA_SERVICE_PROTOCOL=${SAVANNA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
+SAVANNA_AUTH_CACHE_DIR=${SAVANNA_AUTH_CACHE_DIR:-/var/cache/savanna}
+
# Support entry points installation of console scripts
if [[ -d $SAVANNA_DIR/bin ]]; then
SAVANNA_BIN_DIR=$SAVANNA_DIR/bin
@@ -83,6 +86,14 @@
fi
}
+# cleanup_savanna() - Remove residual data files, anything left over from
+# previous runs that would need to clean up.
+function cleanup_savanna() {
+
+ # Cleanup auth cache dir
+ sudo rm -rf $SAVANNA_AUTH_CACHE_DIR
+}
+
# configure_savanna() - Set config files, create data dirs, etc
function configure_savanna() {
@@ -94,9 +105,27 @@
# Copy over savanna configuration file and configure common parameters.
cp $SAVANNA_DIR/etc/savanna/savanna.conf.sample $SAVANNA_CONF_FILE
+ # Create auth cache dir
+ sudo mkdir -p $SAVANNA_AUTH_CACHE_DIR
+ sudo chown $STACK_USER $SAVANNA_AUTH_CACHE_DIR
+ rm -rf $SAVANNA_AUTH_CACHE_DIR/*
+
+ # Set obsolete keystone auth configs for backward compatibility
+ iniset $SAVANNA_CONF_FILE DEFAULT os_auth_host $KEYSTONE_SERVICE_HOST
+ iniset $SAVANNA_CONF_FILE DEFAULT os_auth_port $KEYSTONE_SERVICE_PORT
+ iniset $SAVANNA_CONF_FILE DEFAULT os_auth_protocol $KEYSTONE_SERVICE_PROTOCOL
iniset $SAVANNA_CONF_FILE DEFAULT os_admin_password $SERVICE_PASSWORD
iniset $SAVANNA_CONF_FILE DEFAULT os_admin_username savanna
iniset $SAVANNA_CONF_FILE DEFAULT os_admin_tenant_name $SERVICE_TENANT_NAME
+
+ # Set actual keystone auth configs
+ iniset $SAVANNA_CONF_FILE keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/
+ iniset $SAVANNA_CONF_FILE keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
+ iniset $SAVANNA_CONF_FILE keystone_authtoken admin_user savanna
+ iniset $SAVANNA_CONF_FILE keystone_authtoken admin_password $SERVICE_PASSWORD
+ iniset $SAVANNA_CONF_FILE keystone_authtoken signing_dir $SAVANNA_AUTH_CACHE_DIR
+ iniset $SAVANNA_CONF_FILE keystone_authtoken cafile $KEYSTONE_SSL_CA
+
iniset $SAVANNA_CONF_FILE DEFAULT debug $SAVANNA_DEBUG
iniset $SAVANNA_CONF_FILE database connection `database_connection_url savanna`
diff --git a/lib/tempest b/lib/tempest
index 596750b..410c80c 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -63,6 +63,9 @@
TEMPEST_VOLUME_VENDOR=${TEMPEST_VOLUME_VENDOR:-"Open Source"}
TEMPEST_STORAGE_PROTOCOL=${TEMPEST_STORAGE_PROTOCOL:-iSCSI}
+# Neutron/Network variables
+IPV6_ENABLED=$(trueorfalse True $IPV6_ENABLED)
+
# Functions
# ---------
@@ -87,11 +90,6 @@
local boto_instance_type="m1.tiny"
local ssh_connect_method="fixed"
- if [[ ! -d $TEMPEST_CONFIG_DIR ]]; then
- sudo mkdir -p $TEMPEST_CONFIG_DIR
- fi
- sudo chown $STACK_USER $TEMPEST_CONFIG_DIR
-
# TODO(afazekas):
# sudo python setup.py deploy
@@ -142,8 +140,12 @@
# Create tempest.conf from tempest.conf.sample
# copy every time, because the image UUIDS are going to change
- sudo cp $TEMPEST_DIR/etc/tempest.conf.sample $TEMPEST_CONFIG
- sudo chmod 644 $TEMPEST_CONFIG
+ if [[ ! -d $TEMPEST_CONFIG_DIR ]]; then
+ sudo mkdir -p $TEMPEST_CONFIG_DIR
+ fi
+ sudo chown $STACK_USER $TEMPEST_CONFIG_DIR
+ cp $TEMPEST_DIR/etc/tempest.conf.sample $TEMPEST_CONFIG
+ chmod 644 $TEMPEST_CONFIG
password=${ADMIN_PASSWORD:-secrete}
@@ -285,11 +287,13 @@
# Compute admin
iniset $TEMPEST_CONFIG "compute-admin" password "$password" # DEPRECATED
+ # Network
iniset $TEMPEST_CONFIG network api_version 2.0
iniset $TEMPEST_CONFIG network tenant_networks_reachable "$tenant_networks_reachable"
iniset $TEMPEST_CONFIG network public_network_id "$public_network_id"
iniset $TEMPEST_CONFIG network public_router_id "$public_router_id"
iniset $TEMPEST_CONFIG network default_network "$FIXED_RANGE"
+ iniset $TEMPEST_CONFIG network ipv6_enabled "$IPV6_ENABLED"
# boto
iniset $TEMPEST_CONFIG boto ec2_url "http://$SERVICE_HOST:8773/services/Cloud"
diff --git a/run_tests.sh b/run_tests.sh
index b4f26c5..a0bfbee 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -26,4 +26,4 @@
echo "Running bash8..."
-./tools/bash8.py $FILES
+./tools/bash8.py -v $FILES
diff --git a/stack.sh b/stack.sh
index 4a55225..4333fb2 100755
--- a/stack.sh
+++ b/stack.sh
@@ -181,7 +181,7 @@
# Installing Open vSwitch on RHEL6 requires enabling the RDO repo.
RHEL6_RDO_REPO_RPM=${RHEL6_RDO_REPO_RPM:-"http://rdo.fedorapeople.org/openstack-havana/rdo-release-havana.rpm"}
RHEL6_RDO_REPO_ID=${RHEL6_RDO_REPO_ID:-"openstack-havana"}
- if ! yum repolist enabled $RHEL6_RDO_REPO_ID | grep -q $RHEL6_RDO_REPO_ID; then
+ if ! sudo yum repolist enabled $RHEL6_RDO_REPO_ID | grep -q $RHEL6_RDO_REPO_ID; then
echo "RDO repo not detected; installing"
yum_install $RHEL6_RDO_REPO_RPM || \
die $LINENO "Error installing RDO repo, cannot continue"
@@ -189,11 +189,15 @@
# RHEL6 requires EPEL for many Open Stack dependencies
RHEL6_EPEL_RPM=${RHEL6_EPEL_RPM:-"http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm"}
- if ! yum repolist enabled epel | grep -q 'epel'; then
+ if ! sudo yum repolist enabled epel | grep -q 'epel'; then
echo "EPEL not detected; installing"
yum_install ${RHEL6_EPEL_RPM} || \
die $LINENO "Error installing EPEL repo, cannot continue"
fi
+
+ # ... and also optional to be enabled
+ sudo yum-config-manager --enable rhel-6-server-optional-rpms
+
fi
@@ -294,15 +298,9 @@
SYSLOG_HOST=${SYSLOG_HOST:-$HOST_IP}
SYSLOG_PORT=${SYSLOG_PORT:-516}
-# Enable sysstat logging
-SYSSTAT_FILE=${SYSSTAT_FILE:-"sysstat.dat"}
-SYSSTAT_INTERVAL=${SYSSTAT_INTERVAL:-"1"}
-
+# for DSTAT logging
DSTAT_FILE=${DSTAT_FILE:-"dstat.txt"}
-PIDSTAT_FILE=${PIDSTAT_FILE:-"pidstat.txt"}
-PIDSTAT_INTERVAL=${PIDSTAT_INTERVAL:-"5"}
-
# Use color for logging output (only available if syslog is not used)
LOG_COLOR=`trueorfalse True $LOG_COLOR`
@@ -528,15 +526,17 @@
exec 3>&1
if [[ "$VERBOSE" == "True" ]]; then
# Redirect stdout/stderr to tee to write the log file
- exec 1> >( awk '
+ exec 1> >( awk -v logfile=${LOGFILE} '
+ /((set \+o$)|xtrace)/ { next }
{
- cmd ="date +\"%Y-%m-%d %H:%M:%S \""
+ cmd ="date +\"%Y-%m-%d %H:%M:%S.%3N | \""
cmd | getline now
- close("date +\"%Y-%m-%d %H:%M:%S \"")
+ close("date +\"%Y-%m-%d %H:%M:%S.%3N | \"")
sub(/^/, now)
print
- fflush()
- }' | tee "${LOGFILE}" ) 2>&1
+ print > logfile
+ fflush("")
+ }' ) 2>&1
# Set up a second fd for output
exec 6> >( tee "${SUMFILE}" )
else
@@ -584,21 +584,24 @@
# -----------------------
# Kill background processes on exit
-trap clean EXIT
-clean() {
+trap exit_trap EXIT
+function exit_trap {
local r=$?
- kill >/dev/null 2>&1 $(jobs -p)
+ echo "exit_trap called, cleaning up child processes"
+ kill 2>&1 $(jobs -p)
exit $r
}
-
# Exit on any errors so that errors don't compound
-trap failed ERR
-failed() {
+trap err_trap ERR
+function err_trap {
local r=$?
- kill >/dev/null 2>&1 $(jobs -p)
set +o xtrace
- [ -n "$LOGFILE" ] && echo "${0##*/} failed: full log in $LOGFILE"
+ if [[ -n "$LOGFILE" ]]; then
+ echo "${0##*/} failed: full log in $LOGFILE"
+ else
+ echo "${0##*/} failed"
+ fi
exit $r
}
@@ -862,46 +865,17 @@
# Initialize the directory for service status check
init_service_check
-
-# Sysstat
+# Dstat
# -------
-# If enabled, systat has to start early to track OpenStack service startup.
-if is_service_enabled sysstat; then
- # what we want to measure
- # -u : cpu statitics
- # -q : load
- # -b : io load rates
- # -w : process creation and context switch rates
- SYSSTAT_OPTS="-u -q -b -w"
- if [[ -n ${SCREEN_LOGDIR} ]]; then
- screen_it sysstat "cd $TOP_DIR; ./tools/sar_filter.py $SYSSTAT_OPTS -o $SCREEN_LOGDIR/$SYSSTAT_FILE $SYSSTAT_INTERVAL"
- else
- screen_it sysstat "./tools/sar_filter.py $SYSSTAT_OPTS $SYSSTAT_INTERVAL"
- fi
+# A better kind of sysstat, with the top process per time slice
+DSTAT_OPTS="-tcndylp --top-cpu-adv"
+if [[ -n ${SCREEN_LOGDIR} ]]; then
+ screen_it dstat "cd $TOP_DIR; dstat $DSTAT_OPTS | tee $SCREEN_LOGDIR/$DSTAT_FILE"
+else
+ screen_it dstat "dstat $DSTAT_OPTS"
fi
-if is_service_enabled dstat; then
- # Per-process stats
- DSTAT_OPTS="-tcndylp --top-cpu-adv"
- if [[ -n ${SCREEN_LOGDIR} ]]; then
- screen_it dstat "cd $TOP_DIR; dstat $DSTAT_OPTS | tee $SCREEN_LOGDIR/$DSTAT_FILE"
- else
- screen_it dstat "dstat $DSTAT_OPTS"
- fi
-fi
-
-if is_service_enabled pidstat; then
- # Per-process stats
- PIDSTAT_OPTS="-l -p ALL -T ALL"
- if [[ -n ${SCREEN_LOGDIR} ]]; then
- screen_it pidstat "cd $TOP_DIR; pidstat $PIDSTAT_OPTS $PIDSTAT_INTERVAL > $SCREEN_LOGDIR/$PIDSTAT_FILE"
- else
- screen_it pidstat "pidstat $PIDSTAT_OPTS $PIDSTAT_INTERVAL"
- fi
-fi
-
-
# Start Services
# ==============
diff --git a/stackrc b/stackrc
index 0b081c4..f235ccc 100644
--- a/stackrc
+++ b/stackrc
@@ -80,6 +80,17 @@
# (currently only implemented for MySQL backend)
DATABASE_QUERY_LOGGING=$(trueorfalse True $DATABASE_QUERY_LOGGING)
+# Set a timeout for git operations. If git is still running when the
+# timeout expires, the command will be retried up to 3 times. This is
+# in the format for timeout(1);
+#
+# DURATION is a floating point number with an optional suffix: 's'
+# for seconds (the default), 'm' for minutes, 'h' for hours or 'd'
+# for days.
+#
+# Zero disables timeouts
+GIT_TIMEOUT=${GIT_TIMEOUT:-0}
+
# Repositories
# ------------
@@ -167,6 +178,10 @@
OSLORWRAP_REPO=${OSLORWRAP_REPO:-${GIT_BASE}/openstack/oslo.rootwrap.git}
OSLORWRAP_BRANCH=${OSLORWRAP_BRANCH:-master}
+# oslo.vmware
+OSLOVMWARE_REPO=${OSLOVMWARE_REPO:-${GIT_BASE}/openstack/oslo.vmware.git}
+OSLOVMWARE_BRANCH=${OSLOVMWARE_BRANCH:-master}
+
# pycadf auditing library
PYCADF_REPO=${PYCADF_REPO:-${GIT_BASE}/openstack/pycadf.git}
PYCADF_BRANCH=${PYCADF_BRANCH:-master}
diff --git a/tools/bash8.py b/tools/bash8.py
index 7552e0d..f89b241 100755
--- a/tools/bash8.py
+++ b/tools/bash8.py
@@ -25,6 +25,7 @@
# - E001: check that lines do not end with trailing whitespace
# - E002: ensure that indents are only spaces, and not hard tabs
# - E003: ensure all indents are a multiple of 4 spaces
+# - E004: file did not end with a newline
#
# Structure errors
#
@@ -34,6 +35,7 @@
#
# - E010: *do* not on the same line as *for*
# - E011: *then* not on the same line as *if*
+# - E012: heredoc didn't end before EOF
import argparse
import fileinput
@@ -54,11 +56,16 @@
return IGNORE and re.search(IGNORE, error)
-def print_error(error, line):
+def print_error(error, line,
+ filename=None, filelineno=None):
+ if not filename:
+ filename = fileinput.filename()
+ if not filelineno:
+ filelineno = fileinput.filelineno()
global ERRORS
ERRORS = ERRORS + 1
print("%s: '%s'" % (error, line.rstrip('\n')))
- print(" - %s: L%s" % (fileinput.filename(), fileinput.filelineno()))
+ print(" - %s: L%s" % (filename, filelineno))
def not_continuation(line):
@@ -110,17 +117,46 @@
return False
-def check_files(files):
+def check_files(files, verbose):
in_multiline = False
+ multiline_start = 0
+ multiline_line = ""
logical_line = ""
token = False
+ prev_file = None
+ prev_line = ""
+ prev_lineno = 0
+
for line in fileinput.input(files):
+ if fileinput.isfirstline():
+ # if in_multiline when the new file starts then we didn't
+ # find the end of a heredoc in the last file.
+ if in_multiline:
+ print_error('E012: heredoc did not end before EOF',
+ multiline_line,
+ filename=prev_file, filelineno=multiline_start)
+ in_multiline = False
+
+ # last line of a previous file should always end with a
+ # newline
+ if prev_file and not prev_line.endswith('\n'):
+ print_error('E004: file did not end with a newline',
+ prev_line,
+ filename=prev_file, filelineno=prev_lineno)
+
+ prev_file = fileinput.filename()
+
+ if verbose:
+ print "Running bash8 on %s" % fileinput.filename()
+
# NOTE(sdague): multiline processing of heredocs is interesting
if not in_multiline:
logical_line = line
token = starts_multiline(line)
if token:
in_multiline = True
+ multiline_start = fileinput.filelineno()
+ multiline_line = line
continue
else:
logical_line = logical_line + line
@@ -134,6 +170,8 @@
check_for_do(logical_line)
check_if_then(logical_line)
+ prev_line = logical_line
+ prev_lineno = fileinput.filelineno()
def get_options():
parser = argparse.ArgumentParser(
@@ -141,13 +179,14 @@
parser.add_argument('files', metavar='file', nargs='+',
help='files to scan for errors')
parser.add_argument('-i', '--ignore', help='Rules to ignore')
+ parser.add_argument('-v', '--verbose', action='store_true', default=False)
return parser.parse_args()
def main():
opts = get_options()
register_ignores(opts.ignore)
- check_files(opts.files)
+ check_files(opts.files, opts.verbose)
if ERRORS > 0:
print("%d bash8 error(s) found" % ERRORS)
diff --git a/tools/create-stack-user.sh b/tools/create-stack-user.sh
index 50f6592..9c29ecd 100755
--- a/tools/create-stack-user.sh
+++ b/tools/create-stack-user.sh
@@ -15,6 +15,7 @@
# and it was time for this nonsense to stop. Run this script as root to create
# the user and configure sudo.
+set -o errexit
# Keep track of the devstack directory
TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
@@ -27,12 +28,14 @@
# and ``DISTRO``
GetDistro
-# Needed to get ``ENABLED_SERVICES``
+# Needed to get ``ENABLED_SERVICES`` and ``STACK_USER``
source $TOP_DIR/stackrc
# Give the non-root user the ability to run as **root** via ``sudo``
is_package_installed sudo || install_package sudo
+[[ -z "$STACK_USER" ]] && die "STACK_USER is not set. Exiting."
+
if ! getent group $STACK_USER >/dev/null; then
echo "Creating a group called $STACK_USER"
groupadd $STACK_USER
diff --git a/tools/sar_filter.py b/tools/sar_filter.py
deleted file mode 100755
index 24ef0e4..0000000
--- a/tools/sar_filter.py
+++ /dev/null
@@ -1,86 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2014 Samsung Electronics Corp. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import re
-import subprocess
-import sys
-
-
-def is_data_line(line):
- timestamp, data = parse_line(line)
- return re.search('\d\.d', data)
-
-
-def parse_line(line):
- m = re.search('(\d\d:\d\d:\d\d( \w\w)?)(\s+((\S+)\s*)+)', line)
- if m:
- date = m.group(1)
- data = m.group(3).rstrip()
- return date, data
- else:
- return None, None
-
-
-process = subprocess.Popen(
- "sar %s" % " ".join(sys.argv[1:]),
- shell=True,
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
-
-# Poll process for new output until finished
-
-start_time = ""
-header = ""
-data_line = ""
-printed_header = False
-current_ts = None
-
-# print out the first sysstat line regardless
-print process.stdout.readline()
-
-while True:
- nextline = process.stdout.readline()
- if nextline == '' and process.poll() is not None:
- break
-
- date, data = parse_line(nextline)
- # stop until we get to the first set of real lines
- if not date:
- continue
-
- # now we eat the header lines, and only print out the header
- # if we've never seen them before
- if not start_time:
- start_time = date
- header += "%s %s" % (date, data)
- elif date == start_time:
- header += " %s" % data
- elif not printed_header:
- printed_header = True
- print header
-
- # now we know this is a data line, printing out if the timestamp
- # has changed, and stacking up otherwise.
- nextline = process.stdout.readline()
- date, data = parse_line(nextline)
- if date != current_ts:
- current_ts = date
- print data_line
- data_line = "%s %s" % (date, data)
- else:
- data_line += " %s" % data
-
- sys.stdout.flush()