Merge "Make admin_bind_host configurable"
diff --git a/clean.sh b/clean.sh
index 09f08dc..e121e4f 100755
--- a/clean.sh
+++ b/clean.sh
@@ -101,11 +101,6 @@
cleanup_nova_hypervisor
fi
-#if mount | grep $DATA_DIR/swift/drives; then
-# sudo umount $DATA_DIR/swift/drives/sdb1
-#fi
-
-
# Clean out /etc
sudo rm -rf /etc/keystone /etc/glance /etc/nova /etc/cinder /etc/swift
@@ -123,9 +118,11 @@
sudo rm -rf $SCREEN_LOGDIR
fi
-# Clean up networking...
-# should this be in nova?
-# FIXED_IP_ADDR in br100
-
# Clean up files
-rm -f $TOP_DIR/.stackenv
+
+FILES_TO_CLEAN=".localrc.auto docs-files docs/ shocco/ stack-screenrc test*.conf* test.ini*"
+FILES_TO_CLEAN+=" .stackenv .prereqs"
+
+for file in $FILES_TO_CLEAN; do
+ rm -f $TOP_DIR/$file
+done
diff --git a/extras.d/70-savanna.sh b/extras.d/70-savanna.sh
index 6bbe113..edc1376 100644
--- a/extras.d/70-savanna.sh
+++ b/extras.d/70-savanna.sh
@@ -8,6 +8,7 @@
elif [[ "$1" == "stack" && "$2" == "install" ]]; then
echo_summary "Installing Savanna"
install_savanna
+ cleanup_savanna
if is_service_enabled horizon; then
install_savanna_dashboard
fi
@@ -29,4 +30,8 @@
cleanup_savanna_dashboard
fi
fi
+
+ if [[ "$1" == "clean" ]]; then
+ cleanup_savanna
+ fi
fi
diff --git a/files/apts/glance b/files/apts/glance
index 22787bc..6dc878e 100644
--- a/files/apts/glance
+++ b/files/apts/glance
@@ -1,5 +1,5 @@
gcc
-libffi-dev # testonly
+libffi-dev
libmysqlclient-dev # testonly
libpq-dev # testonly
libssl-dev # testonly
diff --git a/files/apts/n-cpu b/files/apts/n-cpu
index b287107..a82304d 100644
--- a/files/apts/n-cpu
+++ b/files/apts/n-cpu
@@ -5,4 +5,4 @@
genisoimage
sysfsutils
sg3-utils
-python-guestfs
+python-guestfs # NOPRIME
diff --git a/files/apts/sysstat b/files/apts/sysstat
deleted file mode 100644
index ea0c342..0000000
--- a/files/apts/sysstat
+++ /dev/null
@@ -1 +0,0 @@
-sysstat
diff --git a/files/default_catalog.templates b/files/default_catalog.templates
index e64f68f..ff00e38 100644
--- a/files/default_catalog.templates
+++ b/files/default_catalog.templates
@@ -50,12 +50,12 @@
catalog.RegionOne.cloudformation.publicURL = http://%SERVICE_HOST%:8000/v1
catalog.RegionOne.cloudformation.adminURL = http://%SERVICE_HOST%:8000/v1
catalog.RegionOne.cloudformation.internalURL = http://%SERVICE_HOST%:8000/v1
-catalog.RegionOne.cloudformation.name = Heat CloudFormation Service
+catalog.RegionOne.cloudformation.name = CloudFormation service
catalog.RegionOne.orchestration.publicURL = http://%SERVICE_HOST%:8004/v1/$(tenant_id)s
catalog.RegionOne.orchestration.adminURL = http://%SERVICE_HOST%:8004/v1/$(tenant_id)s
catalog.RegionOne.orchestration.internalURL = http://%SERVICE_HOST%:8004/v1/$(tenant_id)s
-catalog.RegionOne.orchestration.name = Heat Service
+catalog.RegionOne.orchestration.name = Orchestration Service
catalog.RegionOne.metering.publicURL = http://%SERVICE_HOST%:8777/v1
catalog.RegionOne.metering.adminURL = http://%SERVICE_HOST%:8777/v1
diff --git a/files/rpms-suse/sysstat b/files/rpms-suse/sysstat
deleted file mode 100644
index ea0c342..0000000
--- a/files/rpms-suse/sysstat
+++ /dev/null
@@ -1 +0,0 @@
-sysstat
diff --git a/files/rpms/cinder b/files/rpms/cinder
index 623c13e..199ae10 100644
--- a/files/rpms/cinder
+++ b/files/rpms/cinder
@@ -4,4 +4,4 @@
python-devel
postgresql-devel
iscsi-initiator-utils
-python-lxml #dist:f18,f19,f20
+python-lxml #dist:f18,f19,f20,rhel7
diff --git a/files/rpms/glance b/files/rpms/glance
index fffd9c8..25c5d39 100644
--- a/files/rpms/glance
+++ b/files/rpms/glance
@@ -1,5 +1,5 @@
gcc
-libffi-devel # testonly
+libffi-devel
libxml2-devel # testonly
libxslt-devel # testonly
mysql-devel # testonly
@@ -9,8 +9,8 @@
python-devel
python-eventlet
python-greenlet
-python-lxml #dist:f18,f19,f20
-python-paste-deploy #dist:f18,f19,f20
+python-lxml #dist:f18,f19,f20,rhel7
+python-paste-deploy #dist:f18,f19,f20,rhel7
python-routes
python-sqlalchemy
python-wsgiref
diff --git a/files/rpms/n-cpu b/files/rpms/n-cpu
index e4fdaf4..32b1546 100644
--- a/files/rpms/n-cpu
+++ b/files/rpms/n-cpu
@@ -4,4 +4,4 @@
genisoimage
sysfsutils
sg3_utils
-python-libguestfs
+python-libguestfs # NOPRIME
diff --git a/files/rpms/neutron b/files/rpms/neutron
index 67bf523..42d7f68 100644
--- a/files/rpms/neutron
+++ b/files/rpms/neutron
@@ -11,8 +11,8 @@
python-iso8601
python-kombu
#rhel6 gets via pip
-python-paste # dist:f18,f19,f20
-python-paste-deploy # dist:f18,f19,f20
+python-paste # dist:f18,f19,f20,rhel7
+python-paste-deploy # dist:f18,f19,f20,rhel7
python-qpid
python-routes
python-sqlalchemy
diff --git a/files/rpms/nova b/files/rpms/nova
index ac70ac5..a607d92 100644
--- a/files/rpms/nova
+++ b/files/rpms/nova
@@ -28,11 +28,11 @@
python-lockfile
python-migrate
python-mox
-python-paramiko # dist:f18,f19,f20
-# ^ on RHEL, brings in python-crypto which conflicts with version from
+python-paramiko # dist:f18,f19,f20,rhel7
+# ^ on RHEL6, brings in python-crypto which conflicts with version from
# pip we need
-python-paste # dist:f18,f19,f20
-python-paste-deploy # dist:f18,f19,f20
+python-paste # dist:f18,f19,f20,rhel7
+python-paste-deploy # dist:f18,f19,f20,rhel7
python-qpid
python-routes
python-sqlalchemy
diff --git a/files/rpms/swift b/files/rpms/swift
index 32432bc..72253f7 100644
--- a/files/rpms/swift
+++ b/files/rpms/swift
@@ -9,7 +9,7 @@
python-greenlet
python-netifaces
python-nose
-python-paste-deploy # dist:f18,f19,f20
+python-paste-deploy # dist:f18,f19,f20,rhel7
python-simplejson
python-webob
pyxattr
diff --git a/files/rpms/sysstat b/files/rpms/sysstat
deleted file mode 100644
index ea0c342..0000000
--- a/files/rpms/sysstat
+++ /dev/null
@@ -1 +0,0 @@
-sysstat
diff --git a/functions b/functions
index 6979c6c..3101111 100644
--- a/functions
+++ b/functions
@@ -44,60 +44,6 @@
}
-# ``pip install -e`` the package, which processes the dependencies
-# using pip before running `setup.py develop`
-#
-# Updates the dependencies in project_dir from the
-# openstack/requirements global list before installing anything.
-#
-# Uses globals ``TRACK_DEPENDS``, ``REQUIREMENTS_DIR``, ``UNDO_REQUIREMENTS``
-# setup_develop directory
-function setup_develop() {
- local project_dir=$1
-
- echo "cd $REQUIREMENTS_DIR; $SUDO_CMD python update.py $project_dir"
-
- # Don't update repo if local changes exist
- # Don't use buggy "git diff --quiet"
- (cd $project_dir && git diff --exit-code >/dev/null)
- local update_requirements=$?
-
- if [ $update_requirements -eq 0 ]; then
- (cd $REQUIREMENTS_DIR; \
- $SUDO_CMD python update.py $project_dir)
- fi
-
- setup_develop_no_requirements_update $project_dir
-
- # We've just gone and possibly modified the user's source tree in an
- # automated way, which is considered bad form if it's a development
- # tree because we've screwed up their next git checkin. So undo it.
- #
- # However... there are some circumstances, like running in the gate
- # where we really really want the overridden version to stick. So provide
- # a variable that tells us whether or not we should UNDO the requirements
- # changes (this will be set to False in the OpenStack ci gate)
- if [ $UNDO_REQUIREMENTS = "True" ]; then
- if [ $update_requirements -eq 0 ]; then
- (cd $project_dir && git reset --hard)
- fi
- fi
-}
-
-
-# ``pip install -e`` the package, which processes the dependencies
-# using pip before running `setup.py develop`
-# Uses globals ``STACK_USER``
-# setup_develop_no_requirements_update directory
-function setup_develop_no_requirements_update() {
- local project_dir=$1
-
- pip_install -e $project_dir
- # ensure that further actions can do things like setup.py sdist
- safe_chown -R $STACK_USER $1/*.egg-info
-}
-
-
# Retrieve an image from a URL and upload into Glance.
# Uses the following variables:
#
diff --git a/functions-common b/functions-common
index d92e39c..c93dd85 100644
--- a/functions-common
+++ b/functions-common
@@ -15,6 +15,7 @@
# - Process Functions
# - Python Functions
# - Service Functions
+# - System Functions
#
# The following variables are assumed to be defined by certain functions:
#
@@ -39,59 +40,76 @@
# Append a new option in an ini file without replacing the old value
# iniadd config-file section option value1 value2 value3 ...
function iniadd() {
+ local xtrace=$(set +o | grep xtrace)
+ set +o xtrace
local file=$1
local section=$2
local option=$3
shift 3
local values="$(iniget_multiline $file $section $option) $@"
iniset_multiline $file $section $option $values
+ $xtrace
}
# Comment an option in an INI file
# inicomment config-file section option
function inicomment() {
+ local xtrace=$(set +o | grep xtrace)
+ set +o xtrace
local file=$1
local section=$2
local option=$3
sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=.*$\)|#\1|" "$file"
+ $xtrace
}
# Get an option from an INI file
# iniget config-file section option
function iniget() {
+ local xtrace=$(set +o | grep xtrace)
+ set +o xtrace
local file=$1
local section=$2
local option=$3
local line
line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file")
echo ${line#*=}
+ $xtrace
}
# Get a multiple line option from an INI file
# iniget_multiline config-file section option
function iniget_multiline() {
+ local xtrace=$(set +o | grep xtrace)
+ set +o xtrace
local file=$1
local section=$2
local option=$3
local values
values=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { s/^$option[ \t]*=[ \t]*//gp; }" "$file")
echo ${values}
+ $xtrace
}
# Determinate is the given option present in the INI file
# ini_has_option config-file section option
function ini_has_option() {
+ local xtrace=$(set +o | grep xtrace)
+ set +o xtrace
local file=$1
local section=$2
local option=$3
local line
line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file")
+ $xtrace
[ -n "$line" ]
}
# Set an option in an INI file
# iniset config-file section option value
function iniset() {
+ local xtrace=$(set +o | grep xtrace)
+ set +o xtrace
local file=$1
local section=$2
local option=$3
@@ -113,11 +131,14 @@
# Replace it
sed -i -e '/^\['${section}'\]/,/^\[.*\]/ s'${sep}'^\('${option}'[ \t]*=[ \t]*\).*$'${sep}'\1'"${value}"${sep} "$file"
fi
+ $xtrace
}
# Set a multiple line option in an INI file
# iniset_multiline config-file section option value1 value2 valu3 ...
function iniset_multiline() {
+ local xtrace=$(set +o | grep xtrace)
+ set +o xtrace
local file=$1
local section=$2
local option=$3
@@ -142,15 +163,19 @@
$option = $v
" "$file"
done
+ $xtrace
}
# Uncomment an option in an INI file
# iniuncomment config-file section option
function iniuncomment() {
+ local xtrace=$(set +o | grep xtrace)
+ set +o xtrace
local file=$1
local section=$2
local option=$3
sed -i -e "/^\[$section\]/,/^\[.*\]/ s|[^ \t]*#[ \t]*\($option[ \t]*=.*$\)|\1|" "$file"
+ $xtrace
}
# Normalize config values to True or False
@@ -158,6 +183,8 @@
# Accepts as True: 1 yes Yes YES true True TRUE
# VAR=$(trueorfalse default-value test-value)
function trueorfalse() {
+ local xtrace=$(set +o | grep xtrace)
+ set +o xtrace
local default=$1
local testval=$2
@@ -165,6 +192,7 @@
[[ "0 no No NO false False FALSE" =~ "$testval" ]] && { echo "False"; return; }
[[ "1 yes Yes YES true True TRUE" =~ "$testval" ]] && { echo "True"; return; }
echo "$default"
+ $xtrace
}
@@ -195,6 +223,8 @@
fi
backtrace 2
err $line "$*"
+ # Give buffers a second to flush
+ sleep 1
exit $exitcode
}
@@ -498,16 +528,16 @@
if [[ ! -d $GIT_DEST ]]; then
[[ "$ERROR_ON_CLONE" = "True" ]] && \
die $LINENO "Cloning not allowed in this configuration"
- git clone $GIT_REMOTE $GIT_DEST
+ git_timed clone $GIT_REMOTE $GIT_DEST
fi
cd $GIT_DEST
- git fetch $GIT_REMOTE $GIT_REF && git checkout FETCH_HEAD
+ git_timed fetch $GIT_REMOTE $GIT_REF && git checkout FETCH_HEAD
else
# do a full clone only if the directory doesn't exist
if [[ ! -d $GIT_DEST ]]; then
[[ "$ERROR_ON_CLONE" = "True" ]] && \
die $LINENO "Cloning not allowed in this configuration"
- git clone $GIT_REMOTE $GIT_DEST
+ git_timed clone $GIT_REMOTE $GIT_DEST
cd $GIT_DEST
# This checkout syntax works for both branches and tags
git checkout $GIT_REF
@@ -516,7 +546,7 @@
cd $GIT_DEST
# set the url to pull from and fetch
git remote set-url origin $GIT_REMOTE
- git fetch origin
+ git_timed fetch origin
# remove the existing ignored files (like pyc) as they cause breakage
# (due to the py files having older timestamps than our pyc, so python
# thinks the pyc files are correct using them)
@@ -541,6 +571,37 @@
git show --oneline | head -1
}
+# git can sometimes get itself infinitely stuck with transient network
+# errors or other issues with the remote end. This wraps git in a
+# timeout/retry loop and is intended to watch over non-local git
+# processes that might hang. GIT_TIMEOUT, if set, is passed directly
+# to timeout(1); otherwise the default value of 0 maintains the status
+# quo of waiting forever.
+# usage: git_timed <git-command>
+function git_timed() {
+ local count=0
+ local timeout=0
+
+ if [[ -n "${GIT_TIMEOUT}" ]]; then
+ timeout=${GIT_TIMEOUT}
+ fi
+
+ until timeout -s SIGINT ${timeout} git "$@"; do
+ # 124 is timeout(1)'s special return code when it reached the
+ # timeout; otherwise assume fatal failure
+ if [[ $? -ne 124 ]]; then
+ die $LINENO "git call failed: [git $@]"
+ fi
+
+ count=$(($count + 1))
+ warn "timeout ${count} for git call: [git $@]"
+ if [ $count -eq 3 ]; then
+ die $LINENO "Maximum of 3 git retries reached"
+ fi
+ sleep 5
+ done
+}
+
# git update using reference as a branch.
# git_update_branch ref
function git_update_branch() {
@@ -571,7 +632,7 @@
git tag -d $GIT_TAG
# fetching given tag only
- git fetch origin tag $GIT_TAG
+ git_timed fetch origin tag $GIT_TAG
git checkout -f $GIT_TAG
}
@@ -675,9 +736,14 @@
# Uses globals ``OFFLINE``, ``*_proxy``
# apt_get operation package [package ...]
function apt_get() {
+ local xtrace=$(set +o | grep xtrace)
+ set +o xtrace
+
[[ "$OFFLINE" = "True" || -z "$@" ]] && return
local sudo="sudo"
[[ "$(id -u)" = "0" ]] && sudo="env"
+
+ $xtrace
$sudo DEBIAN_FRONTEND=noninteractive \
http_proxy=$http_proxy https_proxy=$https_proxy \
no_proxy=$no_proxy \
@@ -695,6 +761,8 @@
# - ``# dist:DISTRO`` or ``dist:DISTRO1,DISTRO2`` limits the selection
# of the package to the distros listed. The distro names are case insensitive.
function get_packages() {
+ local xtrace=$(set +o | grep xtrace)
+ set +o xtrace
local services=$@
local package_dir=$(_get_package_dir)
local file_to_parse
@@ -706,6 +774,7 @@
fi
if [[ -z "$DISTRO" ]]; then
GetDistro
+        echo "Found Distro $DISTRO" >&2
fi
for service in ${services//,/ }; do
# Allow individual services to specify dependencies
@@ -797,23 +866,30 @@
done
IFS=$OIFS
done
+ $xtrace
}
# Distro-agnostic package installer
# install_package package [package ...]
function install_package() {
+ local xtrace=$(set +o | grep xtrace)
+ set +o xtrace
if is_ubuntu; then
# if there are transient errors pulling the updates, that's fine. It may
# be secondary repositories that we don't really care about.
[[ "$NO_UPDATE_REPOS" = "True" ]] || apt_get update || /bin/true
NO_UPDATE_REPOS=True
+ $xtrace
apt_get install "$@"
elif is_fedora; then
+ $xtrace
yum_install "$@"
elif is_suse; then
+ $xtrace
zypper_install "$@"
else
+ $xtrace
exit_distro_not_supported "installing packages"
fi
}
@@ -1050,7 +1126,8 @@
fi
# Check if there is any falure flag file under $SERVICE_DIR/$SCREEN_NAME
- failures=`ls "$SERVICE_DIR/$SCREEN_NAME"/*.failure 2>/dev/null`
+ # make this -o errexit safe
+ failures=`ls "$SERVICE_DIR/$SCREEN_NAME"/*.failure 2>/dev/null || /bin/true`
for service in $failures; do
service=`basename $service`
@@ -1092,7 +1169,13 @@
# ``TRACK_DEPENDS``, ``*_proxy``
# pip_install package [package ...]
function pip_install {
- [[ "$OFFLINE" = "True" || -z "$@" ]] && return
+ local xtrace=$(set +o | grep xtrace)
+ set +o xtrace
+ if [[ "$OFFLINE" = "True" || -z "$@" ]]; then
+ $xtrace
+ return
+ fi
+
if [[ -z "$os_PACKAGE" ]]; then
GetOSVersion
fi
@@ -1121,6 +1204,7 @@
# this problem. See https://github.com/pypa/pip/issues/709
local pip_build_tmp=$(mktemp --tmpdir -d pip-build.XXXXX)
+ $xtrace
$SUDO_PIP PIP_DOWNLOAD_CACHE=${PIP_DOWNLOAD_CACHE:-/var/cache/pip} \
HTTP_PROXY=$http_proxy \
HTTPS_PROXY=$https_proxy \
@@ -1130,6 +1214,58 @@
&& $SUDO_PIP rm -rf ${pip_build_tmp}
}
+# ``pip install -e`` the package, which processes the dependencies
+# using pip before running `setup.py develop`
+#
+# Updates the dependencies in project_dir from the
+# openstack/requirements global list before installing anything.
+#
+# Uses globals ``TRACK_DEPENDS``, ``REQUIREMENTS_DIR``, ``UNDO_REQUIREMENTS``
+# setup_develop directory
+function setup_develop() {
+ local project_dir=$1
+
+ echo "cd $REQUIREMENTS_DIR; $SUDO_CMD python update.py $project_dir"
+
+ # Don't update repo if local changes exist
+ # Don't use buggy "git diff --quiet"
+ (cd $project_dir && git diff --exit-code >/dev/null)
+ local update_requirements=$?
+
+ if [ $update_requirements -eq 0 ]; then
+ (cd $REQUIREMENTS_DIR; \
+ $SUDO_CMD python update.py $project_dir)
+ fi
+
+ setup_develop_no_requirements_update $project_dir
+
+ # We've just gone and possibly modified the user's source tree in an
+ # automated way, which is considered bad form if it's a development
+ # tree because we've screwed up their next git checkin. So undo it.
+ #
+ # However... there are some circumstances, like running in the gate
+ # where we really really want the overridden version to stick. So provide
+ # a variable that tells us whether or not we should UNDO the requirements
+ # changes (this will be set to False in the OpenStack ci gate)
+ if [ $UNDO_REQUIREMENTS = "True" ]; then
+ if [ $update_requirements -eq 0 ]; then
+ (cd $project_dir && git reset --hard)
+ fi
+ fi
+}
+
+# ``pip install -e`` the package, which processes the dependencies
+# using pip before running `setup.py develop`
+# Uses globals ``STACK_USER``
+# setup_develop_no_requirements_update directory
+function setup_develop_no_requirements_update() {
+ local project_dir=$1
+
+ pip_install -e $project_dir
+ # ensure that further actions can do things like setup.py sdist
+ safe_chown -R $STACK_USER $1/*.egg-info
+}
+
# Service Functions
# =================
@@ -1235,32 +1371,36 @@
# Uses global ``ENABLED_SERVICES``
# is_service_enabled service [service ...]
function is_service_enabled() {
+ local xtrace=$(set +o | grep xtrace)
+ set +o xtrace
+ local enabled=1
services=$@
for service in ${services}; do
- [[ ,${ENABLED_SERVICES}, =~ ,${service}, ]] && return 0
+ [[ ,${ENABLED_SERVICES}, =~ ,${service}, ]] && enabled=0
# Look for top-level 'enabled' function for this service
if type is_${service}_enabled >/dev/null 2>&1; then
# A function exists for this service, use it
is_${service}_enabled
- return $?
+ enabled=$?
fi
# TODO(dtroyer): Remove these legacy special-cases after the is_XXX_enabled()
# are implemented
- [[ ${service} == n-cell-* && ${ENABLED_SERVICES} =~ "n-cell" ]] && return 0
- [[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && return 0
- [[ ${service} == "cinder" && ${ENABLED_SERVICES} =~ "c-" ]] && return 0
- [[ ${service} == "ceilometer" && ${ENABLED_SERVICES} =~ "ceilometer-" ]] && return 0
- [[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && return 0
- [[ ${service} == "ironic" && ${ENABLED_SERVICES} =~ "ir-" ]] && return 0
- [[ ${service} == "neutron" && ${ENABLED_SERVICES} =~ "q-" ]] && return 0
- [[ ${service} == "trove" && ${ENABLED_SERVICES} =~ "tr-" ]] && return 0
- [[ ${service} == "swift" && ${ENABLED_SERVICES} =~ "s-" ]] && return 0
- [[ ${service} == s-* && ${ENABLED_SERVICES} =~ "swift" ]] && return 0
+ [[ ${service} == n-cell-* && ${ENABLED_SERVICES} =~ "n-cell" ]] && enabled=0
+ [[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && enabled=0
+ [[ ${service} == "cinder" && ${ENABLED_SERVICES} =~ "c-" ]] && enabled=0
+ [[ ${service} == "ceilometer" && ${ENABLED_SERVICES} =~ "ceilometer-" ]] && enabled=0
+ [[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && enabled=0
+ [[ ${service} == "ironic" && ${ENABLED_SERVICES} =~ "ir-" ]] && enabled=0
+ [[ ${service} == "neutron" && ${ENABLED_SERVICES} =~ "q-" ]] && enabled=0
+ [[ ${service} == "trove" && ${ENABLED_SERVICES} =~ "tr-" ]] && enabled=0
+ [[ ${service} == "swift" && ${ENABLED_SERVICES} =~ "s-" ]] && enabled=0
+ [[ ${service} == s-* && ${ENABLED_SERVICES} =~ "swift" ]] && enabled=0
done
- return 1
+ $xtrace
+ return $enabled
}
# Toggle enable/disable_service for services that must run exclusive of each other
@@ -1280,12 +1420,14 @@
}
-# System Function
-# ===============
+# System Functions
+# ================
# Only run the command if the target file (the last arg) is not on an
# NFS filesystem.
function _safe_permission_operation() {
+ local xtrace=$(set +o | grep xtrace)
+ set +o xtrace
local args=( $@ )
local last
local sudo_cmd
@@ -1299,6 +1441,7 @@
fi
if is_nfs_directory "$dir_to_check" ; then
+ $xtrace
return 0
fi
@@ -1308,6 +1451,7 @@
sudo_cmd="sudo"
fi
+ $xtrace
$sudo_cmd $@
}
diff --git a/lib/cinder b/lib/cinder
index c8c90c0..e8f30b6 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -496,8 +496,12 @@
sudo stop tgt || true
sudo start tgt
elif is_fedora; then
- # bypass redirection to systemctl during restart
- sudo /sbin/service --skip-redirect tgtd restart
+ if [[ $DISTRO =~ (rhel6) ]]; then
+ sudo /sbin/service tgtd restart
+ else
+ # bypass redirection to systemctl during restart
+ sudo /sbin/service --skip-redirect tgtd restart
+ fi
elif is_suse; then
restart_service tgtd
else
diff --git a/lib/databases/mysql b/lib/databases/mysql
index 476b4b9..f5ee3c0 100644
--- a/lib/databases/mysql
+++ b/lib/databases/mysql
@@ -21,11 +21,15 @@
if is_ubuntu; then
# Get ruthless with mysql
stop_service $MYSQL
- sudo aptitude purge -y ~nmysql-server
+ apt_get purge -y mysql*
sudo rm -rf /var/lib/mysql
return
elif is_fedora; then
- MYSQL=mysqld
+ if [[ $DISTRO =~ (rhel7) ]]; then
+ MYSQL=mariadb
+ else
+ MYSQL=mysqld
+ fi
elif is_suse; then
MYSQL=mysql
else
@@ -48,8 +52,12 @@
MY_CONF=/etc/mysql/my.cnf
MYSQL=mysql
elif is_fedora; then
+ if [[ $DISTRO =~ (rhel7) ]]; then
+ MYSQL=mariadb
+ else
+ MYSQL=mysqld
+ fi
MY_CONF=/etc/my.cnf
- MYSQL=mysqld
elif is_suse; then
MY_CONF=/etc/my.cnf
MYSQL=mysql
@@ -135,7 +143,11 @@
fi
# Install mysql-server
if is_ubuntu || is_fedora; then
- install_package mysql-server
+ if [[ $DISTRO =~ (rhel7) ]]; then
+ install_package mariadb-server
+ else
+ install_package mysql-server
+ fi
elif is_suse; then
if ! is_package_installed mariadb; then
install_package mysql-community-server
diff --git a/lib/databases/postgresql b/lib/databases/postgresql
index c459feb..96a5947 100644
--- a/lib/databases/postgresql
+++ b/lib/databases/postgresql
@@ -21,7 +21,7 @@
stop_service postgresql
if is_ubuntu; then
# Get ruthless with mysql
- sudo aptitude purge -y ~npostgresql
+ apt_get purge -y postgresql*
return
elif is_fedora; then
uninstall_package postgresql-server
diff --git a/lib/heat b/lib/heat
index af10fa6..972c35c 100644
--- a/lib/heat
+++ b/lib/heat
@@ -207,6 +207,16 @@
--description "Owns users and projects created by heat" \
| grep ' id ' | get_field 2)
iniset $HEAT_CONF DEFAULT stack_user_domain ${D_ID}
+
+ openstack --os-token $OS_TOKEN --os-url=$KS_ENDPOINT_V3 \
+ --os-identity-api-version=3 user create --password $SERVICE_PASSWORD \
+ --domain $D_ID heat_domain_admin \
+ --description "Manages users and projects created by heat"
+ openstack --os-token $OS_TOKEN --os-url=$KS_ENDPOINT_V3 \
+ --os-identity-api-version=3 role add \
+ --user heat_domain_admin --domain ${D_ID} admin
+ iniset $HEAT_CONF DEFAULT stack_domain_admin heat_domain_admin
+ iniset $HEAT_CONF DEFAULT stack_domain_admin_password $SERVICE_PASSWORD
}
# Restore xtrace
diff --git a/lib/keystone b/lib/keystone
index 44ac94d..1833301 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -426,7 +426,7 @@
fi
echo "Waiting for keystone to start..."
- if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl --noproxy '*' -s $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$service_port/v$IDENTITY_API_VERSION/ >/dev/null; do sleep 1; done"; then
+ if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl --noproxy '*' -k -s $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$service_port/v$IDENTITY_API_VERSION/ >/dev/null; do sleep 1; done"; then
die $LINENO "keystone did not start"
fi
diff --git a/lib/neutron_plugins/embrane b/lib/neutron_plugins/embrane
index 4206a20..325e939 100644
--- a/lib/neutron_plugins/embrane
+++ b/lib/neutron_plugins/embrane
@@ -37,4 +37,4 @@
}
# Restore xtrace
-$MY_XTRACE
\ No newline at end of file
+$MY_XTRACE
diff --git a/lib/neutron_plugins/midonet b/lib/neutron_plugins/midonet
index f95fcb7..dd3b2ba 100644
--- a/lib/neutron_plugins/midonet
+++ b/lib/neutron_plugins/midonet
@@ -1,6 +1,10 @@
# Neutron MidoNet plugin
# ----------------------
+MIDONET_DIR=${MIDONET_DIR:-$DEST/midonet}
+MIDONET_API_PORT=${MIDONET_API_PORT:-8080}
+MIDONET_API_URL=${MIDONET_API_URL:-http://localhost:$MIDONET_API_PORT/midonet-api}
+
# Save trace setting
MY_XTRACE=$(set +o | grep xtrace)
set +o xtrace
@@ -47,8 +51,8 @@
}
function neutron_plugin_configure_service() {
- if [[ "$MIDONET_API_URI" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE MIDONET midonet_uri $MIDONET_API_URI
+ if [[ "$MIDONET_API_URL" != "" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE MIDONET midonet_uri $MIDONET_API_URL
fi
if [[ "$MIDONET_USERNAME" != "" ]]; then
iniset /$Q_PLUGIN_CONF_FILE MIDONET username $MIDONET_USERNAME
@@ -59,9 +63,6 @@
if [[ "$MIDONET_PROJECT_ID" != "" ]]; then
iniset /$Q_PLUGIN_CONF_FILE MIDONET project_id $MIDONET_PROJECT_ID
fi
- if [[ "$MIDONET_PROVIDER_ROUTER_ID" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE MIDONET provider_router_id $MIDONET_PROVIDER_ROUTER_ID
- fi
Q_L3_ENABLED=True
Q_L3_ROUTER_PER_TENANT=True
diff --git a/lib/neutron_thirdparty/midonet b/lib/neutron_thirdparty/midonet
index e672528..98be425 100644
--- a/lib/neutron_thirdparty/midonet
+++ b/lib/neutron_thirdparty/midonet
@@ -10,20 +10,12 @@
# MidoNet devstack destination dir
MIDONET_DIR=${MIDONET_DIR:-$DEST/midonet}
-MIDONET_API_PORT=${MIDONET_API_PORT:-8080}
-MIDONET_API_URL=${MIDONET_API_URL:-http://localhost:$MIDONET_API_PORT/midonet-api}
# MidoNet client repo
MIDONET_CLIENT_REPO=${MIDONET_CLIENT_REPO:-https://github.com/midokura/python-midonetclient.git}
MIDONET_CLIENT_BRANCH=${MIDONET_CLIENT_BRANCH:-master}
MIDONET_CLIENT_DIR=${MIDONET_CLIENT_DIR:-$MIDONET_DIR/python-midonetclient}
-# MidoNet OpenStack repo
-MIDONET_OS_REPO=${MIDONET_OS_REPO:-https://github.com/midokura/midonet-openstack.git}
-MIDONET_OS_BRANCH=${MIDONET_OS_BRANCH:-master}
-MIDONET_OS_DIR=${MIDONET_OS_DIR:-$MIDONET_DIR/midonet-openstack}
-MIDONET_SETUP_SCRIPT=${MIDONET_SETUP_SCRIPT:-$MIDONET_OS_DIR/bin/setup_midonet_topology.py}
-
# Save trace setting
MY_XTRACE=$(set +o | grep xtrace)
set +o xtrace
@@ -33,19 +25,12 @@
}
function init_midonet() {
-
- # Initialize DB. Evaluate the output of setup_midonet_topology.py to set
- # env variables for provider router ID.
- eval `python $MIDONET_SETUP_SCRIPT $MIDONET_API_URL admin $ADMIN_PASSWORD admin provider_devices`
- die_if_not_set $LINENO provider_router_id "Error running midonet setup script, provider_router_id was not set."
-
- iniset /$Q_PLUGIN_CONF_FILE MIDONET provider_router_id $provider_router_id
+ :
}
function install_midonet() {
git_clone $MIDONET_CLIENT_REPO $MIDONET_CLIENT_DIR $MIDONET_CLIENT_BRANCH
- git_clone $MIDONET_OS_REPO $MIDONET_OS_DIR $MIDONET_OS_BRANCH
- export PYTHONPATH=$MIDONET_CLIENT_DIR/src:$MIDONET_OS_DIR/src:$PYTHONPATH
+ export PYTHONPATH=$MIDONET_CLIENT_DIR/src:$PYTHONPATH
}
function start_midonet() {
diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt
index 415244f..a550600 100644
--- a/lib/nova_plugins/hypervisor-libvirt
+++ b/lib/nova_plugins/hypervisor-libvirt
@@ -58,40 +58,40 @@
if is_fedora || is_suse; then
if is_fedora && [[ $DISTRO =~ (rhel6) || "$os_RELEASE" -le "17" ]]; then
- sudo bash -c "cat <<EOF >/etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla
+ cat <<EOF | sudo tee /etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla
[libvirt Management Access]
Identity=unix-group:$LIBVIRT_GROUP
Action=org.libvirt.unix.manage
ResultAny=yes
ResultInactive=yes
ResultActive=yes
-EOF"
+EOF
elif is_suse && [[ $os_RELEASE = 12.2 || "$os_VENDOR" = "SUSE LINUX" ]]; then
# openSUSE < 12.3 or SLE
# Work around the fact that polkit-default-privs overrules pklas
# with 'unix-group:$group'.
- sudo bash -c "cat <<EOF >/etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla
+ cat <<EOF | sudo tee /etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla
[libvirt Management Access]
Identity=unix-user:$STACK_USER
Action=org.libvirt.unix.manage
ResultAny=yes
ResultInactive=yes
ResultActive=yes
-EOF"
+EOF
else
# Starting with fedora 18 and opensuse-12.3 enable stack-user to
# virsh -c qemu:///system by creating a policy-kit rule for
# stack-user using the new Javascript syntax
rules_dir=/etc/polkit-1/rules.d
sudo mkdir -p $rules_dir
- sudo bash -c "cat <<EOF > $rules_dir/50-libvirt-$STACK_USER.rules
+ cat <<EOF | sudo tee $rules_dir/50-libvirt-$STACK_USER.rules
polkit.addRule(function(action, subject) {
if (action.id == 'org.libvirt.unix.manage' &&
subject.user == '"$STACK_USER"') {
return polkit.Result.YES;
}
});
-EOF"
+EOF
unset rules_dir
fi
fi
@@ -140,10 +140,12 @@
install_package kvm
install_package libvirt-bin
install_package python-libvirt
+ install_package python-guestfs
elif is_fedora || is_suse; then
install_package kvm
install_package libvirt
install_package libvirt-python
+ install_package python-libguestfs
fi
# Install and configure **LXC** if specified. LXC is another approach to
diff --git a/lib/oslo b/lib/oslo
index b089842..516ce1c 100644
--- a/lib/oslo
+++ b/lib/oslo
@@ -24,6 +24,7 @@
OSLOCFG_DIR=$DEST/oslo.config
OSLOMSG_DIR=$DEST/oslo.messaging
OSLORWRAP_DIR=$DEST/oslo.rootwrap
+OSLOVMWARE_DIR=$DEST/oslo.vmware
PYCADF_DIR=$DEST/pycadf
STEVEDORE_DIR=$DEST/stevedore
TASKFLOW_DIR=$DEST/taskflow
@@ -49,6 +50,9 @@
git_clone $OSLORWRAP_REPO $OSLORWRAP_DIR $OSLORWRAP_BRANCH
setup_develop $OSLORWRAP_DIR
+ git_clone $OSLOVMWARE_REPO $OSLOVMWARE_DIR $OSLOVMWARE_BRANCH
+ setup_develop $OSLOVMWARE_DIR
+
git_clone $PYCADF_REPO $PYCADF_DIR $PYCADF_BRANCH
setup_develop $PYCADF_DIR
diff --git a/lib/rpc_backend b/lib/rpc_backend
index 3651bc0..34f576f 100644
--- a/lib/rpc_backend
+++ b/lib/rpc_backend
@@ -67,7 +67,7 @@
sudo killall epmd || sudo killall -9 epmd
if is_ubuntu; then
# And the Erlang runtime too
- sudo aptitude purge -y ~nerlang
+ apt_get purge -y erlang*
fi
elif is_service_enabled qpid; then
if is_fedora; then
diff --git a/lib/savanna b/lib/savanna
index 43c5e38..9feff23 100644
--- a/lib/savanna
+++ b/lib/savanna
@@ -10,6 +10,7 @@
# configure_savanna
# start_savanna
# stop_savanna
+# cleanup_savanna
# Save trace setting
XTRACE=$(set +o | grep xtrace)
@@ -33,6 +34,8 @@
SAVANNA_SERVICE_PORT=${SAVANNA_SERVICE_PORT:-8386}
SAVANNA_SERVICE_PROTOCOL=${SAVANNA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
+SAVANNA_AUTH_CACHE_DIR=${SAVANNA_AUTH_CACHE_DIR:-/var/cache/savanna}
+
# Support entry points installation of console scripts
if [[ -d $SAVANNA_DIR/bin ]]; then
SAVANNA_BIN_DIR=$SAVANNA_DIR/bin
@@ -83,6 +86,14 @@
fi
}
+# cleanup_savanna() - Remove residual data files, anything left over from
+# previous runs that would need to clean up.
+function cleanup_savanna() {
+
+ # Cleanup auth cache dir
+ sudo rm -rf $SAVANNA_AUTH_CACHE_DIR
+}
+
# configure_savanna() - Set config files, create data dirs, etc
function configure_savanna() {
@@ -94,9 +105,27 @@
# Copy over savanna configuration file and configure common parameters.
cp $SAVANNA_DIR/etc/savanna/savanna.conf.sample $SAVANNA_CONF_FILE
+ # Create auth cache dir
+ sudo mkdir -p $SAVANNA_AUTH_CACHE_DIR
+ sudo chown $STACK_USER $SAVANNA_AUTH_CACHE_DIR
+ rm -rf $SAVANNA_AUTH_CACHE_DIR/*
+
+ # Set obsolete keystone auth configs for backward compatibility
+ iniset $SAVANNA_CONF_FILE DEFAULT os_auth_host $KEYSTONE_SERVICE_HOST
+ iniset $SAVANNA_CONF_FILE DEFAULT os_auth_port $KEYSTONE_SERVICE_PORT
+ iniset $SAVANNA_CONF_FILE DEFAULT os_auth_protocol $KEYSTONE_SERVICE_PROTOCOL
iniset $SAVANNA_CONF_FILE DEFAULT os_admin_password $SERVICE_PASSWORD
iniset $SAVANNA_CONF_FILE DEFAULT os_admin_username savanna
iniset $SAVANNA_CONF_FILE DEFAULT os_admin_tenant_name $SERVICE_TENANT_NAME
+
+ # Set actual keystone auth configs
+ iniset $SAVANNA_CONF_FILE keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/
+ iniset $SAVANNA_CONF_FILE keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
+ iniset $SAVANNA_CONF_FILE keystone_authtoken admin_user savanna
+ iniset $SAVANNA_CONF_FILE keystone_authtoken admin_password $SERVICE_PASSWORD
+ iniset $SAVANNA_CONF_FILE keystone_authtoken signing_dir $SAVANNA_AUTH_CACHE_DIR
+ iniset $SAVANNA_CONF_FILE keystone_authtoken cafile $KEYSTONE_SSL_CA
+
iniset $SAVANNA_CONF_FILE DEFAULT debug $SAVANNA_DEBUG
iniset $SAVANNA_CONF_FILE database connection `database_connection_url savanna`
@@ -106,6 +135,12 @@
iniset $SAVANNA_CONF_FILE DEFAULT use_floating_ips true
fi
+ if is_service_enabled heat; then
+ iniset $SAVANNA_CONF_FILE DEFAULT infrastructure_engine heat
+ else
+ iniset $SAVANNA_CONF_FILE DEFAULT infrastructure_engine savanna
+ fi
+
iniset $SAVANNA_CONF_FILE DEFAULT use_syslog $SYSLOG
recreate_database savanna utf8
diff --git a/lib/swift b/lib/swift
index df586ab..6c33af5 100644
--- a/lib/swift
+++ b/lib/swift
@@ -231,6 +231,46 @@
done
}
+# This function generates an object/container/account configuration
+# emulating 4 nodes on different ports
+function generate_swift_config() {
+ local swift_node_config=$1
+ local node_id=$2
+ local bind_port=$3
+ local server_type=$4
+
+ log_facility=$[ node_id - 1 ]
+ node_path=${SWIFT_DATA_DIR}/${node_number}
+
+ iniuncomment ${swift_node_config} DEFAULT user
+ iniset ${swift_node_config} DEFAULT user ${STACK_USER}
+
+ iniuncomment ${swift_node_config} DEFAULT bind_port
+ iniset ${swift_node_config} DEFAULT bind_port ${bind_port}
+
+ iniuncomment ${swift_node_config} DEFAULT swift_dir
+ iniset ${swift_node_config} DEFAULT swift_dir ${SWIFT_CONF_DIR}
+
+ iniuncomment ${swift_node_config} DEFAULT devices
+ iniset ${swift_node_config} DEFAULT devices ${node_path}
+
+ iniuncomment ${swift_node_config} DEFAULT log_facility
+ iniset ${swift_node_config} DEFAULT log_facility LOG_LOCAL${log_facility}
+
+ iniuncomment ${swift_node_config} DEFAULT workers
+ iniset ${swift_node_config} DEFAULT workers 1
+
+ iniuncomment ${swift_node_config} DEFAULT disable_fallocate
+ iniset ${swift_node_config} DEFAULT disable_fallocate true
+
+ iniuncomment ${swift_node_config} DEFAULT mount_check
+ iniset ${swift_node_config} DEFAULT mount_check false
+
+ iniuncomment ${swift_node_config} ${server_type}-replicator vm_test_mode
+ iniset ${swift_node_config} ${server_type}-replicator vm_test_mode yes
+}
+
+
# configure_swift() - Set config files, create data dirs and loop image
function configure_swift() {
local swift_pipeline="${SWIFT_EXTRAS_MIDDLEWARE_NO_AUTH}"
@@ -364,45 +404,6 @@
cp ${SWIFT_DIR}/etc/swift.conf-sample ${SWIFT_CONF_DIR}/swift.conf
iniset ${SWIFT_CONF_DIR}/swift.conf swift-hash swift_hash_path_suffix ${SWIFT_HASH}
- # This function generates an object/container/account configuration
- # emulating 4 nodes on different ports
- function generate_swift_config() {
- local swift_node_config=$1
- local node_id=$2
- local bind_port=$3
- local server_type=$4
-
- log_facility=$[ node_id - 1 ]
- node_path=${SWIFT_DATA_DIR}/${node_number}
-
- iniuncomment ${swift_node_config} DEFAULT user
- iniset ${swift_node_config} DEFAULT user ${STACK_USER}
-
- iniuncomment ${swift_node_config} DEFAULT bind_port
- iniset ${swift_node_config} DEFAULT bind_port ${bind_port}
-
- iniuncomment ${swift_node_config} DEFAULT swift_dir
- iniset ${swift_node_config} DEFAULT swift_dir ${SWIFT_CONF_DIR}
-
- iniuncomment ${swift_node_config} DEFAULT devices
- iniset ${swift_node_config} DEFAULT devices ${node_path}
-
- iniuncomment ${swift_node_config} DEFAULT log_facility
- iniset ${swift_node_config} DEFAULT log_facility LOG_LOCAL${log_facility}
-
- iniuncomment ${swift_node_config} DEFAULT workers
- iniset ${swift_node_config} DEFAULT workers 1
-
- iniuncomment ${swift_node_config} DEFAULT disable_fallocate
- iniset ${swift_node_config} DEFAULT disable_fallocate true
-
- iniuncomment ${swift_node_config} DEFAULT mount_check
- iniset ${swift_node_config} DEFAULT mount_check false
-
- iniuncomment ${swift_node_config} ${server_type}-replicator vm_test_mode
- iniset ${swift_node_config} ${server_type}-replicator vm_test_mode yes
- }
-
for node_number in ${SWIFT_REPLICAS_SEQ}; do
swift_node_config=${SWIFT_CONF_DIR}/object-server/${node_number}.conf
cp ${SWIFT_DIR}/etc/object-server.conf-sample ${swift_node_config}
diff --git a/lib/tempest b/lib/tempest
index 596750b..410c80c 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -63,6 +63,9 @@
TEMPEST_VOLUME_VENDOR=${TEMPEST_VOLUME_VENDOR:-"Open Source"}
TEMPEST_STORAGE_PROTOCOL=${TEMPEST_STORAGE_PROTOCOL:-iSCSI}
+# Neutron/Network variables
+IPV6_ENABLED=$(trueorfalse True $IPV6_ENABLED)
+
# Functions
# ---------
@@ -87,11 +90,6 @@
local boto_instance_type="m1.tiny"
local ssh_connect_method="fixed"
- if [[ ! -d $TEMPEST_CONFIG_DIR ]]; then
- sudo mkdir -p $TEMPEST_CONFIG_DIR
- fi
- sudo chown $STACK_USER $TEMPEST_CONFIG_DIR
-
# TODO(afazekas):
# sudo python setup.py deploy
@@ -142,8 +140,12 @@
# Create tempest.conf from tempest.conf.sample
# copy every time, because the image UUIDS are going to change
- sudo cp $TEMPEST_DIR/etc/tempest.conf.sample $TEMPEST_CONFIG
- sudo chmod 644 $TEMPEST_CONFIG
+ if [[ ! -d $TEMPEST_CONFIG_DIR ]]; then
+ sudo mkdir -p $TEMPEST_CONFIG_DIR
+ fi
+ sudo chown $STACK_USER $TEMPEST_CONFIG_DIR
+ cp $TEMPEST_DIR/etc/tempest.conf.sample $TEMPEST_CONFIG
+ chmod 644 $TEMPEST_CONFIG
password=${ADMIN_PASSWORD:-secrete}
@@ -285,11 +287,13 @@
# Compute admin
iniset $TEMPEST_CONFIG "compute-admin" password "$password" # DEPRECATED
+ # Network
iniset $TEMPEST_CONFIG network api_version 2.0
iniset $TEMPEST_CONFIG network tenant_networks_reachable "$tenant_networks_reachable"
iniset $TEMPEST_CONFIG network public_network_id "$public_network_id"
iniset $TEMPEST_CONFIG network public_router_id "$public_router_id"
iniset $TEMPEST_CONFIG network default_network "$FIXED_RANGE"
+ iniset $TEMPEST_CONFIG network ipv6_enabled "$IPV6_ENABLED"
# boto
iniset $TEMPEST_CONFIG boto ec2_url "http://$SERVICE_HOST:8773/services/Cloud"
diff --git a/run_tests.sh b/run_tests.sh
index b4f26c5..a0bfbee 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -26,4 +26,4 @@
echo "Running bash8..."
-./tools/bash8.py $FILES
+./tools/bash8.py -v $FILES
diff --git a/stack.sh b/stack.sh
index 4a55225..ac89e52 100755
--- a/stack.sh
+++ b/stack.sh
@@ -181,7 +181,7 @@
# Installing Open vSwitch on RHEL6 requires enabling the RDO repo.
RHEL6_RDO_REPO_RPM=${RHEL6_RDO_REPO_RPM:-"http://rdo.fedorapeople.org/openstack-havana/rdo-release-havana.rpm"}
RHEL6_RDO_REPO_ID=${RHEL6_RDO_REPO_ID:-"openstack-havana"}
- if ! yum repolist enabled $RHEL6_RDO_REPO_ID | grep -q $RHEL6_RDO_REPO_ID; then
+ if ! sudo yum repolist enabled $RHEL6_RDO_REPO_ID | grep -q $RHEL6_RDO_REPO_ID; then
echo "RDO repo not detected; installing"
yum_install $RHEL6_RDO_REPO_RPM || \
die $LINENO "Error installing RDO repo, cannot continue"
@@ -189,11 +189,15 @@
# RHEL6 requires EPEL for many Open Stack dependencies
RHEL6_EPEL_RPM=${RHEL6_EPEL_RPM:-"http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm"}
- if ! yum repolist enabled epel | grep -q 'epel'; then
+ if ! sudo yum repolist enabled epel | grep -q 'epel'; then
echo "EPEL not detected; installing"
yum_install ${RHEL6_EPEL_RPM} || \
die $LINENO "Error installing EPEL repo, cannot continue"
fi
+
+ # ... and also optional to be enabled
+ sudo yum-config-manager --enable rhel-6-server-optional-rpms
+
fi
@@ -294,15 +298,9 @@
SYSLOG_HOST=${SYSLOG_HOST:-$HOST_IP}
SYSLOG_PORT=${SYSLOG_PORT:-516}
-# Enable sysstat logging
-SYSSTAT_FILE=${SYSSTAT_FILE:-"sysstat.dat"}
-SYSSTAT_INTERVAL=${SYSSTAT_INTERVAL:-"1"}
-
+# for DSTAT logging
DSTAT_FILE=${DSTAT_FILE:-"dstat.txt"}
-PIDSTAT_FILE=${PIDSTAT_FILE:-"pidstat.txt"}
-PIDSTAT_INTERVAL=${PIDSTAT_INTERVAL:-"5"}
-
# Use color for logging output (only available if syslog is not used)
LOG_COLOR=`trueorfalse True $LOG_COLOR`
@@ -528,15 +526,17 @@
exec 3>&1
if [[ "$VERBOSE" == "True" ]]; then
# Redirect stdout/stderr to tee to write the log file
- exec 1> >( awk '
+ exec 1> >( awk -v logfile=${LOGFILE} '
+ /((set \+o$)|xtrace)/ { next }
{
- cmd ="date +\"%Y-%m-%d %H:%M:%S \""
+ cmd ="date +\"%Y-%m-%d %H:%M:%S.%3N | \""
cmd | getline now
- close("date +\"%Y-%m-%d %H:%M:%S \"")
+ close("date +\"%Y-%m-%d %H:%M:%S.%3N | \"")
sub(/^/, now)
print
- fflush()
- }' | tee "${LOGFILE}" ) 2>&1
+ print > logfile
+ fflush("")
+ }' ) 2>&1
# Set up a second fd for output
exec 6> >( tee "${SUMFILE}" )
else
@@ -584,24 +584,30 @@
# -----------------------
# Kill background processes on exit
-trap clean EXIT
-clean() {
+trap exit_trap EXIT
+function exit_trap {
local r=$?
- kill >/dev/null 2>&1 $(jobs -p)
+ echo "exit_trap called, cleaning up child processes"
+ kill 2>&1 $(jobs -p)
exit $r
}
-
# Exit on any errors so that errors don't compound
-trap failed ERR
-failed() {
+trap err_trap ERR
+function err_trap {
local r=$?
- kill >/dev/null 2>&1 $(jobs -p)
set +o xtrace
- [ -n "$LOGFILE" ] && echo "${0##*/} failed: full log in $LOGFILE"
+ if [[ -n "$LOGFILE" ]]; then
+ echo "${0##*/} failed: full log in $LOGFILE"
+ else
+ echo "${0##*/} failed"
+ fi
exit $r
}
+
+set -o errexit
+
# Print the commands being run so that we can see the command that triggers
# an error. It is also useful for following along as the install occurs.
set -o xtrace
@@ -862,46 +868,17 @@
# Initialize the directory for service status check
init_service_check
-
-# Sysstat
+# Dstat
# -------
-# If enabled, systat has to start early to track OpenStack service startup.
-if is_service_enabled sysstat; then
- # what we want to measure
- # -u : cpu statitics
- # -q : load
- # -b : io load rates
- # -w : process creation and context switch rates
- SYSSTAT_OPTS="-u -q -b -w"
- if [[ -n ${SCREEN_LOGDIR} ]]; then
- screen_it sysstat "cd $TOP_DIR; ./tools/sar_filter.py $SYSSTAT_OPTS -o $SCREEN_LOGDIR/$SYSSTAT_FILE $SYSSTAT_INTERVAL"
- else
- screen_it sysstat "./tools/sar_filter.py $SYSSTAT_OPTS $SYSSTAT_INTERVAL"
- fi
+# A better kind of sysstat, with the top process per time slice
+DSTAT_OPTS="-tcndylp --top-cpu-adv"
+if [[ -n ${SCREEN_LOGDIR} ]]; then
+ screen_it dstat "cd $TOP_DIR; dstat $DSTAT_OPTS | tee $SCREEN_LOGDIR/$DSTAT_FILE"
+else
+ screen_it dstat "dstat $DSTAT_OPTS"
fi
-if is_service_enabled dstat; then
- # Per-process stats
- DSTAT_OPTS="-tcndylp --top-cpu-adv"
- if [[ -n ${SCREEN_LOGDIR} ]]; then
- screen_it dstat "cd $TOP_DIR; dstat $DSTAT_OPTS | tee $SCREEN_LOGDIR/$DSTAT_FILE"
- else
- screen_it dstat "dstat $DSTAT_OPTS"
- fi
-fi
-
-if is_service_enabled pidstat; then
- # Per-process stats
- PIDSTAT_OPTS="-l -p ALL -T ALL"
- if [[ -n ${SCREEN_LOGDIR} ]]; then
- screen_it pidstat "cd $TOP_DIR; pidstat $PIDSTAT_OPTS $PIDSTAT_INTERVAL > $SCREEN_LOGDIR/$PIDSTAT_FILE"
- else
- screen_it pidstat "pidstat $PIDSTAT_OPTS $PIDSTAT_INTERVAL"
- fi
-fi
-
-
# Start Services
# ==============
diff --git a/stackrc b/stackrc
index 0b081c4..f235ccc 100644
--- a/stackrc
+++ b/stackrc
@@ -80,6 +80,17 @@
# (currently only implemented for MySQL backend)
DATABASE_QUERY_LOGGING=$(trueorfalse True $DATABASE_QUERY_LOGGING)
+# Set a timeout for git operations. If git is still running when the
+# timeout expires, the command will be retried up to 3 times. This is
+# in the format for timeout(1);
+#
+# DURATION is a floating point number with an optional suffix: 's'
+# for seconds (the default), 'm' for minutes, 'h' for hours or 'd'
+# for days.
+#
+# Zero disables timeouts
+GIT_TIMEOUT=${GIT_TIMEOUT:-0}
+
# Repositories
# ------------
@@ -167,6 +178,10 @@
OSLORWRAP_REPO=${OSLORWRAP_REPO:-${GIT_BASE}/openstack/oslo.rootwrap.git}
OSLORWRAP_BRANCH=${OSLORWRAP_BRANCH:-master}
+# oslo.vmware
+OSLOVMWARE_REPO=${OSLOVMWARE_REPO:-${GIT_BASE}/openstack/oslo.vmware.git}
+OSLOVMWARE_BRANCH=${OSLOVMWARE_BRANCH:-master}
+
# pycadf auditing library
PYCADF_REPO=${PYCADF_REPO:-${GIT_BASE}/openstack/pycadf.git}
PYCADF_BRANCH=${PYCADF_BRANCH:-master}
diff --git a/tools/bash8.py b/tools/bash8.py
index 7552e0d..f89b241 100755
--- a/tools/bash8.py
+++ b/tools/bash8.py
@@ -25,6 +25,7 @@
# - E001: check that lines do not end with trailing whitespace
# - E002: ensure that indents are only spaces, and not hard tabs
# - E003: ensure all indents are a multiple of 4 spaces
+# - E004: file did not end with a newline
#
# Structure errors
#
@@ -34,6 +35,7 @@
#
# - E010: *do* not on the same line as *for*
# - E011: *then* not on the same line as *if*
+# - E012: heredoc didn't end before EOF
import argparse
import fileinput
@@ -54,11 +56,16 @@
return IGNORE and re.search(IGNORE, error)
-def print_error(error, line):
+def print_error(error, line,
+ filename=None, filelineno=None):
+ if not filename:
+ filename = fileinput.filename()
+ if not filelineno:
+ filelineno = fileinput.filelineno()
global ERRORS
ERRORS = ERRORS + 1
print("%s: '%s'" % (error, line.rstrip('\n')))
- print(" - %s: L%s" % (fileinput.filename(), fileinput.filelineno()))
+ print(" - %s: L%s" % (filename, filelineno))
def not_continuation(line):
@@ -110,17 +117,46 @@
return False
-def check_files(files):
+def check_files(files, verbose):
in_multiline = False
+ multiline_start = 0
+ multiline_line = ""
logical_line = ""
token = False
+ prev_file = None
+ prev_line = ""
+ prev_lineno = 0
+
for line in fileinput.input(files):
+ if fileinput.isfirstline():
+ # if in_multiline when the new file starts then we didn't
+ # find the end of a heredoc in the last file.
+ if in_multiline:
+ print_error('E012: heredoc did not end before EOF',
+ multiline_line,
+ filename=prev_file, filelineno=multiline_start)
+ in_multiline = False
+
+ # last line of a previous file should always end with a
+ # newline
+ if prev_file and not prev_line.endswith('\n'):
+ print_error('E004: file did not end with a newline',
+ prev_line,
+ filename=prev_file, filelineno=prev_lineno)
+
+ prev_file = fileinput.filename()
+
+ if verbose:
+ print "Running bash8 on %s" % fileinput.filename()
+
# NOTE(sdague): multiline processing of heredocs is interesting
if not in_multiline:
logical_line = line
token = starts_multiline(line)
if token:
in_multiline = True
+ multiline_start = fileinput.filelineno()
+ multiline_line = line
continue
else:
logical_line = logical_line + line
@@ -134,6 +170,8 @@
check_for_do(logical_line)
check_if_then(logical_line)
+ prev_line = logical_line
+ prev_lineno = fileinput.filelineno()
def get_options():
parser = argparse.ArgumentParser(
@@ -141,13 +179,14 @@
parser.add_argument('files', metavar='file', nargs='+',
help='files to scan for errors')
parser.add_argument('-i', '--ignore', help='Rules to ignore')
+ parser.add_argument('-v', '--verbose', action='store_true', default=False)
return parser.parse_args()
def main():
opts = get_options()
register_ignores(opts.ignore)
- check_files(opts.files)
+ check_files(opts.files, opts.verbose)
if ERRORS > 0:
print("%d bash8 error(s) found" % ERRORS)
diff --git a/tools/create-stack-user.sh b/tools/create-stack-user.sh
index 50f6592..9c29ecd 100755
--- a/tools/create-stack-user.sh
+++ b/tools/create-stack-user.sh
@@ -15,6 +15,7 @@
# and it was time for this nonsense to stop. Run this script as root to create
# the user and configure sudo.
+set -o errexit
# Keep track of the devstack directory
TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
@@ -27,12 +28,14 @@
# and ``DISTRO``
GetDistro
-# Needed to get ``ENABLED_SERVICES``
+# Needed to get ``ENABLED_SERVICES`` and ``STACK_USER``
source $TOP_DIR/stackrc
# Give the non-root user the ability to run as **root** via ``sudo``
is_package_installed sudo || install_package sudo
+[[ -z "$STACK_USER" ]] && die "STACK_USER is not set. Exiting."
+
if ! getent group $STACK_USER >/dev/null; then
echo "Creating a group called $STACK_USER"
groupadd $STACK_USER
diff --git a/tools/info.sh b/tools/info.sh
index 3ab7966..1e521b9 100755
--- a/tools/info.sh
+++ b/tools/info.sh
@@ -122,13 +122,11 @@
ver=${BASH_REMATCH[2]}
else
# Unhandled format in freeze file
- #echo "unknown: $p"
continue
fi
echo "pip|${p}|${ver}"
else
# No match in freeze file
- #echo "unknown: $p"
continue
fi
done <$FREEZE_FILE
diff --git a/tools/sar_filter.py b/tools/sar_filter.py
deleted file mode 100755
index 24ef0e4..0000000
--- a/tools/sar_filter.py
+++ /dev/null
@@ -1,86 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2014 Samsung Electronics Corp. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import re
-import subprocess
-import sys
-
-
-def is_data_line(line):
- timestamp, data = parse_line(line)
- return re.search('\d\.d', data)
-
-
-def parse_line(line):
- m = re.search('(\d\d:\d\d:\d\d( \w\w)?)(\s+((\S+)\s*)+)', line)
- if m:
- date = m.group(1)
- data = m.group(3).rstrip()
- return date, data
- else:
- return None, None
-
-
-process = subprocess.Popen(
- "sar %s" % " ".join(sys.argv[1:]),
- shell=True,
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
-
-# Poll process for new output until finished
-
-start_time = ""
-header = ""
-data_line = ""
-printed_header = False
-current_ts = None
-
-# print out the first sysstat line regardless
-print process.stdout.readline()
-
-while True:
- nextline = process.stdout.readline()
- if nextline == '' and process.poll() is not None:
- break
-
- date, data = parse_line(nextline)
- # stop until we get to the first set of real lines
- if not date:
- continue
-
- # now we eat the header lines, and only print out the header
- # if we've never seen them before
- if not start_time:
- start_time = date
- header += "%s %s" % (date, data)
- elif date == start_time:
- header += " %s" % data
- elif not printed_header:
- printed_header = True
- print header
-
- # now we know this is a data line, printing out if the timestamp
- # has changed, and stacking up otherwise.
- nextline = process.stdout.readline()
- date, data = parse_line(nextline)
- if date != current_ts:
- current_ts = date
- print data_line
- data_line = "%s %s" % (date, data)
- else:
- data_line += " %s" % data
-
- sys.stdout.flush()
diff --git a/tools/xen/build_domU_multi.sh b/tools/xen/build_domU_multi.sh
index 0285f42..0eb2077 100755
--- a/tools/xen/build_domU_multi.sh
+++ b/tools/xen/build_domU_multi.sh
@@ -25,11 +25,5 @@
# because rabbit won't launch with an ip addr hostname :(
build_xva HEADNODE $HEAD_PUB_IP $HEAD_MGT_IP 1 "ENABLED_SERVICES=g-api,g-reg,key,n-api,n-sch,n-vnc,horizon,mysql,rabbit"
-# Wait till the head node is up
-#while ! curl -L http://$HEAD_PUB_IP | grep -q username; do
-# echo "Waiting for head node ($HEAD_PUB_IP) to start..."
-# sleep 5
-#done
-
# Build the HA compute host
build_xva COMPUTENODE $COMPUTE_PUB_IP $COMPUTE_MGT_IP 0 "ENABLED_SERVICES=n-cpu,n-net,n-api"