Merge "README.md: Correct the defaults of some of Q_ML2_PLUGIN variables"
diff --git a/.gitignore b/.gitignore
index 67ab722..c6900c8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -14,7 +14,7 @@
files/*.qcow2
files/images
files/pip-*
-files/get-pip.py
+files/get-pip.py*
local.conf
local.sh
localrc
diff --git a/README.md b/README.md
index 2e26976..53de970 100644
--- a/README.md
+++ b/README.md
@@ -285,7 +285,15 @@
tests can be run as follows:
$ cd /opt/stack/tempest
- $ nosetests tempest/scenario/test_network_basic_ops.py
+ $ tox -efull tempest.scenario.test_network_basic_ops
+
+By default tempest is downloaded and its config file is generated, but the
+tempest package is not installed into the system's global site-packages
+(installing the package also pulls in its dependencies), so tempest won't
+run outside of tox. If you would like to install it, add the following to
+your ``localrc`` section:
+
+ INSTALL_TEMPEST=True
# DevStack on Xenserver
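
For readers trying the new default, a minimal sketch of a ``local.conf``
fragment that opts into the global install (the password values are
placeholders, not part of this change):

    [[local|localrc]]
    ADMIN_PASSWORD=secret
    DATABASE_PASSWORD=$ADMIN_PASSWORD
    RABBIT_PASSWORD=$ADMIN_PASSWORD
    SERVICE_PASSWORD=$ADMIN_PASSWORD
    INSTALL_TEMPEST=True
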
diff --git a/doc/source/guides/devstack-with-nested-kvm.rst b/doc/source/guides/devstack-with-nested-kvm.rst
index 58ec3d3..610300b 100644
--- a/doc/source/guides/devstack-with-nested-kvm.rst
+++ b/doc/source/guides/devstack-with-nested-kvm.rst
@@ -129,7 +129,7 @@
LIBVIRT_TYPE=kvm
-Once DevStack is configured succesfully, verify if the Nova instances
+Once DevStack is configured successfully, verify if the Nova instances
are using KVM by noticing the QEMU CLI invoked by Nova is using the
parameter `accel=kvm`, e.g.:
diff --git a/doc/source/guides/single-machine.rst b/doc/source/guides/single-machine.rst
index 70287a9..236ece9 100644
--- a/doc/source/guides/single-machine.rst
+++ b/doc/source/guides/single-machine.rst
@@ -67,7 +67,7 @@
::
- sudo apt-get install git -y || yum install -y git
+ sudo apt-get install git -y || sudo yum install -y git
git clone https://git.openstack.org/openstack-dev/devstack
cd devstack
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 10f4355..cfde991 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -158,7 +158,6 @@
* `lib/cinder <lib/cinder.html>`__
* `lib/config <lib/config.html>`__
* `lib/database <lib/database.html>`__
-* `lib/dib <lib/dib.html>`__
* `lib/dstat <lib/dstat.html>`__
* `lib/glance <lib/glance.html>`__
* `lib/heat <lib/heat.html>`__
@@ -181,7 +180,6 @@
* `clean.sh <clean.sh.html>`__
* `run\_tests.sh <run_tests.sh.html>`__
-* `extras.d/40-dib.sh <extras.d/40-dib.sh.html>`__
* `extras.d/50-ironic.sh <extras.d/50-ironic.sh.html>`__
* `extras.d/60-ceph.sh <extras.d/60-ceph.sh.html>`__
* `extras.d/70-sahara.sh <extras.d/70-sahara.sh.html>`__
diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst
index 5d6d3f1..a9763e6 100644
--- a/doc/source/plugins.rst
+++ b/doc/source/plugins.rst
@@ -136,6 +136,31 @@
enable_plugin ec2api git://git.openstack.org/stackforge/ec2api
+Plugins for gate jobs
+---------------------
+
+All OpenStack plugins that wish to be used as gate jobs need to exist
+in OpenStack's gerrit; both the ``openstack`` and ``stackforge``
+namespaces are fine. This allows testing of the plugin as well as
+insulating gate runs from upstream git repository failures (which we
+see often enough for them to be an issue).
+
+Ideally plugins will be implemented as a ``devstack`` directory inside
+the project they are testing. For example, the stackforge/ec2-api
+project has its plugin support in its tree.
+
+In cases where there is no "project tree" per se (such as integrating
+a backend storage configuration like ceph or glusterfs), it is also
+acceptable to build a dedicated ``stackforge/devstack-plugin-FOO``
+project to house the plugin.
+
+Note that jobs must not require cloning of repositories during tests.
+Tests must list their repository in the ``PROJECTS`` variable for
+`devstack-gate
+<https://git.openstack.org/cgit/openstack-infra/devstack-gate/tree/devstack-vm-gate-wrap.sh>`_
+so that the repository is available to the test. Further information
+is provided in the project creator's guide.
+
Hypervisor
==========
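
To make the in-tree layout concrete, a minimal sketch of a
``devstack/plugin.sh`` entry point (the ``example`` service name is
hypothetical; the phases mirror the extras.d hook pattern visible
elsewhere in this change):

    # devstack/plugin.sh - called by stack.sh at each phase
    if is_service_enabled example; then
        if [[ "$1" == "stack" && "$2" == "install" ]]; then
            echo_summary "Installing example"
        elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
            echo_summary "Configuring example"
        fi
        if [[ "$1" == "unstack" ]]; then
            :  # no-op
        fi
    fi
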
diff --git a/extras.d/40-dib.sh b/extras.d/40-dib.sh
deleted file mode 100644
index fdae011..0000000
--- a/extras.d/40-dib.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-# dib.sh - Devstack extras script to install diskimage-builder
-
-if is_service_enabled dib; then
- if [[ "$1" == "source" ]]; then
- # Initial source
- source $TOP_DIR/lib/dib
- elif [[ "$1" == "stack" && "$2" == "install" ]]; then
- echo_summary "Installing diskimage-builder"
- install_dib
- elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
- # no-op
- :
- elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
- # no-op
- :
- fi
-
- if [[ "$1" == "unstack" ]]; then
- # no-op
- :
- fi
-
- if [[ "$1" == "clean" ]]; then
- # no-op
- :
- fi
-fi
diff --git a/files/apache-dib-pip-repo.template b/files/apache-dib-pip-repo.template
deleted file mode 100644
index 5d2379b..0000000
--- a/files/apache-dib-pip-repo.template
+++ /dev/null
@@ -1,15 +0,0 @@
-Listen %DIB_PIP_REPO_PORT%
-
-<VirtualHost *:%DIB_PIP_REPO_PORT%>
- DocumentRoot %DIB_PIP_REPO%
- <Directory %DIB_PIP_REPO%>
- DirectoryIndex index.html
- Require all granted
- Order allow,deny
- allow from all
- </Directory>
-
- ErrorLog /var/log/%APACHE_NAME%/dib_pip_repo_error.log
- LogLevel warn
- CustomLog /var/log/%APACHE_NAME%/dib_pip_repo_access.log combined
-</VirtualHost>
diff --git a/files/apache-heat-pip-repo.template b/files/apache-heat-pip-repo.template
new file mode 100644
index 0000000..d88ac3e
--- /dev/null
+++ b/files/apache-heat-pip-repo.template
@@ -0,0 +1,15 @@
+Listen %HEAT_PIP_REPO_PORT%
+
+<VirtualHost *:%HEAT_PIP_REPO_PORT%>
+ DocumentRoot %HEAT_PIP_REPO%
+ <Directory %HEAT_PIP_REPO%>
+ DirectoryIndex index.html
+ Require all granted
+ Order allow,deny
+ allow from all
+ </Directory>
+
+ ErrorLog /var/log/%APACHE_NAME%/heat_pip_repo_error.log
+ LogLevel warn
+ CustomLog /var/log/%APACHE_NAME%/heat_pip_repo_access.log combined
+</VirtualHost>
diff --git a/files/debs/ironic b/files/debs/ironic
index f6c7b74..0a906db 100644
--- a/files/debs/ironic
+++ b/files/debs/ironic
@@ -4,6 +4,7 @@
ipxe
libguestfs0
libvirt-bin
+open-iscsi
openssh-client
openvswitch-switch
openvswitch-datapath-dkms
diff --git a/files/debs/n-cpu b/files/debs/n-cpu
index 534b1c1..5d5052a 100644
--- a/files/debs/n-cpu
+++ b/files/debs/n-cpu
@@ -1,8 +1,6 @@
qemu-utils
-# Stuff for diablo volumes
-lvm2
+lvm2 # NOPRIME
open-iscsi
-open-iscsi-utils # Deprecated since quantal dist:precise
genisoimage
sysfsutils
sg3-utils
diff --git a/files/debs/trema b/files/debs/trema
deleted file mode 100644
index f685ca5..0000000
--- a/files/debs/trema
+++ /dev/null
@@ -1,15 +0,0 @@
-# Trema
-make
-ruby1.8
-rubygems1.8
-ruby1.8-dev
-libpcap-dev
-libsqlite3-dev
-libglib2.0-dev
-
-# Sliceable Switch
-sqlite3
-libdbi-perl
-libdbd-sqlite3-perl
-apache2
-libjson-perl
diff --git a/files/rpms/general b/files/rpms/general
index 56a9331..cf40632 100644
--- a/files/rpms/general
+++ b/files/rpms/general
@@ -15,7 +15,6 @@
psmisc
pylint
python-unittest2
-python-virtualenv
python-devel
screen
tar
diff --git a/files/venv-requirements.txt b/files/venv-requirements.txt
index 3c50061..e473a2f 100644
--- a/files/venv-requirements.txt
+++ b/files/venv-requirements.txt
@@ -1,7 +1,8 @@
+cryptography
lxml
MySQL-python
netifaces
-numpy
+#numpy # slowest wheel by far, stop building until we are actually using the output
posix-ipc
psycopg2
pycrypto
diff --git a/functions-common b/functions-common
index 267dfe8..df69cba 100644
--- a/functions-common
+++ b/functions-common
@@ -1601,25 +1601,6 @@
GITBRANCH[$name]=$branch
}
-# is_plugin_enabled
-#
-# Has a particular plugin been enabled?
-function is_plugin_enabled {
- local plugins=$@
- local plugin
- local enabled=1
-
- # short circuit if nothing to do
- if [[ -z ${DEVSTACK_PLUGINS} ]]; then
- return $enabled
- fi
-
- for plugin in ${plugins}; do
- [[ ,${DEVSTACK_PLUGINS}, =~ ,${plugin}, ]] && enabled=0
- done
- return $enabled
-}
-
# fetch_plugins
#
# clones all plugins
diff --git a/inc/python b/inc/python
index dfc4d63..d72c3c9 100644
--- a/inc/python
+++ b/inc/python
@@ -97,6 +97,7 @@
http_proxy=${http_proxy:-} \
https_proxy=${https_proxy:-} \
no_proxy=${no_proxy:-} \
+ PIP_FIND_LINKS=$PIP_FIND_LINKS \
$cmd_pip install \
$@
@@ -108,6 +109,7 @@
http_proxy=${http_proxy:-} \
https_proxy=${https_proxy:-} \
no_proxy=${no_proxy:-} \
+ PIP_FIND_LINKS=$PIP_FIND_LINKS \
$cmd_pip install \
-r $test_req
fi
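
Context for the ``PIP_FIND_LINKS`` lines above: pip maps ``PIP_<OPTION>``
environment variables onto its command-line options, so exporting the
variable behaves like passing ``--find-links``. A sketch (the wheelhouse
path is illustrative):

    PIP_FIND_LINKS=/opt/stack/.wheelhouse pip install lxml
    # behaves like:
    pip install --find-links=/opt/stack/.wheelhouse lxml
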
diff --git a/lib/ceilometer b/lib/ceilometer
index 82e9417..9db0640 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -13,21 +13,16 @@
#
# enable_service ceilometer-alarm-notifier ceilometer-alarm-evaluator
#
-# To ensure events are stored, add the following section to local.conf:
-#
-# [[post-config|$CEILOMETER_CONF]]
-# [notification]
-# store_events=True
-#
# Several variables set in the localrc section adjust common behaviors
# of Ceilometer (see within for additional settings):
#
# CEILOMETER_USE_MOD_WSGI: When True, run the api under mod_wsgi.
# CEILOMETER_PIPELINE_INTERVAL: The number of seconds between pipeline processing
# runs. Default 600.
-# CEILOMETER_BACKEND: The database backend (e.g. 'mysql', 'mongodb')
+# CEILOMETER_BACKEND: The database backend (e.g. 'mysql', 'mongodb', 'es')
# CEILOMETER_COORDINATION_URL: The URL for a group membership service provided
# by tooz.
+# CEILOMETER_EVENTS: Enable event collection
# Dependencies:
@@ -80,6 +75,7 @@
# To enable OSprofiler change value of this variable to "notifications,profiler"
CEILOMETER_NOTIFICATION_TOPICS=${CEILOMETER_NOTIFICATION_TOPICS:-notifications}
+CEILOMETER_EVENTS=${CEILOMETER_EVENTS:-True}
CEILOMETER_COORDINATION_URL=${CEILOMETER_COORDINATION_URL:-}
CEILOMETER_PIPELINE_INTERVAL=${CEILOMETER_PIPELINE_INTERVAL:-}
@@ -137,8 +133,10 @@
# cleanup_ceilometer() - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up
function cleanup_ceilometer {
- if [ "$CEILOMETER_BACKEND" != 'mysql' ] && [ "$CEILOMETER_BACKEND" != 'postgresql' ] ; then
+ if [ "$CEILOMETER_BACKEND" = 'mongodb' ] ; then
mongo ceilometer --eval "db.dropDatabase();"
+ elif [ "$CEILOMETER_BACKEND" = 'es' ] ; then
+ curl -XDELETE "localhost:9200/events_*"
fi
if [ "$CEILOMETER_USE_MOD_WSGI" == "True" ]; then
_cleanup_ceilometer_apache_wsgi
@@ -206,11 +204,21 @@
configure_auth_token_middleware $CEILOMETER_CONF ceilometer $CEILOMETER_AUTH_CACHE_DIR
+ iniset $CEILOMETER_CONF notification store_events $CEILOMETER_EVENTS
+
if [ "$CEILOMETER_BACKEND" = 'mysql' ] || [ "$CEILOMETER_BACKEND" = 'postgresql' ] ; then
iniset $CEILOMETER_CONF database alarm_connection $(database_connection_url ceilometer)
iniset $CEILOMETER_CONF database event_connection $(database_connection_url ceilometer)
iniset $CEILOMETER_CONF database metering_connection $(database_connection_url ceilometer)
iniset $CEILOMETER_CONF DEFAULT collector_workers $API_WORKERS
+ elif [ "$CEILOMETER_BACKEND" = 'es' ] ; then
+ # es is only supported for events; sql is still used for alarming and metering.
+ iniset $CEILOMETER_CONF database alarm_connection $(database_connection_url ceilometer)
+ iniset $CEILOMETER_CONF database event_connection es://localhost:9200
+ iniset $CEILOMETER_CONF database metering_connection $(database_connection_url ceilometer)
+ iniset $CEILOMETER_CONF DEFAULT collector_workers $API_WORKERS
+ ${TOP_DIR}/pkg/elasticsearch.sh start
+ cleanup_ceilometer
else
iniset $CEILOMETER_CONF database alarm_connection mongodb://localhost:27017/ceilometer
iniset $CEILOMETER_CONF database event_connection mongodb://localhost:27017/ceilometer
@@ -264,7 +272,7 @@
rm -f $CEILOMETER_AUTH_CACHE_DIR/*
if is_service_enabled mysql postgresql; then
- if [ "$CEILOMETER_BACKEND" = 'mysql' ] || [ "$CEILOMETER_BACKEND" = 'postgresql' ] ; then
+ if [ "$CEILOMETER_BACKEND" = 'mysql' ] || [ "$CEILOMETER_BACKEND" = 'postgresql' ] || [ "$CEILOMETER_BACKEND" = 'es' ] ; then
recreate_database ceilometer
$CEILOMETER_BIN_DIR/ceilometer-dbsync
fi
@@ -293,6 +301,11 @@
elif echo $CEILOMETER_COORDINATION_URL | grep -q '^redis:'; then
install_redis
fi
+
+ if [ "$CEILOMETER_BACKEND" = 'es' ] ; then
+ ${TOP_DIR}/pkg/elasticsearch.sh download
+ ${TOP_DIR}/pkg/elasticsearch.sh install
+ fi
}
# install_ceilometerclient() - Collect source and prepare
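
A sketch of opting into the new Elasticsearch event backend from the
``localrc`` section (as the configure code above shows, alarming and
metering still go to SQL in this mode):

    CEILOMETER_BACKEND=es
    CEILOMETER_EVENTS=True
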
diff --git a/lib/ceph b/lib/ceph
index a6b8cc8..76747cc 100644
--- a/lib/ceph
+++ b/lib/ceph
@@ -71,7 +71,7 @@
CEPH_REPLICAS_SEQ=$(seq ${CEPH_REPLICAS})
# Connect to an existing Ceph cluster
-REMOTE_CEPH=$(trueorfalse False $REMOTE_CEPH)
+REMOTE_CEPH=$(trueorfalse False REMOTE_CEPH)
REMOTE_CEPH_ADMIN_KEY_PATH=${REMOTE_CEPH_ADMIN_KEY_PATH:-$CEPH_CONF_DIR/ceph.client.admin.keyring}
@@ -151,14 +151,14 @@
if [[ -e ${CEPH_DISK_IMAGE} ]]; then
sudo rm -f ${CEPH_DISK_IMAGE}
fi
+
+ # purge ceph config file and keys
+ sudo rm -rf ${CEPH_CONF_DIR}/*
}
function cleanup_ceph_general {
undefine_virsh_secret
uninstall_package ceph ceph-common python-ceph libcephfs1 > /dev/null 2>&1
-
- # purge ceph config file and keys
- sudo rm -rf ${CEPH_CONF_DIR}/*
}
diff --git a/lib/dib b/lib/dib
deleted file mode 100644
index 88d9fd8..0000000
--- a/lib/dib
+++ /dev/null
@@ -1,149 +0,0 @@
-#!/bin/bash
-#
-# lib/dib
-# Install and build images with **diskimage-builder**
-
-# Dependencies:
-#
-# - functions
-# - DEST, DATA_DIR must be defined
-
-# stack.sh
-# ---------
-# - install_dib
-
-# Save trace setting
-XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-# Defaults
-# --------
-
-# set up default directories
-DIB_DIR=$DEST/diskimage-builder
-TIE_DIR=$DEST/tripleo-image-elements
-
-# NOTE: Setting DIB_APT_SOURCES assumes you will be building
-# Debian/Ubuntu based images. Leave unset for other flavors.
-DIB_APT_SOURCES=${DIB_APT_SOURCES:-""}
-DIB_BUILD_OFFLINE=$(trueorfalse False DIB_BUILD_OFFLINE)
-DIB_IMAGE_CACHE=$DATA_DIR/diskimage-builder/image-create
-DIB_PIP_REPO=$DATA_DIR/diskimage-builder/pip-repo
-DIB_PIP_REPO_PORT=${DIB_PIP_REPO_PORT:-8899}
-
-OCC_DIR=$DEST/os-collect-config
-ORC_DIR=$DEST/os-refresh-config
-OAC_DIR=$DEST/os-apply-config
-
-# Functions
-# ---------
-
-# install_dib() - Collect source and prepare
-function install_dib {
- pip_install diskimage-builder
-
- git_clone $TIE_REPO $TIE_DIR $TIE_BRANCH
- git_clone $OCC_REPO $OCC_DIR $OCC_BRANCH
- git_clone $ORC_REPO $ORC_DIR $ORC_BRANCH
- git_clone $OAC_REPO $OAC_DIR $OAC_BRANCH
- mkdir -p $DIB_IMAGE_CACHE
-}
-
-# build_dib_pip_repo() - Builds a local pip repo from local projects
-function build_dib_pip_repo {
- local project_dirs=$1
- local projpath proj package
-
- rm -rf $DIB_PIP_REPO
- mkdir -p $DIB_PIP_REPO
-
- echo "<html><body>" > $DIB_PIP_REPO/index.html
- for projpath in $project_dirs; do
- proj=$(basename $projpath)
- mkdir -p $DIB_PIP_REPO/$proj
- pushd $projpath
- rm -rf dist
- python setup.py sdist
- pushd dist
- package=$(ls *)
- mv $package $DIB_PIP_REPO/$proj/$package
- popd
-
- echo "<html><body><a href=\"$package\">$package</a></body></html>" > $DIB_PIP_REPO/$proj/index.html
- echo "<a href=\"$proj\">$proj</a><br/>" >> $DIB_PIP_REPO/index.html
-
- popd
- done
-
- echo "</body></html>" >> $DIB_PIP_REPO/index.html
-
- local dib_pip_repo_apache_conf=$(apache_site_config_for dib_pip_repo)
-
- sudo cp $FILES/apache-dib-pip-repo.template $dib_pip_repo_apache_conf
- sudo sed -e "
- s|%DIB_PIP_REPO%|$DIB_PIP_REPO|g;
- s|%DIB_PIP_REPO_PORT%|$DIB_PIP_REPO_PORT|g;
- s|%APACHE_NAME%|$APACHE_NAME|g;
- " -i $dib_pip_repo_apache_conf
- enable_apache_site dib_pip_repo
-}
-
-# disk_image_create_upload() - Creates and uploads a diskimage-builder built image
-function disk_image_create_upload {
-
- local image_name=$1
- local image_elements=$2
- local elements_path=$3
-
- local image_path=$TOP_DIR/files/$image_name.qcow2
-
- # Include the apt-sources element in builds if we have an
- # alternative sources.list specified.
- if [ -n "$DIB_APT_SOURCES" ]; then
- if [ ! -e "$DIB_APT_SOURCES" ]; then
- die $LINENO "DIB_APT_SOURCES set but not found at $DIB_APT_SOURCES"
- fi
- local extra_elements="apt-sources"
- fi
-
- # Set the local pip repo as the primary index mirror so the
- # image is built with local packages
- local pypi_mirror_url=http://$SERVICE_HOST:$DIB_PIP_REPO_PORT/
- local pypi_mirror_url_1
-
- if [ -a $HOME/.pip/pip.conf ]; then
- # Add the current pip.conf index-url as an extra-index-url
- # in the image build
- pypi_mirror_url_1=$(iniget $HOME/.pip/pip.conf global index-url)
- else
- # If no pip.conf, set upstream pypi as an extra mirror
- # (this also sets the .pydistutils.cfg index-url)
- pypi_mirror_url_1=http://pypi.python.org/simple
- fi
-
- # The disk-image-create command to run
- ELEMENTS_PATH=$elements_path \
- DIB_APT_SOURCES=$DIB_APT_SOURCES \
- DIB_OFFLINE=$DIB_BUILD_OFFLINE \
- PYPI_MIRROR_URL=$pypi_mirror_url \
- PYPI_MIRROR_URL_1=$pypi_mirror_url_1 \
- disk-image-create -a amd64 $image_elements ${extra_elements:-} \
- --image-cache $DIB_IMAGE_CACHE \
- -o $image_path
-
- local token=$(keystone token-get | grep ' id ' | get_field 2)
- die_if_not_set $LINENO token "Keystone fail to get token"
-
- glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT \
- image-create --name $image_name --is-public True \
- --container-format=bare --disk-format qcow2 \
- < $image_path
-}
-
-# Restore xtrace
-$XTRACE
-
-# Tell emacs to use shell-script-mode
-## Local variables:
-## mode: shell-script
-## End:
diff --git a/lib/heat b/lib/heat
index c102163..a088e82 100644
--- a/lib/heat
+++ b/lib/heat
@@ -8,9 +8,7 @@
# ENABLED_SERVICES+=,heat,h-api,h-api-cfn,h-api-cw,h-eng
# Dependencies:
-#
-# - functions
-# - dib (if HEAT_CREATE_TEST_IMAGE=True)
+# (none)
# stack.sh
# ---------
@@ -37,6 +35,13 @@
HEAT_DIR=$DEST/heat
HEAT_CFNTOOLS_DIR=$DEST/heat-cfntools
HEAT_TEMPLATES_REPO_DIR=$DEST/heat-templates
+OCC_DIR=$DEST/os-collect-config
+ORC_DIR=$DEST/os-refresh-config
+OAC_DIR=$DEST/os-apply-config
+
+HEAT_PIP_REPO=$DATA_DIR/heat-pip-repo
+HEAT_PIP_REPO_PORT=${HEAT_PIP_REPO_PORT:-8899}
+
HEAT_AUTH_CACHE_DIR=${HEAT_AUTH_CACHE_DIR:-/var/cache/heat}
HEAT_STANDALONE=$(trueorfalse False HEAT_STANDALONE)
HEAT_ENABLE_ADOPT_ABANDON=$(trueorfalse False HEAT_ENABLE_ADOPT_ABANDON)
@@ -47,10 +52,6 @@
HEAT_STACK_DOMAIN=$(trueorfalse True HEAT_STACK_DOMAIN)
HEAT_API_HOST=${HEAT_API_HOST:-$HOST_IP}
HEAT_API_PORT=${HEAT_API_PORT:-8004}
-HEAT_FUNCTIONAL_IMAGE_ELEMENTS=${HEAT_FUNCTIONAL_IMAGE_ELEMENTS:-\
-vm fedora selinux-permissive pypi os-collect-config os-refresh-config \
-os-apply-config heat-cfntools heat-config heat-config-cfn-init \
-heat-config-puppet heat-config-script}
# other default options
@@ -296,22 +297,44 @@
fi
}
-# build_heat_functional_test_image() - Build and upload functional test image
-function build_heat_functional_test_image {
- if is_service_enabled dib; then
- build_dib_pip_repo "$OCC_DIR $OAC_DIR $ORC_DIR $HEAT_CFNTOOLS_DIR"
- local image_name=heat-functional-tests-image
+# build_heat_pip_mirror() - Build a pip mirror containing heat agent projects
+function build_heat_pip_mirror {
+ local project_dirs="$OCC_DIR $OAC_DIR $ORC_DIR $HEAT_CFNTOOLS_DIR"
+ local projpath proj package
- # Elements path for tripleo-image-elements and heat-templates software-config
- local elements_path=$TIE_DIR/elements:$HEAT_TEMPLATES_REPO_DIR/hot/software-config/elements
+ rm -rf $HEAT_PIP_REPO
+ mkdir -p $HEAT_PIP_REPO
- disk_image_create_upload "$image_name" "$HEAT_FUNCTIONAL_IMAGE_ELEMENTS" "$elements_path"
- iniset $TEMPEST_CONFIG orchestration image_ref $image_name
- else
- echo "Error, HEAT_CREATE_TEST_IMAGE=True requires dib" >&2
- echo "Add \"enable_service dib\" to your localrc" >&2
- exit 1
- fi
+ echo "<html><body>" > $HEAT_PIP_REPO/index.html
+ for projpath in $project_dirs; do
+ proj=$(basename $projpath)
+ mkdir -p $HEAT_PIP_REPO/$proj
+ pushd $projpath
+ rm -rf dist
+ python setup.py sdist
+ pushd dist
+ package=$(ls *)
+ mv $package $HEAT_PIP_REPO/$proj/$package
+ popd
+
+ echo "<html><body><a href=\"$package\">$package</a></body></html>" > $HEAT_PIP_REPO/$proj/index.html
+ echo "<a href=\"$proj\">$proj</a><br/>" >> $HEAT_PIP_REPO/index.html
+
+ popd
+ done
+
+ echo "</body></html>" >> $HEAT_PIP_REPO/index.html
+
+ local heat_pip_repo_apache_conf=$(apache_site_config_for heat_pip_repo)
+
+ sudo cp $FILES/apache-heat-pip-repo.template $heat_pip_repo_apache_conf
+ sudo sed -e "
+ s|%HEAT_PIP_REPO%|$HEAT_PIP_REPO|g;
+ s|%HEAT_PIP_REPO_PORT%|$HEAT_PIP_REPO_PORT|g;
+ s|%APACHE_NAME%|$APACHE_NAME|g;
+ " -i $heat_pip_repo_apache_conf
+ enable_apache_site heat_pip_repo
+ restart_apache_server
}
# Restore xtrace
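
The mirror built above is a plain "simple index" of sdists served by
Apache, so a guest image build could consume it roughly like this (the
package name is illustrative; the port is the ``HEAT_PIP_REPO_PORT``
default):

    pip install --index-url http://$SERVICE_HOST:8899/ os-collect-config
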
diff --git a/lib/horizon b/lib/horizon
index a8e83f9..c6e3692 100644
--- a/lib/horizon
+++ b/lib/horizon
@@ -94,6 +94,7 @@
cp $HORIZON_SETTINGS $local_settings
_horizon_config_set $local_settings "" COMPRESS_OFFLINE True
+ _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_DEFAULT_ROLE \"Member\"
_horizon_config_set $local_settings "" OPENSTACK_HOST \"${KEYSTONE_SERVICE_HOST}\"
_horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_URL "\"${KEYSTONE_SERVICE_PROTOCOL}://${KEYSTONE_SERVICE_HOST}:${KEYSTONE_SERVICE_PORT}/v2.0\""
diff --git a/lib/ironic b/lib/ironic
index ade889e..0d7c127 100644
--- a/lib/ironic
+++ b/lib/ironic
@@ -121,6 +121,16 @@
IRONIC_HTTP_SERVER=${IRONIC_HTTP_SERVER:-$HOST_IP}
IRONIC_HTTP_PORT=${IRONIC_HTTP_PORT:-8088}
+# NOTE(lucasagomes): This flag is used to differentiate the nodes that
+# use IPA as their deploy ramdisk from nodes that use the agent_* drivers
+# (which also use IPA but depend on Swift Temp URLs to work). At present,
+# all drivers that use the iSCSI approach for deployment support using
+# either the IPA or the bash ramdisk. In the future we want to remove
+# support for the bash ramdisk in favor of IPA; once we get there, this
+# flag can be removed, and all conditionals that use it should just run
+# by default.
+IRONIC_DEPLOY_DRIVER_ISCSI_WITH_IPA=$(trueorfalse False IRONIC_DEPLOY_DRIVER_ISCSI_WITH_IPA)
+
# get_pxe_boot_file() - Get the PXE/iPXE boot file path
function get_pxe_boot_file {
local relpath=syslinux/pxelinux.0
@@ -162,6 +172,11 @@
return 1
}
+function is_deployed_with_ipa_ramdisk {
+ is_deployed_by_agent || [[ "$IRONIC_DEPLOY_DRIVER_ISCSI_WITH_IPA" == "True" ]] && return 0
+ return 1
+}
+
# install_ironic() - Collect source and prepare
function install_ironic {
# make sure all needed service were enabled
@@ -328,9 +343,24 @@
iniset $IRONIC_CONF_FILE pxe tftp_server $IRONIC_TFTPSERVER_IP
iniset $IRONIC_CONF_FILE pxe tftp_root $IRONIC_TFTPBOOT_DIR
iniset $IRONIC_CONF_FILE pxe tftp_master_path $IRONIC_TFTPBOOT_DIR/master_images
+
+ local pxe_params=""
if [[ "$IRONIC_VM_LOG_CONSOLE" == "True" ]] ; then
- iniset $IRONIC_CONF_FILE pxe pxe_append_params "nofb nomodeset vga=normal console=ttyS0"
+ pxe_params+="nofb nomodeset vga=normal console=ttyS0"
+ if is_deployed_with_ipa_ramdisk; then
+ pxe_params+=" systemd.journald.forward_to_console=yes"
+ fi
fi
+ # When booting with less than 1GB of RAM, we need to switch from the
+ # default tmpfs to ramfs for ramdisks to decompress successfully.
+ if (is_ironic_hardware && [[ "$IRONIC_HW_NODE_RAM" -lt 1024 ]]) ||
+ (! is_ironic_hardware && [[ "$IRONIC_VM_SPECS_RAM" -lt 1024 ]]); then
+ pxe_params+=" rootfstype=ramfs"
+ fi
+ if [[ -n "$pxe_params" ]]; then
+ iniset $IRONIC_CONF_FILE pxe pxe_append_params "$pxe_params"
+ fi
+
if is_deployed_by_agent; then
if [[ "$SWIFT_ENABLE_TEMPURLS" == "True" ]] ; then
iniset $IRONIC_CONF_FILE glance swift_temp_url_key $SWIFT_TEMPURL_KEY
@@ -344,9 +374,6 @@
iniset $IRONIC_CONF_FILE glance swift_container glance
iniset $IRONIC_CONF_FILE glance swift_temp_url_duration 3600
iniset $IRONIC_CONF_FILE agent heartbeat_timeout 30
- if [[ "$IRONIC_VM_LOG_CONSOLE" == "True" ]] ; then
- iniset $IRONIC_CONF_FILE agent agent_pxe_append_params "nofb nomodeset vga=normal console=ttyS0 systemd.journald.forward_to_console=yes"
- fi
fi
if [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then
@@ -600,7 +627,7 @@
$node_options \
| grep " uuid " | get_field 2)
- ironic port-create --address $mac_address --node_uuid $node_id
+ ironic port-create --address $mac_address --node $node_id
total_nodes=$((total_nodes+1))
total_cpus=$((total_cpus+$ironic_node_cpu))
@@ -717,7 +744,7 @@
if [ "$IRONIC_BUILD_DEPLOY_RAMDISK" = "True" ]; then
# we can build them only if we're not offline
if [ "$OFFLINE" != "True" ]; then
- if is_deployed_by_agent; then
+ if is_deployed_with_ipa_ramdisk; then
build_ipa_coreos_ramdisk $IRONIC_DEPLOY_KERNEL_PATH $IRONIC_DEPLOY_RAMDISK_PATH
else
ramdisk-image-create $IRONIC_DEPLOY_FLAVOR \
@@ -727,7 +754,7 @@
die $LINENO "Deploy kernel+ramdisk files don't exist and cannot be build in OFFLINE mode"
fi
else
- if is_deployed_by_agent; then
+ if is_deployed_with_ipa_ramdisk; then
# download the agent image tarball
wget "$IRONIC_AGENT_KERNEL_URL" -O $IRONIC_DEPLOY_KERNEL_PATH
wget "$IRONIC_AGENT_RAMDISK_URL" -O $IRONIC_DEPLOY_RAMDISK_PATH
diff --git a/lib/neutron b/lib/neutron
index a0f9c36..a7aabc5 100755
--- a/lib/neutron
+++ b/lib/neutron
@@ -100,8 +100,10 @@
# Set up default directories
GITDIR["python-neutronclient"]=$DEST/python-neutronclient
+
NEUTRON_DIR=$DEST/neutron
NEUTRON_FWAAS_DIR=$DEST/neutron-fwaas
+NEUTRON_LBAAS_DIR=$DEST/neutron-lbaas
NEUTRON_VPNAAS_DIR=$DEST/neutron-vpnaas
NEUTRON_AUTH_CACHE_DIR=${NEUTRON_AUTH_CACHE_DIR:-/var/cache/neutron}
@@ -114,7 +116,6 @@
NEUTRON_CONF_DIR=/etc/neutron
NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf
-
export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"}
# Agent binaries. Note, binary paths for other agents are set in per-service
@@ -325,6 +326,12 @@
# Please refer to ``lib/neutron_plugins/README.md`` for details.
source $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN
+# Agent loadbalancer service plugin functions
+# -------------------------------------------
+
+# Hardcoding for 1 service plugin for now
+source $TOP_DIR/lib/neutron_plugins/services/loadbalancer
+
# Agent metering service plugin functions
# -------------------------------------------
@@ -351,17 +358,6 @@
TEMPEST_SERVICES+=,neutron
-# For backward compatibility, if q-lbaas service is enabled, make sure to load the
-# neutron-lbaas plugin. This hook should be removed in a future release, perhaps
-# as early as Liberty.
-
-if is_service_enabled q-lbaas; then
- if ! is_plugin_enabled neutron-lbaas; then
- DEPRECATED_TEXT+="External plugin neutron-lbaas has been automatically activated, please add the appropriate enable_plugin to your local.conf. This will be removed in the Liberty cycle."
- enable_plugin "neutron-lbaas" ${NEUTRON_LBAAS_REPO} ${NEUTRON_LBAAS_BRANCH}
- fi
-fi
-
# Save trace setting
XTRACE=$(set +o | grep xtrace)
set +o xtrace
@@ -429,7 +425,9 @@
iniset_rpc_backend neutron $NEUTRON_CONF DEFAULT
# goes before q-svc to init Q_SERVICE_PLUGIN_CLASSES
-
+ if is_service_enabled q-lbaas; then
+ _configure_neutron_lbaas
+ fi
if is_service_enabled q-metering; then
_configure_neutron_metering
fi
@@ -607,8 +605,7 @@
recreate_database $Q_DB_NAME
# Run Neutron db migrations
$NEUTRON_BIN_DIR/neutron-db-manage --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE upgrade head
-
- for svc in fwaas vpnaas; do
+ for svc in fwaas lbaas vpnaas; do
if [ "$svc" = "vpnaas" ]; then
q_svc="q-vpn"
else
@@ -628,6 +625,10 @@
git_clone $NEUTRON_FWAAS_REPO $NEUTRON_FWAAS_DIR $NEUTRON_FWAAS_BRANCH
setup_develop $NEUTRON_FWAAS_DIR
fi
+ if is_service_enabled q-lbaas; then
+ git_clone $NEUTRON_LBAAS_REPO $NEUTRON_LBAAS_DIR $NEUTRON_LBAAS_BRANCH
+ setup_develop $NEUTRON_LBAAS_DIR
+ fi
if is_service_enabled q-vpn; then
git_clone $NEUTRON_VPNAAS_REPO $NEUTRON_VPNAAS_DIR $NEUTRON_VPNAAS_BRANCH
setup_develop $NEUTRON_VPNAAS_DIR
@@ -671,6 +672,10 @@
if is_service_enabled q-agt q-dhcp q-l3; then
neutron_plugin_install_agent_packages
fi
+
+ if is_service_enabled q-lbaas; then
+ neutron_agent_lbaas_install_agent_packages
+ fi
}
# Start running processes, including screen
@@ -730,6 +735,10 @@
run_process q-domua "python $AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE.domU"
fi
+ if is_service_enabled q-lbaas; then
+ run_process q-lbaas "python $AGENT_LBAAS_BINARY --config-file $NEUTRON_CONF --config-file=$LBAAS_AGENT_CONF_FILENAME"
+ fi
+
if is_service_enabled q-metering; then
run_process q-metering "python $AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME"
fi
@@ -753,6 +762,9 @@
stop_process q-agt
+ if is_service_enabled q-lbaas; then
+ neutron_lbaas_stop
+ fi
if is_service_enabled q-fwaas; then
neutron_fwaas_stop
fi
@@ -780,11 +792,12 @@
fi
# delete all namespaces created by neutron
- for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|fip|snat)-[0-9a-f-]*'); do
+ for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|qlbaas|fip|snat)-[0-9a-f-]*'); do
sudo ip netns delete ${ns}
done
}
+
function _create_neutron_conf_dir {
# Put config files in ``NEUTRON_CONF_DIR`` for everyone to find
if [[ ! -d $NEUTRON_CONF_DIR ]]; then
@@ -954,6 +967,14 @@
iniset $NEUTRON_CONF DEFAULT notification_driver messaging
}
+function _configure_neutron_lbaas {
+ if [ -f $NEUTRON_LBAAS_DIR/etc/neutron_lbaas.conf ]; then
+ cp $NEUTRON_LBAAS_DIR/etc/neutron_lbaas.conf $NEUTRON_CONF_DIR
+ fi
+ neutron_agent_lbaas_configure_common
+ neutron_agent_lbaas_configure_agent
+}
+
function _configure_neutron_metering {
neutron_agent_metering_configure_common
neutron_agent_metering_configure_agent
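
With the hooks above in place, enabling the agent from ``localrc`` is a
one-line change; the ``q-lbaas`` service name triggers the clone of
``NEUTRON_LBAAS_REPO`` and the haproxy agent setup (the other ``q-*``
services shown are the usual neutron set):

    enable_service q-svc q-agt q-dhcp q-l3 q-meta q-lbaas
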
diff --git a/lib/neutron_plugins/nec b/lib/neutron_plugins/nec
index 3b1a257..9ea7338 100644
--- a/lib/neutron_plugins/nec
+++ b/lib/neutron_plugins/nec
@@ -1,131 +1,10 @@
#!/bin/bash
-#
-# Neutron NEC OpenFlow plugin
-# ---------------------------
-# Save trace setting
-NEC_XTRACE=$(set +o | grep xtrace)
-set +o xtrace
+# This file is needed so Q_PLUGIN=nec will work.
-# Configuration parameters
-OFC_HOST=${OFC_HOST:-127.0.0.1}
-OFC_PORT=${OFC_PORT:-8888}
-
-OFC_API_HOST=${OFC_API_HOST:-$OFC_HOST}
-OFC_API_PORT=${OFC_API_PORT:-$OFC_PORT}
-OFC_OFP_HOST=${OFC_OFP_HOST:-$OFC_HOST}
-OFC_OFP_PORT=${OFC_OFP_PORT:-6633}
-OFC_DRIVER=${OFC_DRIVER:-trema}
-OFC_RETRY_MAX=${OFC_RETRY_MAX:-0}
-OFC_RETRY_INTERVAL=${OFC_RETRY_INTERVAL:-1}
-
-# Main logic
-# ---------------------------
-
-source $TOP_DIR/lib/neutron_plugins/ovs_base
-
-function neutron_plugin_create_nova_conf {
- _neutron_ovs_base_configure_nova_vif_driver
-}
-
-function neutron_plugin_install_agent_packages {
- # SKIP_OVS_INSTALL is useful when we want to use Open vSwitch whose
- # version is different from the version provided by the distribution.
- if [[ "$SKIP_OVS_INSTALL" = "True" ]]; then
- echo "You need to install Open vSwitch manually."
- return
- fi
- _neutron_ovs_base_install_agent_packages
-}
-
-function neutron_plugin_configure_common {
- Q_PLUGIN_CONF_PATH=etc/neutron/plugins/nec
- Q_PLUGIN_CONF_FILENAME=nec.ini
- Q_PLUGIN_CLASS="neutron.plugins.nec.nec_plugin.NECPluginV2"
-}
-
-function neutron_plugin_configure_debug_command {
- _neutron_ovs_base_configure_debug_command
-}
-
-function neutron_plugin_configure_dhcp_agent {
- :
-}
-
-function neutron_plugin_configure_l3_agent {
- _neutron_ovs_base_configure_l3_agent
-}
-
-function _quantum_plugin_setup_bridge {
- if [[ "$SKIP_OVS_BRIDGE_SETUP" = "True" ]]; then
- return
- fi
- # Set up integration bridge
- _neutron_ovs_base_setup_bridge $OVS_BRIDGE
- # Generate datapath ID from HOST_IP
- local dpid=$(printf "%07d%03d%03d%03d\n" ${HOST_IP//./ })
- sudo ovs-vsctl --no-wait set Bridge $OVS_BRIDGE other-config:datapath-id=$dpid
- sudo ovs-vsctl --no-wait set-fail-mode $OVS_BRIDGE secure
- sudo ovs-vsctl --no-wait set-controller $OVS_BRIDGE tcp:$OFC_OFP_HOST:$OFC_OFP_PORT
- if [ -n "$OVS_INTERFACE" ]; then
- sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_BRIDGE $OVS_INTERFACE
- fi
- _neutron_setup_ovs_tunnels $OVS_BRIDGE
-}
-
-function neutron_plugin_configure_plugin_agent {
- _quantum_plugin_setup_bridge
-
- AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-nec-agent"
-
- _neutron_ovs_base_configure_firewall_driver
-}
-
-function neutron_plugin_configure_service {
- iniset $NEUTRON_CONF DEFAULT api_extensions_path neutron/plugins/nec/extensions/
- iniset /$Q_PLUGIN_CONF_FILE ofc host $OFC_API_HOST
- iniset /$Q_PLUGIN_CONF_FILE ofc port $OFC_API_PORT
- iniset /$Q_PLUGIN_CONF_FILE ofc driver $OFC_DRIVER
- iniset /$Q_PLUGIN_CONF_FILE ofc api_retry_max OFC_RETRY_MAX
- iniset /$Q_PLUGIN_CONF_FILE ofc api_retry_interval OFC_RETRY_INTERVAL
-
- _neutron_ovs_base_configure_firewall_driver
-}
-
-function neutron_plugin_setup_interface_driver {
- local conf_file=$1
- iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver
- iniset $conf_file DEFAULT ovs_use_veth True
-}
-
-# Utility functions
-# ---------------------------
-
-# Setup OVS tunnel manually
-function _neutron_setup_ovs_tunnels {
- local bridge=$1
- local id=0
- GRE_LOCAL_IP=${GRE_LOCAL_IP:-$HOST_IP}
- if [ -n "$GRE_REMOTE_IPS" ]; then
- for ip in ${GRE_REMOTE_IPS//:/ }; do
- if [[ "$ip" == "$GRE_LOCAL_IP" ]]; then
- continue
- fi
- sudo ovs-vsctl --no-wait add-port $bridge gre$id -- \
- set Interface gre$id type=gre options:remote_ip=$ip
- id=`expr $id + 1`
- done
- fi
-}
-
+# FIXME(amotoki): This function should not be here, but unfortunately
+# devstack calls it before the external plugins are fetched
function has_neutron_plugin_security_group {
# 0 means True here
return 0
}
-
-function neutron_plugin_check_adv_test_requirements {
- is_service_enabled q-agt && is_service_enabled q-dhcp && return 0
-}
-
-# Restore xtrace
-$NEC_XTRACE
diff --git a/lib/neutron_plugins/services/loadbalancer b/lib/neutron_plugins/services/loadbalancer
new file mode 100644
index 0000000..f465cc9
--- /dev/null
+++ b/lib/neutron_plugins/services/loadbalancer
@@ -0,0 +1,49 @@
+# Neutron loadbalancer plugin
+# ---------------------------
+
+# Save trace setting
+LB_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+AGENT_LBAAS_BINARY="$NEUTRON_BIN_DIR/neutron-lbaas-agent"
+LBAAS_PLUGIN=neutron_lbaas.services.loadbalancer.plugin.LoadBalancerPlugin
+
+function neutron_agent_lbaas_install_agent_packages {
+ if is_ubuntu || is_fedora || is_suse; then
+ install_package haproxy
+ fi
+}
+
+function neutron_agent_lbaas_configure_common {
+ _neutron_service_plugin_class_add $LBAAS_PLUGIN
+ _neutron_deploy_rootwrap_filters $NEUTRON_LBAAS_DIR
+}
+
+function neutron_agent_lbaas_configure_agent {
+ LBAAS_AGENT_CONF_PATH=/etc/neutron/services/loadbalancer/haproxy
+ mkdir -p $LBAAS_AGENT_CONF_PATH
+
+ LBAAS_AGENT_CONF_FILENAME="$LBAAS_AGENT_CONF_PATH/lbaas_agent.ini"
+
+ cp $NEUTRON_LBAAS_DIR/etc/lbaas_agent.ini $LBAAS_AGENT_CONF_FILENAME
+
+ # ovs_use_veth needs to be set before the plugin configuration
+ # occurs to allow plugins to override the setting.
+ iniset $LBAAS_AGENT_CONF_FILENAME DEFAULT ovs_use_veth $Q_OVS_USE_VETH
+
+ neutron_plugin_setup_interface_driver $LBAAS_AGENT_CONF_FILENAME
+
+ if is_fedora; then
+ iniset $LBAAS_AGENT_CONF_FILENAME DEFAULT user_group "nobody"
+ iniset $LBAAS_AGENT_CONF_FILENAME haproxy user_group "nobody"
+ fi
+}
+
+function neutron_lbaas_stop {
+ pids=$(ps aux | awk '/[h]aproxy/ { print $2 }')  # [h] avoids matching the awk process itself
+ [ ! -z "$pids" ] && sudo kill $pids
+}
+
+# Restore xtrace
+$LB_XTRACE
diff --git a/lib/neutron_plugins/vmware_nsx b/lib/neutron_plugins/vmware_nsx
index 4cbedd6..b6c1c9c 100644
--- a/lib/neutron_plugins/vmware_nsx
+++ b/lib/neutron_plugins/vmware_nsx
@@ -1,147 +1,10 @@
#!/bin/bash
-#
-# Neutron VMware NSX plugin
-# -------------------------
-# Save trace setting
-NSX_XTRACE=$(set +o | grep xtrace)
-set +o xtrace
+# This file is needed so Q_PLUGIN=vmware_nsx will work.
-source $TOP_DIR/lib/neutron_plugins/ovs_base
-
-function setup_integration_bridge {
- _neutron_ovs_base_setup_bridge $OVS_BRIDGE
- # Set manager to NSX controller (1st of list)
- if [[ "$NSX_CONTROLLERS" != "" ]]; then
- # Get the first controller
- controllers=(${NSX_CONTROLLERS//,/ })
- OVS_MGR_IP=${controllers[0]}
- else
- die $LINENO "Error - No controller specified. Unable to set a manager for OVS"
- fi
- sudo ovs-vsctl set-manager ssl:$OVS_MGR_IP
-}
-
-function is_neutron_ovs_base_plugin {
- # NSX uses OVS, but not the l3-agent
- return 0
-}
-
-function neutron_plugin_create_nova_conf {
- # if n-cpu is enabled, then setup integration bridge
- if is_service_enabled n-cpu; then
- setup_integration_bridge
- fi
-}
-
-function neutron_plugin_install_agent_packages {
- # VMware NSX Plugin does not run q-agt, but it currently needs dhcp and metadata agents
- _neutron_ovs_base_install_agent_packages
-}
-
-function neutron_plugin_configure_common {
- Q_PLUGIN_CONF_PATH=etc/neutron/plugins/vmware
- Q_PLUGIN_CONF_FILENAME=nsx.ini
- Q_PLUGIN_CLASS="neutron.plugins.vmware.plugin.NsxPlugin"
-}
-
-function neutron_plugin_configure_debug_command {
- sudo ovs-vsctl --no-wait -- --may-exist add-br $PUBLIC_BRIDGE
- iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT external_network_bridge "$PUBLIC_BRIDGE"
-}
-
-function neutron_plugin_configure_dhcp_agent {
- setup_integration_bridge
- iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata True
- iniset $Q_DHCP_CONF_FILE DEFAULT enable_metadata_network True
- iniset $Q_DHCP_CONF_FILE DEFAULT ovs_use_veth True
-}
-
-function neutron_plugin_configure_l3_agent {
- # VMware NSX plugin does not run L3 agent
- die $LINENO "q-l3 should not be executed with VMware NSX plugin!"
-}
-
-function neutron_plugin_configure_plugin_agent {
- # VMware NSX plugin does not run L2 agent
- die $LINENO "q-agt must not be executed with VMware NSX plugin!"
-}
-
-function neutron_plugin_configure_service {
- if [[ "$MAX_LP_PER_BRIDGED_LS" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE nsx max_lp_per_bridged_ls $MAX_LP_PER_BRIDGED_LS
- fi
- if [[ "$MAX_LP_PER_OVERLAY_LS" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE nsx max_lp_per_overlay_ls $MAX_LP_PER_OVERLAY_LS
- fi
- if [[ "$FAILOVER_TIME" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE nsx failover_time $FAILOVER_TIME
- fi
- if [[ "$CONCURRENT_CONNECTIONS" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE nsx concurrent_connections $CONCURRENT_CONNECTIONS
- fi
-
- if [[ "$DEFAULT_TZ_UUID" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_tz_uuid $DEFAULT_TZ_UUID
- else
- die $LINENO "The VMware NSX plugin won't work without a default transport zone."
- fi
- if [[ "$DEFAULT_L3_GW_SVC_UUID" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_l3_gw_service_uuid $DEFAULT_L3_GW_SVC_UUID
- Q_L3_ENABLED=True
- Q_L3_ROUTER_PER_TENANT=True
- iniset /$Q_PLUGIN_CONF_FILE nsx metadata_mode access_network
- fi
- if [[ "$DEFAULT_L2_GW_SVC_UUID" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_l2_gw_service_uuid $DEFAULT_L2_GW_SVC_UUID
- fi
- # NSX_CONTROLLERS must be a comma separated string
- if [[ "$NSX_CONTROLLERS" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE DEFAULT nsx_controllers $NSX_CONTROLLERS
- else
- die $LINENO "The VMware NSX plugin needs at least an NSX controller."
- fi
- if [[ "$NSX_USER" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE DEFAULT nsx_user $NSX_USER
- fi
- if [[ "$NSX_PASSWORD" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE DEFAULT nsx_password $NSX_PASSWORD
- fi
- if [[ "$NSX_HTTP_TIMEOUT" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE DEFAULT http_timeout $NSX_HTTP_TIMEOUT
- fi
- if [[ "$NSX_RETRIES" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE DEFAULT retries $NSX_RETRIES
- fi
- if [[ "$NSX_REDIRECTS" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE DEFAULT redirects $NSX_REDIRECTS
- fi
- if [[ "$AGENT_MODE" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE nsx agent_mode $AGENT_MODE
- if [[ "$AGENT_MODE" == "agentless" ]]; then
- if [[ "$DEFAULT_SERVICE_CLUSTER_UUID" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_service_cluster_uuid $DEFAULT_SERVICE_CLUSTER_UUID
- else
- die $LINENO "Agentless mode requires a service cluster."
- fi
- iniset /$Q_PLUGIN_CONF_FILE nsx_metadata metadata_server_address $Q_META_DATA_IP
- fi
- fi
-}
-
-function neutron_plugin_setup_interface_driver {
- local conf_file=$1
- iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver
-}
-
+# FIXME(salv-orlando): This function should not be here, but unfortunately
+# devstack calls it before the external plugins are fetched
function has_neutron_plugin_security_group {
# 0 means True here
return 0
}
-
-function neutron_plugin_check_adv_test_requirements {
- is_service_enabled q-dhcp && return 0
-}
-
-# Restore xtrace
-$NSX_XTRACE
diff --git a/lib/neutron_plugins/vmware_nsx_v b/lib/neutron_plugins/vmware_nsx_v
new file mode 100644
index 0000000..3d33c65
--- /dev/null
+++ b/lib/neutron_plugins/vmware_nsx_v
@@ -0,0 +1,10 @@
+#!/bin/bash
+#
+# This file is needed so Q_PLUGIN=vmware_nsx_v will work.
+
+# FIXME(salv-orlando): This function should not be here, but unfortunately
+# devstack calls it before the external plugins are fetched
+function has_neutron_plugin_security_group {
+ # 0 means True here
+ return 0
+}
diff --git a/lib/neutron_thirdparty/trema b/lib/neutron_thirdparty/trema
deleted file mode 100644
index 075f013..0000000
--- a/lib/neutron_thirdparty/trema
+++ /dev/null
@@ -1,119 +0,0 @@
-#!/bin/bash
-#
-# Trema Sliceable Switch
-# ----------------------
-
-# Trema is a Full-Stack OpenFlow Framework in Ruby and C
-# https://github.com/trema/trema
-#
-# Trema Sliceable Switch is an OpenFlow controller which provides
-# virtual layer-2 network slices.
-# https://github.com/trema/apps/wiki
-
-# Trema Sliceable Switch (OpenFlow Controller)
-TREMA_APPS_REPO=${TREMA_APPS_REPO:-https://github.com/trema/apps.git}
-TREMA_APPS_BRANCH=${TREMA_APPS_BRANCH:-master}
-
-# Save trace setting
-TREMA3_XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-TREMA_DIR=${TREMA_DIR:-$DEST/trema}
-TREMA_SS_DIR="$TREMA_DIR/apps/sliceable_switch"
-
-TREMA_DATA_DIR=${TREMA_DATA_DIR:-$DATA_DIR/trema}
-TREMA_SS_ETC_DIR=$TREMA_DATA_DIR/sliceable_switch/etc
-TREMA_SS_DB_DIR=$TREMA_DATA_DIR/sliceable_switch/db
-TREMA_SS_SCRIPT_DIR=$TREMA_DATA_DIR/sliceable_switch/script
-TREMA_TMP_DIR=$TREMA_DATA_DIR/trema
-
-TREMA_LOG_LEVEL=${TREMA_LOG_LEVEL:-info}
-
-TREMA_SS_CONFIG=$TREMA_SS_ETC_DIR/sliceable.conf
-TREMA_SS_APACHE_CONFIG=$(apache_site_config_for sliceable_switch)
-
-# configure_trema - Set config files, create data dirs, etc
-function configure_trema {
- # prepare dir
- for d in $TREMA_SS_ETC_DIR $TREMA_SS_DB_DIR $TREMA_SS_SCRIPT_DIR; do
- sudo mkdir -p $d
- sudo chown -R `whoami` $d
- done
- sudo mkdir -p $TREMA_TMP_DIR
-}
-
-# init_trema - Initialize databases, etc.
-function init_trema {
- local _pwd=$(pwd)
-
- # Initialize databases for Sliceable Switch
- cd $TREMA_SS_DIR
- rm -f filter.db slice.db
- ./create_tables.sh
- mv filter.db slice.db $TREMA_SS_DB_DIR
- # Make sure that apache cgi has write access to the databases
- sudo chown -R www-data.www-data $TREMA_SS_DB_DIR
- cd $_pwd
-
- # Setup HTTP Server for sliceable_switch
- cp $TREMA_SS_DIR/{Slice.pm,Filter.pm,config.cgi} $TREMA_SS_SCRIPT_DIR
- sed -i -e "s|/home/sliceable_switch/db|$TREMA_SS_DB_DIR|" \
- $TREMA_SS_SCRIPT_DIR/config.cgi
-
- sudo cp $TREMA_SS_DIR/apache/sliceable_switch $TREMA_SS_APACHE_CONFIG
- sudo sed -i -e "s|/home/sliceable_switch/script|$TREMA_SS_SCRIPT_DIR|" \
- $TREMA_SS_APACHE_CONFIG
- # TODO(gabriel-bezerra): use some function from lib/apache to enable these modules
- sudo a2enmod rewrite actions
- enable_apache_site sliceable_switch
-
- cp $TREMA_SS_DIR/sliceable_switch_null.conf $TREMA_SS_CONFIG
- sed -i -e "s|^\$apps_dir.*$|\$apps_dir = \"$TREMA_DIR/apps\"|" \
- -e "s|^\$db_dir.*$|\$db_dir = \"$TREMA_SS_DB_DIR\"|" \
- $TREMA_SS_CONFIG
-}
-
-function gem_install {
- [[ "$OFFLINE" = "True" ]] && return
- [ -n "$RUBYGEMS_CMD" ] || get_gem_command
-
- local pkg=$1
- $RUBYGEMS_CMD list | grep "^${pkg} " && return
- sudo $RUBYGEMS_CMD install $pkg
-}
-
-function get_gem_command {
- # Trema requires ruby 1.8, so gem1.8 is checked first
- RUBYGEMS_CMD=$(which gem1.8 || which gem)
- if [ -z "$RUBYGEMS_CMD" ]; then
- echo "Warning: ruby gems command not found."
- fi
-}
-
-function install_trema {
- # Trema
- gem_install trema
- # Sliceable Switch
- git_clone $TREMA_APPS_REPO $TREMA_DIR/apps $TREMA_APPS_BRANCH
- make -C $TREMA_DIR/apps/topology
- make -C $TREMA_DIR/apps/flow_manager
- make -C $TREMA_DIR/apps/sliceable_switch
-}
-
-function start_trema {
- restart_apache_server
-
- sudo LOGGING_LEVEL=$TREMA_LOG_LEVEL TREMA_TMP=$TREMA_TMP_DIR \
- trema run -d -c $TREMA_SS_CONFIG
-}
-
-function stop_trema {
- sudo TREMA_TMP=$TREMA_TMP_DIR trema killall
-}
-
-function check_trema {
- :
-}
-
-# Restore xtrace
-$TREMA3_XTRACE
diff --git a/lib/neutron_thirdparty/vmware_nsx b/lib/neutron_thirdparty/vmware_nsx
index 7027a29..03853a9 100644
--- a/lib/neutron_thirdparty/vmware_nsx
+++ b/lib/neutron_thirdparty/vmware_nsx
@@ -1,89 +1,2 @@
-#!/bin/bash
-#
-# VMware NSX
-# ----------
-
-# This third-party addition can be used to configure connectivity between a DevStack instance
-# and an NSX Gateway in dev/test environments. In order to use this correctly, the following
-# env variables need to be set (e.g. in your localrc file):
-#
-# * enable_service vmware_nsx --> to execute this third-party addition
-# * PUBLIC_BRIDGE --> bridge used for external connectivity, typically br-ex
-# * NSX_GATEWAY_NETWORK_INTERFACE --> interface used to communicate with the NSX Gateway
-# * NSX_GATEWAY_NETWORK_CIDR --> CIDR to configure $PUBLIC_BRIDGE, e.g. 172.24.4.211/24
-
-# Save trace setting
-NSX3_XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-# This is the interface that connects the Devstack instance
-# to an network that allows it to talk to the gateway for
-# testing purposes
-NSX_GATEWAY_NETWORK_INTERFACE=${NSX_GATEWAY_NETWORK_INTERFACE:-eth2}
-# Re-declare floating range as it's needed also in stop_vmware_nsx, which
-# is invoked by unstack.sh
-FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.0/24}
-
-function configure_vmware_nsx {
- :
-}
-
-function init_vmware_nsx {
- if ! is_set NSX_GATEWAY_NETWORK_CIDR; then
- NSX_GATEWAY_NETWORK_CIDR=$PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/}
- echo "The IP address to set on $PUBLIC_BRIDGE was not specified. "
- echo "Defaulting to "$NSX_GATEWAY_NETWORK_CIDR
- fi
- # Make sure the interface is up, but not configured
- sudo ip link set $NSX_GATEWAY_NETWORK_INTERFACE up
- # Save and then flush the IP addresses on the interface
- addresses=$(ip addr show dev $NSX_GATEWAY_NETWORK_INTERFACE | grep inet | awk {'print $2'})
- sudo ip addr flush $NSX_GATEWAY_NETWORK_INTERFACE
- # Use the PUBLIC Bridge to route traffic to the NSX gateway
- # NOTE(armando-migliaccio): if running in a nested environment this will work
- # only with mac learning enabled, portsecurity and security profiles disabled
- # The public bridge might not exist for the NSX plugin if Q_USE_DEBUG_COMMAND is off
- # Try to create it anyway
- sudo ovs-vsctl --may-exist add-br $PUBLIC_BRIDGE
- sudo ovs-vsctl --may-exist add-port $PUBLIC_BRIDGE $NSX_GATEWAY_NETWORK_INTERFACE
- nsx_gw_net_if_mac=$(ip link show $NSX_GATEWAY_NETWORK_INTERFACE | awk '/ether/ {print $2}')
- sudo ip link set address $nsx_gw_net_if_mac dev $PUBLIC_BRIDGE
- for address in $addresses; do
- sudo ip addr add dev $PUBLIC_BRIDGE $address
- done
- sudo ip addr add dev $PUBLIC_BRIDGE $NSX_GATEWAY_NETWORK_CIDR
- sudo ip link set $PUBLIC_BRIDGE up
-}
-
-function install_vmware_nsx {
- :
-}
-
-function start_vmware_nsx {
- :
-}
-
-function stop_vmware_nsx {
- if ! is_set NSX_GATEWAY_NETWORK_CIDR; then
- NSX_GATEWAY_NETWORK_CIDR=$PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/}
- echo "The IP address expected on $PUBLIC_BRIDGE was not specified. "
- echo "Defaulting to "$NSX_GATEWAY_NETWORK_CIDR
- fi
- sudo ip addr del $NSX_GATEWAY_NETWORK_CIDR dev $PUBLIC_BRIDGE
- # Save and then flush remaining addresses on the interface
- addresses=$(ip addr show dev $PUBLIC_BRIDGE | grep inet | awk {'print $2'})
- sudo ip addr flush $PUBLIC_BRIDGE
- # Try to detach physical interface from PUBLIC_BRIDGE
- sudo ovs-vsctl del-port $NSX_GATEWAY_NETWORK_INTERFACE
- # Restore addresses on NSX_GATEWAY_NETWORK_INTERFACE
- for address in $addresses; do
- sudo ip addr add dev $NSX_GATEWAY_NETWORK_INTERFACE $address
- done
-}
-
-function check_vmware_nsx {
- neutron-check-nsx-config $NEUTRON_CONF_DIR/plugins/vmware/nsx.ini
-}
-
-# Restore xtrace
-$NSX3_XTRACE
+# REVISIT(roeyc): this file is left empty so that 'enable_service vmware_nsx'
+# continues to work.
diff --git a/lib/oslo b/lib/oslo
index 18cddc1..86efb60 100644
--- a/lib/oslo
+++ b/lib/oslo
@@ -36,6 +36,7 @@
GITDIR["oslo.rootwrap"]=$DEST/oslo.rootwrap
GITDIR["oslo.serialization"]=$DEST/oslo.serialization
GITDIR["oslo.utils"]=$DEST/oslo.utils
+GITDIR["oslo.versionedobjects"]=$DEST/oslo.versionedobjects
GITDIR["oslo.vmware"]=$DEST/oslo.vmware
GITDIR["pycadf"]=$DEST/pycadf
GITDIR["stevedore"]=$DEST/stevedore
@@ -72,6 +73,7 @@
_do_install_oslo_lib "oslo.rootwrap"
_do_install_oslo_lib "oslo.serialization"
_do_install_oslo_lib "oslo.utils"
+ _do_install_oslo_lib "oslo.versionedobjects"
_do_install_oslo_lib "oslo.vmware"
_do_install_oslo_lib "pycadf"
_do_install_oslo_lib "stevedore"
diff --git a/lib/rpc_backend b/lib/rpc_backend
index 899748c..ff22bbf 100644
--- a/lib/rpc_backend
+++ b/lib/rpc_backend
@@ -158,9 +158,6 @@
fi
_configure_qpid
elif is_service_enabled zeromq; then
- # NOTE(ewindisch): Redis is not strictly necessary
- # but there is a matchmaker driver that works
- # really well & out of the box for multi-node.
if is_fedora; then
install_package zeromq python-zmq
if [ "$ZEROMQ_MATCHMAKER" == "redis" ]; then
@@ -243,11 +240,15 @@
local section=$3
if is_service_enabled zeromq; then
iniset $file $section rpc_backend "zmq"
- iniset $file $section rpc_zmq_matchmaker \
- oslo_messaging._drivers.matchmaker_redis.MatchMakerRedis
- # Set MATCHMAKER_REDIS_HOST if running multi-node.
- MATCHMAKER_REDIS_HOST=${MATCHMAKER_REDIS_HOST:-127.0.0.1}
- iniset $file matchmaker_redis host $MATCHMAKER_REDIS_HOST
+ iniset $file $section rpc_zmq_host `hostname`
+ if [ "$ZEROMQ_MATCHMAKER" == "redis" ]; then
+ iniset $file $section rpc_zmq_matchmaker \
+ oslo.messaging._drivers.matchmaker_redis.MatchMakerRedis
+ MATCHMAKER_REDIS_HOST=${MATCHMAKER_REDIS_HOST:-127.0.0.1}
+ iniset $file matchmaker_redis host $MATCHMAKER_REDIS_HOST
+ else
+ die $LINENO "Other matchmaker drivers not supported"
+ fi
elif is_service_enabled qpid || [ -n "$QPID_HOST" ]; then
# For Qpid use the 'amqp' oslo.messaging transport when AMQP 1.0 is used
if [ "$RPC_MESSAGING_PROTOCOL" == "AMQP1" ]; then
diff --git a/lib/sahara b/lib/sahara
index a84a06f..521b19a 100644
--- a/lib/sahara
+++ b/lib/sahara
@@ -65,9 +65,25 @@
if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
- local sahara_service=$(get_or_create_service "sahara" \
- "data_processing" "Sahara Data Processing")
- get_or_create_endpoint $sahara_service \
+ # TODO: remove the "data_processing" service when bug #1356053 is fixed
+ local sahara_service_old=$(openstack service create \
+ "data_processing" \
+ --name "sahara" \
+ --description "Sahara Data Processing" \
+ -f value -c id
+ )
+ local sahara_service_new=$(openstack service create \
+ "data-processing" \
+ --name "sahara" \
+ --description "Sahara Data Processing" \
+ -f value -c id
+ )
+ get_or_create_endpoint $sahara_service_old \
+ "$REGION_NAME" \
+ "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
+ "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
+ "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s"
+ get_or_create_endpoint $sahara_service_new \
"$REGION_NAME" \
"$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
"$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
@@ -123,14 +139,12 @@
if is_service_enabled neutron; then
iniset $SAHARA_CONF_FILE DEFAULT use_neutron true
- iniset $SAHARA_CONF_FILE DEFAULT use_floating_ips true
if is_ssl_enabled_service "neutron" || is_service_enabled tls-proxy; then
iniset $SAHARA_CONF_FILE neutron ca_file $SSL_BUNDLE_FILE
fi
else
iniset $SAHARA_CONF_FILE DEFAULT use_neutron false
- iniset $SAHARA_CONF_FILE DEFAULT use_floating_ips false
fi
if is_service_enabled heat; then
diff --git a/lib/tempest b/lib/tempest
index 6177ffe..f856ce0 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -63,6 +63,12 @@
BUILD_TIMEOUT=${BUILD_TIMEOUT:-196}
+# This must be False on stable branches, as master tempest
+# deps do not match stable branch deps. Set this to True to
+# have tempest installed in devstack by default.
+INSTALL_TEMPEST=${INSTALL_TEMPEST:-"False"}
+
+
BOTO_MATERIALS_PATH="$FILES/images/s3-materials/cirros-${CIRROS_VERSION}"
BOTO_CONF=/etc/boto.cfg
@@ -94,8 +100,12 @@
# configure_tempest() - Set config files, create data dirs, etc
function configure_tempest {
- # install testr since its used to process tempest logs
- pip_install $(get_from_global_requirements testrepository)
+ if [[ "$INSTALL_TEMPEST" == "True" ]]; then
+ setup_develop $TEMPEST_DIR
+ else
+ # install testr since its used to process tempest logs
+ pip_install $(get_from_global_requirements testrepository)
+ fi
local image_lines
local images
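
Once ``INSTALL_TEMPEST=True`` has installed tempest via ``setup_develop``,
the suite can also be driven without tox, e.g. (a sketch using the testr
runner installed above):

    cd /opt/stack/tempest
    testr run tempest.scenario.test_network_basic_ops
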
diff --git a/lib/trove b/lib/trove
index d437718..d777983 100644
--- a/lib/trove
+++ b/lib/trove
@@ -37,6 +37,7 @@
TROVE_CONF=$TROVE_CONF_DIR/trove.conf
TROVE_TASKMANAGER_CONF=$TROVE_CONF_DIR/trove-taskmanager.conf
TROVE_CONDUCTOR_CONF=$TROVE_CONF_DIR/trove-conductor.conf
+TROVE_GUESTAGENT_CONF=$TROVE_CONF_DIR/trove-guestagent.conf
TROVE_API_PASTE_INI=$TROVE_CONF_DIR/api-paste.ini
TROVE_LOCAL_CONF_DIR=$TROVE_DIR/etc/trove
@@ -171,18 +172,18 @@
fi
# Set up Guest Agent conf
- iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT rabbit_userid $RABBIT_USERID
- iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT rabbit_host $TROVE_HOST_GATEWAY
- iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT rabbit_password $RABBIT_PASSWORD
- iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT nova_proxy_admin_user radmin
- iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT nova_proxy_admin_tenant_name trove
- iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT nova_proxy_admin_pass $RADMIN_USER_PASS
- iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT
- iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT control_exchange trove
- iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT ignore_users os_admin
- iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT log_dir /tmp/
- iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT log_file trove-guestagent.log
- setup_trove_logging $TROVE_CONF_DIR/trove-guestagent.conf
+ iniset $TROVE_GUESTAGENT_CONF DEFAULT rabbit_userid $RABBIT_USERID
+ iniset $TROVE_GUESTAGENT_CONF DEFAULT rabbit_host $TROVE_HOST_GATEWAY
+ iniset $TROVE_GUESTAGENT_CONF DEFAULT rabbit_password $RABBIT_PASSWORD
+ iniset $TROVE_GUESTAGENT_CONF DEFAULT nova_proxy_admin_user radmin
+ iniset $TROVE_GUESTAGENT_CONF DEFAULT nova_proxy_admin_tenant_name trove
+ iniset $TROVE_GUESTAGENT_CONF DEFAULT nova_proxy_admin_pass $RADMIN_USER_PASS
+ iniset $TROVE_GUESTAGENT_CONF DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT
+ iniset $TROVE_GUESTAGENT_CONF DEFAULT control_exchange trove
+ iniset $TROVE_GUESTAGENT_CONF DEFAULT ignore_users os_admin
+ iniset $TROVE_GUESTAGENT_CONF DEFAULT log_dir /var/log/trove/
+ iniset $TROVE_GUESTAGENT_CONF DEFAULT log_file trove-guestagent.log
+ setup_trove_logging $TROVE_GUESTAGENT_CONF
}
# install_troveclient() - Collect source and prepare
diff --git a/pkg/elasticsearch.sh b/pkg/elasticsearch.sh
index 447596a..239d6b9 100755
--- a/pkg/elasticsearch.sh
+++ b/pkg/elasticsearch.sh
@@ -47,11 +47,20 @@
:
}
+function _check_elasticsearch_ready {
+ # poll elasticsearch to see if it's started
+ if ! wait_for_service 30 http://localhost:9200; then
+ die $LINENO "Maximum timeout reached. Could not connect to ElasticSearch"
+ fi
+}
+
function start_elasticsearch {
if is_ubuntu; then
sudo /etc/init.d/elasticsearch start
+ _check_elasticsearch_ready
elif is_fedora; then
sudo /bin/systemctl start elasticsearch.service
+ _check_elasticsearch_ready
else
echo "Unsupported architecture...can not start elasticsearch."
fi
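
``wait_for_service`` here is the helper from ``functions-common``; as a
rough approximation of its behavior (not its exact implementation):

    function wait_for_service {
        local timeout=$1
        local url=$2
        timeout $timeout bash -c "while ! curl -s $url >/dev/null; do sleep 1; done"
    }
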
diff --git a/samples/local.conf b/samples/local.conf
index 9e0b540..e4052c2 100644
--- a/samples/local.conf
+++ b/samples/local.conf
@@ -3,7 +3,7 @@
# NOTE: Copy this file to the root ``devstack`` directory for it to
# work properly.
-# ``local.conf`` is a user-maintained setings file that is sourced from ``stackrc``.
+# ``local.conf`` is a user-maintained settings file that is sourced from ``stackrc``.
# This gives it the ability to override any variables set in ``stackrc``.
# Also, most of the settings in ``stack.sh`` are written to only be set if no
# value has already been set; this lets ``local.conf`` effectively override the
diff --git a/stack.sh b/stack.sh
index 44a0743..bf9fc01 100755
--- a/stack.sh
+++ b/stack.sh
@@ -250,8 +250,10 @@
enabled=0
gpgcheck=0
EOF
- # bare yum call due to --enablerepo
- sudo yum --enablerepo=epel-bootstrap -y install epel-release || \
+ # Enable a bootstrap repo. It is removed after finishing
+ # the epel-release installation.
+ sudo yum-config-manager --enable epel-bootstrap
+ yum_install epel-release || \
die $LINENO "Error installing EPEL repo, cannot continue"
# epel rpm has installed its version
sudo rm -f /etc/yum.repos.d/epel-bootstrap.repo
@@ -1225,9 +1227,9 @@
init_heat
echo_summary "Starting Heat"
start_heat
- if [ "$HEAT_CREATE_TEST_IMAGE" = "True" ]; then
- echo_summary "Building Heat functional test image"
- build_heat_functional_test_image
+ if [ "$HEAT_BUILD_PIP_MIRROR" = "True" ]; then
+ echo_summary "Building Heat pip mirror"
+ build_heat_pip_mirror
fi
fi
@@ -1298,6 +1300,13 @@
service_check
+# Bash completion
+# ===============
+
+# Prepare bash completion for OSC
+openstack complete | sudo tee /etc/bash_completion.d/osc.bash_completion > /dev/null
+
+
# Fin
# ===
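
``openstack complete`` prints a bash completion function to stdout; to
pick up the new completions in an already-running shell (sketch):

    source /etc/bash_completion.d/osc.bash_completion
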
diff --git a/stackrc b/stackrc
index 103be6d..02b12a3 100644
--- a/stackrc
+++ b/stackrc
@@ -198,9 +198,6 @@
NEUTRON_FWAAS_BRANCH=${NEUTRON_FWAAS_BRANCH:-master}
# neutron lbaas service
-# The neutron-lbaas specific entries are deprecated and replaced by the neutron-lbaas
-# devstack plugin and should be removed in a future release, possibly as soon as Liberty.
-
NEUTRON_LBAAS_REPO=${NEUTRON_LBAAS_REPO:-${GIT_BASE}/openstack/neutron-lbaas.git}
NEUTRON_LBAAS_BRANCH=${NEUTRON_LBAAS_BRANCH:-master}
@@ -361,6 +358,10 @@
GITREPO["oslo.utils"]=${OSLOUTILS_REPO:-${GIT_BASE}/openstack/oslo.utils.git}
GITBRANCH["oslo.utils"]=${OSLOUTILS_BRANCH:-master}
+# oslo.versionedobjects
+GITREPO["oslo.versionedobjects"]=${OSLOVERSIONEDOBJECTS_REPO:-${GIT_BASE}/openstack/oslo.versionedobjects.git}
+GITBRANCH["oslo.versionedobjects"]=${OSLOVERSIONEDOBJECTS_BRANCH:-master}
+
# oslo.vmware
GITREPO["oslo.vmware"]=${OSLOVMWARE_REPO:-${GIT_BASE}/openstack/oslo.vmware.git}
GITBRANCH["oslo.vmware"]=${OSLOVMWARE_BRANCH:-master}
@@ -422,14 +423,10 @@
##################
#
-# TripleO Components
+# TripleO / Heat Agent Components
#
##################
-# diskimage-builder
-DIB_REPO=${DIB_REPO:-${GIT_BASE}/openstack/diskimage-builder.git}
-DIB_BRANCH=${DIB_BRANCH:-master}
-
# os-apply-config configuration template tool
OAC_REPO=${OAC_REPO:-${GIT_BASE}/openstack/os-apply-config.git}
OAC_BRANCH=${OAC_BRANCH:-master}
@@ -442,10 +439,6 @@
ORC_REPO=${ORC_REPO:-${GIT_BASE}/openstack/os-refresh-config.git}
ORC_BRANCH=${ORC_BRANCH:-master}
-# Tripleo elements for diskimage-builder images
-TIE_REPO=${TIE_REPO:-${GIT_BASE}/openstack/tripleo-image-elements.git}
-TIE_BRANCH=${TIE_BRANCH:-master}
-
#################
#
# 3rd Party Components (non pip installable)
diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh
index 472b0ea..0bec584 100755
--- a/tests/test_libs_from_pypi.sh
+++ b/tests/test_libs_from_pypi.sh
@@ -34,8 +34,8 @@
ALL_LIBS+=" python-glanceclient python-ironicclient tempest-lib"
ALL_LIBS+=" oslo.messaging oslo.log cliff python-heatclient stevedore"
ALL_LIBS+=" python-cinderclient glance_store oslo.concurrency oslo.db"
-ALL_LIBS+=" oslo.vmware keystonemiddleware oslo.serialization"
-ALL_LIBS+=" python-saharaclient django_openstack_auth"
+ALL_LIBS+=" oslo.versionedobjects oslo.vmware keystonemiddleware"
+ALL_LIBS+=" oslo.serialization python-saharaclient django_openstack_auth"
ALL_LIBS+=" python-openstackclient oslo.rootwrap oslo.i18n"
ALL_LIBS+=" python-ceilometerclient oslo.utils python-swiftclient"
ALL_LIBS+=" python-neutronclient tooz ceilometermiddleware oslo.policy"
diff --git a/tools/install_pip.sh b/tools/install_pip.sh
index 73d0947..b7b40c7 100755
--- a/tools/install_pip.sh
+++ b/tools/install_pip.sh
@@ -42,9 +42,21 @@
function install_get_pip {
- if [[ ! -r $LOCAL_PIP ]]; then
- curl --retry 6 --retry-delay 5 -o $LOCAL_PIP $PIP_GET_PIP_URL || \
+ # The OpenStack gate and others put a cached version of get-pip.py
+ # in place for this to find, explicitly to avoid download issues.
+ #
+ # However, if devstack *did* download the file, we want to check
+ # for updates; people can leave their stacks around for a long
+ # time and in the meantime pip might get upgraded.
+ #
+ # Thus we use curl's "-z" feature to check the modification time
+ # and only download when a newer version is out -- but only if it
+ # seems we downloaded the file originally.
+ if [[ ! -r $LOCAL_PIP || -r $LOCAL_PIP.downloaded ]]; then
+ curl --retry 6 --retry-delay 5 \
+ -z $LOCAL_PIP -o $LOCAL_PIP $PIP_GET_PIP_URL || \
die $LINENO "Download of get-pip.py failed"
+ touch $LOCAL_PIP.downloaded
fi
sudo -H -E python $LOCAL_PIP
}
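
curl's ``-z`` (``--time-cond``) with a filename argument sends an
If-Modified-Since header based on that file's mtime, so repeated runs
are cheap no-ops. In isolation (the URL is shown for illustration):

    curl -z get-pip.py -o get-pip.py https://bootstrap.pypa.io/get-pip.py
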
diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh
index 082c27e..b49347e 100755
--- a/tools/xen/install_os_domU.sh
+++ b/tools/xen/install_os_domU.sh
@@ -227,7 +227,7 @@
-n "$UBUNTU_INST_BRIDGE_OR_NET_NAME" \
-l "$GUEST_NAME"
- set_vm_memory "$GUEST_NAME" "$OSDOMU_MEM_MB"
+ set_vm_memory "$GUEST_NAME" "1024"
xe vm-start vm="$GUEST_NAME"