Merge "Add support for oslo.versionedobjects"
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 10f4355..cfde991 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -158,7 +158,6 @@
* `lib/cinder <lib/cinder.html>`__
* `lib/config <lib/config.html>`__
* `lib/database <lib/database.html>`__
-* `lib/dib <lib/dib.html>`__
* `lib/dstat <lib/dstat.html>`__
* `lib/glance <lib/glance.html>`__
* `lib/heat <lib/heat.html>`__
@@ -181,7 +180,6 @@
* `clean.sh <clean.sh.html>`__
* `run\_tests.sh <run_tests.sh.html>`__
-* `extras.d/40-dib.sh <extras.d/40-dib.sh.html>`__
* `extras.d/50-ironic.sh <extras.d/50-ironic.sh.html>`__
* `extras.d/60-ceph.sh <extras.d/60-ceph.sh.html>`__
* `extras.d/70-sahara.sh <extras.d/70-sahara.sh.html>`__
diff --git a/extras.d/40-dib.sh b/extras.d/40-dib.sh
deleted file mode 100644
index fdae011..0000000
--- a/extras.d/40-dib.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-# dib.sh - Devstack extras script to install diskimage-builder
-
-if is_service_enabled dib; then
- if [[ "$1" == "source" ]]; then
- # Initial source
- source $TOP_DIR/lib/dib
- elif [[ "$1" == "stack" && "$2" == "install" ]]; then
- echo_summary "Installing diskimage-builder"
- install_dib
- elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
- # no-op
- :
- elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
- # no-op
- :
- fi
-
- if [[ "$1" == "unstack" ]]; then
- # no-op
- :
- fi
-
- if [[ "$1" == "clean" ]]; then
- # no-op
- :
- fi
-fi
diff --git a/files/apache-dib-pip-repo.template b/files/apache-dib-pip-repo.template
deleted file mode 100644
index 5d2379b..0000000
--- a/files/apache-dib-pip-repo.template
+++ /dev/null
@@ -1,15 +0,0 @@
-Listen %DIB_PIP_REPO_PORT%
-
-<VirtualHost *:%DIB_PIP_REPO_PORT%>
- DocumentRoot %DIB_PIP_REPO%
- <Directory %DIB_PIP_REPO%>
- DirectoryIndex index.html
- Require all granted
- Order allow,deny
- allow from all
- </Directory>
-
- ErrorLog /var/log/%APACHE_NAME%/dib_pip_repo_error.log
- LogLevel warn
- CustomLog /var/log/%APACHE_NAME%/dib_pip_repo_access.log combined
-</VirtualHost>
diff --git a/files/apache-heat-pip-repo.template b/files/apache-heat-pip-repo.template
new file mode 100644
index 0000000..d88ac3e
--- /dev/null
+++ b/files/apache-heat-pip-repo.template
@@ -0,0 +1,15 @@
+Listen %HEAT_PIP_REPO_PORT%
+
+<VirtualHost *:%HEAT_PIP_REPO_PORT%>
+ DocumentRoot %HEAT_PIP_REPO%
+ <Directory %HEAT_PIP_REPO%>
+ DirectoryIndex index.html
+ Require all granted
+ Order allow,deny
+ allow from all
+ </Directory>
+
+ ErrorLog /var/log/%APACHE_NAME%/heat_pip_repo_error.log
+ LogLevel warn
+ CustomLog /var/log/%APACHE_NAME%/heat_pip_repo_access.log combined
+</VirtualHost>
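
For reference, once build_heat_pip_mirror (in lib/heat below) substitutes the %...% placeholders, the rendered site looks roughly like this: a sketch assuming the default HEAT_PIP_REPO_PORT of 8899, DATA_DIR=/opt/stack/data, and an Ubuntu-style APACHE_NAME of apache2:

    Listen 8899

    <VirtualHost *:8899>
        DocumentRoot /opt/stack/data/heat-pip-repo
        <Directory /opt/stack/data/heat-pip-repo>
            DirectoryIndex index.html
            Require all granted
            Order allow,deny
            allow from all
        </Directory>

        ErrorLog /var/log/apache2/heat_pip_repo_error.log
        LogLevel warn
        CustomLog /var/log/apache2/heat_pip_repo_access.log combined
    </VirtualHost>
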
diff --git a/files/rpms/general b/files/rpms/general
index 56a9331..cf40632 100644
--- a/files/rpms/general
+++ b/files/rpms/general
@@ -15,7 +15,6 @@
psmisc
pylint
python-unittest2
-python-virtualenv
python-devel
screen
tar
diff --git a/files/venv-requirements.txt b/files/venv-requirements.txt
index 3c50061..8417b92 100644
--- a/files/venv-requirements.txt
+++ b/files/venv-requirements.txt
@@ -1,3 +1,4 @@
+cryptography
lxml
MySQL-python
netifaces
diff --git a/functions-common b/functions-common
index 267dfe8..df69cba 100644
--- a/functions-common
+++ b/functions-common
@@ -1601,25 +1601,6 @@
GITBRANCH[$name]=$branch
}
-# is_plugin_enabled
-#
-# Has a particular plugin been enabled?
-function is_plugin_enabled {
- local plugins=$@
- local plugin
- local enabled=1
-
- # short circuit if nothing to do
- if [[ -z ${DEVSTACK_PLUGINS} ]]; then
- return $enabled
- fi
-
- for plugin in ${plugins}; do
- [[ ,${DEVSTACK_PLUGINS}, =~ ,${plugin}, ]] && enabled=0
- done
- return $enabled
-}
-
# fetch_plugins
#
# clones all plugins
diff --git a/inc/python b/inc/python
index dfc4d63..d72c3c9 100644
--- a/inc/python
+++ b/inc/python
@@ -97,6 +97,7 @@
http_proxy=${http_proxy:-} \
https_proxy=${https_proxy:-} \
no_proxy=${no_proxy:-} \
+ PIP_FIND_LINKS=$PIP_FIND_LINKS \
$cmd_pip install \
$@
@@ -108,6 +109,7 @@
http_proxy=${http_proxy:-} \
https_proxy=${https_proxy:-} \
no_proxy=${no_proxy:-} \
+ PIP_FIND_LINKS=$PIP_FIND_LINKS \
$cmd_pip install \
-r $test_req
fi
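
PIP_FIND_LINKS is pip's environment-variable spelling of --find-links, so exporting it into the pip_install environment lets a local directory of sdists or wheels be searched in addition to the configured index. A minimal sketch of the mechanism being relied on here (the directory path is hypothetical):

    # anything dropped into this directory is considered alongside the configured index
    export PIP_FIND_LINKS=/opt/stack/local-wheels
    pip install os-apply-config
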
diff --git a/lib/ceph b/lib/ceph
index a6b8cc8..76747cc 100644
--- a/lib/ceph
+++ b/lib/ceph
@@ -71,7 +71,7 @@
CEPH_REPLICAS_SEQ=$(seq ${CEPH_REPLICAS})
# Connect to an existing Ceph cluster
-REMOTE_CEPH=$(trueorfalse False $REMOTE_CEPH)
+REMOTE_CEPH=$(trueorfalse False REMOTE_CEPH)
REMOTE_CEPH_ADMIN_KEY_PATH=${REMOTE_CEPH_ADMIN_KEY_PATH:-$CEPH_CONF_DIR/ceph.client.admin.keyring}
@@ -151,14 +151,14 @@
if [[ -e ${CEPH_DISK_IMAGE} ]]; then
sudo rm -f ${CEPH_DISK_IMAGE}
fi
+
+ # purge ceph config file and keys
+ sudo rm -rf ${CEPH_CONF_DIR}/*
}
function cleanup_ceph_general {
undefine_virsh_secret
uninstall_package ceph ceph-common python-ceph libcephfs1 > /dev/null 2>&1
-
- # purge ceph config file and keys
- sudo rm -rf ${CEPH_CONF_DIR}/*
}
diff --git a/lib/dib b/lib/dib
deleted file mode 100644
index 88d9fd8..0000000
--- a/lib/dib
+++ /dev/null
@@ -1,149 +0,0 @@
-#!/bin/bash
-#
-# lib/dib
-# Install and build images with **diskimage-builder**
-
-# Dependencies:
-#
-# - functions
-# - DEST, DATA_DIR must be defined
-
-# stack.sh
-# ---------
-# - install_dib
-
-# Save trace setting
-XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-# Defaults
-# --------
-
-# set up default directories
-DIB_DIR=$DEST/diskimage-builder
-TIE_DIR=$DEST/tripleo-image-elements
-
-# NOTE: Setting DIB_APT_SOURCES assumes you will be building
-# Debian/Ubuntu based images. Leave unset for other flavors.
-DIB_APT_SOURCES=${DIB_APT_SOURCES:-""}
-DIB_BUILD_OFFLINE=$(trueorfalse False DIB_BUILD_OFFLINE)
-DIB_IMAGE_CACHE=$DATA_DIR/diskimage-builder/image-create
-DIB_PIP_REPO=$DATA_DIR/diskimage-builder/pip-repo
-DIB_PIP_REPO_PORT=${DIB_PIP_REPO_PORT:-8899}
-
-OCC_DIR=$DEST/os-collect-config
-ORC_DIR=$DEST/os-refresh-config
-OAC_DIR=$DEST/os-apply-config
-
-# Functions
-# ---------
-
-# install_dib() - Collect source and prepare
-function install_dib {
- pip_install diskimage-builder
-
- git_clone $TIE_REPO $TIE_DIR $TIE_BRANCH
- git_clone $OCC_REPO $OCC_DIR $OCC_BRANCH
- git_clone $ORC_REPO $ORC_DIR $ORC_BRANCH
- git_clone $OAC_REPO $OAC_DIR $OAC_BRANCH
- mkdir -p $DIB_IMAGE_CACHE
-}
-
-# build_dib_pip_repo() - Builds a local pip repo from local projects
-function build_dib_pip_repo {
- local project_dirs=$1
- local projpath proj package
-
- rm -rf $DIB_PIP_REPO
- mkdir -p $DIB_PIP_REPO
-
- echo "<html><body>" > $DIB_PIP_REPO/index.html
- for projpath in $project_dirs; do
- proj=$(basename $projpath)
- mkdir -p $DIB_PIP_REPO/$proj
- pushd $projpath
- rm -rf dist
- python setup.py sdist
- pushd dist
- package=$(ls *)
- mv $package $DIB_PIP_REPO/$proj/$package
- popd
-
- echo "<html><body><a href=\"$package\">$package</a></body></html>" > $DIB_PIP_REPO/$proj/index.html
- echo "<a href=\"$proj\">$proj</a><br/>" >> $DIB_PIP_REPO/index.html
-
- popd
- done
-
- echo "</body></html>" >> $DIB_PIP_REPO/index.html
-
- local dib_pip_repo_apache_conf=$(apache_site_config_for dib_pip_repo)
-
- sudo cp $FILES/apache-dib-pip-repo.template $dib_pip_repo_apache_conf
- sudo sed -e "
- s|%DIB_PIP_REPO%|$DIB_PIP_REPO|g;
- s|%DIB_PIP_REPO_PORT%|$DIB_PIP_REPO_PORT|g;
- s|%APACHE_NAME%|$APACHE_NAME|g;
- " -i $dib_pip_repo_apache_conf
- enable_apache_site dib_pip_repo
-}
-
-# disk_image_create_upload() - Creates and uploads a diskimage-builder built image
-function disk_image_create_upload {
-
- local image_name=$1
- local image_elements=$2
- local elements_path=$3
-
- local image_path=$TOP_DIR/files/$image_name.qcow2
-
- # Include the apt-sources element in builds if we have an
- # alternative sources.list specified.
- if [ -n "$DIB_APT_SOURCES" ]; then
- if [ ! -e "$DIB_APT_SOURCES" ]; then
- die $LINENO "DIB_APT_SOURCES set but not found at $DIB_APT_SOURCES"
- fi
- local extra_elements="apt-sources"
- fi
-
- # Set the local pip repo as the primary index mirror so the
- # image is built with local packages
- local pypi_mirror_url=http://$SERVICE_HOST:$DIB_PIP_REPO_PORT/
- local pypi_mirror_url_1
-
- if [ -a $HOME/.pip/pip.conf ]; then
- # Add the current pip.conf index-url as an extra-index-url
- # in the image build
- pypi_mirror_url_1=$(iniget $HOME/.pip/pip.conf global index-url)
- else
- # If no pip.conf, set upstream pypi as an extra mirror
- # (this also sets the .pydistutils.cfg index-url)
- pypi_mirror_url_1=http://pypi.python.org/simple
- fi
-
- # The disk-image-create command to run
- ELEMENTS_PATH=$elements_path \
- DIB_APT_SOURCES=$DIB_APT_SOURCES \
- DIB_OFFLINE=$DIB_BUILD_OFFLINE \
- PYPI_MIRROR_URL=$pypi_mirror_url \
- PYPI_MIRROR_URL_1=$pypi_mirror_url_1 \
- disk-image-create -a amd64 $image_elements ${extra_elements:-} \
- --image-cache $DIB_IMAGE_CACHE \
- -o $image_path
-
- local token=$(keystone token-get | grep ' id ' | get_field 2)
- die_if_not_set $LINENO token "Keystone fail to get token"
-
- glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT \
- image-create --name $image_name --is-public True \
- --container-format=bare --disk-format qcow2 \
- < $image_path
-}
-
-# Restore xtrace
-$XTRACE
-
-# Tell emacs to use shell-script-mode
-## Local variables:
-## mode: shell-script
-## End:
diff --git a/lib/heat b/lib/heat
index c102163..a088e82 100644
--- a/lib/heat
+++ b/lib/heat
@@ -8,9 +8,7 @@
# ENABLED_SERVICES+=,heat,h-api,h-api-cfn,h-api-cw,h-eng
# Dependencies:
-#
-# - functions
-# - dib (if HEAT_CREATE_TEST_IMAGE=True)
+# (none)
# stack.sh
# ---------
@@ -37,6 +35,13 @@
HEAT_DIR=$DEST/heat
HEAT_CFNTOOLS_DIR=$DEST/heat-cfntools
HEAT_TEMPLATES_REPO_DIR=$DEST/heat-templates
+OCC_DIR=$DEST/os-collect-config
+ORC_DIR=$DEST/os-refresh-config
+OAC_DIR=$DEST/os-apply-config
+
+HEAT_PIP_REPO=$DATA_DIR/heat-pip-repo
+HEAT_PIP_REPO_PORT=${HEAT_PIP_REPO_PORT:-8899}
+
HEAT_AUTH_CACHE_DIR=${HEAT_AUTH_CACHE_DIR:-/var/cache/heat}
HEAT_STANDALONE=$(trueorfalse False HEAT_STANDALONE)
HEAT_ENABLE_ADOPT_ABANDON=$(trueorfalse False HEAT_ENABLE_ADOPT_ABANDON)
@@ -47,10 +52,6 @@
HEAT_STACK_DOMAIN=$(trueorfalse True HEAT_STACK_DOMAIN)
HEAT_API_HOST=${HEAT_API_HOST:-$HOST_IP}
HEAT_API_PORT=${HEAT_API_PORT:-8004}
-HEAT_FUNCTIONAL_IMAGE_ELEMENTS=${HEAT_FUNCTIONAL_IMAGE_ELEMENTS:-\
-vm fedora selinux-permissive pypi os-collect-config os-refresh-config \
-os-apply-config heat-cfntools heat-config heat-config-cfn-init \
-heat-config-puppet heat-config-script}
# other default options
@@ -296,22 +297,44 @@
fi
}
-# build_heat_functional_test_image() - Build and upload functional test image
-function build_heat_functional_test_image {
- if is_service_enabled dib; then
- build_dib_pip_repo "$OCC_DIR $OAC_DIR $ORC_DIR $HEAT_CFNTOOLS_DIR"
- local image_name=heat-functional-tests-image
+# build_heat_pip_mirror() - Build a pip mirror containing heat agent projects
+function build_heat_pip_mirror {
+ local project_dirs="$OCC_DIR $OAC_DIR $ORC_DIR $HEAT_CFNTOOLS_DIR"
+ local projpath proj package
- # Elements path for tripleo-image-elements and heat-templates software-config
- local elements_path=$TIE_DIR/elements:$HEAT_TEMPLATES_REPO_DIR/hot/software-config/elements
+ rm -rf $HEAT_PIP_REPO
+ mkdir -p $HEAT_PIP_REPO
- disk_image_create_upload "$image_name" "$HEAT_FUNCTIONAL_IMAGE_ELEMENTS" "$elements_path"
- iniset $TEMPEST_CONFIG orchestration image_ref $image_name
- else
- echo "Error, HEAT_CREATE_TEST_IMAGE=True requires dib" >&2
- echo "Add \"enable_service dib\" to your localrc" >&2
- exit 1
- fi
+ echo "<html><body>" > $HEAT_PIP_REPO/index.html
+ for projpath in $project_dirs; do
+ proj=$(basename $projpath)
+ mkdir -p $HEAT_PIP_REPO/$proj
+ pushd $projpath
+ rm -rf dist
+ python setup.py sdist
+ pushd dist
+ package=$(ls *)
+ mv $package $HEAT_PIP_REPO/$proj/$package
+ popd
+
+ echo "<html><body><a href=\"$package\">$package</a></body></html>" > $HEAT_PIP_REPO/$proj/index.html
+ echo "<a href=\"$proj\">$proj</a><br/>" >> $HEAT_PIP_REPO/index.html
+
+ popd
+ done
+
+ echo "</body></html>" >> $HEAT_PIP_REPO/index.html
+
+ local heat_pip_repo_apache_conf=$(apache_site_config_for heat_pip_repo)
+
+ sudo cp $FILES/apache-heat-pip-repo.template $heat_pip_repo_apache_conf
+ sudo sed -e "
+ s|%HEAT_PIP_REPO%|$HEAT_PIP_REPO|g;
+ s|%HEAT_PIP_REPO_PORT%|$HEAT_PIP_REPO_PORT|g;
+ s|%APACHE_NAME%|$APACHE_NAME|g;
+ " -i $heat_pip_repo_apache_conf
+ enable_apache_site heat_pip_repo
+ restart_apache_server
}
# Restore xtrace
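
The mirror built by build_heat_pip_mirror follows pip's "simple" index layout (a top-level index.html linking one directory per project, each with its own index.html pointing at the sdist), so a guest or image build can consume it as a primary index. A sketch of the expected usage, with the host and port coming from SERVICE_HOST and HEAT_PIP_REPO_PORT (default 8899):

    pip install --index-url http://$SERVICE_HOST:8899/ \
        os-collect-config os-refresh-config os-apply-config heat-cfntools
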
diff --git a/lib/ironic b/lib/ironic
index ade889e..bc30cdb 100644
--- a/lib/ironic
+++ b/lib/ironic
@@ -121,6 +121,16 @@
IRONIC_HTTP_SERVER=${IRONIC_HTTP_SERVER:-$HOST_IP}
IRONIC_HTTP_PORT=${IRONIC_HTTP_PORT:-8088}
+# NOTE(lucasagomes): This flag is used to differentiate nodes that use
+# IPA as their deploy ramdisk from nodes that use the agent_* drivers
+# (which also use IPA but depend on Swift Temp URLs to work). At present,
+# all drivers that use the iSCSI approach for deployment support either
+# the IPA or the bash ramdisk. In the future we want to remove support
+# for the bash ramdisk in favor of IPA; once we get there this flag can
+# be removed, and all conditionals that use it should just run by default.
+IRONIC_DEPLOY_DRIVER_ISCSI_WITH_IPA=$(trueorfalse False IRONIC_DEPLOY_DRIVER_ISCSI_WITH_IPA)
+
# get_pxe_boot_file() - Get the PXE/iPXE boot file path
function get_pxe_boot_file {
local relpath=syslinux/pxelinux.0
@@ -162,6 +172,11 @@
return 1
}
+function is_deployed_with_ipa_ramdisk {
+ is_deployed_by_agent || [[ "$IRONIC_DEPLOY_DRIVER_ISCSI_WITH_IPA" == "True" ]] && return 0
+ return 1
+}
+
# install_ironic() - Collect source and prepare
function install_ironic {
# make sure all needed service were enabled
@@ -329,7 +344,11 @@
iniset $IRONIC_CONF_FILE pxe tftp_root $IRONIC_TFTPBOOT_DIR
iniset $IRONIC_CONF_FILE pxe tftp_master_path $IRONIC_TFTPBOOT_DIR/master_images
if [[ "$IRONIC_VM_LOG_CONSOLE" == "True" ]] ; then
- iniset $IRONIC_CONF_FILE pxe pxe_append_params "nofb nomodeset vga=normal console=ttyS0"
+ local pxe_params="nofb nomodeset vga=normal console=ttyS0"
+ if is_deployed_with_ipa_ramdisk; then
+ pxe_params+=" systemd.journald.forward_to_console=yes"
+ fi
+ iniset $IRONIC_CONF_FILE pxe pxe_append_params "$pxe_params"
fi
if is_deployed_by_agent; then
if [[ "$SWIFT_ENABLE_TEMPURLS" == "True" ]] ; then
@@ -344,9 +363,6 @@
iniset $IRONIC_CONF_FILE glance swift_container glance
iniset $IRONIC_CONF_FILE glance swift_temp_url_duration 3600
iniset $IRONIC_CONF_FILE agent heartbeat_timeout 30
- if [[ "$IRONIC_VM_LOG_CONSOLE" == "True" ]] ; then
- iniset $IRONIC_CONF_FILE agent agent_pxe_append_params "nofb nomodeset vga=normal console=ttyS0 systemd.journald.forward_to_console=yes"
- fi
fi
if [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then
@@ -717,7 +733,7 @@
if [ "$IRONIC_BUILD_DEPLOY_RAMDISK" = "True" ]; then
# we can build them only if we're not offline
if [ "$OFFLINE" != "True" ]; then
- if is_deployed_by_agent; then
+ if is_deployed_with_ipa_ramdisk; then
build_ipa_coreos_ramdisk $IRONIC_DEPLOY_KERNEL_PATH $IRONIC_DEPLOY_RAMDISK_PATH
else
ramdisk-image-create $IRONIC_DEPLOY_FLAVOR \
@@ -727,7 +743,7 @@
die $LINENO "Deploy kernel+ramdisk files don't exist and cannot be build in OFFLINE mode"
fi
else
- if is_deployed_by_agent; then
+ if is_deployed_with_ipa_ramdisk; then
# download the agent image tarball
wget "$IRONIC_AGENT_KERNEL_URL" -O $IRONIC_DEPLOY_KERNEL_PATH
wget "$IRONIC_AGENT_RAMDISK_URL" -O $IRONIC_DEPLOY_RAMDISK_PATH
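
For context, a sketch of how the new flag is expected to be toggled from local.conf when an iSCSI-based deploy driver should use the IPA ramdisk; the driver name is illustrative, and IRONIC_BUILD_DEPLOY_RAMDISK is the pre-existing knob that triggers the CoreOS ramdisk build above:

    [[local|localrc]]
    IRONIC_DEPLOY_DRIVER=pxe_ssh
    IRONIC_DEPLOY_DRIVER_ISCSI_WITH_IPA=True
    IRONIC_BUILD_DEPLOY_RAMDISK=True
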
diff --git a/lib/neutron b/lib/neutron
index a0f9c36..a7aabc5 100755
--- a/lib/neutron
+++ b/lib/neutron
@@ -100,8 +100,10 @@
# Set up default directories
GITDIR["python-neutronclient"]=$DEST/python-neutronclient
+
NEUTRON_DIR=$DEST/neutron
NEUTRON_FWAAS_DIR=$DEST/neutron-fwaas
+NEUTRON_LBAAS_DIR=$DEST/neutron-lbaas
NEUTRON_VPNAAS_DIR=$DEST/neutron-vpnaas
NEUTRON_AUTH_CACHE_DIR=${NEUTRON_AUTH_CACHE_DIR:-/var/cache/neutron}
@@ -114,7 +116,6 @@
NEUTRON_CONF_DIR=/etc/neutron
NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf
-
export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"}
# Agent binaries. Note, binary paths for other agents are set in per-service
@@ -325,6 +326,12 @@
# Please refer to ``lib/neutron_plugins/README.md`` for details.
source $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN
+# Agent loadbalancer service plugin functions
+# -------------------------------------------
+
+# Hardcoding for 1 service plugin for now
+source $TOP_DIR/lib/neutron_plugins/services/loadbalancer
+
# Agent metering service plugin functions
# -------------------------------------------
@@ -351,17 +358,6 @@
TEMPEST_SERVICES+=,neutron
-# For backward compatibility, if q-lbaas service is enabled, make sure to load the
-# neutron-lbaas plugin. This hook should be removed in a future release, perhaps
-# as early as Liberty.
-
-if is_service_enabled q-lbaas; then
- if ! is_plugin_enabled neutron-lbaas; then
- DEPRECATED_TEXT+="External plugin neutron-lbaas has been automatically activated, please add the appropriate enable_plugin to your local.conf. This will be removed in the Liberty cycle."
- enable_plugin "neutron-lbaas" ${NEUTRON_LBAAS_REPO} ${NEUTRON_LBAAS_BRANCH}
- fi
-fi
-
# Save trace setting
XTRACE=$(set +o | grep xtrace)
set +o xtrace
@@ -429,7 +425,9 @@
iniset_rpc_backend neutron $NEUTRON_CONF DEFAULT
# goes before q-svc to init Q_SERVICE_PLUGIN_CLASSES
-
+ if is_service_enabled q-lbaas; then
+ _configure_neutron_lbaas
+ fi
if is_service_enabled q-metering; then
_configure_neutron_metering
fi
@@ -607,8 +605,7 @@
recreate_database $Q_DB_NAME
# Run Neutron db migrations
$NEUTRON_BIN_DIR/neutron-db-manage --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE upgrade head
-
- for svc in fwaas vpnaas; do
+ for svc in fwaas lbaas vpnaas; do
if [ "$svc" = "vpnaas" ]; then
q_svc="q-vpn"
else
@@ -628,6 +625,10 @@
git_clone $NEUTRON_FWAAS_REPO $NEUTRON_FWAAS_DIR $NEUTRON_FWAAS_BRANCH
setup_develop $NEUTRON_FWAAS_DIR
fi
+ if is_service_enabled q-lbaas; then
+ git_clone $NEUTRON_LBAAS_REPO $NEUTRON_LBAAS_DIR $NEUTRON_LBAAS_BRANCH
+ setup_develop $NEUTRON_LBAAS_DIR
+ fi
if is_service_enabled q-vpn; then
git_clone $NEUTRON_VPNAAS_REPO $NEUTRON_VPNAAS_DIR $NEUTRON_VPNAAS_BRANCH
setup_develop $NEUTRON_VPNAAS_DIR
@@ -671,6 +672,10 @@
if is_service_enabled q-agt q-dhcp q-l3; then
neutron_plugin_install_agent_packages
fi
+
+ if is_service_enabled q-lbaas; then
+ neutron_agent_lbaas_install_agent_packages
+ fi
}
# Start running processes, including screen
@@ -730,6 +735,10 @@
run_process q-domua "python $AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE.domU"
fi
+ if is_service_enabled q-lbaas; then
+ run_process q-lbaas "python $AGENT_LBAAS_BINARY --config-file $NEUTRON_CONF --config-file=$LBAAS_AGENT_CONF_FILENAME"
+ fi
+
if is_service_enabled q-metering; then
run_process q-metering "python $AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME"
fi
@@ -753,6 +762,9 @@
stop_process q-agt
+ if is_service_enabled q-lbaas; then
+ neutron_lbaas_stop
+ fi
if is_service_enabled q-fwaas; then
neutron_fwaas_stop
fi
@@ -780,11 +792,12 @@
fi
# delete all namespaces created by neutron
- for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|fip|snat)-[0-9a-f-]*'); do
+ for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|qlbaas|fip|snat)-[0-9a-f-]*'); do
sudo ip netns delete ${ns}
done
}
+
function _create_neutron_conf_dir {
# Put config files in ``NEUTRON_CONF_DIR`` for everyone to find
if [[ ! -d $NEUTRON_CONF_DIR ]]; then
@@ -954,6 +967,14 @@
iniset $NEUTRON_CONF DEFAULT notification_driver messaging
}
+function _configure_neutron_lbaas {
+ if [ -f $NEUTRON_LBAAS_DIR/etc/neutron_lbaas.conf ]; then
+ cp $NEUTRON_LBAAS_DIR/etc/neutron_lbaas.conf $NEUTRON_CONF_DIR
+ fi
+ neutron_agent_lbaas_configure_common
+ neutron_agent_lbaas_configure_agent
+}
+
function _configure_neutron_metering {
neutron_agent_metering_configure_common
neutron_agent_metering_configure_agent
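
With LBaaS handling folded back into lib/neutron, running the agent no longer requires enabling the external neutron-lbaas plugin; a sketch of the corresponding local.conf entry:

    [[local|localrc]]
    enable_service q-lbaas
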
diff --git a/lib/neutron_plugins/services/loadbalancer b/lib/neutron_plugins/services/loadbalancer
new file mode 100644
index 0000000..f465cc9
--- /dev/null
+++ b/lib/neutron_plugins/services/loadbalancer
@@ -0,0 +1,49 @@
+# Neutron loadbalancer plugin
+# ---------------------------
+
+# Save trace setting
+LB_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+AGENT_LBAAS_BINARY="$NEUTRON_BIN_DIR/neutron-lbaas-agent"
+LBAAS_PLUGIN=neutron_lbaas.services.loadbalancer.plugin.LoadBalancerPlugin
+
+function neutron_agent_lbaas_install_agent_packages {
+ if is_ubuntu || is_fedora || is_suse; then
+ install_package haproxy
+ fi
+}
+
+function neutron_agent_lbaas_configure_common {
+ _neutron_service_plugin_class_add $LBAAS_PLUGIN
+ _neutron_deploy_rootwrap_filters $NEUTRON_LBAAS_DIR
+}
+
+function neutron_agent_lbaas_configure_agent {
+ LBAAS_AGENT_CONF_PATH=/etc/neutron/services/loadbalancer/haproxy
+ mkdir -p $LBAAS_AGENT_CONF_PATH
+
+ LBAAS_AGENT_CONF_FILENAME="$LBAAS_AGENT_CONF_PATH/lbaas_agent.ini"
+
+ cp $NEUTRON_LBAAS_DIR/etc/lbaas_agent.ini $LBAAS_AGENT_CONF_FILENAME
+
+ # ovs_use_veth needs to be set before the plugin configuration
+ # occurs to allow plugins to override the setting.
+ iniset $LBAAS_AGENT_CONF_FILENAME DEFAULT ovs_use_veth $Q_OVS_USE_VETH
+
+ neutron_plugin_setup_interface_driver $LBAAS_AGENT_CONF_FILENAME
+
+ if is_fedora; then
+ iniset $LBAAS_AGENT_CONF_FILENAME DEFAULT user_group "nobody"
+ iniset $LBAAS_AGENT_CONF_FILENAME haproxy user_group "nobody"
+ fi
+}
+
+function neutron_lbaas_stop {
+ pids=$(ps aux | awk '/haproxy/ { print $2 }')
+ [ ! -z "$pids" ] && sudo kill $pids
+}
+
+# Restore xtrace
+$LB_XTRACE
diff --git a/lib/rpc_backend b/lib/rpc_backend
index 899748c..ff22bbf 100644
--- a/lib/rpc_backend
+++ b/lib/rpc_backend
@@ -158,9 +158,6 @@
fi
_configure_qpid
elif is_service_enabled zeromq; then
- # NOTE(ewindisch): Redis is not strictly necessary
- # but there is a matchmaker driver that works
- # really well & out of the box for multi-node.
if is_fedora; then
install_package zeromq python-zmq
if [ "$ZEROMQ_MATCHMAKER" == "redis" ]; then
@@ -243,11 +240,15 @@
local section=$3
if is_service_enabled zeromq; then
iniset $file $section rpc_backend "zmq"
- iniset $file $section rpc_zmq_matchmaker \
- oslo_messaging._drivers.matchmaker_redis.MatchMakerRedis
- # Set MATCHMAKER_REDIS_HOST if running multi-node.
- MATCHMAKER_REDIS_HOST=${MATCHMAKER_REDIS_HOST:-127.0.0.1}
- iniset $file matchmaker_redis host $MATCHMAKER_REDIS_HOST
+ iniset $file $section rpc_zmq_host `hostname`
+ if [ "$ZEROMQ_MATCHMAKER" == "redis" ]; then
+ iniset $file $section rpc_zmq_matchmaker \
+ oslo.messaging._drivers.matchmaker_redis.MatchMakerRedis
+ MATCHMAKER_REDIS_HOST=${MATCHMAKER_REDIS_HOST:-127.0.0.1}
+ iniset $file matchmaker_redis host $MATCHMAKER_REDIS_HOST
+ else
+ die $LINENO "Other matchmaker drivers not supported"
+ fi
elif is_service_enabled qpid || [ -n "$QPID_HOST" ]; then
# For Qpid use the 'amqp' oslo.messaging transport when AMQP 1.0 is used
if [ "$RPC_MESSAGING_PROTOCOL" == "AMQP1" ]; then
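
When ZEROMQ_MATCHMAKER=redis, the block above writes roughly the following into the target file; this is a sketch where the section is assumed to be DEFAULT and "devstack-host" stands in for the local hostname:

    [DEFAULT]
    rpc_backend = zmq
    rpc_zmq_host = devstack-host
    rpc_zmq_matchmaker = oslo.messaging._drivers.matchmaker_redis.MatchMakerRedis

    [matchmaker_redis]
    host = 127.0.0.1
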
diff --git a/lib/sahara b/lib/sahara
index a84a06f..9b2e9c4 100644
--- a/lib/sahara
+++ b/lib/sahara
@@ -65,9 +65,25 @@
if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
- local sahara_service=$(get_or_create_service "sahara" \
- "data_processing" "Sahara Data Processing")
- get_or_create_endpoint $sahara_service \
+    # TODO: remove the "data_processing" service when bug #1356053 is fixed
+ local sahara_service_old=$(openstack service create \
+ "data_processing" \
+ --name "sahara" \
+ --description "Sahara Data Processing" \
+ -f value -c id
+ )
+ local sahara_service_new=$(openstack service create \
+ "data-processing" \
+ --name "sahara" \
+ --description "Sahara Data Processing" \
+ -f value -c id
+ )
+ get_or_create_endpoint $sahara_service_old \
+ "$REGION_NAME" \
+ "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
+ "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
+ "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s"
+ get_or_create_endpoint $sahara_service_new \
"$REGION_NAME" \
"$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
"$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
diff --git a/lib/trove b/lib/trove
index 080e860..d777983 100644
--- a/lib/trove
+++ b/lib/trove
@@ -37,6 +37,7 @@
TROVE_CONF=$TROVE_CONF_DIR/trove.conf
TROVE_TASKMANAGER_CONF=$TROVE_CONF_DIR/trove-taskmanager.conf
TROVE_CONDUCTOR_CONF=$TROVE_CONF_DIR/trove-conductor.conf
+TROVE_GUESTAGENT_CONF=$TROVE_CONF_DIR/trove-guestagent.conf
TROVE_API_PASTE_INI=$TROVE_CONF_DIR/api-paste.ini
TROVE_LOCAL_CONF_DIR=$TROVE_DIR/etc/trove
@@ -171,18 +172,18 @@
fi
# Set up Guest Agent conf
- iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT rabbit_userid $RABBIT_USERID
- iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT rabbit_host $TROVE_HOST_GATEWAY
- iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT rabbit_password $RABBIT_PASSWORD
- iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT nova_proxy_admin_user radmin
- iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT nova_proxy_admin_tenant_name trove
- iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT nova_proxy_admin_pass $RADMIN_USER_PASS
- iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT
- iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT control_exchange trove
- iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT ignore_users os_admin
- iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT log_dir /var/log/trove/
- iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT log_file trove-guestagent.log
- setup_trove_logging $TROVE_CONF_DIR/trove-guestagent.conf
+ iniset $TROVE_GUESTAGENT_CONF DEFAULT rabbit_userid $RABBIT_USERID
+ iniset $TROVE_GUESTAGENT_CONF DEFAULT rabbit_host $TROVE_HOST_GATEWAY
+ iniset $TROVE_GUESTAGENT_CONF DEFAULT rabbit_password $RABBIT_PASSWORD
+ iniset $TROVE_GUESTAGENT_CONF DEFAULT nova_proxy_admin_user radmin
+ iniset $TROVE_GUESTAGENT_CONF DEFAULT nova_proxy_admin_tenant_name trove
+ iniset $TROVE_GUESTAGENT_CONF DEFAULT nova_proxy_admin_pass $RADMIN_USER_PASS
+ iniset $TROVE_GUESTAGENT_CONF DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT
+ iniset $TROVE_GUESTAGENT_CONF DEFAULT control_exchange trove
+ iniset $TROVE_GUESTAGENT_CONF DEFAULT ignore_users os_admin
+ iniset $TROVE_GUESTAGENT_CONF DEFAULT log_dir /var/log/trove/
+ iniset $TROVE_GUESTAGENT_CONF DEFAULT log_file trove-guestagent.log
+ setup_trove_logging $TROVE_GUESTAGENT_CONF
}
# install_troveclient() - Collect source and prepare
diff --git a/stack.sh b/stack.sh
index 58b4479..bf9fc01 100755
--- a/stack.sh
+++ b/stack.sh
@@ -1227,9 +1227,9 @@
init_heat
echo_summary "Starting Heat"
start_heat
- if [ "$HEAT_CREATE_TEST_IMAGE" = "True" ]; then
- echo_summary "Building Heat functional test image"
- build_heat_functional_test_image
+ if [ "$HEAT_BUILD_PIP_MIRROR" = "True" ]; then
+ echo_summary "Building Heat pip mirror"
+ build_heat_pip_mirror
fi
fi
@@ -1300,6 +1300,13 @@
service_check
+# Bash completion
+# ===============
+
+# Prepare bash completion for OSC
+openstack complete | sudo tee /etc/bash_completion.d/osc.bash_completion > /dev/null
+
+
# Fin
# ===
diff --git a/stackrc b/stackrc
index 30706eb..02b12a3 100644
--- a/stackrc
+++ b/stackrc
@@ -198,9 +198,6 @@
NEUTRON_FWAAS_BRANCH=${NEUTRON_FWAAS_BRANCH:-master}
# neutron lbaas service
-# The neutron-lbaas specific entries are deprecated and replaced by the neutron-lbaas
-# devstack plugin and should be removed in a future release, possibly as soon as Liberty.
-
NEUTRON_LBAAS_REPO=${NEUTRON_LBAAS_REPO:-${GIT_BASE}/openstack/neutron-lbaas.git}
NEUTRON_LBAAS_BRANCH=${NEUTRON_LBAAS_BRANCH:-master}
@@ -426,14 +423,10 @@
##################
#
-# TripleO Components
+# TripleO / Heat Agent Components
#
##################
-# diskimage-builder
-DIB_REPO=${DIB_REPO:-${GIT_BASE}/openstack/diskimage-builder.git}
-DIB_BRANCH=${DIB_BRANCH:-master}
-
# os-apply-config configuration template tool
OAC_REPO=${OAC_REPO:-${GIT_BASE}/openstack/os-apply-config.git}
OAC_BRANCH=${OAC_BRANCH:-master}
@@ -446,10 +439,6 @@
ORC_REPO=${ORC_REPO:-${GIT_BASE}/openstack/os-refresh-config.git}
ORC_BRANCH=${ORC_BRANCH:-master}
-# Tripleo elements for diskimage-builder images
-TIE_REPO=${TIE_REPO:-${GIT_BASE}/openstack/tripleo-image-elements.git}
-TIE_BRANCH=${TIE_BRANCH:-master}
-
#################
#
# 3rd Party Components (non pip installable)