Merge "Remove debs for diablo volumes"
diff --git a/README.md b/README.md
index 206ffe0..c5e7f55 100644
--- a/README.md
+++ b/README.md
@@ -282,7 +282,15 @@
tests can be run as follows:
$ cd /opt/stack/tempest
- $ nosetests tempest/scenario/test_network_basic_ops.py
+ $ tox -efull tempest.scenario.test_network_basic_ops
+
+By default tempest is downloaded and the config file is generated, but the
+tempest package is not installed in the system's global site-packages (the
+package install includes installing dependencies). So tempest won't run
+outside of tox. If you would like to install it, add the following to your
+``localrc`` section:
+
+ INSTALL_TEMPEST=True
# DevStack on Xenserver
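
A quick way to confirm the effect of ``INSTALL_TEMPEST`` described above once
``stack.sh`` has been re-run (this check is illustrative and not part of the
change itself):

    $ python -c "import tempest; print(tempest.__file__)"

With the default ``INSTALL_TEMPEST=False`` the import only succeeds inside the
tox venv created under ``/opt/stack/tempest/.tox/full``; with ``True`` it also
works from the system Python.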
diff --git a/clean.sh b/clean.sh
index 50d414c..ad4525b 100755
--- a/clean.sh
+++ b/clean.sh
@@ -76,6 +76,8 @@
# ==========
# Phase: clean
+run_phase clean
+
if [[ -d $TOP_DIR/extras.d ]]; then
for i in $TOP_DIR/extras.d/*.sh; do
[[ -r $i ]] && source $i clean
@@ -119,6 +121,10 @@
sudo rm -rf $SCREEN_LOGDIR
fi
+# Clean up venvs
+DIRS_TO_CLEAN="$WHEELHOUSE ${PROJECT_VENV[@]}"
+rm -rf $DIRS_TO_CLEAN
+
# Clean up files
FILES_TO_CLEAN=".localrc.auto docs/files docs/html shocco/ stack-screenrc test*.conf* test.ini*"
diff --git a/doc/source/guides/nova.rst b/doc/source/guides/nova.rst
new file mode 100644
index 0000000..0d98f4a
--- /dev/null
+++ b/doc/source/guides/nova.rst
@@ -0,0 +1,72 @@
+=================
+Nova and devstack
+=================
+
+This is a rough guide to various configuration parameters for nova
+running with devstack.
+
+
+nova-serialproxy
+================
+
+In Juno nova implemented a `spec
+<http://specs.openstack.org/openstack/nova-specs/specs/juno/implemented/serial-ports.html>`_
+to allow read/write access to the serial console of an instance via
+`nova-serialproxy
+<http://docs.openstack.org/developer/nova/man/nova-serialproxy.html>`_.
+
+The service can be enabled by adding ``n-sproxy`` to
+``ENABLED_SERVICES``. Further options can be set via
+``local.conf``, e.g.
+
+::
+
+ [[post-config|$NOVA_CONF]]
+ [serial_console]
+ #
+ # Options defined in nova.cmd.serialproxy
+ #
+
+ # Host on which to listen for incoming requests (string value)
+ #serialproxy_host=0.0.0.0
+
+ # Port on which to listen for incoming requests (integer
+ # value)
+ #serialproxy_port=6083
+
+
+ #
+ # Options defined in nova.console.serial
+ #
+
+ # Enable serial console related features (boolean value)
+ #enabled=false
+ # Do not set this manually. Instead enable the service as
+ # outlined above.
+
+ # Range of TCP ports to use for serial ports on compute hosts
+ # (string value)
+ #port_range=10000:20000
+
+ # Location of serial console proxy. (string value)
+ #base_url=ws://127.0.0.1:6083/
+
+ # IP address on which instance serial console should listen
+ # (string value)
+ #listen=127.0.0.1
+
+ # The address to which proxy clients (like nova-serialproxy)
+ # should connect (string value)
+ #proxyclient_address=127.0.0.1
+
+
+Enabling the service is enough for a functional single-machine devstack.
+
+These config options are defined in `nova.console.serial
+<https://github.com/openstack/nova/blob/master/nova/console/serial.py#L33-L52>`_
+and `nova.cmd.serialproxy
+<https://github.com/openstack/nova/blob/master/nova/cmd/serialproxy.py#L26-L33>`_.
+
+For more information on OpenStack configuration, see the `OpenStack
+Configuration Reference
+<http://docs.openstack.org/trunk/config-reference/content/list-of-compute-config-options.html>`_.
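
A minimal ``local.conf`` sketch for the setup described in this guide; only the
service enablement is required, and the ``[serial_console]`` override mirrors one
of the commented defaults above:

    [[local|localrc]]
    enable_service n-sproxy

    [[post-config|$NOVA_CONF]]
    [serial_console]
    base_url=ws://127.0.0.1:6083/

Once the stack is up, the serial console URL for an instance can typically be
fetched with ``nova get-serial-console <server>``.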
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 855a2d6..10f4355 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -67,6 +67,7 @@
guides/multinode-lab
guides/neutron
guides/devstack-with-nested-kvm
+ guides/nova
All-In-One Single VM
--------------------
@@ -102,6 +103,11 @@
<guides/devstack-with-nested-kvm>`. With this setup, Nova instances
will be more performant than with plain QEMU emulation.
+Nova and devstack
+-----------------
+
+Guide to working with nova features: :doc:`Nova and devstack <guides/nova>`.
+
DevStack Documentation
======================
diff --git a/exercises/horizon.sh b/exercises/horizon.sh
index ad08221..4020580 100755
--- a/exercises/horizon.sh
+++ b/exercises/horizon.sh
@@ -36,7 +36,7 @@
is_service_enabled horizon || exit 55
# can we get the front page
-curl http://$SERVICE_HOST 2>/dev/null | grep -q '<h3.*>Log In</h3>' || die $LINENO "Horizon front page not functioning!"
+$CURL_GET http://$SERVICE_HOST 2>/dev/null | grep -q '<h3.*>Log In</h3>' || die $LINENO "Horizon front page not functioning!"
set +o xtrace
echo "*********************************************************************"
diff --git a/exercises/sahara.sh b/exercises/sahara.sh
index 867920e..2589e28 100755
--- a/exercises/sahara.sh
+++ b/exercises/sahara.sh
@@ -35,7 +35,7 @@
is_service_enabled sahara || exit 55
-curl http://$SERVICE_HOST:8386/ 2>/dev/null | grep -q 'Auth' || die $LINENO "Sahara API isn't functioning!"
+$CURL_GET http://$SERVICE_HOST:8386/ 2>/dev/null | grep -q 'Auth' || die $LINENO "Sahara API isn't functioning!"
set +o xtrace
echo "*********************************************************************"
diff --git a/exercises/zaqar.sh b/exercises/zaqar.sh
index 6996f34..c370b12 100755
--- a/exercises/zaqar.sh
+++ b/exercises/zaqar.sh
@@ -35,7 +35,7 @@
is_service_enabled zaqar-server || exit 55
-curl http://$SERVICE_HOST:8888/v1/ 2>/dev/null | grep -q 'queue_name' || die $LINENO "Zaqar API not functioning!"
+$CURL_GET http://$SERVICE_HOST:8888/v1/ 2>/dev/null | grep -q 'queue_name' || die $LINENO "Zaqar API not functioning!"
set +o xtrace
echo "*********************************************************************"
diff --git a/files/apache-horizon.template b/files/apache-horizon.template
index bca1251..6883898 100644
--- a/files/apache-horizon.template
+++ b/files/apache-horizon.template
@@ -28,7 +28,9 @@
Require all granted
</IfVersion>
</Directory>
-
+ <IfVersion >= 2.4>
+ ErrorLogFormat "%{cu}t %M"
+ </IfVersion>
ErrorLog /var/log/%APACHE_NAME%/horizon_error.log
LogLevel warn
CustomLog /var/log/%APACHE_NAME%/horizon_access.log combined
diff --git a/files/debs/devlibs b/files/debs/devlibs
new file mode 100644
index 0000000..0446ceb
--- /dev/null
+++ b/files/debs/devlibs
@@ -0,0 +1,7 @@
+libffi-dev # pyOpenSSL
+libmysqlclient-dev # MySQL-python
+libpq-dev # psycopg2
+libssl-dev # pyOpenSSL
+libxml2-dev # lxml
+libxslt1-dev # lxml
+python-dev # pyOpenSSL
diff --git a/files/debs/general b/files/debs/general
index 4050191..84d4302 100644
--- a/files/debs/general
+++ b/files/debs/general
@@ -1,26 +1,21 @@
bridge-utils
-pylint
screen
unzip
wget
psmisc
gcc
+g++
git
graphviz # testonly - docs
lsof # useful when debugging
openssh-server
openssl
-python-virtualenv
-python-unittest2
iputils-ping
wget
curl
tcpdump
-euca2ools # only for testing client
tar
-python-cmd2 # dist:precise
python-dev
-python-mock # testonly
python2.7
bc
libyaml-dev
diff --git a/files/debs/glance b/files/debs/glance
index 8db8145..9fda6a6 100644
--- a/files/debs/glance
+++ b/files/debs/glance
@@ -3,11 +3,4 @@
libssl-dev # testonly
libxml2-dev
libxslt1-dev # testonly
-python-eventlet
-python-routes
-python-greenlet
-python-sqlalchemy
-python-pastedeploy
-python-xattr
-python-iso8601
zlib1g-dev # testonly
diff --git a/files/debs/horizon b/files/debs/horizon
index f9b7d59..1f45b54 100644
--- a/files/debs/horizon
+++ b/files/debs/horizon
@@ -1,19 +1,3 @@
apache2 # NOPRIME
libapache2-mod-wsgi # NOPRIME
-python-beautifulsoup
-python-dateutil
-python-paste
-python-pastedeploy
-python-anyjson
-python-routes
-python-xattr
-python-sqlalchemy
-python-webob
-pylint
-python-eventlet
-python-nose
-python-mox
-python-coverage
-python-cherrypy3 # why?
-python-migrate
libpcre3-dev # pyScss
diff --git a/files/debs/keystone b/files/debs/keystone
index d316a42..70a5649 100644
--- a/files/debs/keystone
+++ b/files/debs/keystone
@@ -1,15 +1,7 @@
python-lxml
-python-pastescript
-python-pastedeploy
-python-paste
sqlite3
-python-pysqlite2
-python-sqlalchemy
python-mysqldb
python-mysql.connector
-python-webob
-python-greenlet
-python-routes
libldap2-dev
libsasl2-dev
libkrb5-dev
diff --git a/files/debs/n-api b/files/debs/n-api
index b4372d9..0928cd5 100644
--- a/files/debs/n-api
+++ b/files/debs/n-api
@@ -1,3 +1 @@
-python-dateutil
-msgpack-python
fping
diff --git a/files/debs/neutron b/files/debs/neutron
index 3f4b6d2..aa3d709 100644
--- a/files/debs/neutron
+++ b/files/debs/neutron
@@ -7,17 +7,8 @@
mysql-server #NOPRIME
sudo
postgresql-server-dev-all # testonly
-python-iso8601
-python-paste
-python-routes
-python-suds
-python-pastedeploy
-python-greenlet
-python-eventlet
-python-sqlalchemy
python-mysqldb
python-mysql.connector
-python-pyudev
python-qpid # NOPRIME
dnsmasq-base
dnsmasq-utils # for dhcp_release only available in dist:precise
diff --git a/files/debs/nova b/files/debs/nova
index 66f29c4..0c31385 100644
--- a/files/debs/nova
+++ b/files/debs/nova
@@ -8,7 +8,6 @@
mysql-server # NOPRIME
python-mysqldb
python-mysql.connector
-python-xattr # needed for glance which is needed for nova --- this shouldn't be here
python-lxml # needed for glance which is needed for nova --- this shouldn't be here
gawk
iptables
@@ -27,22 +26,8 @@
rabbitmq-server # NOPRIME
qpidd # NOPRIME
socat # used by ajaxterm
-python-mox
-python-paste
-python-migrate
-python-greenlet
python-libvirt # NOPRIME
python-libxml2
-python-routes
python-numpy # used by websockify for spice console
-python-pastedeploy
-python-eventlet
-python-cheetah
-python-tempita
-python-sqlalchemy
-python-suds
-python-lockfile
python-m2crypto
-python-feedparser
-python-iso8601
python-qpid # NOPRIME
diff --git a/files/debs/postgresql b/files/debs/postgresql
deleted file mode 100644
index bf19d39..0000000
--- a/files/debs/postgresql
+++ /dev/null
@@ -1 +0,0 @@
-python-psycopg2
diff --git a/files/debs/ryu b/files/debs/ryu
deleted file mode 100644
index 354c1b7..0000000
--- a/files/debs/ryu
+++ /dev/null
@@ -1 +0,0 @@
-python-eventlet
diff --git a/files/debs/swift b/files/debs/swift
index fd51699..b32b439 100644
--- a/files/debs/swift
+++ b/files/debs/swift
@@ -1,14 +1,7 @@
curl
memcached
-python-configobj
-python-coverage
-python-eventlet
-python-greenlet
-python-netifaces
+# NOTE: python-nose is only here because of the swift functional job; we should
+# probably figure out a more consistent way of installing it from test-requirements.txt instead
python-nose
-python-pastedeploy
-python-simplejson
-python-webob
-python-xattr
sqlite3
xfsprogs
diff --git a/files/debs/zaqar-server b/files/debs/zaqar-server
index 32b1017..6c2a4d1 100644
--- a/files/debs/zaqar-server
+++ b/files/debs/zaqar-server
@@ -1,5 +1,4 @@
python-pymongo
mongodb-server
pkg-config
-redis-server # NOPRIME
-python-redis # NOPRIME
\ No newline at end of file
+redis-server # NOPRIME
\ No newline at end of file
diff --git a/files/rpms-suse/devlibs b/files/rpms-suse/devlibs
new file mode 100644
index 0000000..c923825
--- /dev/null
+++ b/files/rpms-suse/devlibs
@@ -0,0 +1,6 @@
+libffi-devel # pyOpenSSL
+libopenssl-devel # pyOpenSSL
+libxml2-devel # lxml
+libxslt-devel # lxml
+postgresql-devel # psycopg2
+python-devel # pyOpenSSL
diff --git a/files/rpms-suse/general b/files/rpms-suse/general
index 63ef705..7f4bbfb 100644
--- a/files/rpms-suse/general
+++ b/files/rpms-suse/general
@@ -4,6 +4,7 @@
curl
euca2ools
gcc
+gcc-c++
git-core
graphviz # testonly - docs
iputils
diff --git a/files/rpms-suse/postgresql b/files/rpms-suse/postgresql
deleted file mode 100644
index bf19d39..0000000
--- a/files/rpms-suse/postgresql
+++ /dev/null
@@ -1 +0,0 @@
-python-psycopg2
diff --git a/files/rpms-suse/ryu b/files/rpms-suse/ryu
deleted file mode 100644
index 354c1b7..0000000
--- a/files/rpms-suse/ryu
+++ /dev/null
@@ -1 +0,0 @@
-python-eventlet
diff --git a/files/rpms/devlibs b/files/rpms/devlibs
new file mode 100644
index 0000000..834a4b6
--- /dev/null
+++ b/files/rpms/devlibs
@@ -0,0 +1,9 @@
+libffi-devel # pyOpenSSL
+libxml2-devel # lxml
+libxslt-devel # lxml
+mariadb-devel # MySQL-python f20,f21,rhel7
+mysql-devel # MySQL-python rhel6
+openssl-devel # pyOpenSSL
+postgresql-devel # psycopg2
+python-devel # pyOpenSSL
+redhat-rpm-config # MySQL-python rhbz-1195207 f21
diff --git a/files/rpms/general b/files/rpms/general
index 6f22391..56a9331 100644
--- a/files/rpms/general
+++ b/files/rpms/general
@@ -3,6 +3,7 @@
dbus
euca2ools # only for testing client
gcc
+gcc-c++
git-core
graphviz # testonly - docs
openssh-server
@@ -26,4 +27,5 @@
libyaml-devel
gettext # used for compiling message catalogs
net-tools
-java-1.7.0-openjdk-headless # NOPRIME
+java-1.7.0-openjdk-headless # NOPRIME rhel7,f20
+java-1.8.0-openjdk-headless # NOPRIME f21,f22
diff --git a/files/rpms/postgresql b/files/rpms/postgresql
deleted file mode 100644
index bf19d39..0000000
--- a/files/rpms/postgresql
+++ /dev/null
@@ -1 +0,0 @@
-python-psycopg2
diff --git a/files/rpms/ryu b/files/rpms/ryu
deleted file mode 100644
index 354c1b7..0000000
--- a/files/rpms/ryu
+++ /dev/null
@@ -1 +0,0 @@
-python-eventlet
diff --git a/files/venv-requirements.txt b/files/venv-requirements.txt
new file mode 100644
index 0000000..3c50061
--- /dev/null
+++ b/files/venv-requirements.txt
@@ -0,0 +1,10 @@
+lxml
+MySQL-python
+netifaces
+numpy
+posix-ipc
+psycopg2
+pycrypto
+pyOpenSSL
+PyYAML
+xattr
diff --git a/functions b/functions
index 2f976cf..79b2b37 100644
--- a/functions
+++ b/functions
@@ -325,13 +325,15 @@
fi
}
+# Macro for curl statements. curl requires the -g option for literal IPv6 addresses.
+CURL_GET="${CURL_GET:-curl -g}"
# Wait for an HTTP server to start answering requests
# wait_for_service timeout url
function wait_for_service {
local timeout=$1
local url=$2
- timeout $timeout sh -c "while ! curl -k --noproxy '*' -s $url >/dev/null; do sleep 1; done"
+ timeout $timeout sh -c "while ! $CURL_GET -k --noproxy '*' -s $url >/dev/null; do sleep 1; done"
}
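
A usage sketch for the ``CURL_GET`` macro introduced above; it matters mainly
when ``SERVICE_HOST`` is a literal IPv6 address, where ``-g`` stops curl from
treating the brackets as URL-globbing characters (the exercise scripts below
use exactly this pattern):

    # works for SERVICE_HOST=[fd00::10] as well as plain IPv4 hosts
    $CURL_GET http://$SERVICE_HOST/ 2>/dev/null | grep -q '<h3.*>Log In</h3>'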
diff --git a/functions-common b/functions-common
index ecaa4d9..267dfe8 100644
--- a/functions-common
+++ b/functions-common
@@ -527,13 +527,6 @@
[[ "$(uname -m)" == "$1" ]]
}
-# Quick check for a rackspace host; n.b. rackspace provided images
-# have these Xen tools installed but a custom image may not.
-function is_rackspace {
- [ -f /usr/bin/xenstore-ls ] && \
- sudo /usr/bin/xenstore-ls vm-data | grep -q "Rackspace"
-}
-
# Determine if current distribution is a Fedora-based distribution
# (Fedora, RHEL, CentOS, etc).
# is_fedora
@@ -1608,6 +1601,25 @@
GITBRANCH[$name]=$branch
}
+# is_plugin_enabled <plugin-name> [<plugin-name> ...]
+#
+# Returns 0 (true) if any of the named plugins has been enabled.
+function is_plugin_enabled {
+ local plugins=$@
+ local plugin
+ local enabled=1
+
+ # short circuit if nothing to do
+ if [[ -z ${DEVSTACK_PLUGINS} ]]; then
+ return $enabled
+ fi
+
+ for plugin in ${plugins}; do
+ [[ ,${DEVSTACK_PLUGINS}, =~ ,${plugin}, ]] && enabled=0
+ done
+ return $enabled
+}
+
# fetch_plugins
#
# clones all plugins
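
A usage sketch for ``is_plugin_enabled``; it returns a shell status code (0 when
any named plugin is listed in ``DEVSTACK_PLUGINS``), so it composes with ``if``
just like ``is_service_enabled``. The lib/neutron change below relies on exactly
this:

    if is_plugin_enabled neutron-lbaas; then
        echo "neutron-lbaas was pulled in via enable_plugin"
    fi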
diff --git a/inc/python b/inc/python
index d9451b4..dfc4d63 100644
--- a/inc/python
+++ b/inc/python
@@ -15,6 +15,13 @@
set +o xtrace
+# Global Config Variables
+
+# PROJECT_VENV contains the name of the virtual environment for each
+# project. A null value installs to the system Python directories.
+declare -A PROJECT_VENV
+
+
# Python Functions
# ================
@@ -105,7 +112,6 @@
-r $test_req
fi
fi
- $xtrace
}
# get version of a package from global requirements file
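
A sketch of how the new ``PROJECT_VENV`` array is meant to be used; the glance
path here is purely illustrative, and in this change only tempest actually sets
an entry:

    # opt a project into a dedicated virtualenv; stack_install_service
    # (see lib/stack below) then installs into it instead of system-wide
    PROJECT_VENV["glance"]=$DEST/glance/.venv
    stack_install_service glance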
diff --git a/lib/ceilometer b/lib/ceilometer
index f509788..9db0640 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -13,21 +13,16 @@
#
# enable_service ceilometer-alarm-notifier ceilometer-alarm-evaluator
#
-# To ensure events are stored, add the following section to local.conf:
-#
-# [[post-config|$CEILOMETER_CONF]]
-# [notification]
-# store_events=True
-#
# Several variables set in the localrc section adjust common behaviors
# of Ceilometer (see within for additional settings):
#
# CEILOMETER_USE_MOD_WSGI: When True, run the api under mod_wsgi.
# CEILOMETER_PIPELINE_INTERVAL: The number of seconds between pipeline processing
# runs. Default 600.
-# CEILOMETER_BACKEND: The database backend (e.g. 'mysql', 'mongodb')
+# CEILOMETER_BACKEND: The database backend (e.g. 'mysql', 'mongodb', 'es')
# CEILOMETER_COORDINATION_URL: The URL for a group membership service provided
# by tooz.
+# CEILOMETER_EVENTS: Enable event collection
# Dependencies:
@@ -80,6 +75,7 @@
# To enable OSprofiler change value of this variable to "notifications,profiler"
CEILOMETER_NOTIFICATION_TOPICS=${CEILOMETER_NOTIFICATION_TOPICS:-notifications}
+CEILOMETER_EVENTS=${CEILOMETER_EVENTS:-True}
CEILOMETER_COORDINATION_URL=${CEILOMETER_COORDINATION_URL:-}
CEILOMETER_PIPELINE_INTERVAL=${CEILOMETER_PIPELINE_INTERVAL:-}
@@ -137,8 +133,10 @@
# cleanup_ceilometer() - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up
function cleanup_ceilometer {
- if [ "$CEILOMETER_BACKEND" != 'mysql' ] && [ "$CEILOMETER_BACKEND" != 'postgresql' ] ; then
+ if [ "$CEILOMETER_BACKEND" = 'mongodb' ] ; then
mongo ceilometer --eval "db.dropDatabase();"
+ elif [ "$CEILOMETER_BACKEND" = 'es' ] ; then
+ curl -XDELETE "localhost:9200/events_*"
fi
if [ "$CEILOMETER_USE_MOD_WSGI" == "True" ]; then
_cleanup_ceilometer_apache_wsgi
@@ -206,11 +204,21 @@
configure_auth_token_middleware $CEILOMETER_CONF ceilometer $CEILOMETER_AUTH_CACHE_DIR
+ iniset $CEILOMETER_CONF notification store_events $CEILOMETER_EVENTS
+
if [ "$CEILOMETER_BACKEND" = 'mysql' ] || [ "$CEILOMETER_BACKEND" = 'postgresql' ] ; then
iniset $CEILOMETER_CONF database alarm_connection $(database_connection_url ceilometer)
iniset $CEILOMETER_CONF database event_connection $(database_connection_url ceilometer)
iniset $CEILOMETER_CONF database metering_connection $(database_connection_url ceilometer)
iniset $CEILOMETER_CONF DEFAULT collector_workers $API_WORKERS
+ elif [ "$CEILOMETER_BACKEND" = 'es' ] ; then
+ # es is only supported for events. we will use sql for alarming/metering.
+ iniset $CEILOMETER_CONF database alarm_connection $(database_connection_url ceilometer)
+ iniset $CEILOMETER_CONF database event_connection es://localhost:9200
+ iniset $CEILOMETER_CONF database metering_connection $(database_connection_url ceilometer)
+ iniset $CEILOMETER_CONF DEFAULT collector_workers $API_WORKERS
+ ${TOP_DIR}/pkg/elasticsearch.sh start
+ cleanup_ceilometer
else
iniset $CEILOMETER_CONF database alarm_connection mongodb://localhost:27017/ceilometer
iniset $CEILOMETER_CONF database event_connection mongodb://localhost:27017/ceilometer
@@ -264,7 +272,7 @@
rm -f $CEILOMETER_AUTH_CACHE_DIR/*
if is_service_enabled mysql postgresql; then
- if [ "$CEILOMETER_BACKEND" = 'mysql' ] || [ "$CEILOMETER_BACKEND" = 'postgresql' ] ; then
+ if [ "$CEILOMETER_BACKEND" = 'mysql' ] || [ "$CEILOMETER_BACKEND" = 'postgresql' ] || [ "$CEILOMETER_BACKEND" = 'es' ] ; then
recreate_database ceilometer
$CEILOMETER_BIN_DIR/ceilometer-dbsync
fi
@@ -293,6 +301,11 @@
elif echo $CEILOMETER_COORDINATION_URL | grep -q '^redis:'; then
install_redis
fi
+
+ if [ "$CEILOMETER_BACKEND" = 'es' ] ; then
+ ${TOP_DIR}/pkg/elasticsearch.sh download
+ ${TOP_DIR}/pkg/elasticsearch.sh install
+ fi
}
# install_ceilometerclient() - Collect source and prepare
@@ -340,7 +353,7 @@
# only die on API if it was actually intended to be turned on
if is_service_enabled ceilometer-api; then
echo "Waiting for ceilometer-api to start..."
- if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl --noproxy '*' -s http://localhost:8777/v2/ >/dev/null; do sleep 1; done"; then
+ if ! wait_for_service $SERVICE_TIMEOUT $CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/v2/; then
die $LINENO "ceilometer-api did not start"
fi
fi
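
A ``local.conf`` sketch for exercising the new Elasticsearch event store; with
this backend, alarming and metering still use SQL while events go to the local
Elasticsearch instance managed by ``pkg/elasticsearch.sh``:

    [[local|localrc]]
    CEILOMETER_BACKEND=es
    CEILOMETER_EVENTS=True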
diff --git a/lib/cinder_backends/lvm b/lib/cinder_backends/lvm
index d83c31a..52fc6fb 100644
--- a/lib/cinder_backends/lvm
+++ b/lib/cinder_backends/lvm
@@ -78,9 +78,9 @@
for pv_info in $(sudo pvs --noheadings -o name,vg_name --separator ';'); do
echo_summary "Evaluate PV info for Cinder lvm.conf: $pv_info"
- IFS=';' read pv vg <<< $pv_info
+ IFS=';' read pv vg <<< "$pv_info"
for line in ${conf_entries}; do
- IFS='=' read label group <<< $line
+ IFS='=' read label group <<< "$line"
group=$(echo $group|sed "s/^ *//g")
if [[ "$vg" == "$group" ]]; then
new="\"a$pv/\", "
diff --git a/lib/databases/mysql b/lib/databases/mysql
index c8ceec2..70073c4 100644
--- a/lib/databases/mysql
+++ b/lib/databases/mysql
@@ -151,6 +151,9 @@
else
exit_distro_not_supported "mysql installation"
fi
+
+ # Install Python client module
+ pip_install MySQL-python
}
function database_connection_url_mysql {
diff --git a/lib/databases/postgresql b/lib/databases/postgresql
index 317e0eb..e891a08 100644
--- a/lib/databases/postgresql
+++ b/lib/databases/postgresql
@@ -100,6 +100,9 @@
else
exit_distro_not_supported "postgresql installation"
fi
+
+ # Install Python client module
+ pip_install psycopg2
}
function database_connection_url_postgresql {
diff --git a/lib/horizon b/lib/horizon
index a8e83f9..c6e3692 100644
--- a/lib/horizon
+++ b/lib/horizon
@@ -94,6 +94,7 @@
cp $HORIZON_SETTINGS $local_settings
_horizon_config_set $local_settings "" COMPRESS_OFFLINE True
+ _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_DEFAULT_ROLE \"Member\"
_horizon_config_set $local_settings "" OPENSTACK_HOST \"${KEYSTONE_SERVICE_HOST}\"
_horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_URL "\"${KEYSTONE_SERVICE_PROTOCOL}://${KEYSTONE_SERVICE_HOST}:${KEYSTONE_SERVICE_PORT}/v2.0\""
diff --git a/lib/ironic b/lib/ironic
index 7ffa6a5..ade889e 100644
--- a/lib/ironic
+++ b/lib/ironic
@@ -238,7 +238,15 @@
die $LINENO "PXE boot file $IRONIC_PXE_BOOT_IMAGE not found."
fi
- cp $IRONIC_PXE_BOOT_IMAGE $IRONIC_TFTPBOOT_DIR
+ # Copy PXE binary
+ if [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then
+ cp $IRONIC_PXE_BOOT_IMAGE $IRONIC_TFTPBOOT_DIR
+ else
+ # The Syslinux >= 5.00 pxelinux.0 binary is no longer "stand-alone";
+ # it depends on some c32 modules to work correctly.
+ # More info: http://www.syslinux.org/wiki/index.php/Library_modules
+ cp -aR $(dirname $IRONIC_PXE_BOOT_IMAGE)/*.{c32,0} $IRONIC_TFTPBOOT_DIR
+ fi
}
# configure_ironic() - Set config files, create data dirs, etc
@@ -278,7 +286,18 @@
function configure_ironic_api {
iniset $IRONIC_CONF_FILE DEFAULT auth_strategy keystone
iniset $IRONIC_CONF_FILE DEFAULT policy_file $IRONIC_POLICY_JSON
- configure_auth_token_middleware $IRONIC_CONF_FILE ironic $IRONIC_AUTH_CACHE_DIR/api
+
+ # TODO(Yuki Nishiwaki): This is a temporary work-around until Ironic is fixed (bug #1422632).
+ # This code should be changed to use configure_auth_token_middleware
+ # once Ironic conforms to the new auth plugin.
+ iniset $IRONIC_CONF_FILE keystone_authtoken identity_uri $KEYSTONE_AUTH_URI
+ iniset $IRONIC_CONF_FILE keystone_authtoken auth_uri $KEYSTONE_SERVICE_URI/v2.0
+ iniset $IRONIC_CONF_FILE keystone_authtoken admin_user ironic
+ iniset $IRONIC_CONF_FILE keystone_authtoken admin_password $SERVICE_PASSWORD
+ iniset $IRONIC_CONF_FILE keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
+ iniset $IRONIC_CONF_FILE keystone_authtoken cafile $SSL_BUNDLE_FILE
+ iniset $IRONIC_CONF_FILE keystone_authtoken signing_dir $IRONIC_AUTH_CACHE_DIR/api
+
iniset_rpc_backend ironic $IRONIC_CONF_FILE DEFAULT
iniset $IRONIC_CONF_FILE api port $IRONIC_SERVICE_PORT
diff --git a/lib/keystone b/lib/keystone
index 102d188..0968445 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -564,7 +564,7 @@
# Check that the keystone service is running. Even if the tls tunnel
# should be enabled, make sure the internal port is checked using
# unencryted traffic at this point.
- if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl --noproxy '*' -k -s $auth_protocol://$KEYSTONE_SERVICE_HOST:$service_port/v$IDENTITY_API_VERSION/ >/dev/null; do sleep 1; done"; then
+ if ! wait_for_service $SERVICE_TIMEOUT $auth_protocol://$KEYSTONE_SERVICE_HOST:$service_port/v$IDENTITY_API_VERSION/; then
die $LINENO "keystone did not start"
fi
diff --git a/lib/lvm b/lib/lvm
index c183f09..39eed00 100644
--- a/lib/lvm
+++ b/lib/lvm
@@ -103,14 +103,17 @@
function init_lvm_volume_group {
local vg=$1
local size=$2
- # Start with a clean volume group
- _create_lvm_volume_group $vg $size
+ # Start the lvmetad and tgtd services
if is_fedora || is_suse; then
- # service is not started by default
+ # these services are not started by default
+ start_service lvm2-lvmetad
start_service tgtd
fi
+ # Start with a clean volume group
+ _create_lvm_volume_group $vg $size
+
# Remove iscsi targets
sudo tgtadm --op show --mode target | grep Target | cut -f3 -d ' ' | sudo xargs -n1 tgt-admin --delete || true
diff --git a/lib/neutron b/lib/neutron
index 8d27feb..a0f9c36 100755
--- a/lib/neutron
+++ b/lib/neutron
@@ -100,10 +100,8 @@
# Set up default directories
GITDIR["python-neutronclient"]=$DEST/python-neutronclient
-
NEUTRON_DIR=$DEST/neutron
NEUTRON_FWAAS_DIR=$DEST/neutron-fwaas
-NEUTRON_LBAAS_DIR=$DEST/neutron-lbaas
NEUTRON_VPNAAS_DIR=$DEST/neutron-vpnaas
NEUTRON_AUTH_CACHE_DIR=${NEUTRON_AUTH_CACHE_DIR:-/var/cache/neutron}
@@ -116,6 +114,7 @@
NEUTRON_CONF_DIR=/etc/neutron
NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf
+
export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"}
# Agent binaries. Note, binary paths for other agents are set in per-service
@@ -326,12 +325,6 @@
# Please refer to ``lib/neutron_plugins/README.md`` for details.
source $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN
-# Agent loadbalancer service plugin functions
-# -------------------------------------------
-
-# Hardcoding for 1 service plugin for now
-source $TOP_DIR/lib/neutron_plugins/services/loadbalancer
-
# Agent metering service plugin functions
# -------------------------------------------
@@ -358,6 +351,17 @@
TEMPEST_SERVICES+=,neutron
+# For backward compatibility, if the q-lbaas service is enabled, make sure the
+# neutron-lbaas plugin is loaded. This hook should be removed in a future release,
+# perhaps as early as Liberty.
+
+if is_service_enabled q-lbaas; then
+ if ! is_plugin_enabled neutron-lbaas; then
+ DEPRECATED_TEXT+="External plugin neutron-lbaas has been automatically activated. Please add the appropriate enable_plugin to your local.conf. This will be removed in the Liberty cycle."
+ enable_plugin "neutron-lbaas" ${NEUTRON_LBAAS_REPO} ${NEUTRON_LBAAS_BRANCH}
+ fi
+fi
+
# Save trace setting
XTRACE=$(set +o | grep xtrace)
set +o xtrace
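
What the deprecation message above asks operators to add, sketched as a
``local.conf`` entry alongside their existing ``enable_service q-lbaas`` line;
the repository URL follows the usual openstack.org layout and is illustrative:

    [[local|localrc]]
    enable_plugin neutron-lbaas https://git.openstack.org/openstack/neutron-lbaas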
@@ -425,9 +429,7 @@
iniset_rpc_backend neutron $NEUTRON_CONF DEFAULT
# goes before q-svc to init Q_SERVICE_PLUGIN_CLASSES
- if is_service_enabled q-lbaas; then
- _configure_neutron_lbaas
- fi
+
if is_service_enabled q-metering; then
_configure_neutron_metering
fi
@@ -538,13 +540,16 @@
die_if_not_set $LINENO PHYSICAL_NETWORK "You must specify the PHYSICAL_NETWORK"
die_if_not_set $LINENO PROVIDER_NETWORK_TYPE "You must specifiy the PROVIDER_NETWORK_TYPE"
NET_ID=$(neutron net-create $PHYSICAL_NETWORK --tenant_id $TENANT_ID --provider:network_type $PROVIDER_NETWORK_TYPE --provider:physical_network "$PHYSICAL_NETWORK" ${SEGMENTATION_ID:+--provider:segmentation_id $SEGMENTATION_ID} --shared | grep ' id ' | get_field 2)
+ die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK $TENANT_ID"
if [[ "$IP_VERSION" =~ 4.* ]]; then
SUBNET_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} --name $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2)
+ die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $PROVIDER_SUBNET_NAME $TENANT_ID"
fi
if [[ "$IP_VERSION" =~ .*6 ]]; then
SUBNET_V6_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 6 --ipv6-address-mode slaac --gateway $V6_NETWORK_GATEWAY --name $PROVIDER_SUBNET_NAME_V6 $NET_ID $FIXED_RANGE_V6 | grep 'id' | get_field 2)
+ die_if_not_set $LINENO SUBNET_V6_ID "Failure creating SUBNET_V6_ID for $PROVIDER_SUBNET_NAME_V6 $TENANT_ID"
fi
sudo ip link set $OVS_PHYSICAL_BRIDGE up
@@ -602,7 +607,8 @@
recreate_database $Q_DB_NAME
# Run Neutron db migrations
$NEUTRON_BIN_DIR/neutron-db-manage --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE upgrade head
- for svc in fwaas lbaas vpnaas; do
+
+ for svc in fwaas vpnaas; do
if [ "$svc" = "vpnaas" ]; then
q_svc="q-vpn"
else
@@ -622,10 +628,6 @@
git_clone $NEUTRON_FWAAS_REPO $NEUTRON_FWAAS_DIR $NEUTRON_FWAAS_BRANCH
setup_develop $NEUTRON_FWAAS_DIR
fi
- if is_service_enabled q-lbaas; then
- git_clone $NEUTRON_LBAAS_REPO $NEUTRON_LBAAS_DIR $NEUTRON_LBAAS_BRANCH
- setup_develop $NEUTRON_LBAAS_DIR
- fi
if is_service_enabled q-vpn; then
git_clone $NEUTRON_VPNAAS_REPO $NEUTRON_VPNAAS_DIR $NEUTRON_VPNAAS_BRANCH
setup_develop $NEUTRON_VPNAAS_DIR
@@ -669,10 +671,6 @@
if is_service_enabled q-agt q-dhcp q-l3; then
neutron_plugin_install_agent_packages
fi
-
- if is_service_enabled q-lbaas; then
- neutron_agent_lbaas_install_agent_packages
- fi
}
# Start running processes, including screen
@@ -732,10 +730,6 @@
run_process q-domua "python $AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE.domU"
fi
- if is_service_enabled q-lbaas; then
- run_process q-lbaas "python $AGENT_LBAAS_BINARY --config-file $NEUTRON_CONF --config-file=$LBAAS_AGENT_CONF_FILENAME"
- fi
-
if is_service_enabled q-metering; then
run_process q-metering "python $AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME"
fi
@@ -759,9 +753,6 @@
stop_process q-agt
- if is_service_enabled q-lbaas; then
- neutron_lbaas_stop
- fi
if is_service_enabled q-fwaas; then
neutron_fwaas_stop
fi
@@ -789,12 +780,11 @@
fi
# delete all namespaces created by neutron
- for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|qlbaas|fip|snat)-[0-9a-f-]*'); do
+ for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|fip|snat)-[0-9a-f-]*'); do
sudo ip netns delete ${ns}
done
}
-
function _create_neutron_conf_dir {
# Put config files in ``NEUTRON_CONF_DIR`` for everyone to find
if [[ ! -d $NEUTRON_CONF_DIR ]]; then
@@ -931,7 +921,7 @@
Q_L3_ROUTER_PER_TENANT=$Q_USE_NAMESPACE
if is_service_enabled q-vpn; then
- cp $NEUTRON_VPNAAS_DIR/etc/vpn_agent.ini $Q_VPN_CONF_FILE
+ neutron_vpn_configure_agent
fi
cp $NEUTRON_DIR/etc/l3_agent.ini $Q_L3_CONF_FILE
@@ -964,14 +954,6 @@
iniset $NEUTRON_CONF DEFAULT notification_driver messaging
}
-function _configure_neutron_lbaas {
- if [ -f $NEUTRON_LBAAS_DIR/etc/neutron_lbaas.conf ]; then
- cp $NEUTRON_LBAAS_DIR/etc/neutron_lbaas.conf $NEUTRON_CONF_DIR
- fi
- neutron_agent_lbaas_configure_common
- neutron_agent_lbaas_configure_agent
-}
-
function _configure_neutron_metering {
neutron_agent_metering_configure_common
neutron_agent_metering_configure_agent
diff --git a/lib/neutron_plugins/midonet b/lib/neutron_plugins/midonet
index 23ad8b2..9e72aa0 100644
--- a/lib/neutron_plugins/midonet
+++ b/lib/neutron_plugins/midonet
@@ -1,100 +1,4 @@
#!/bin/bash
-#
-# Neutron MidoNet plugin
-# ----------------------
-MIDONET_DIR=${MIDONET_DIR:-$DEST/midonet}
-MIDONET_API_PORT=${MIDONET_API_PORT:-8080}
-MIDONET_API_URL=${MIDONET_API_URL:-http://localhost:$MIDONET_API_PORT/midonet-api}
-
-# Save trace setting
-MN_XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-function is_neutron_ovs_base_plugin {
- # MidoNet does not use l3-agent
- # 0 means True here
- return 1
-}
-
-function neutron_plugin_create_nova_conf {
- NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"}
-}
-
-function neutron_plugin_install_agent_packages {
- :
-}
-
-function neutron_plugin_configure_common {
- Q_PLUGIN_CONF_PATH=etc/neutron/plugins/midonet
- Q_PLUGIN_CONF_FILENAME=midonet.ini
- Q_PLUGIN_CLASS="neutron.plugins.midonet.plugin.MidonetPluginV2"
-
- # MidoNet implements LBaaS API in the plugin, not as an LBaaS driver.
- # In this model, the plugin references the 'neutron_lbaas' module but
- # does not require starting an LBaaS service. Devstack, however, clones
- # 'neutron_lbaas' only if 'lbaas' service is enabled. To get around this,
- # always clone 'neutron_lbaas' so that it is made available to the plugin.
- # Also, discontinue if the 'lbaas' service is enabled.
- if is_service_enabled q-lbaas; then
- die $LINENO "LBaaS service should be disabled for the MidoNet plugin"
- fi
- git_clone $NEUTRON_LBAAS_REPO $NEUTRON_LBAAS_DIR $NEUTRON_LBAAS_BRANCH
- setup_develop $NEUTRON_LBAAS_DIR
-}
-
-function neutron_plugin_configure_debug_command {
- :
-}
-
-function neutron_plugin_configure_dhcp_agent {
- DHCP_DRIVER=${DHCP_DRIVER:-"neutron.plugins.midonet.agent.midonet_driver.DhcpNoOpDriver"}
- neutron_plugin_setup_interface_driver $Q_DHCP_CONF_FILE
- iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_driver $DHCP_DRIVER
- iniset $Q_DHCP_CONF_FILE DEFAULT use_namespaces True
- iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata True
-}
-
-function neutron_plugin_configure_l3_agent {
- die $LINENO "q-l3 must not be executed with MidoNet plugin!"
-}
-
-function neutron_plugin_configure_plugin_agent {
- die $LINENO "q-agt must not be executed with MidoNet plugin!"
-}
-
-function neutron_plugin_configure_service {
- if [[ "$MIDONET_API_URL" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE MIDONET midonet_uri $MIDONET_API_URL
- fi
- if [[ "$MIDONET_USERNAME" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE MIDONET username $MIDONET_USERNAME
- fi
- if [[ "$MIDONET_PASSWORD" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE MIDONET password $MIDONET_PASSWORD
- fi
- if [[ "$MIDONET_PROJECT_ID" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE MIDONET project_id $MIDONET_PROJECT_ID
- fi
-
- Q_L3_ENABLED=True
- Q_L3_ROUTER_PER_TENANT=True
-}
-
-function neutron_plugin_setup_interface_driver {
- local conf_file=$1
- iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.MidonetInterfaceDriver
-}
-
-function has_neutron_plugin_security_group {
- # 0 means True here
- return 0
-}
-
-function neutron_plugin_check_adv_test_requirements {
- # 0 means True here
- return 1
-}
-
-# Restore xtrace
-$MN_XTRACE
+# REVISIT(devvesa): This file is intentionally left empty
+# in order to keep Q_PLUGIN=midonet working.
diff --git a/lib/neutron_plugins/services/loadbalancer b/lib/neutron_plugins/services/loadbalancer
deleted file mode 100644
index f465cc9..0000000
--- a/lib/neutron_plugins/services/loadbalancer
+++ /dev/null
@@ -1,49 +0,0 @@
-# Neutron loadbalancer plugin
-# ---------------------------
-
-# Save trace setting
-LB_XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-
-AGENT_LBAAS_BINARY="$NEUTRON_BIN_DIR/neutron-lbaas-agent"
-LBAAS_PLUGIN=neutron_lbaas.services.loadbalancer.plugin.LoadBalancerPlugin
-
-function neutron_agent_lbaas_install_agent_packages {
- if is_ubuntu || is_fedora || is_suse; then
- install_package haproxy
- fi
-}
-
-function neutron_agent_lbaas_configure_common {
- _neutron_service_plugin_class_add $LBAAS_PLUGIN
- _neutron_deploy_rootwrap_filters $NEUTRON_LBAAS_DIR
-}
-
-function neutron_agent_lbaas_configure_agent {
- LBAAS_AGENT_CONF_PATH=/etc/neutron/services/loadbalancer/haproxy
- mkdir -p $LBAAS_AGENT_CONF_PATH
-
- LBAAS_AGENT_CONF_FILENAME="$LBAAS_AGENT_CONF_PATH/lbaas_agent.ini"
-
- cp $NEUTRON_LBAAS_DIR/etc/lbaas_agent.ini $LBAAS_AGENT_CONF_FILENAME
-
- # ovs_use_veth needs to be set before the plugin configuration
- # occurs to allow plugins to override the setting.
- iniset $LBAAS_AGENT_CONF_FILENAME DEFAULT ovs_use_veth $Q_OVS_USE_VETH
-
- neutron_plugin_setup_interface_driver $LBAAS_AGENT_CONF_FILENAME
-
- if is_fedora; then
- iniset $LBAAS_AGENT_CONF_FILENAME DEFAULT user_group "nobody"
- iniset $LBAAS_AGENT_CONF_FILENAME haproxy user_group "nobody"
- fi
-}
-
-function neutron_lbaas_stop {
- pids=$(ps aux | awk '/haproxy/ { print $2 }')
- [ ! -z "$pids" ] && sudo kill $pids
-}
-
-# Restore xtrace
-$LB_XTRACE
diff --git a/lib/neutron_plugins/services/vpn b/lib/neutron_plugins/services/vpn
index 5912eab..4d6a2bf 100644
--- a/lib/neutron_plugins/services/vpn
+++ b/lib/neutron_plugins/services/vpn
@@ -12,6 +12,13 @@
function neutron_vpn_install_agent_packages {
install_package $IPSEC_PACKAGE
+ if is_ubuntu && [[ "$IPSEC_PACKAGE" == "strongswan" ]]; then
+ sudo ln -sf /etc/apparmor.d/usr.lib.ipsec.charon /etc/apparmor.d/disable/
+ sudo ln -sf /etc/apparmor.d/usr.lib.ipsec.stroke /etc/apparmor.d/disable/
+ # NOTE: Due to https://bugs.launchpad.net/ubuntu/+source/apparmor/+bug/1387220
+ # one must use 'sudo start apparmor ACTION=reload' for Ubuntu 14.10
+ restart_service apparmor
+ fi
}
function neutron_vpn_configure_common {
@@ -19,6 +26,18 @@
_neutron_deploy_rootwrap_filters $NEUTRON_VPNAAS_DIR
}
+function neutron_vpn_configure_agent {
+ cp $NEUTRON_VPNAAS_DIR/etc/vpn_agent.ini $Q_VPN_CONF_FILE
+ if [[ "$IPSEC_PACKAGE" == "strongswan" ]]; then
+ iniset_multiline $Q_VPN_CONF_FILE vpnagent vpn_device_driver neutron_vpnaas.services.vpn.device_drivers.strongswan_ipsec.StrongSwanDriver
+ if is_fedora; then
+ iniset $Q_VPN_CONF_FILE strongswan default_config_area /usr/share/strongswan/templates/config/strongswan.d
+ fi
+ else
+ iniset_multiline $Q_VPN_CONF_FILE vpnagent vpn_device_driver neutron_vpnaas.services.vpn.device_drivers.ipsec.OpenSwanDriver
+ fi
+}
+
function neutron_vpn_stop {
local ipsec_data_dir=$DATA_DIR/neutron/ipsec
local pids
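
Selecting the strongSwan path configured above is driven by a single variable in
``local.conf``; when it is left unset, the agent falls back to the OpenSwan
driver branch shown in the ``else`` clause:

    [[local|localrc]]
    IPSEC_PACKAGE=strongswan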
diff --git a/lib/neutron_plugins/vmware_nsx b/lib/neutron_plugins/vmware_nsx
index 4cbedd6..b6c1c9c 100644
--- a/lib/neutron_plugins/vmware_nsx
+++ b/lib/neutron_plugins/vmware_nsx
@@ -1,147 +1,10 @@
#!/bin/bash
-#
-# Neutron VMware NSX plugin
-# -------------------------
-# Save trace setting
-NSX_XTRACE=$(set +o | grep xtrace)
-set +o xtrace
+# This file is needed so Q_PLUGIN=vmware_nsx will work.
-source $TOP_DIR/lib/neutron_plugins/ovs_base
-
-function setup_integration_bridge {
- _neutron_ovs_base_setup_bridge $OVS_BRIDGE
- # Set manager to NSX controller (1st of list)
- if [[ "$NSX_CONTROLLERS" != "" ]]; then
- # Get the first controller
- controllers=(${NSX_CONTROLLERS//,/ })
- OVS_MGR_IP=${controllers[0]}
- else
- die $LINENO "Error - No controller specified. Unable to set a manager for OVS"
- fi
- sudo ovs-vsctl set-manager ssl:$OVS_MGR_IP
-}
-
-function is_neutron_ovs_base_plugin {
- # NSX uses OVS, but not the l3-agent
- return 0
-}
-
-function neutron_plugin_create_nova_conf {
- # if n-cpu is enabled, then setup integration bridge
- if is_service_enabled n-cpu; then
- setup_integration_bridge
- fi
-}
-
-function neutron_plugin_install_agent_packages {
- # VMware NSX Plugin does not run q-agt, but it currently needs dhcp and metadata agents
- _neutron_ovs_base_install_agent_packages
-}
-
-function neutron_plugin_configure_common {
- Q_PLUGIN_CONF_PATH=etc/neutron/plugins/vmware
- Q_PLUGIN_CONF_FILENAME=nsx.ini
- Q_PLUGIN_CLASS="neutron.plugins.vmware.plugin.NsxPlugin"
-}
-
-function neutron_plugin_configure_debug_command {
- sudo ovs-vsctl --no-wait -- --may-exist add-br $PUBLIC_BRIDGE
- iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT external_network_bridge "$PUBLIC_BRIDGE"
-}
-
-function neutron_plugin_configure_dhcp_agent {
- setup_integration_bridge
- iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata True
- iniset $Q_DHCP_CONF_FILE DEFAULT enable_metadata_network True
- iniset $Q_DHCP_CONF_FILE DEFAULT ovs_use_veth True
-}
-
-function neutron_plugin_configure_l3_agent {
- # VMware NSX plugin does not run L3 agent
- die $LINENO "q-l3 should not be executed with VMware NSX plugin!"
-}
-
-function neutron_plugin_configure_plugin_agent {
- # VMware NSX plugin does not run L2 agent
- die $LINENO "q-agt must not be executed with VMware NSX plugin!"
-}
-
-function neutron_plugin_configure_service {
- if [[ "$MAX_LP_PER_BRIDGED_LS" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE nsx max_lp_per_bridged_ls $MAX_LP_PER_BRIDGED_LS
- fi
- if [[ "$MAX_LP_PER_OVERLAY_LS" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE nsx max_lp_per_overlay_ls $MAX_LP_PER_OVERLAY_LS
- fi
- if [[ "$FAILOVER_TIME" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE nsx failover_time $FAILOVER_TIME
- fi
- if [[ "$CONCURRENT_CONNECTIONS" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE nsx concurrent_connections $CONCURRENT_CONNECTIONS
- fi
-
- if [[ "$DEFAULT_TZ_UUID" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_tz_uuid $DEFAULT_TZ_UUID
- else
- die $LINENO "The VMware NSX plugin won't work without a default transport zone."
- fi
- if [[ "$DEFAULT_L3_GW_SVC_UUID" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_l3_gw_service_uuid $DEFAULT_L3_GW_SVC_UUID
- Q_L3_ENABLED=True
- Q_L3_ROUTER_PER_TENANT=True
- iniset /$Q_PLUGIN_CONF_FILE nsx metadata_mode access_network
- fi
- if [[ "$DEFAULT_L2_GW_SVC_UUID" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_l2_gw_service_uuid $DEFAULT_L2_GW_SVC_UUID
- fi
- # NSX_CONTROLLERS must be a comma separated string
- if [[ "$NSX_CONTROLLERS" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE DEFAULT nsx_controllers $NSX_CONTROLLERS
- else
- die $LINENO "The VMware NSX plugin needs at least an NSX controller."
- fi
- if [[ "$NSX_USER" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE DEFAULT nsx_user $NSX_USER
- fi
- if [[ "$NSX_PASSWORD" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE DEFAULT nsx_password $NSX_PASSWORD
- fi
- if [[ "$NSX_HTTP_TIMEOUT" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE DEFAULT http_timeout $NSX_HTTP_TIMEOUT
- fi
- if [[ "$NSX_RETRIES" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE DEFAULT retries $NSX_RETRIES
- fi
- if [[ "$NSX_REDIRECTS" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE DEFAULT redirects $NSX_REDIRECTS
- fi
- if [[ "$AGENT_MODE" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE nsx agent_mode $AGENT_MODE
- if [[ "$AGENT_MODE" == "agentless" ]]; then
- if [[ "$DEFAULT_SERVICE_CLUSTER_UUID" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_service_cluster_uuid $DEFAULT_SERVICE_CLUSTER_UUID
- else
- die $LINENO "Agentless mode requires a service cluster."
- fi
- iniset /$Q_PLUGIN_CONF_FILE nsx_metadata metadata_server_address $Q_META_DATA_IP
- fi
- fi
-}
-
-function neutron_plugin_setup_interface_driver {
- local conf_file=$1
- iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver
-}
-
+# FIXME(salv-orlando): This function should not be here, but unfortunately
+# devstack calls it before the external plugins are fetched
function has_neutron_plugin_security_group {
# 0 means True here
return 0
}
-
-function neutron_plugin_check_adv_test_requirements {
- is_service_enabled q-dhcp && return 0
-}
-
-# Restore xtrace
-$NSX_XTRACE
diff --git a/lib/neutron_plugins/vmware_nsx_v b/lib/neutron_plugins/vmware_nsx_v
new file mode 100644
index 0000000..3d33c65
--- /dev/null
+++ b/lib/neutron_plugins/vmware_nsx_v
@@ -0,0 +1,10 @@
+#!/bin/bash
+#
+# This file is needed so Q_PLUGIN=vmware_nsx_v will work.
+
+# FIXME(salv-orlando): This function should not be here, but unfortunately
+# devstack calls it before the external plugins are fetched
+function has_neutron_plugin_security_group {
+ # 0 means True here
+ return 0
+}
diff --git a/lib/neutron_thirdparty/midonet b/lib/neutron_thirdparty/midonet
deleted file mode 100644
index 2c82d48..0000000
--- a/lib/neutron_thirdparty/midonet
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/bin/bash
-#
-# MidoNet
-# -------
-
-# This file implements functions required to configure MidoNet as the third-party
-# system used with devstack's Neutron. To include this file, specify the following
-# variables in localrc:
-#
-# * enable_service midonet
-#
-
-# MidoNet devstack destination dir
-MIDONET_DIR=${MIDONET_DIR:-$DEST/midonet}
-
-# MidoNet client repo
-MIDONET_CLIENT_REPO=${MIDONET_CLIENT_REPO:-https://github.com/midokura/python-midonetclient.git}
-MIDONET_CLIENT_BRANCH=${MIDONET_CLIENT_BRANCH:-master}
-MIDONET_CLIENT_DIR=${MIDONET_CLIENT_DIR:-$MIDONET_DIR/python-midonetclient}
-
-# Save trace setting
-MN3_XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-function configure_midonet {
- :
-}
-
-function init_midonet {
- :
-}
-
-function install_midonet {
- git_clone $MIDONET_CLIENT_REPO $MIDONET_CLIENT_DIR $MIDONET_CLIENT_BRANCH
- export PYTHONPATH=$MIDONET_CLIENT_DIR/src:$PYTHONPATH
-}
-
-function start_midonet {
- :
-}
-
-function stop_midonet {
- :
-}
-
-function check_midonet {
- :
-}
-
-# Restore xtrace
-$MN3_XTRACE
diff --git a/lib/neutron_thirdparty/vmware_nsx b/lib/neutron_thirdparty/vmware_nsx
index 7027a29..03853a9 100644
--- a/lib/neutron_thirdparty/vmware_nsx
+++ b/lib/neutron_thirdparty/vmware_nsx
@@ -1,89 +1,2 @@
-#!/bin/bash
-#
-# VMware NSX
-# ----------
-
-# This third-party addition can be used to configure connectivity between a DevStack instance
-# and an NSX Gateway in dev/test environments. In order to use this correctly, the following
-# env variables need to be set (e.g. in your localrc file):
-#
-# * enable_service vmware_nsx --> to execute this third-party addition
-# * PUBLIC_BRIDGE --> bridge used for external connectivity, typically br-ex
-# * NSX_GATEWAY_NETWORK_INTERFACE --> interface used to communicate with the NSX Gateway
-# * NSX_GATEWAY_NETWORK_CIDR --> CIDR to configure $PUBLIC_BRIDGE, e.g. 172.24.4.211/24
-
-# Save trace setting
-NSX3_XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-# This is the interface that connects the Devstack instance
-# to an network that allows it to talk to the gateway for
-# testing purposes
-NSX_GATEWAY_NETWORK_INTERFACE=${NSX_GATEWAY_NETWORK_INTERFACE:-eth2}
-# Re-declare floating range as it's needed also in stop_vmware_nsx, which
-# is invoked by unstack.sh
-FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.0/24}
-
-function configure_vmware_nsx {
- :
-}
-
-function init_vmware_nsx {
- if ! is_set NSX_GATEWAY_NETWORK_CIDR; then
- NSX_GATEWAY_NETWORK_CIDR=$PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/}
- echo "The IP address to set on $PUBLIC_BRIDGE was not specified. "
- echo "Defaulting to "$NSX_GATEWAY_NETWORK_CIDR
- fi
- # Make sure the interface is up, but not configured
- sudo ip link set $NSX_GATEWAY_NETWORK_INTERFACE up
- # Save and then flush the IP addresses on the interface
- addresses=$(ip addr show dev $NSX_GATEWAY_NETWORK_INTERFACE | grep inet | awk {'print $2'})
- sudo ip addr flush $NSX_GATEWAY_NETWORK_INTERFACE
- # Use the PUBLIC Bridge to route traffic to the NSX gateway
- # NOTE(armando-migliaccio): if running in a nested environment this will work
- # only with mac learning enabled, portsecurity and security profiles disabled
- # The public bridge might not exist for the NSX plugin if Q_USE_DEBUG_COMMAND is off
- # Try to create it anyway
- sudo ovs-vsctl --may-exist add-br $PUBLIC_BRIDGE
- sudo ovs-vsctl --may-exist add-port $PUBLIC_BRIDGE $NSX_GATEWAY_NETWORK_INTERFACE
- nsx_gw_net_if_mac=$(ip link show $NSX_GATEWAY_NETWORK_INTERFACE | awk '/ether/ {print $2}')
- sudo ip link set address $nsx_gw_net_if_mac dev $PUBLIC_BRIDGE
- for address in $addresses; do
- sudo ip addr add dev $PUBLIC_BRIDGE $address
- done
- sudo ip addr add dev $PUBLIC_BRIDGE $NSX_GATEWAY_NETWORK_CIDR
- sudo ip link set $PUBLIC_BRIDGE up
-}
-
-function install_vmware_nsx {
- :
-}
-
-function start_vmware_nsx {
- :
-}
-
-function stop_vmware_nsx {
- if ! is_set NSX_GATEWAY_NETWORK_CIDR; then
- NSX_GATEWAY_NETWORK_CIDR=$PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/}
- echo "The IP address expected on $PUBLIC_BRIDGE was not specified. "
- echo "Defaulting to "$NSX_GATEWAY_NETWORK_CIDR
- fi
- sudo ip addr del $NSX_GATEWAY_NETWORK_CIDR dev $PUBLIC_BRIDGE
- # Save and then flush remaining addresses on the interface
- addresses=$(ip addr show dev $PUBLIC_BRIDGE | grep inet | awk {'print $2'})
- sudo ip addr flush $PUBLIC_BRIDGE
- # Try to detach physical interface from PUBLIC_BRIDGE
- sudo ovs-vsctl del-port $NSX_GATEWAY_NETWORK_INTERFACE
- # Restore addresses on NSX_GATEWAY_NETWORK_INTERFACE
- for address in $addresses; do
- sudo ip addr add dev $NSX_GATEWAY_NETWORK_INTERFACE $address
- done
-}
-
-function check_vmware_nsx {
- neutron-check-nsx-config $NEUTRON_CONF_DIR/plugins/vmware/nsx.ini
-}
-
-# Restore xtrace
-$NSX3_XTRACE
+# REVISIT(roeyc): this file is left empty so that 'enable_service vmware_nsx'
+# continues to work.
diff --git a/lib/nova b/lib/nova
index 74a3411..e9e78c7 100644
--- a/lib/nova
+++ b/lib/nova
@@ -566,6 +566,10 @@
if is_service_enabled tls-proxy; then
iniset $NOVA_CONF DEFAULT ec2_listen_port $EC2_SERVICE_PORT_INT
fi
+
+ if is_service_enabled n-sproxy; then
+ iniset $NOVA_CONF serial_console enabled True
+ fi
}
function init_nova_cells {
@@ -764,6 +768,7 @@
run_process n-xvnc "$NOVA_BIN_DIR/nova-xvpvncproxy --config-file $api_cell_conf"
run_process n-spice "$NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $api_cell_conf --web $SPICE_WEB_DIR"
run_process n-cauth "$NOVA_BIN_DIR/nova-consoleauth --config-file $api_cell_conf"
+ run_process n-sproxy "$NOVA_BIN_DIR/nova-serialproxy --config-file $api_cell_conf"
# Starting the nova-objectstore only if swift3 service is not enabled.
# Swift will act as s3 objectstore.
@@ -794,7 +799,7 @@
# Kill the nova screen windows
# Some services are listed here twice since more than one instance
# of a service may be running in certain configs.
- for serv in n-api n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta n-obj; do
+ for serv in n-api n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta n-obj n-sproxy; do
stop_process $serv
done
}
diff --git a/lib/oslo b/lib/oslo
index 31c9d34..18cddc1 100644
--- a/lib/oslo
+++ b/lib/oslo
@@ -23,6 +23,7 @@
# Defaults
# --------
GITDIR["cliff"]=$DEST/cliff
+GITDIR["debtcollector"]=$DEST/debtcollector
GITDIR["oslo.concurrency"]=$DEST/oslo.concurrency
GITDIR["oslo.config"]=$DEST/oslo.config
GITDIR["oslo.context"]=$DEST/oslo.context
@@ -31,6 +32,7 @@
GITDIR["oslo.log"]=$DEST/oslo.log
GITDIR["oslo.messaging"]=$DEST/oslo.messaging
GITDIR["oslo.middleware"]=$DEST/oslo.middleware
+GITDIR["oslo.policy"]=$DEST/oslo.policy
GITDIR["oslo.rootwrap"]=$DEST/oslo.rootwrap
GITDIR["oslo.serialization"]=$DEST/oslo.serialization
GITDIR["oslo.utils"]=$DEST/oslo.utils
@@ -57,6 +59,7 @@
# install_oslo() - Collect source and prepare
function install_oslo {
_do_install_oslo_lib "cliff"
+ _do_install_oslo_lib "debtcollector"
_do_install_oslo_lib "oslo.concurrency"
_do_install_oslo_lib "oslo.config"
_do_install_oslo_lib "oslo.context"
@@ -65,6 +68,7 @@
_do_install_oslo_lib "oslo.log"
_do_install_oslo_lib "oslo.messaging"
_do_install_oslo_lib "oslo.middleware"
+ _do_install_oslo_lib "oslo.policy"
_do_install_oslo_lib "oslo.rootwrap"
_do_install_oslo_lib "oslo.serialization"
_do_install_oslo_lib "oslo.utils"
diff --git a/lib/sahara b/lib/sahara
index da4fbcd..a84a06f 100644
--- a/lib/sahara
+++ b/lib/sahara
@@ -39,7 +39,7 @@
SAHARA_AUTH_CACHE_DIR=${SAHARA_AUTH_CACHE_DIR:-/var/cache/sahara}
-SAHARA_ENABLED_PLUGINS=${SAHARA_ENABLED_PLUGINS:-vanilla,hdp,fake}
+SAHARA_ENABLED_PLUGINS=${SAHARA_ENABLED_PLUGINS:-vanilla,hdp,cdh,spark,fake}
# Support entry points installation of console scripts
if [[ -d $SAHARA_DIR/bin ]]; then
diff --git a/lib/stack b/lib/stack
new file mode 100644
index 0000000..9a509d8
--- /dev/null
+++ b/lib/stack
@@ -0,0 +1,29 @@
+#!/bin/bash
+#
+# lib/stack
+#
+# These functions are code snippets pulled out of stack.sh for easier
+# re-use by Grenade. They can assume the same environment that is available
+# in the lower part of stack.sh, namely that a valid stackrc and all of the
+# lib/* files for the services have been sourced.
+#
+# For clarity, all functions declared here that came from ``stack.sh``
+# shall be named with the prefix ``stack_``.
+
+
+# Generic service install; handles venv creation if configured for the service
+# stack_install_service service
+function stack_install_service {
+ local service=$1
+ if type install_${service} >/dev/null 2>&1; then
+ if [[ -n ${PROJECT_VENV[$service]:-} ]]; then
+ rm -rf ${PROJECT_VENV[$service]}
+ source $TOP_DIR/tools/build_venv.sh ${PROJECT_VENV[$service]}
+ export PIP_VIRTUAL_ENV=${PROJECT_VENV[$service]:-}
+ fi
+ install_${service}
+ if [[ -n ${PROJECT_VENV[$service]:-} ]]; then
+ unset PIP_VIRTUAL_ENV
+ fi
+ fi
+}
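
A usage sketch for ``stack_install_service``, matching how stack.sh is converted
later in this patch; the venv path is illustrative and only needed when a
service should be isolated from the system Python:

    # behaves exactly like calling install_keystone directly
    stack_install_service keystone

    # opt-in venv install: build_venv.sh creates the venv, then install_keystone
    # runs with PIP_VIRTUAL_ENV pointing at it
    PROJECT_VENV["keystone"]=$DEST/keystone/.venv
    stack_install_service keystone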
diff --git a/lib/tempest b/lib/tempest
index d3b40aa..f856ce0 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -63,6 +63,12 @@
BUILD_TIMEOUT=${BUILD_TIMEOUT:-196}
+# This must be False on stable branches, as master tempest
+# deps do not match stable branch deps. Set this to True to
+# have tempest installed in devstack by default.
+INSTALL_TEMPEST=${INSTALL_TEMPEST:-"False"}
+
+
BOTO_MATERIALS_PATH="$FILES/images/s3-materials/cirros-${CIRROS_VERSION}"
BOTO_CONF=/etc/boto.cfg
@@ -94,8 +100,12 @@
# configure_tempest() - Set config files, create data dirs, etc
function configure_tempest {
- # install testr since its used to process tempest logs
- pip_install $(get_from_global_requirements testrepository)
+ if [[ "$INSTALL_TEMPEST" == "True" ]]; then
+ setup_develop $TEMPEST_DIR
+ else
+ # install testr since it's used to process tempest logs
+ pip_install $(get_from_global_requirements testrepository)
+ fi
local image_lines
local images
@@ -517,14 +527,23 @@
if use_library_from_git "tempest-lib"; then
git_clone_by_name "tempest-lib"
setup_dev_lib "tempest-lib"
+ # NOTE(mtreinish): For testing tempest-lib from git with tempest we need to
+ # put the git version of tempest-lib in the tempest job's tox venv
+ export PIP_VIRTUAL_ENV=${PROJECT_VENV["tempest"]}
+ setup_dev_lib "tempest-lib"
+ unset PIP_VIRTUAL_ENV
fi
}
# install_tempest() - Collect source and prepare
function install_tempest {
- install_tempest_lib
git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH
pip_install tox
+ pushd $TEMPEST_DIR
+ tox --notest -efull
+ PROJECT_VENV["tempest"]=${TEMPEST_DIR}/.tox/full
+ install_tempest_lib
+ popd
}
# init_tempest() - Initialize ec2 images
diff --git a/lib/trove b/lib/trove
index d437718..080e860 100644
--- a/lib/trove
+++ b/lib/trove
@@ -180,7 +180,7 @@
iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT
iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT control_exchange trove
iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT ignore_users os_admin
- iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT log_dir /tmp/
+ iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT log_dir /var/log/trove/
iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT log_file trove-guestagent.log
setup_trove_logging $TROVE_CONF_DIR/trove-guestagent.conf
}
diff --git a/lib/zaqar b/lib/zaqar
index 4a24415..c9321b9 100644
--- a/lib/zaqar
+++ b/lib/zaqar
@@ -147,13 +147,13 @@
function configure_redis {
if is_ubuntu; then
install_package redis-server
+ pip_install redis
elif is_fedora; then
install_package redis
+ pip_install redis
else
exit_distro_not_supported "redis installation"
fi
-
- install_package python-redis
}
function configure_mongodb {
diff --git a/pkg/elasticsearch.sh b/pkg/elasticsearch.sh
index 15e1b2b..239d6b9 100755
--- a/pkg/elasticsearch.sh
+++ b/pkg/elasticsearch.sh
@@ -44,14 +44,23 @@
function configure_elasticsearch {
# currently a no op
- ::
+ :
+}
+
+function _check_elasticsearch_ready {
+ # poll elasticsearch to see if it's started
+ if ! wait_for_service 30 http://localhost:9200; then
+ die $LINENO "Maximum timeout reached. Could not connect to ElasticSearch"
+ fi
}
function start_elasticsearch {
if is_ubuntu; then
sudo /etc/init.d/elasticsearch start
+ _check_elasticsearch_ready
elif is_fedora; then
sudo /bin/systemctl start elasticsearch.service
+ _check_elasticsearch_ready
else
echo "Unsupported architecture...can not start elasticsearch."
fi
@@ -78,7 +87,11 @@
sudo dpkg -i ${FILES}/elasticsearch-${ELASTICSEARCH_VERSION}.deb
sudo update-rc.d elasticsearch defaults 95 10
elif is_fedora; then
- is_package_installed java-1.7.0-openjdk-headless || install_package java-1.7.0-openjdk-headless
+ if [[ "$os_RELEASE" -ge "21" ]]; then
+ is_package_installed java-1.8.0-openjdk-headless || install_package java-1.8.0-openjdk-headless
+ else
+ is_package_installed java-1.7.0-openjdk-headless || install_package java-1.7.0-openjdk-headless
+ fi
yum_install ${FILES}/elasticsearch-${ELASTICSEARCH_VERSION}.noarch.rpm
sudo /bin/systemctl daemon-reload
sudo /bin/systemctl enable elasticsearch.service
diff --git a/stack.sh b/stack.sh
index 753135b..58b4479 100755
--- a/stack.sh
+++ b/stack.sh
@@ -94,6 +94,9 @@
# Import config functions
source $TOP_DIR/lib/config
+# Import 'public' stack.sh functions
+source $TOP_DIR/lib/stack
+
# Determine what system we are running on. This provides ``os_VENDOR``,
# ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME``
# and ``DISTRO``
@@ -247,8 +250,10 @@
enabled=0
gpgcheck=0
EOF
- # bare yum call due to --enablerepo
- sudo yum --enablerepo=epel-bootstrap -y install epel-release || \
+ # Enable a bootstrap repo. It is removed after finishing
+ # the epel-release installation.
+ sudo yum-config-manager --enable epel-bootstrap
+ yum_install epel-release || \
die $LINENO "Error installing EPEL repo, cannot continue"
# epel rpm has installed it's version
sudo rm -f /etc/yum.repos.d/epel-bootstrap.repo
@@ -671,6 +676,15 @@
source $TOP_DIR/tools/fixup_stuff.sh
+# Virtual Environment
+# -------------------
+
+# Pre-build some problematic wheels
+if [[ -n ${WHEELHOUSE:-} && ! -d ${WHEELHOUSE:-} ]]; then
+ source $TOP_DIR/tools/build_wheels.sh
+fi
+
+
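``tools/build_wheels.sh`` (added later in this change) fills ``$WHEELHOUSE`` once, so subsequent ``pip_install`` calls can reuse the pre-built wheels instead of recompiling the heavyweight dependencies. A manual run from the devstack checkout, assuming the default ``DEST=/opt/stack``:

    $ ./tools/build_wheels.sh
    $ ls /opt/stack/.wheelhouse      # the default $WHEELHOUSE from stackrc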
# Extras Pre-install
# ------------------
@@ -716,24 +730,16 @@
# Install middleware
install_keystonemiddleware
-# install the OpenStack client, needed for most setup commands
-if use_library_from_git "python-openstackclient"; then
- git_clone_by_name "python-openstackclient"
- setup_dev_lib "python-openstackclient"
-else
- pip_install 'python-openstackclient>=1.0.2'
-fi
-
if is_service_enabled keystone; then
if [ "$KEYSTONE_AUTH_HOST" == "$SERVICE_HOST" ]; then
- install_keystone
+ stack_install_service keystone
configure_keystone
fi
fi
if is_service_enabled s-proxy; then
- install_swift
+ stack_install_service swift
configure_swift
# swift3 middleware to provide S3 emulation to Swift
@@ -747,23 +753,23 @@
if is_service_enabled g-api n-api; then
# image catalog service
- install_glance
+ stack_install_service glance
configure_glance
fi
if is_service_enabled cinder; then
- install_cinder
+ stack_install_service cinder
configure_cinder
fi
if is_service_enabled neutron; then
- install_neutron
+ stack_install_service neutron
install_neutron_third_party
fi
if is_service_enabled nova; then
# compute service
- install_nova
+ stack_install_service nova
cleanup_nova
configure_nova
fi
@@ -772,19 +778,19 @@
# django openstack_auth
install_django_openstack_auth
# dashboard
- install_horizon
+ stack_install_service horizon
configure_horizon
fi
if is_service_enabled ceilometer; then
install_ceilometerclient
- install_ceilometer
+ stack_install_service ceilometer
echo_summary "Configuring Ceilometer"
configure_ceilometer
fi
if is_service_enabled heat; then
- install_heat
+ stack_install_service heat
install_heat_other
cleanup_heat
configure_heat
@@ -798,13 +804,22 @@
# don't be naive and add to existing line!
fi
-
# Extras Install
# --------------
# Phase: install
run_phase stack install
+
+# install the OpenStack client, needed for most setup commands
+if use_library_from_git "python-openstackclient"; then
+ git_clone_by_name "python-openstackclient"
+ setup_dev_lib "python-openstackclient"
+else
+ pip_install 'python-openstackclient>=1.0.2'
+fi
+
+
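``use_library_from_git`` keys off the ``LIBS_FROM_GIT`` setting, so whether the client comes from PyPI or from a git checkout is decided in ``local.conf``. An illustrative (not prescriptive) snippet:

    [[local|localrc]]
    # Install python-openstackclient from its git repo instead of PyPI
    LIBS_FROM_GIT=python-openstackclient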
if [[ $TRACK_DEPENDS = True ]]; then
$DEST/.venv/bin/pip freeze > $DEST/requires-post-pip
if ! diff -Nru $DEST/requires-pre-pip $DEST/requires-post-pip > $DEST/requires.diff; then
diff --git a/stackrc b/stackrc
index 386c5d5..103be6d 100644
--- a/stackrc
+++ b/stackrc
@@ -43,14 +43,6 @@
# enable_service q-meta
# # Optional, to enable tempest configuration as part of devstack
# enable_service tempest
-function isset {
- local nounset=$(set +o | grep nounset)
- set +o nounset
- [[ -n "${!1+x}" ]]
- result=$?
- $nounset
- return $result
-}
# this allows us to pass ENABLED_SERVICES
if ! isset ENABLED_SERVICES ; then
@@ -112,9 +104,14 @@
source $RC_DIR/.localrc.auto
fi
+# Configure wheel cache location
+export WHEELHOUSE=${WHEELHOUSE:-$DEST/.wheelhouse}
+export PIP_WHEEL_DIR=${PIP_WHEEL_DIR:-$WHEELHOUSE}
+export PIP_FIND_LINKS=${PIP_FIND_LINKS:-file://$WHEELHOUSE}
+
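pip honours these as environment-variable forms of its command-line options: ``PIP_WHEEL_DIR`` corresponds to ``pip wheel --wheel-dir`` and ``PIP_FIND_LINKS`` to ``--find-links``, so wheels written into the wheelhouse are automatically offered to later installs. The exports above are roughly equivalent to always running (``some-package`` is a placeholder):

    $ pip wheel --wheel-dir=$WHEELHOUSE some-package
    $ pip install --find-links=file://$WHEELHOUSE some-package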
# This can be used to turn database query logging on and off
# (currently only implemented for MySQL backend)
-DATABASE_QUERY_LOGGING=$(trueorfalse True DATABASE_QUERY_LOGGING)
+DATABASE_QUERY_LOGGING=$(trueorfalse False DATABASE_QUERY_LOGGING)
# Set a timeout for git operations. If git is still running when the
# timeout expires, the command will be retried up to 3 times. This is
@@ -201,6 +198,9 @@
NEUTRON_FWAAS_BRANCH=${NEUTRON_FWAAS_BRANCH:-master}
# neutron lbaas service
+# The neutron-lbaas-specific entries below are deprecated in favor of the neutron-lbaas
+# devstack plugin and should be removed in a future release, possibly as soon as Liberty.
+
NEUTRON_LBAAS_REPO=${NEUTRON_LBAAS_REPO:-${GIT_BASE}/openstack/neutron-lbaas.git}
NEUTRON_LBAAS_BRANCH=${NEUTRON_LBAAS_BRANCH:-master}
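The plugin referred to above is enabled from ``local.conf`` with devstack's ``enable_plugin`` mechanism rather than through these ``NEUTRON_LBAAS_*`` variables; for example (repository URL shown for illustration, branch argument optional):

    [[local|localrc]]
    enable_plugin neutron-lbaas ${GIT_BASE}/openstack/neutron-lbaas.git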
@@ -309,6 +309,10 @@
GITREPO["cliff"]=${CLIFF_REPO:-${GIT_BASE}/openstack/cliff.git}
GITBRANCH["cliff"]=${CLIFF_BRANCH:-master}
+# debtcollector deprecation framework/helpers
+GITREPO["debtcollector"]=${DEBTCOLLECTOR_REPO:-${GIT_BASE}/openstack/debtcollector.git}
+GITBRANCH["debtcollector"]=${DEBTCOLLECTOR_BRANCH:-master}
+
# oslo.concurrency
GITREPO["oslo.concurrency"]=${OSLOCON_REPO:-${GIT_BASE}/openstack/oslo.concurrency.git}
GITBRANCH["oslo.concurrency"]=${OSLOCON_BRANCH:-master}
@@ -341,6 +345,10 @@
GITREPO["oslo.middleware"]=${OSLOMID_REPO:-${GIT_BASE}/openstack/oslo.middleware.git}
GITBRANCH["oslo.middleware"]=${OSLOMID_BRANCH:-master}
+# oslo.policy
+GITREPO["oslo.policy"]=${OSLOPOLICY_REPO:-${GIT_BASE}/openstack/oslo.policy.git}
+GITBRANCH["oslo.policy"]=${OSLOPOLICY_BRANCH:-master}
+
# oslo.rootwrap
GITREPO["oslo.rootwrap"]=${OSLORWRAP_REPO:-${GIT_BASE}/openstack/oslo.rootwrap.git}
GITBRANCH["oslo.rootwrap"]=${OSLORWRAP_BRANCH:-master}
@@ -454,10 +462,6 @@
NOVNC_REPO=${NOVNC_REPO:-https://github.com/kanaka/noVNC.git}
NOVNC_BRANCH=${NOVNC_BRANCH:-master}
-# ryu service
-RYU_REPO=${RYU_REPO:-https://github.com/osrg/ryu.git}
-RYU_BRANCH=${RYU_BRANCH:-master}
-
# a websockets/html5 or flash powered SPICE console for vm instances
SPICE_REPO=${SPICE_REPO:-http://anongit.freedesktop.org/git/spice/spice-html5.git}
SPICE_BRANCH=${SPICE_BRANCH:-master}
diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh
index cce0203..472b0ea 100755
--- a/tests/test_libs_from_pypi.sh
+++ b/tests/test_libs_from_pypi.sh
@@ -29,7 +29,17 @@
fi
done
-ALL_LIBS="python-novaclient oslo.config pbr oslo.context python-troveclient python-keystoneclient taskflow oslo.middleware pycadf python-glanceclient python-ironicclient tempest-lib oslo.messaging oslo.log cliff python-heatclient stevedore python-cinderclient glance_store oslo.concurrency oslo.db oslo.vmware keystonemiddleware oslo.serialization python-saharaclient django_openstack_auth python-openstackclient oslo.rootwrap oslo.i18n python-ceilometerclient oslo.utils python-swiftclient python-neutronclient tooz ceilometermiddleware"
+ALL_LIBS="python-novaclient oslo.config pbr oslo.context python-troveclient"
+ALL_LIBS+=" python-keystoneclient taskflow oslo.middleware pycadf"
+ALL_LIBS+=" python-glanceclient python-ironicclient tempest-lib"
+ALL_LIBS+=" oslo.messaging oslo.log cliff python-heatclient stevedore"
+ALL_LIBS+=" python-cinderclient glance_store oslo.concurrency oslo.db"
+ALL_LIBS+=" oslo.vmware keystonemiddleware oslo.serialization"
+ALL_LIBS+=" python-saharaclient django_openstack_auth"
+ALL_LIBS+=" python-openstackclient oslo.rootwrap oslo.i18n"
+ALL_LIBS+=" python-ceilometerclient oslo.utils python-swiftclient"
+ALL_LIBS+=" python-neutronclient tooz ceilometermiddleware oslo.policy"
+ALL_LIBS+=" debtcollector"
# Generate the above list with
# echo ${!GITREPO[@]}
diff --git a/tools/build_venv.sh b/tools/build_venv.sh
new file mode 100755
index 0000000..11d1d35
--- /dev/null
+++ b/tools/build_venv.sh
@@ -0,0 +1,46 @@
+#!/usr/bin/env bash
+#
+# **tools/build_venv.sh** - Build a Python Virtual Environment
+#
+# build_venv.sh venv-path [package [...]]
+#
+# Assumes:
+# - a useful pip is installed
+# - virtualenv will be installed by pip
+# - installs the common prerequisite packages that require compilation,
+#   so the resulting venv can be copied quickly and used as a baseline
+
+
+VENV_DEST=${1:-.venv}
+shift
+
+MORE_PACKAGES="$@"
+
+# If TOP_DIR is set we're being sourced rather than running stand-alone
+# or in a sub-shell
+if [[ -z "$TOP_DIR" ]]; then
+
+ set -o errexit
+ set -o nounset
+
+ # Keep track of the devstack directory
+ TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
+ FILES=$TOP_DIR/files
+
+ # Import common functions
+ source $TOP_DIR/functions
+
+ GetDistro
+
+ source $TOP_DIR/stackrc
+
+fi
+
+# Build new venv
+virtualenv $VENV_DEST
+
+# Install modern pip
+PIP_VIRTUAL_ENV=$VENV_DEST pip_install -U pip
+
+# Install additional packages
+PIP_VIRTUAL_ENV=$VENV_DEST pip_install ${MORE_PACKAGES}
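Run stand-alone, the script needs only a target path; any further arguments are handed to ``pip_install``. A usage sketch with purely illustrative paths and package names:

    $ ./tools/build_venv.sh /opt/stack/.venv-base pbr wheel
    $ /opt/stack/.venv-base/bin/pip list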
diff --git a/tools/build_wheels.sh b/tools/build_wheels.sh
new file mode 100755
index 0000000..f1740df
--- /dev/null
+++ b/tools/build_wheels.sh
@@ -0,0 +1,73 @@
+#!/usr/bin/env bash
+#
+# **tools/build_wheels.sh** - Build a cache of Python wheels
+#
+# build_wheels.sh [package [...]]
+#
+# System package prerequisites listed in files/*/devlibs will be installed
+#
+# Builds wheels for all virtual env requirements listed in
+# ``venv-requirements.txt`` plus any supplied on the command line.
+#
+# Assumes ``tools/install_pip.sh`` has been run and a suitable pip/setuptools is available.
+
+# If TOP_DIR is set we're being sourced rather than running stand-alone
+# or in a sub-shell
+if [[ -z "$TOP_DIR" ]]; then
+
+ set -o errexit
+ set -o nounset
+
+ # Keep track of the devstack directory
+ TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
+ FILES=$TOP_DIR/files
+
+ # Import common functions
+ source $TOP_DIR/functions
+
+ GetDistro
+
+ source $TOP_DIR/stackrc
+
+ trap err_trap ERR
+
+fi
+
+# Get additional packages to build
+MORE_PACKAGES="$@"
+
+# Exit on any errors so that errors don't compound
+function err_trap {
+ local r=$?
+ set +o xtrace
+
+ rm -rf $TMP_VENV_PATH
+
+ exit $r
+}
+
+# Get system prereqs
+install_package $(get_packages devlibs)
+
+# Get a modern ``virtualenv``
+pip_install virtualenv
+
+# Prepare the workspace
+TMP_VENV_PATH=$(mktemp -d tmp-venv-XXXX)
+virtualenv $TMP_VENV_PATH
+
+# Install modern pip and wheel
+PIP_VIRTUAL_ENV=$TMP_VENV_PATH pip_install -U pip wheel
+
+# VENV_PACKAGES is a list of packages we want to pre-install
+VENV_PACKAGE_FILE=$FILES/venv-requirements.txt
+if [[ -r $VENV_PACKAGE_FILE ]]; then
+ VENV_PACKAGES=$(grep -v '^#' $VENV_PACKAGE_FILE)
+fi
+
+for pkg in ${VENV_PACKAGES} ${MORE_PACKAGES}; do
+ $TMP_VENV_PATH/bin/pip wheel $pkg
+done
+
+# Clean up wheel workspace
+rm -rf $TMP_VENV_PATH
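``files/venv-requirements.txt`` is expected to hold one requirement per line, with ``#`` comments filtered out by the ``grep`` above; its actual contents are not part of this change. Purely for illustration:

    # files/venv-requirements.txt (example entries only)
    # wheels that are slow to build on the fly
    lxml
    psycopg2

and a manual run that also builds one extra wheel named on the command line:

    $ ./tools/build_wheels.sh netifaces
    $ ls $WHEELHOUSE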
diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh
index cc5275f..f8edd16 100755
--- a/tools/fixup_stuff.sh
+++ b/tools/fixup_stuff.sh
@@ -123,3 +123,9 @@
fi
fi
+
+# The version of pip (1.5.4) bundled with python-virtualenv (1.11.4) has
+# connection issues when running behind a proxy, so uninstall the distro
+# python-virtualenv package and install the latest virtualenv with pip.
+uninstall_package python-virtualenv
+pip_install -U virtualenv
diff --git a/tools/image_list.sh b/tools/image_list.sh
index 0bb49ab..88c1d09 100755
--- a/tools/image_list.sh
+++ b/tools/image_list.sh
@@ -9,6 +9,8 @@
# dummy in the end position to trigger the fall through case.
DRIVERS="openvz ironic libvirt vsphere xenserver dummy"
+CIRROS_ARCHS="x86_64 i386"
+
# Extra variables to trigger getting additional images.
export ENABLED_SERVICES="h-api,tr-api"
HEAT_FETCHED_TEST_IMAGE="Fedora-i386-20-20131211.1-sda"
@@ -17,12 +19,15 @@
# Loop over all the virt drivers and collect all the possible images
ALL_IMAGES=""
for driver in $DRIVERS; do
- VIRT_DRIVER=$driver
- URLS=$(source $TOP_DIR/stackrc && echo $IMAGE_URLS)
- if [[ ! -z "$ALL_IMAGES" ]]; then
- ALL_IMAGES+=,
- fi
- ALL_IMAGES+=$URLS
+ for arch in $CIRROS_ARCHS; do
+ CIRROS_ARCH=$arch
+ VIRT_DRIVER=$driver
+ URLS=$(source $TOP_DIR/stackrc && echo $IMAGE_URLS)
+ if [[ ! -z "$ALL_IMAGES" ]]; then
+ ALL_IMAGES+=,
+ fi
+ ALL_IMAGES+=$URLS
+ done
done
# Make a nice list