Merge "Change config of keystone_authtoken in lib/ironic"
diff --git a/README.md b/README.md
index 40060a7..206ffe0 100644
--- a/README.md
+++ b/README.md
@@ -25,9 +25,9 @@
The DevStack master branch generally points to trunk versions of OpenStack
components. For older, stable versions, look for branches named
stable/[release] in the DevStack repo. For example, you can do the
-following to create a grizzly OpenStack cloud:
+following to create a juno OpenStack cloud:
- git checkout stable/grizzly
+ git checkout stable/juno
./stack.sh
You can also pick specific OpenStack project releases by setting the appropriate
diff --git a/clean.sh b/clean.sh
index 50d414c..ad4525b 100755
--- a/clean.sh
+++ b/clean.sh
@@ -76,6 +76,8 @@
# ==========
# Phase: clean
+run_phase clean
+
if [[ -d $TOP_DIR/extras.d ]]; then
for i in $TOP_DIR/extras.d/*.sh; do
[[ -r $i ]] && source $i clean
@@ -119,6 +121,10 @@
sudo rm -rf $SCREEN_LOGDIR
fi
+# Clean up venvs
+DIRS_TO_CLEAN="$WHEELHOUSE ${PROJECT_VENV[@]}"
+rm -rf $DIRS_TO_CLEAN
+
# Clean up files
FILES_TO_CLEAN=".localrc.auto docs/files docs/html shocco/ stack-screenrc test*.conf* test.ini*"
diff --git a/doc/source/faq.rst b/doc/source/faq.rst
index a449f49..d3b491f 100644
--- a/doc/source/faq.rst
+++ b/doc/source/faq.rst
@@ -80,7 +80,7 @@
Q: But, but, can't I test on OS/X?
A: Yes, even you, core developer who complained about this, need to
install bash 4 via homebrew to keep running tests on OS/X. Get a Real
- Operating System. (For most of you who don't know, I am refering to
+ Operating System. (For most of you who don't know, I am referring to
myself.)
Operation and Configuration
@@ -118,13 +118,13 @@
::
[[local|localrc]]
- GLANCE_BRANCH=stable/grizzly
- HORIZON_BRANCH=stable/grizzly
- KEYSTONE_BRANCH=stable/grizzly
- NOVA_BRANCH=stable/grizzly
- GLANCE_BRANCH=stable/grizzly
- NEUTRON_BRANCH=stable/grizzly
- SWIFT_BRANCH=1.10.0
+ GLANCE_BRANCH=stable/juno
+ HORIZON_BRANCH=stable/juno
+ KEYSTONE_BRANCH=stable/juno
+ NOVA_BRANCH=stable/juno
+ GLANCE_BRANCH=stable/juno
+ NEUTRON_BRANCH=stable/juno
+ SWIFT_BRANCH=2.2.1
Q: Why not use [STRIKEOUT:``tools/pip-requires``]\ ``requirements.txt`` to grab project dependencies?
[STRIKEOUT:The majority of deployments will use packages to install
diff --git a/doc/source/guides/nova.rst b/doc/source/guides/nova.rst
new file mode 100644
index 0000000..0d98f4a
--- /dev/null
+++ b/doc/source/guides/nova.rst
@@ -0,0 +1,72 @@
+=================
+Nova and devstack
+=================
+
+This is a rough guide to various configuration parameters for nova
+running with devstack.
+
+
+nova-serialproxy
+================
+
+In Juno, nova implemented a `spec
+<http://specs.openstack.org/openstack/nova-specs/specs/juno/implemented/serial-ports.html>`_
+to allow read/write access to the serial console of an instance via
+`nova-serialproxy
+<http://docs.openstack.org/developer/nova/man/nova-serialproxy.html>`_.
+
+The service can be enabled by adding ``n-sproxy`` to
+``ENABLED_SERVICES``. Further options can be enabled via
+``local.conf``, e.g.
+
+::
+
+ [[post-config|$NOVA_CONF]]
+ [serial_console]
+ #
+ # Options defined in nova.cmd.serialproxy
+ #
+
+ # Host on which to listen for incoming requests (string value)
+ #serialproxy_host=0.0.0.0
+
+ # Port on which to listen for incoming requests (integer
+ # value)
+ #serialproxy_port=6083
+
+
+ #
+ # Options defined in nova.console.serial
+ #
+
+ # Enable serial console related features (boolean value)
+ #enabled=false
+ # Do not set this manually. Instead enable the service as
+ # outlined above.
+
+ # Range of TCP ports to use for serial ports on compute hosts
+ # (string value)
+ #port_range=10000:20000
+
+ # Location of serial console proxy. (string value)
+ #base_url=ws://127.0.0.1:6083/
+
+ # IP address on which instance serial console should listen
+ # (string value)
+ #listen=127.0.0.1
+
+ # The address to which proxy clients (like nova-serialproxy)
+ # should connect (string value)
+ #proxyclient_address=127.0.0.1
+
+
+Enabling the service is enough for a functional single-machine devstack.
+
+These config options are defined in `nova.console.serial
+<https://github.com/openstack/nova/blob/master/nova/console/serial.py#L33-L52>`_
+and `nova.cmd.serialproxy
+<https://github.com/openstack/nova/blob/master/nova/cmd/serialproxy.py#L26-L33>`_.
+
+For more information on OpenStack configuration see the `OpenStack
+Configuration Reference
+<http://docs.openstack.org/trunk/config-reference/content/list-of-compute-config-options.html>`_.
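
As a concrete illustration of the guide added above, a minimal ``local.conf``
fragment that turns the proxy on might look like the following. This is a
sketch only: it assumes a stock single-node devstack and relies on the default
ports documented above.

::

    [[local|localrc]]
    # Add the serial console proxy to the service list
    enable_service n-sproxy

    [[post-config|$NOVA_CONF]]
    [serial_console]
    # Optional: override the advertised proxy endpoint; the defaults shown
    # in the guide above are sufficient for a single-machine setup
    base_url = ws://127.0.0.1:6083/
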
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 855a2d6..10f4355 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -67,6 +67,7 @@
guides/multinode-lab
guides/neutron
guides/devstack-with-nested-kvm
+ guides/nova
All-In-One Single VM
--------------------
@@ -102,6 +103,11 @@
<guides/devstack-with-nested-kvm>`. With this setup, Nova instances
will be more performant than with plain QEMU emulation.
+Nova and devstack
+-----------------
+
+A guide to working with nova features: :doc:`Nova and devstack <guides/nova>`.
+
DevStack Documentation
======================
diff --git a/files/apache-horizon.template b/files/apache-horizon.template
index bca1251..6883898 100644
--- a/files/apache-horizon.template
+++ b/files/apache-horizon.template
@@ -28,7 +28,9 @@
Require all granted
</IfVersion>
</Directory>
-
+ <IfVersion >= 2.4>
+ ErrorLogFormat "%{cu}t %M"
+ </IfVersion>
ErrorLog /var/log/%APACHE_NAME%/horizon_error.log
LogLevel warn
CustomLog /var/log/%APACHE_NAME%/horizon_access.log combined
diff --git a/files/debs/devlibs b/files/debs/devlibs
new file mode 100644
index 0000000..0446ceb
--- /dev/null
+++ b/files/debs/devlibs
@@ -0,0 +1,7 @@
+libffi-dev # pyOpenSSL
+libmysqlclient-dev # MySQL-python
+libpq-dev # psycopg2
+libssl-dev # pyOpenSSL
+libxml2-dev # lxml
+libxslt1-dev # lxml
+python-dev # pyOpenSSL
diff --git a/files/debs/general b/files/debs/general
index 4050191..84d4302 100644
--- a/files/debs/general
+++ b/files/debs/general
@@ -1,26 +1,21 @@
bridge-utils
-pylint
screen
unzip
wget
psmisc
gcc
+g++
git
graphviz # testonly - docs
lsof # useful when debugging
openssh-server
openssl
-python-virtualenv
-python-unittest2
iputils-ping
wget
curl
tcpdump
-euca2ools # only for testing client
tar
-python-cmd2 # dist:precise
python-dev
-python-mock # testonly
python2.7
bc
libyaml-dev
diff --git a/files/debs/glance b/files/debs/glance
index 8db8145..9fda6a6 100644
--- a/files/debs/glance
+++ b/files/debs/glance
@@ -3,11 +3,4 @@
libssl-dev # testonly
libxml2-dev
libxslt1-dev # testonly
-python-eventlet
-python-routes
-python-greenlet
-python-sqlalchemy
-python-pastedeploy
-python-xattr
-python-iso8601
zlib1g-dev # testonly
diff --git a/files/debs/horizon b/files/debs/horizon
index f9b7d59..1f45b54 100644
--- a/files/debs/horizon
+++ b/files/debs/horizon
@@ -1,19 +1,3 @@
apache2 # NOPRIME
libapache2-mod-wsgi # NOPRIME
-python-beautifulsoup
-python-dateutil
-python-paste
-python-pastedeploy
-python-anyjson
-python-routes
-python-xattr
-python-sqlalchemy
-python-webob
-pylint
-python-eventlet
-python-nose
-python-mox
-python-coverage
-python-cherrypy3 # why?
-python-migrate
libpcre3-dev # pyScss
diff --git a/files/debs/keystone b/files/debs/keystone
index d316a42..70a5649 100644
--- a/files/debs/keystone
+++ b/files/debs/keystone
@@ -1,15 +1,7 @@
python-lxml
-python-pastescript
-python-pastedeploy
-python-paste
sqlite3
-python-pysqlite2
-python-sqlalchemy
python-mysqldb
python-mysql.connector
-python-webob
-python-greenlet
-python-routes
libldap2-dev
libsasl2-dev
libkrb5-dev
diff --git a/files/debs/n-api b/files/debs/n-api
index b4372d9..0928cd5 100644
--- a/files/debs/n-api
+++ b/files/debs/n-api
@@ -1,3 +1 @@
-python-dateutil
-msgpack-python
fping
diff --git a/files/debs/n-cpu b/files/debs/n-cpu
index a82304d..534b1c1 100644
--- a/files/debs/n-cpu
+++ b/files/debs/n-cpu
@@ -1,3 +1,4 @@
+qemu-utils
# Stuff for diablo volumes
lvm2
open-iscsi
diff --git a/files/debs/neutron b/files/debs/neutron
index 3f4b6d2..aa3d709 100644
--- a/files/debs/neutron
+++ b/files/debs/neutron
@@ -7,17 +7,8 @@
mysql-server #NOPRIME
sudo
postgresql-server-dev-all # testonly
-python-iso8601
-python-paste
-python-routes
-python-suds
-python-pastedeploy
-python-greenlet
-python-eventlet
-python-sqlalchemy
python-mysqldb
python-mysql.connector
-python-pyudev
python-qpid # NOPRIME
dnsmasq-base
dnsmasq-utils # for dhcp_release only available in dist:precise
diff --git a/files/debs/nova b/files/debs/nova
index 66f29c4..0c31385 100644
--- a/files/debs/nova
+++ b/files/debs/nova
@@ -8,7 +8,6 @@
mysql-server # NOPRIME
python-mysqldb
python-mysql.connector
-python-xattr # needed for glance which is needed for nova --- this shouldn't be here
python-lxml # needed for glance which is needed for nova --- this shouldn't be here
gawk
iptables
@@ -27,22 +26,8 @@
rabbitmq-server # NOPRIME
qpidd # NOPRIME
socat # used by ajaxterm
-python-mox
-python-paste
-python-migrate
-python-greenlet
python-libvirt # NOPRIME
python-libxml2
-python-routes
python-numpy # used by websockify for spice console
-python-pastedeploy
-python-eventlet
-python-cheetah
-python-tempita
-python-sqlalchemy
-python-suds
-python-lockfile
python-m2crypto
-python-feedparser
-python-iso8601
python-qpid # NOPRIME
diff --git a/files/debs/postgresql b/files/debs/postgresql
deleted file mode 100644
index bf19d39..0000000
--- a/files/debs/postgresql
+++ /dev/null
@@ -1 +0,0 @@
-python-psycopg2
diff --git a/files/debs/ryu b/files/debs/ryu
deleted file mode 100644
index 354c1b7..0000000
--- a/files/debs/ryu
+++ /dev/null
@@ -1 +0,0 @@
-python-eventlet
diff --git a/files/debs/swift b/files/debs/swift
index fd51699..b32b439 100644
--- a/files/debs/swift
+++ b/files/debs/swift
@@ -1,14 +1,7 @@
curl
memcached
-python-configobj
-python-coverage
-python-eventlet
-python-greenlet
-python-netifaces
+# NOTE: python-nose is only here because of the swift functional job; we should
+# probably figure out a more consistent way of installing it from test-requirements.txt instead
python-nose
-python-pastedeploy
-python-simplejson
-python-webob
-python-xattr
sqlite3
xfsprogs
diff --git a/files/debs/zaqar-server b/files/debs/zaqar-server
index 32b1017..6c2a4d1 100644
--- a/files/debs/zaqar-server
+++ b/files/debs/zaqar-server
@@ -1,5 +1,4 @@
python-pymongo
mongodb-server
pkg-config
-redis-server # NOPRIME
-python-redis # NOPRIME
\ No newline at end of file
+redis-server # NOPRIME
\ No newline at end of file
diff --git a/files/rpms-suse/devlibs b/files/rpms-suse/devlibs
new file mode 100644
index 0000000..c923825
--- /dev/null
+++ b/files/rpms-suse/devlibs
@@ -0,0 +1,6 @@
+libffi-devel # pyOpenSSL
+libopenssl-devel # pyOpenSSL
+libxml2-devel # lxml
+libxslt-devel # lxml
+postgresql-devel # psycopg2
+python-devel # pyOpenSSL
diff --git a/files/rpms-suse/general b/files/rpms-suse/general
index 63ef705..7f4bbfb 100644
--- a/files/rpms-suse/general
+++ b/files/rpms-suse/general
@@ -4,6 +4,7 @@
curl
euca2ools
gcc
+gcc-c++
git-core
graphviz # testonly - docs
iputils
diff --git a/files/rpms-suse/postgresql b/files/rpms-suse/postgresql
deleted file mode 100644
index bf19d39..0000000
--- a/files/rpms-suse/postgresql
+++ /dev/null
@@ -1 +0,0 @@
-python-psycopg2
diff --git a/files/rpms-suse/ryu b/files/rpms-suse/ryu
deleted file mode 100644
index 354c1b7..0000000
--- a/files/rpms-suse/ryu
+++ /dev/null
@@ -1 +0,0 @@
-python-eventlet
diff --git a/files/rpms/devlibs b/files/rpms/devlibs
new file mode 100644
index 0000000..834a4b6
--- /dev/null
+++ b/files/rpms/devlibs
@@ -0,0 +1,9 @@
+libffi-devel # pyOpenSSL
+libxml2-devel # lxml
+libxslt-devel # lxml
+mariadb-devel # MySQL-python f20,f21,rhel7
+mysql-devel # MySQL-python rhel6
+openssl-devel # pyOpenSSL
+postgresql-devel # psycopg2
+python-devel # pyOpenSSL
+redhat-rpm-config # MySQL-python rhbz-1195207 f21
diff --git a/files/rpms/general b/files/rpms/general
index 6f22391..56a9331 100644
--- a/files/rpms/general
+++ b/files/rpms/general
@@ -3,6 +3,7 @@
dbus
euca2ools # only for testing client
gcc
+gcc-c++
git-core
graphviz # testonly - docs
openssh-server
@@ -26,4 +27,5 @@
libyaml-devel
gettext # used for compiling message catalogs
net-tools
-java-1.7.0-openjdk-headless # NOPRIME
+java-1.7.0-openjdk-headless # NOPRIME rhel7,f20
+java-1.8.0-openjdk-headless # NOPRIME f21,f22
diff --git a/files/rpms/postgresql b/files/rpms/postgresql
deleted file mode 100644
index bf19d39..0000000
--- a/files/rpms/postgresql
+++ /dev/null
@@ -1 +0,0 @@
-python-psycopg2
diff --git a/files/rpms/ryu b/files/rpms/ryu
deleted file mode 100644
index 354c1b7..0000000
--- a/files/rpms/ryu
+++ /dev/null
@@ -1 +0,0 @@
-python-eventlet
diff --git a/files/venv-requirements.txt b/files/venv-requirements.txt
new file mode 100644
index 0000000..3c50061
--- /dev/null
+++ b/files/venv-requirements.txt
@@ -0,0 +1,10 @@
+lxml
+MySQL-python
+netifaces
+numpy
+posix-ipc
+psycopg2
+pycrypto
+pyOpenSSL
+PyYAML
+xattr
diff --git a/functions-common b/functions-common
index 6beb670..df69cba 100644
--- a/functions-common
+++ b/functions-common
@@ -527,13 +527,6 @@
[[ "$(uname -m)" == "$1" ]]
}
-# Quick check for a rackspace host; n.b. rackspace provided images
-# have these Xen tools installed but a custom image may not.
-function is_rackspace {
- [ -f /usr/bin/xenstore-ls ] && \
- sudo /usr/bin/xenstore-ls vm-data | grep -q "Rackspace"
-}
-
# Determine if current distribution is a Fedora-based distribution
# (Fedora, RHEL, CentOS, etc).
# is_fedora
@@ -1817,7 +1810,6 @@
[[ ${service} == "trove" && ${ENABLED_SERVICES} =~ "tr-" ]] && enabled=0
[[ ${service} == "swift" && ${ENABLED_SERVICES} =~ "s-" ]] && enabled=0
[[ ${service} == s-* && ${ENABLED_SERVICES} =~ "swift" ]] && enabled=0
- [[ ${service} == key-* && ${ENABLED_SERVICES} =~ "key" ]] && enabled=0
done
$xtrace
return $enabled
diff --git a/inc/python b/inc/python
index 0348cb3..dfc4d63 100644
--- a/inc/python
+++ b/inc/python
@@ -15,6 +15,13 @@
set +o xtrace
+# Global Config Variables
+
+# PROJECT_VENV contains the name of the virtual environment for each
+# project. A null value installs to the system Python directories.
+declare -A PROJECT_VENV
+
+
# Python Functions
# ================
@@ -31,6 +38,13 @@
# Get the path to the directory where python executables are installed.
# get_python_exec_prefix
function get_python_exec_prefix {
+ local xtrace=$(set +o | grep xtrace)
+ set +o xtrace
+ if [[ -z "$os_PACKAGE" ]]; then
+ GetOSVersion
+ fi
+ $xtrace
+
if is_fedora || is_suse; then
echo "/usr/bin"
else
@@ -39,8 +53,8 @@
}
# Wrapper for ``pip install`` to set cache and proxy environment variables
-# Uses globals ``INSTALL_TESTONLY_PACKAGES``, ``OFFLINE``, ``TRACK_DEPENDS``,
-# ``*_proxy``
+# Uses globals ``INSTALL_TESTONLY_PACKAGES``, ``OFFLINE``, ``PIP_VIRTUAL_ENV``,
+# ``TRACK_DEPENDS``, ``*_proxy``
# pip_install package [package ...]
function pip_install {
local xtrace=$(set +o | grep xtrace)
@@ -62,8 +76,13 @@
local cmd_pip=$DEST/.venv/bin/pip
local sudo_pip="env"
else
- local cmd_pip=$(get_pip_command)
- local sudo_pip="sudo -H"
+ if [[ -n ${PIP_VIRTUAL_ENV:=} && -d ${PIP_VIRTUAL_ENV} ]]; then
+ local cmd_pip=$PIP_VIRTUAL_ENV/bin/pip
+ local sudo_pip="env"
+ else
+ local cmd_pip=$(get_pip_command)
+ local sudo_pip="sudo -H"
+ fi
fi
local pip_version=$(python -c "import pip; \
@@ -95,6 +114,17 @@
fi
}
+# Get the version of a package from the global requirements file
+# get_from_global_requirements <package>
+function get_from_global_requirements {
+ local package=$1
+ local required_pkg=$(grep -h ${package} $REQUIREMENTS_DIR/global-requirements.txt | cut -d\# -f1)
+ if [[ $required_pkg == "" ]]; then
+ die $LINENO "Can't find package $package in requirements"
+ fi
+ echo $required_pkg
+}
+
# should we use this library from their git repo, or should we let it
# get pulled in via pip dependencies.
function use_library_from_git {
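
The ``get_from_global_requirements`` helper and the new ``PIP_VIRTUAL_ENV``
handling added above are exercised elsewhere in this change (for example in
``lib/horizon`` and ``lib/tempest``). A minimal usage sketch, assuming
``stackrc`` and ``inc/python`` are sourced as they are in ``stack.sh``; the
venv path shown is purely illustrative:

::

    # Install Babel at the version pinned in global-requirements.txt
    pip_install $(get_from_global_requirements Babel)

    # Install into an existing virtualenv instead of the system Python by
    # pointing PIP_VIRTUAL_ENV at it (the directory must already exist)
    PIP_VIRTUAL_ENV=/opt/stack/venvs/example \
        pip_install $(get_from_global_requirements Babel)
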
diff --git a/lib/ceilometer b/lib/ceilometer
index 698e8b0..f509788 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -57,6 +57,7 @@
# Set up default directories
GITDIR["python-ceilometerclient"]=$DEST/python-ceilometerclient
+GITDIR["ceilometermiddleware"]=$DEST/ceilometermiddleware
CEILOMETER_DIR=$DEST/ceilometer
CEILOMETER_CONF_DIR=/etc/ceilometer
@@ -108,7 +109,7 @@
# Ceilometer
if [[ "$ENABLED_SERVICES" =~ "ceilometer-api" ]]; then
- create_service_user "ceilometer"
+ create_service_user "ceilometer" "admin"
if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
local ceilometer_service=$(get_or_create_service "ceilometer" \
@@ -303,6 +304,14 @@
fi
}
+# install_ceilometermiddleware() - Collect source and prepare
+function install_ceilometermiddleware {
+ if use_library_from_git "ceilometermiddleware"; then
+ git_clone_by_name "ceilometermiddleware"
+ setup_dev_lib "ceilometermiddleware"
+ fi
+}
+
# start_ceilometer() - Start running processes, including screen
function start_ceilometer {
run_process ceilometer-acentral "ceilometer-agent-central --config-file $CEILOMETER_CONF"
diff --git a/lib/cinder_backends/lvm b/lib/cinder_backends/lvm
index d83c31a..52fc6fb 100644
--- a/lib/cinder_backends/lvm
+++ b/lib/cinder_backends/lvm
@@ -78,9 +78,9 @@
for pv_info in $(sudo pvs --noheadings -o name,vg_name --separator ';'); do
echo_summary "Evaluate PV info for Cinder lvm.conf: $pv_info"
- IFS=';' read pv vg <<< $pv_info
+ IFS=';' read pv vg <<< "$pv_info"
for line in ${conf_entries}; do
- IFS='=' read label group <<< $line
+ IFS='=' read label group <<< "$line"
group=$(echo $group|sed "s/^ *//g")
if [[ "$vg" == "$group" ]]; then
new="\"a$pv/\", "
diff --git a/lib/databases/mysql b/lib/databases/mysql
index c8ceec2..70073c4 100644
--- a/lib/databases/mysql
+++ b/lib/databases/mysql
@@ -151,6 +151,9 @@
else
exit_distro_not_supported "mysql installation"
fi
+
+ # Install Python client module
+ pip_install MySQL-python
}
function database_connection_url_mysql {
diff --git a/lib/databases/postgresql b/lib/databases/postgresql
index 317e0eb..e891a08 100644
--- a/lib/databases/postgresql
+++ b/lib/databases/postgresql
@@ -100,6 +100,9 @@
else
exit_distro_not_supported "postgresql installation"
fi
+
+ # Install Python client module
+ pip_install psycopg2
}
function database_connection_url_postgresql {
diff --git a/lib/horizon b/lib/horizon
index 122d516..a8e83f9 100644
--- a/lib/horizon
+++ b/lib/horizon
@@ -182,8 +182,7 @@
# NOTE: It can be moved to common functions, but it is only used by compilation
# of django_openstack_auth catalogs at the moment.
function _prepare_message_catalog_compilation {
- local babel_package=$(grep ^Babel $REQUIREMENTS_DIR/global-requirements.txt)
- pip_install "$babel_package"
+ pip_install $(get_from_global_requirements Babel)
}
diff --git a/lib/keystone b/lib/keystone
index 2da2d1b..102d188 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -111,8 +111,17 @@
KEYSTONE_AUTH_URI_V3=$KEYSTONE_AUTH_URI/v3
KEYSTONE_SERVICE_URI_V3=$KEYSTONE_SERVICE_URI/v3
+
# Functions
# ---------
+
+# Test if Keystone is enabled
+# is_keystone_enabled
+function is_keystone_enabled {
+ [[ ,${ENABLED_SERVICES}, =~ ,"key", ]] && return 0
+ return 1
+}
+
# cleanup_keystone() - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up
function cleanup_keystone {
@@ -576,9 +585,6 @@
stop_process key
}
-function is_keystone_enabled {
- return is_service_enabled key
-}
# Restore xtrace
$XTRACE
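
The rewritten ``is_keystone_enabled`` above replaces the version removed
further down, which was not valid bash (``return`` accepts only a numeric
status, not a command). A quick usage sketch, assuming ``lib/keystone`` has
been sourced and ``ENABLED_SERVICES`` is set:

::

    if is_keystone_enabled; then
        echo "the keystone service (key) is in ENABLED_SERVICES"
    fi
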
diff --git a/lib/lvm b/lib/lvm
index ed24487..39eed00 100644
--- a/lib/lvm
+++ b/lib/lvm
@@ -86,7 +86,7 @@
local backing_file=$DATA_DIR/$vg$BACKING_FILE_SUFFIX
if ! sudo vgs $vg; then
# Only create if the file doesn't already exists
- [[ -f $DATA_DIR/$backing_file ]] || truncate -s $size $backing_file
+ [[ -f $backing_file ]] || truncate -s $size $backing_file
local vg_dev=`sudo losetup -f --show $backing_file`
# Only create volume group if it doesn't already exist
@@ -103,14 +103,17 @@
function init_lvm_volume_group {
local vg=$1
local size=$2
- # Start with a clean volume group
- _create_lvm_volume_group $vg $size
+ # Start the lvmetad and tgtd services
if is_fedora || is_suse; then
- # service is not started by default
+        # services are not started by default
+ start_service lvm2-lvmetad
start_service tgtd
fi
+ # Start with a clean volume group
+ _create_lvm_volume_group $vg $size
+
# Remove iscsi targets
sudo tgtadm --op show --mode target | grep Target | cut -f3 -d ' ' | sudo xargs -n1 tgt-admin --delete || true
diff --git a/lib/neutron b/lib/neutron
index 15a5f00..3804e05 100755
--- a/lib/neutron
+++ b/lib/neutron
@@ -538,13 +538,16 @@
die_if_not_set $LINENO PHYSICAL_NETWORK "You must specify the PHYSICAL_NETWORK"
die_if_not_set $LINENO PROVIDER_NETWORK_TYPE "You must specify the PROVIDER_NETWORK_TYPE"
NET_ID=$(neutron net-create $PHYSICAL_NETWORK --tenant_id $TENANT_ID --provider:network_type $PROVIDER_NETWORK_TYPE --provider:physical_network "$PHYSICAL_NETWORK" ${SEGMENTATION_ID:+--provider:segmentation_id $SEGMENTATION_ID} --shared | grep ' id ' | get_field 2)
+ die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK $TENANT_ID"
if [[ "$IP_VERSION" =~ 4.* ]]; then
SUBNET_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} --name $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2)
+ die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $PROVIDER_SUBNET_NAME $TENANT_ID"
fi
if [[ "$IP_VERSION" =~ .*6 ]]; then
SUBNET_V6_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 6 --ipv6-address-mode slaac --gateway $V6_NETWORK_GATEWAY --name $PROVIDER_SUBNET_NAME_V6 $NET_ID $FIXED_RANGE_V6 | grep 'id' | get_field 2)
+ die_if_not_set $LINENO SUBNET_V6_ID "Failure creating SUBNET_V6_ID for $PROVIDER_SUBNET_NAME_V6 $TENANT_ID"
fi
sudo ip link set $OVS_PHYSICAL_BRIDGE up
@@ -552,7 +555,7 @@
sudo ip link set $PUBLIC_INTERFACE up
else
NET_ID=$(neutron net-create --tenant-id $TENANT_ID "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2)
- die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK $TENANT_ID"
+ die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PRIVATE_NETWORK_NAME $TENANT_ID"
if [[ "$IP_VERSION" =~ 4.* ]]; then
# Create IPv4 private subnet
diff --git a/lib/neutron_plugins/midonet b/lib/neutron_plugins/midonet
index 23ad8b2..9e72aa0 100644
--- a/lib/neutron_plugins/midonet
+++ b/lib/neutron_plugins/midonet
@@ -1,100 +1,4 @@
#!/bin/bash
-#
-# Neutron MidoNet plugin
-# ----------------------
-MIDONET_DIR=${MIDONET_DIR:-$DEST/midonet}
-MIDONET_API_PORT=${MIDONET_API_PORT:-8080}
-MIDONET_API_URL=${MIDONET_API_URL:-http://localhost:$MIDONET_API_PORT/midonet-api}
-
-# Save trace setting
-MN_XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-function is_neutron_ovs_base_plugin {
- # MidoNet does not use l3-agent
- # 0 means True here
- return 1
-}
-
-function neutron_plugin_create_nova_conf {
- NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"}
-}
-
-function neutron_plugin_install_agent_packages {
- :
-}
-
-function neutron_plugin_configure_common {
- Q_PLUGIN_CONF_PATH=etc/neutron/plugins/midonet
- Q_PLUGIN_CONF_FILENAME=midonet.ini
- Q_PLUGIN_CLASS="neutron.plugins.midonet.plugin.MidonetPluginV2"
-
- # MidoNet implements LBaaS API in the plugin, not as an LBaaS driver.
- # In this model, the plugin references the 'neutron_lbaas' module but
- # does not require starting an LBaaS service. Devstack, however, clones
- # 'neutron_lbaas' only if 'lbaas' service is enabled. To get around this,
- # always clone 'neutron_lbaas' so that it is made available to the plugin.
- # Also, discontinue if the 'lbaas' service is enabled.
- if is_service_enabled q-lbaas; then
- die $LINENO "LBaaS service should be disabled for the MidoNet plugin"
- fi
- git_clone $NEUTRON_LBAAS_REPO $NEUTRON_LBAAS_DIR $NEUTRON_LBAAS_BRANCH
- setup_develop $NEUTRON_LBAAS_DIR
-}
-
-function neutron_plugin_configure_debug_command {
- :
-}
-
-function neutron_plugin_configure_dhcp_agent {
- DHCP_DRIVER=${DHCP_DRIVER:-"neutron.plugins.midonet.agent.midonet_driver.DhcpNoOpDriver"}
- neutron_plugin_setup_interface_driver $Q_DHCP_CONF_FILE
- iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_driver $DHCP_DRIVER
- iniset $Q_DHCP_CONF_FILE DEFAULT use_namespaces True
- iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata True
-}
-
-function neutron_plugin_configure_l3_agent {
- die $LINENO "q-l3 must not be executed with MidoNet plugin!"
-}
-
-function neutron_plugin_configure_plugin_agent {
- die $LINENO "q-agt must not be executed with MidoNet plugin!"
-}
-
-function neutron_plugin_configure_service {
- if [[ "$MIDONET_API_URL" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE MIDONET midonet_uri $MIDONET_API_URL
- fi
- if [[ "$MIDONET_USERNAME" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE MIDONET username $MIDONET_USERNAME
- fi
- if [[ "$MIDONET_PASSWORD" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE MIDONET password $MIDONET_PASSWORD
- fi
- if [[ "$MIDONET_PROJECT_ID" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE MIDONET project_id $MIDONET_PROJECT_ID
- fi
-
- Q_L3_ENABLED=True
- Q_L3_ROUTER_PER_TENANT=True
-}
-
-function neutron_plugin_setup_interface_driver {
- local conf_file=$1
- iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.MidonetInterfaceDriver
-}
-
-function has_neutron_plugin_security_group {
- # 0 means True here
- return 0
-}
-
-function neutron_plugin_check_adv_test_requirements {
- # 0 means True here
- return 1
-}
-
-# Restore xtrace
-$MN_XTRACE
+# REVISIT(devvesa): This file is intentionally left empty
+# in order to keep Q_PLUGIN=midonet working.
diff --git a/lib/neutron_plugins/ofagent_agent b/lib/neutron_plugins/ofagent_agent
index d38fbeb..0bc9bff 100644
--- a/lib/neutron_plugins/ofagent_agent
+++ b/lib/neutron_plugins/ofagent_agent
@@ -1,100 +1,4 @@
#!/bin/bash
-#
-# OpenFlow Agent plugin
-# ----------------------
-# Save trace setting
-OFA_XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-source $TOP_DIR/lib/neutron_plugins/ovs_base
-source $TOP_DIR/lib/neutron_thirdparty/ryu # for RYU_DIR, install_ryu, etc
-
-function neutron_plugin_create_nova_conf {
- _neutron_ovs_base_configure_nova_vif_driver
-}
-
-function neutron_plugin_install_agent_packages {
- _neutron_ovs_base_install_agent_packages
-
- # This agent uses ryu to talk with switches
- install_package $(get_packages "ryu")
- install_ryu
-}
-
-function neutron_plugin_configure_debug_command {
- _neutron_ovs_base_configure_debug_command
-}
-
-function neutron_plugin_configure_dhcp_agent {
- iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport
-}
-
-function neutron_plugin_configure_l3_agent {
- _neutron_ovs_base_configure_l3_agent
- iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport
-}
-
-function _neutron_ofagent_configure_firewall_driver {
- if [[ "$Q_USE_SECGROUP" == "True" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
- else
- iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver
- fi
-}
-
-function neutron_plugin_configure_plugin_agent {
- # Set up integration bridge
- _neutron_ovs_base_setup_bridge $OVS_BRIDGE
- _neutron_ofagent_configure_firewall_driver
-
- # Check a supported openflow version
- OF_VERSION=`ovs-ofctl --version | grep "OpenFlow versions" | awk '{print $3}' | cut -d':' -f2`
- if [ `vercmp_numbers "$OF_VERSION" "0x3"` -lt "0" ]; then
- die $LINENO "This agent requires OpenFlow 1.3+ capable switch."
- fi
-
- # Enable tunnel networks if selected
- if [[ "$OVS_ENABLE_TUNNELING" == "True" ]]; then
- # Verify tunnels are supported
- # REVISIT - also check kernel module support for GRE and patch ports
- OVS_VERSION=`ovs-vsctl --version | head -n 1 | grep -E -o "[0-9]+\.[0-9]+"`
- if [ `vercmp_numbers "$OVS_VERSION" "1.4"` -lt "0" ]; then
- die $LINENO "You are running OVS version $OVS_VERSION. OVS 1.4+ is required for tunneling between multiple hosts."
- fi
- iniset /$Q_PLUGIN_CONF_FILE ovs local_ip $TUNNEL_ENDPOINT_IP
- fi
-
- # Setup physical network bridge mappings. Override
- # ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc`` for more
- # complex physical network configurations.
- if [[ "$OVS_BRIDGE_MAPPINGS" == "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$OVS_PHYSICAL_BRIDGE" != "" ]]; then
- OVS_BRIDGE_MAPPINGS=$PHYSICAL_NETWORK:$OVS_PHYSICAL_BRIDGE
-
- # Configure bridge manually with physical interface as port for multi-node
- sudo ovs-vsctl --no-wait -- --may-exist add-br $OVS_PHYSICAL_BRIDGE
- fi
- if [[ "$OVS_BRIDGE_MAPPINGS" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE ovs bridge_mappings $OVS_BRIDGE_MAPPINGS
- fi
- if [[ "$OFAGENT_PHYSICAL_INTERFACE_MAPPINGS" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE agent physical_interface_mappings \
- $OFAGENT_PHYSICAL_INTERFACE_MAPPINGS
- fi
- AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-ofagent-agent"
-
- iniset /$Q_PLUGIN_CONF_FILE agent tunnel_types $Q_TUNNEL_TYPES
-}
-
-function neutron_plugin_setup_interface_driver {
- local conf_file=$1
- iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver
- iniset $conf_file DEFAULT ovs_use_veth True
-}
-
-function neutron_plugin_check_adv_test_requirements {
- is_service_enabled q-agt && is_service_enabled q-dhcp && return 0
-}
-
-# Restore xtrace
-$OFA_XTRACE
+# REVISIT(yamamoto): This file is intentionally left empty
+# in order to keep Q_AGENT=ofagent_agent working.
diff --git a/lib/neutron_thirdparty/midonet b/lib/neutron_thirdparty/midonet
deleted file mode 100644
index 2c82d48..0000000
--- a/lib/neutron_thirdparty/midonet
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/bin/bash
-#
-# MidoNet
-# -------
-
-# This file implements functions required to configure MidoNet as the third-party
-# system used with devstack's Neutron. To include this file, specify the following
-# variables in localrc:
-#
-# * enable_service midonet
-#
-
-# MidoNet devstack destination dir
-MIDONET_DIR=${MIDONET_DIR:-$DEST/midonet}
-
-# MidoNet client repo
-MIDONET_CLIENT_REPO=${MIDONET_CLIENT_REPO:-https://github.com/midokura/python-midonetclient.git}
-MIDONET_CLIENT_BRANCH=${MIDONET_CLIENT_BRANCH:-master}
-MIDONET_CLIENT_DIR=${MIDONET_CLIENT_DIR:-$MIDONET_DIR/python-midonetclient}
-
-# Save trace setting
-MN3_XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-function configure_midonet {
- :
-}
-
-function init_midonet {
- :
-}
-
-function install_midonet {
- git_clone $MIDONET_CLIENT_REPO $MIDONET_CLIENT_DIR $MIDONET_CLIENT_BRANCH
- export PYTHONPATH=$MIDONET_CLIENT_DIR/src:$PYTHONPATH
-}
-
-function start_midonet {
- :
-}
-
-function stop_midonet {
- :
-}
-
-function check_midonet {
- :
-}
-
-# Restore xtrace
-$MN3_XTRACE
diff --git a/lib/neutron_thirdparty/ryu b/lib/neutron_thirdparty/ryu
deleted file mode 100644
index 1f78a21..0000000
--- a/lib/neutron_thirdparty/ryu
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/bash
-#
-# Ryu SDN Framework
-# -----------------
-
-# Used by ofagent.
-# TODO(yamamoto): Switch to pip_install once the development was settled
-
-# Save trace setting
-RYU3_XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-RYU_DIR=$DEST/ryu
-
-# Make this function idempotent and avoid cloning same repo many times
-# with RECLONE=yes
-_RYU_INSTALLED=${_RYU_INSTALLED:-False}
-function install_ryu {
- if [[ "$_RYU_INSTALLED" == "False" ]]; then
- git_clone $RYU_REPO $RYU_DIR $RYU_BRANCH
- export PYTHONPATH=$RYU_DIR:$PYTHONPATH
- pip_install $(cat $RYU_DIR/tools/pip-requires)
- _RYU_INSTALLED=True
- fi
-}
-
-# Restore xtrace
-$RYU3_XTRACE
diff --git a/lib/nova b/lib/nova
index 74a3411..e9e78c7 100644
--- a/lib/nova
+++ b/lib/nova
@@ -566,6 +566,10 @@
if is_service_enabled tls-proxy; then
iniset $NOVA_CONF DEFAULT ec2_listen_port $EC2_SERVICE_PORT_INT
fi
+
+ if is_service_enabled n-sproxy; then
+ iniset $NOVA_CONF serial_console enabled True
+ fi
}
function init_nova_cells {
@@ -764,6 +768,7 @@
run_process n-xvnc "$NOVA_BIN_DIR/nova-xvpvncproxy --config-file $api_cell_conf"
run_process n-spice "$NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $api_cell_conf --web $SPICE_WEB_DIR"
run_process n-cauth "$NOVA_BIN_DIR/nova-consoleauth --config-file $api_cell_conf"
+ run_process n-sproxy "$NOVA_BIN_DIR/nova-serialproxy --config-file $api_cell_conf"
# Starting the nova-objectstore only if swift3 service is not enabled.
# Swift will act as s3 objectstore.
@@ -794,7 +799,7 @@
# Kill the nova screen windows
# Some services are listed here twice since more than one instance
# of a service may be running in certain configs.
- for serv in n-api n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta n-obj; do
+ for serv in n-api n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta n-obj n-sproxy; do
stop_process $serv
done
}
diff --git a/lib/oslo b/lib/oslo
index 31c9d34..18cddc1 100644
--- a/lib/oslo
+++ b/lib/oslo
@@ -23,6 +23,7 @@
# Defaults
# --------
GITDIR["cliff"]=$DEST/cliff
+GITDIR["debtcollector"]=$DEST/debtcollector
GITDIR["oslo.concurrency"]=$DEST/oslo.concurrency
GITDIR["oslo.config"]=$DEST/oslo.config
GITDIR["oslo.context"]=$DEST/oslo.context
@@ -31,6 +32,7 @@
GITDIR["oslo.log"]=$DEST/oslo.log
GITDIR["oslo.messaging"]=$DEST/oslo.messaging
GITDIR["oslo.middleware"]=$DEST/oslo.middleware
+GITDIR["oslo.policy"]=$DEST/oslo.policy
GITDIR["oslo.rootwrap"]=$DEST/oslo.rootwrap
GITDIR["oslo.serialization"]=$DEST/oslo.serialization
GITDIR["oslo.utils"]=$DEST/oslo.utils
@@ -57,6 +59,7 @@
# install_oslo() - Collect source and prepare
function install_oslo {
_do_install_oslo_lib "cliff"
+ _do_install_oslo_lib "debtcollector"
_do_install_oslo_lib "oslo.concurrency"
_do_install_oslo_lib "oslo.config"
_do_install_oslo_lib "oslo.context"
@@ -65,6 +68,7 @@
_do_install_oslo_lib "oslo.log"
_do_install_oslo_lib "oslo.messaging"
_do_install_oslo_lib "oslo.middleware"
+ _do_install_oslo_lib "oslo.policy"
_do_install_oslo_lib "oslo.rootwrap"
_do_install_oslo_lib "oslo.serialization"
_do_install_oslo_lib "oslo.utils"
diff --git a/lib/sahara b/lib/sahara
index da4fbcd..a84a06f 100644
--- a/lib/sahara
+++ b/lib/sahara
@@ -39,7 +39,7 @@
SAHARA_AUTH_CACHE_DIR=${SAHARA_AUTH_CACHE_DIR:-/var/cache/sahara}
-SAHARA_ENABLED_PLUGINS=${SAHARA_ENABLED_PLUGINS:-vanilla,hdp,fake}
+SAHARA_ENABLED_PLUGINS=${SAHARA_ENABLED_PLUGINS:-vanilla,hdp,cdh,spark,fake}
# Support entry points installation of console scripts
if [[ -d $SAHARA_DIR/bin ]]; then
diff --git a/lib/stack b/lib/stack
new file mode 100644
index 0000000..9a509d8
--- /dev/null
+++ b/lib/stack
@@ -0,0 +1,29 @@
+#!/bin/bash
+#
+# lib/stack
+#
+# These functions are code snippets pulled out of stack.sh for easier
+# re-use by Grenade. They can assume the same environment is available
+# as in the lower part of stack.sh, namely that a valid stackrc and all of
+# the lib/* files for the services have been sourced.
+#
+# For clarity, all functions declared here that came from ``stack.sh``
+# shall be named with the prefix ``stack_``.
+
+
+# Generic service install; handles venv creation if configured for the service
+# stack_install_service service
+function stack_install_service {
+ local service=$1
+ if type install_${service} >/dev/null 2>&1; then
+ if [[ -n ${PROJECT_VENV[$service]:-} ]]; then
+ rm -rf ${PROJECT_VENV[$service]}
+ source $TOP_DIR/tools/build_venv.sh ${PROJECT_VENV[$service]}
+ export PIP_VIRTUAL_ENV=${PROJECT_VENV[$service]:-}
+ fi
+ install_${service}
+ if [[ -n ${PROJECT_VENV[$service]:-} ]]; then
+ unset PIP_VIRTUAL_ENV
+ fi
+ fi
+}
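
``stack_install_service`` only builds a virtualenv when ``PROJECT_VENV`` has
an entry for the service; otherwise it falls back to a plain
``install_<service>`` call. A sketch of both paths, assuming the usual
``stack.sh`` environment and a hypothetical service named ``example`` whose
``install_example`` function exists:

::

    # Request a dedicated venv for the hypothetical "example" service, then
    # install it; install_example runs with PIP_VIRTUAL_ENV pointing at it
    PROJECT_VENV["example"]=$DEST/venvs/example
    stack_install_service example

    # With no PROJECT_VENV entry the service installs to the system Python
    stack_install_service glance
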
diff --git a/lib/swift b/lib/swift
index 56baa12..8a96615 100644
--- a/lib/swift
+++ b/lib/swift
@@ -393,7 +393,7 @@
swift_pipeline+=" swift3 s3token "
fi
- if is_service_enabled key;then
+ if is_service_enabled keystone; then
swift_pipeline+=" authtoken keystoneauth"
fi
swift_pipeline+=" tempauth "
@@ -498,7 +498,7 @@
iniset ${testfile} func_test password4 testing4
iniset ${testfile} func_test domain4 swift_test
- if is_service_enabled key;then
+ if is_service_enabled keystone; then
iniuncomment ${testfile} func_test auth_version
local auth_vers=$(iniget ${testfile} func_test auth_version)
iniset ${testfile} func_test auth_host ${KEYSTONE_SERVICE_HOST}
diff --git a/lib/tempest b/lib/tempest
index 8396a78..6177ffe 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -95,7 +95,8 @@
# configure_tempest() - Set config files, create data dirs, etc
function configure_tempest {
# install testr since its used to process tempest logs
- pip_install `grep -h testrepository $REQUIREMENTS_DIR/global-requirements.txt | cut -d\# -f1`
+ pip_install $(get_from_global_requirements testrepository)
+
local image_lines
local images
local num_images
@@ -516,14 +517,23 @@
if use_library_from_git "tempest-lib"; then
git_clone_by_name "tempest-lib"
setup_dev_lib "tempest-lib"
+        # NOTE(mtreinish): For testing tempest-lib from git with tempest we need
+        # to put the git version of tempest-lib in the tempest job's tox venv
+ export PIP_VIRTUAL_ENV=${PROJECT_VENV["tempest"]}
+ setup_dev_lib "tempest-lib"
+ unset PIP_VIRTUAL_ENV
fi
}
# install_tempest() - Collect source and prepare
function install_tempest {
- install_tempest_lib
git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH
pip_install tox
+ pushd $TEMPEST_DIR
+ tox --notest -efull
+ PROJECT_VENV["tempest"]=${TEMPEST_DIR}/.tox/full
+ install_tempest_lib
+ popd
}
# init_tempest() - Initialize ec2 images
diff --git a/lib/trove b/lib/trove
index e1b307a..d437718 100644
--- a/lib/trove
+++ b/lib/trove
@@ -34,7 +34,13 @@
TROVE_DIR=$DEST/trove
TROVE_CONF_DIR=/etc/trove
+TROVE_CONF=$TROVE_CONF_DIR/trove.conf
+TROVE_TASKMANAGER_CONF=$TROVE_CONF_DIR/trove-taskmanager.conf
+TROVE_CONDUCTOR_CONF=$TROVE_CONF_DIR/trove-conductor.conf
+TROVE_API_PASTE_INI=$TROVE_CONF_DIR/api-paste.ini
+
TROVE_LOCAL_CONF_DIR=$TROVE_DIR/etc/trove
+TROVE_LOCAL_API_PASTE_INI=$TROVE_LOCAL_CONF_DIR/api-paste.ini
TROVE_AUTH_CACHE_DIR=${TROVE_AUTH_CACHE_DIR:-/var/cache/trove}
TROVE_DATASTORE_TYPE=${TROVE_DATASTORE_TYPE:-"mysql"}
TROVE_DATASTORE_VERSION=${TROVE_DATASTORE_VERSION:-"5.5"}
@@ -46,6 +52,7 @@
else
TROVE_BIN_DIR=$(get_python_exec_prefix)
fi
+TROVE_MANAGE=$TROVE_BIN_DIR/trove-manage
# Tell Tempest this project is present
TEMPEST_SERVICES+=,trove
@@ -119,48 +126,48 @@
sudo chown -R $STACK_USER: ${TROVE_AUTH_CACHE_DIR}
# Copy api-paste file over to the trove conf dir
- cp $TROVE_LOCAL_CONF_DIR/api-paste.ini $TROVE_CONF_DIR/api-paste.ini
+ cp $TROVE_LOCAL_API_PASTE_INI $TROVE_API_PASTE_INI
# (Re)create trove conf files
- rm -f $TROVE_CONF_DIR/trove.conf
- rm -f $TROVE_CONF_DIR/trove-taskmanager.conf
- rm -f $TROVE_CONF_DIR/trove-conductor.conf
+ rm -f $TROVE_CONF
+ rm -f $TROVE_TASKMANAGER_CONF
+ rm -f $TROVE_CONDUCTOR_CONF
- iniset $TROVE_CONF_DIR/trove.conf DEFAULT rabbit_userid $RABBIT_USERID
- iniset $TROVE_CONF_DIR/trove.conf DEFAULT rabbit_password $RABBIT_PASSWORD
- iniset $TROVE_CONF_DIR/trove.conf DEFAULT sql_connection `database_connection_url trove`
- iniset $TROVE_CONF_DIR/trove.conf DEFAULT default_datastore $TROVE_DATASTORE_TYPE
- setup_trove_logging $TROVE_CONF_DIR/trove.conf
- iniset $TROVE_CONF_DIR/trove.conf DEFAULT trove_api_workers "$API_WORKERS"
+ iniset $TROVE_CONF DEFAULT rabbit_userid $RABBIT_USERID
+ iniset $TROVE_CONF DEFAULT rabbit_password $RABBIT_PASSWORD
+ iniset $TROVE_CONF DEFAULT sql_connection `database_connection_url trove`
+ iniset $TROVE_CONF DEFAULT default_datastore $TROVE_DATASTORE_TYPE
+ setup_trove_logging $TROVE_CONF
+ iniset $TROVE_CONF DEFAULT trove_api_workers "$API_WORKERS"
- configure_auth_token_middleware $TROVE_CONF_DIR/trove.conf trove $TROVE_AUTH_CACHE_DIR
+ configure_auth_token_middleware $TROVE_CONF trove $TROVE_AUTH_CACHE_DIR
# (Re)create trove taskmanager conf file if needed
if is_service_enabled tr-tmgr; then
TROVE_AUTH_ENDPOINT=$KEYSTONE_AUTH_URI/v$IDENTITY_API_VERSION
- iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT rabbit_userid $RABBIT_USERID
- iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT rabbit_password $RABBIT_PASSWORD
- iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT sql_connection `database_connection_url trove`
- iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT taskmanager_manager trove.taskmanager.manager.Manager
- iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT nova_proxy_admin_user radmin
- iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT nova_proxy_admin_tenant_name trove
- iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT nova_proxy_admin_pass $RADMIN_USER_PASS
- iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT
- setup_trove_logging $TROVE_CONF_DIR/trove-taskmanager.conf
+ iniset $TROVE_TASKMANAGER_CONF DEFAULT rabbit_userid $RABBIT_USERID
+ iniset $TROVE_TASKMANAGER_CONF DEFAULT rabbit_password $RABBIT_PASSWORD
+ iniset $TROVE_TASKMANAGER_CONF DEFAULT sql_connection `database_connection_url trove`
+ iniset $TROVE_TASKMANAGER_CONF DEFAULT taskmanager_manager trove.taskmanager.manager.Manager
+ iniset $TROVE_TASKMANAGER_CONF DEFAULT nova_proxy_admin_user radmin
+ iniset $TROVE_TASKMANAGER_CONF DEFAULT nova_proxy_admin_tenant_name trove
+ iniset $TROVE_TASKMANAGER_CONF DEFAULT nova_proxy_admin_pass $RADMIN_USER_PASS
+ iniset $TROVE_TASKMANAGER_CONF DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT
+ setup_trove_logging $TROVE_TASKMANAGER_CONF
fi
# (Re)create trove conductor conf file if needed
if is_service_enabled tr-cond; then
- iniset $TROVE_CONF_DIR/trove-conductor.conf DEFAULT rabbit_userid $RABBIT_USERID
- iniset $TROVE_CONF_DIR/trove-conductor.conf DEFAULT rabbit_password $RABBIT_PASSWORD
- iniset $TROVE_CONF_DIR/trove-conductor.conf DEFAULT sql_connection `database_connection_url trove`
- iniset $TROVE_CONF_DIR/trove-conductor.conf DEFAULT nova_proxy_admin_user radmin
- iniset $TROVE_CONF_DIR/trove-conductor.conf DEFAULT nova_proxy_admin_tenant_name trove
- iniset $TROVE_CONF_DIR/trove-conductor.conf DEFAULT nova_proxy_admin_pass $RADMIN_USER_PASS
- iniset $TROVE_CONF_DIR/trove-conductor.conf DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT
- iniset $TROVE_CONF_DIR/trove-conductor.conf DEFAULT control_exchange trove
- setup_trove_logging $TROVE_CONF_DIR/trove-conductor.conf
+ iniset $TROVE_CONDUCTOR_CONF DEFAULT rabbit_userid $RABBIT_USERID
+ iniset $TROVE_CONDUCTOR_CONF DEFAULT rabbit_password $RABBIT_PASSWORD
+ iniset $TROVE_CONDUCTOR_CONF DEFAULT sql_connection `database_connection_url trove`
+ iniset $TROVE_CONDUCTOR_CONF DEFAULT nova_proxy_admin_user radmin
+ iniset $TROVE_CONDUCTOR_CONF DEFAULT nova_proxy_admin_tenant_name trove
+ iniset $TROVE_CONDUCTOR_CONF DEFAULT nova_proxy_admin_pass $RADMIN_USER_PASS
+ iniset $TROVE_CONDUCTOR_CONF DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT
+ iniset $TROVE_CONDUCTOR_CONF DEFAULT control_exchange trove
+ setup_trove_logging $TROVE_CONDUCTOR_CONF
fi
# Set up Guest Agent conf
@@ -197,7 +204,7 @@
recreate_database trove
# Initialize the trove database
- $TROVE_BIN_DIR/trove-manage db_sync
+ $TROVE_MANAGE db_sync
# If no guest image is specified, skip remaining setup
[ -z "$TROVE_GUEST_IMAGE_URL" ] && return 0
@@ -214,19 +221,19 @@
fi
# Now that we have the guest image id, initialize appropriate datastores / datastore versions
- $TROVE_BIN_DIR/trove-manage datastore_update "$TROVE_DATASTORE_TYPE" ""
- $TROVE_BIN_DIR/trove-manage datastore_version_update "$TROVE_DATASTORE_TYPE" "$TROVE_DATASTORE_VERSION" "$TROVE_DATASTORE_TYPE" \
+ $TROVE_MANAGE datastore_update "$TROVE_DATASTORE_TYPE" ""
+ $TROVE_MANAGE datastore_version_update "$TROVE_DATASTORE_TYPE" "$TROVE_DATASTORE_VERSION" "$TROVE_DATASTORE_TYPE" \
"$TROVE_GUEST_IMAGE_ID" "$TROVE_DATASTORE_PACKAGE" 1
- $TROVE_BIN_DIR/trove-manage datastore_version_update "$TROVE_DATASTORE_TYPE" "inactive_version" "inactive_manager" "$TROVE_GUEST_IMAGE_ID" "" 0
- $TROVE_BIN_DIR/trove-manage datastore_update "$TROVE_DATASTORE_TYPE" "$TROVE_DATASTORE_VERSION"
- $TROVE_BIN_DIR/trove-manage datastore_update "Inactive_Datastore" ""
+ $TROVE_MANAGE datastore_version_update "$TROVE_DATASTORE_TYPE" "inactive_version" "inactive_manager" "$TROVE_GUEST_IMAGE_ID" "" 0
+ $TROVE_MANAGE datastore_update "$TROVE_DATASTORE_TYPE" "$TROVE_DATASTORE_VERSION"
+ $TROVE_MANAGE datastore_update "Inactive_Datastore" ""
}
# start_trove() - Start running processes, including screen
function start_trove {
- run_process tr-api "$TROVE_BIN_DIR/trove-api --config-file=$TROVE_CONF_DIR/trove.conf --debug"
- run_process tr-tmgr "$TROVE_BIN_DIR/trove-taskmanager --config-file=$TROVE_CONF_DIR/trove-taskmanager.conf --debug"
- run_process tr-cond "$TROVE_BIN_DIR/trove-conductor --config-file=$TROVE_CONF_DIR/trove-conductor.conf --debug"
+ run_process tr-api "$TROVE_BIN_DIR/trove-api --config-file=$TROVE_CONF --debug"
+ run_process tr-tmgr "$TROVE_BIN_DIR/trove-taskmanager --config-file=$TROVE_TASKMANAGER_CONF --debug"
+ run_process tr-cond "$TROVE_BIN_DIR/trove-conductor --config-file=$TROVE_CONDUCTOR_CONF --debug"
}
# stop_trove() - Stop running processes
diff --git a/lib/zaqar b/lib/zaqar
index 4a24415..c9321b9 100644
--- a/lib/zaqar
+++ b/lib/zaqar
@@ -147,13 +147,13 @@
function configure_redis {
if is_ubuntu; then
install_package redis-server
+ pip_install redis
elif is_fedora; then
install_package redis
+ pip_install redis
else
exit_distro_not_supported "redis installation"
fi
-
- install_package python-redis
}
function configure_mongodb {
diff --git a/pkg/elasticsearch.sh b/pkg/elasticsearch.sh
index 15e1b2b..447596a 100755
--- a/pkg/elasticsearch.sh
+++ b/pkg/elasticsearch.sh
@@ -44,7 +44,7 @@
function configure_elasticsearch {
# currently a no op
- ::
+ :
}
function start_elasticsearch {
@@ -78,7 +78,11 @@
sudo dpkg -i ${FILES}/elasticsearch-${ELASTICSEARCH_VERSION}.deb
sudo update-rc.d elasticsearch defaults 95 10
elif is_fedora; then
- is_package_installed java-1.7.0-openjdk-headless || install_package java-1.7.0-openjdk-headless
+ if [[ "$os_RELEASE" -ge "21" ]]; then
+ is_package_installed java-1.8.0-openjdk-headless || install_package java-1.8.0-openjdk-headless
+ else
+ is_package_installed java-1.7.0-openjdk-headless || install_package java-1.7.0-openjdk-headless
+ fi
yum_install ${FILES}/elasticsearch-${ELASTICSEARCH_VERSION}.noarch.rpm
sudo /bin/systemctl daemon-reload
sudo /bin/systemctl enable elasticsearch.service
diff --git a/stack.sh b/stack.sh
index 43cb991..44a0743 100755
--- a/stack.sh
+++ b/stack.sh
@@ -94,6 +94,9 @@
# Import config functions
source $TOP_DIR/lib/config
+# Import 'public' stack.sh functions
+source $TOP_DIR/lib/stack
+
# Determine what system we are running on. This provides ``os_VENDOR``,
# ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME``
# and ``DISTRO``
@@ -601,7 +604,7 @@
# Keystone
-if is_service_enabled key; then
+if is_service_enabled keystone; then
# The ``SERVICE_TOKEN`` is used to bootstrap the Keystone database. It is
# just a string and is not a 'real' Keystone token.
read_password SERVICE_TOKEN "ENTER A SERVICE_TOKEN TO USE FOR THE SERVICE ADMIN TOKEN."
@@ -671,6 +674,15 @@
source $TOP_DIR/tools/fixup_stuff.sh
+# Virtual Environment
+# -------------------
+
+# Pre-build some problematic wheels
+if [[ -n ${WHEELHOUSE:-} && ! -d ${WHEELHOUSE:-} ]]; then
+ source $TOP_DIR/tools/build_wheels.sh
+fi
+
+
# Extras Pre-install
# ------------------
@@ -716,24 +728,16 @@
# Install middleware
install_keystonemiddleware
-# install the OpenStack client, needed for most setup commands
-if use_library_from_git "python-openstackclient"; then
- git_clone_by_name "python-openstackclient"
- setup_dev_lib "python-openstackclient"
-else
- pip_install 'python-openstackclient>=1.0.2'
-fi
-
-if is_service_enabled key; then
+if is_service_enabled keystone; then
if [ "$KEYSTONE_AUTH_HOST" == "$SERVICE_HOST" ]; then
- install_keystone
+ stack_install_service keystone
configure_keystone
fi
fi
if is_service_enabled s-proxy; then
- install_swift
+ stack_install_service swift
configure_swift
# swift3 middleware to provide S3 emulation to Swift
@@ -747,23 +751,23 @@
if is_service_enabled g-api n-api; then
# image catalog service
- install_glance
+ stack_install_service glance
configure_glance
fi
if is_service_enabled cinder; then
- install_cinder
+ stack_install_service cinder
configure_cinder
fi
if is_service_enabled neutron; then
- install_neutron
+ stack_install_service neutron
install_neutron_third_party
fi
if is_service_enabled nova; then
# compute service
- install_nova
+ stack_install_service nova
cleanup_nova
configure_nova
fi
@@ -772,19 +776,19 @@
# django openstack_auth
install_django_openstack_auth
# dashboard
- install_horizon
+ stack_install_service horizon
configure_horizon
fi
if is_service_enabled ceilometer; then
install_ceilometerclient
- install_ceilometer
+ stack_install_service ceilometer
echo_summary "Configuring Ceilometer"
configure_ceilometer
fi
if is_service_enabled heat; then
- install_heat
+ stack_install_service heat
install_heat_other
cleanup_heat
configure_heat
@@ -798,13 +802,22 @@
# don't be naive and add to existing line!
fi
-
# Extras Install
# --------------
# Phase: install
run_phase stack install
+
+# install the OpenStack client, needed for most setup commands
+if use_library_from_git "python-openstackclient"; then
+ git_clone_by_name "python-openstackclient"
+ setup_dev_lib "python-openstackclient"
+else
+ pip_install 'python-openstackclient>=1.0.2'
+fi
+
+
if [[ $TRACK_DEPENDS = True ]]; then
$DEST/.venv/bin/pip freeze > $DEST/requires-post-pip
if ! diff -Nru $DEST/requires-pre-pip $DEST/requires-post-pip > $DEST/requires.diff; then
@@ -918,7 +931,7 @@
# Keystone
# --------
-if is_service_enabled key; then
+if is_service_enabled keystone; then
echo_summary "Starting Keystone"
if [ "$KEYSTONE_AUTH_HOST" == "$SERVICE_HOST" ]; then
@@ -1143,7 +1156,7 @@
fi
# Create an access key and secret key for nova ec2 register image
-if is_service_enabled key && is_service_enabled swift3 && is_service_enabled nova; then
+if is_service_enabled keystone && is_service_enabled swift3 && is_service_enabled nova; then
eval $(openstack ec2 credentials create --user nova --project $SERVICE_TENANT_NAME -f shell -c access -c secret)
iniset $NOVA_CONF DEFAULT s3_access_key "$access"
iniset $NOVA_CONF DEFAULT s3_secret_key "$secret"
@@ -1226,7 +1239,7 @@
# This step also creates certificates for tenants and users,
# which is helpful in image bundle steps.
-if is_service_enabled nova && is_service_enabled key; then
+if is_service_enabled nova && is_service_enabled keystone; then
USERRC_PARAMS="-PA --target-dir $TOP_DIR/accrc"
if [ -f $SSL_BUNDLE_FILE ]; then
@@ -1314,7 +1327,7 @@
fi
# If Keystone is present you can point ``nova`` cli to this server
-if is_service_enabled key; then
+if is_service_enabled keystone; then
echo "Keystone is serving at $KEYSTONE_SERVICE_URI/v2.0/"
echo "Examples on using novaclient command line is in exercise.sh"
echo "The default users are: admin and demo"
diff --git a/stackrc b/stackrc
index 7bbde99..cb044b8 100644
--- a/stackrc
+++ b/stackrc
@@ -43,14 +43,6 @@
# enable_service q-meta
# # Optional, to enable tempest configuration as part of devstack
# enable_service tempest
-function isset {
- local nounset=$(set +o | grep nounset)
- set +o nounset
- [[ -n "${!1+x}" ]]
- result=$?
- $nounset
- return $result
-}
# this allows us to pass ENABLED_SERVICES
if ! isset ENABLED_SERVICES ; then
@@ -112,9 +104,14 @@
source $RC_DIR/.localrc.auto
fi
+# Configure wheel cache location
+export WHEELHOUSE=${WHEELHOUSE:-$DEST/.wheelhouse}
+export PIP_WHEEL_DIR=${PIP_WHEEL_DIR:-$WHEELHOUSE}
+export PIP_FIND_LINKS=${PIP_FIND_LINKS:-file://$WHEELHOUSE}
+
# This can be used to turn database query logging on and off
# (currently only implemented for MySQL backend)
-DATABASE_QUERY_LOGGING=$(trueorfalse True DATABASE_QUERY_LOGGING)
+DATABASE_QUERY_LOGGING=$(trueorfalse False DATABASE_QUERY_LOGGING)
# Set a timeout for git operations. If git is still running when the
# timeout expires, the command will be retried up to 3 times. This is
@@ -164,7 +161,7 @@
#
##############
-# metering service
+# telemetry service
CEILOMETER_REPO=${CEILOMETER_REPO:-${GIT_BASE}/openstack/ceilometer.git}
CEILOMETER_BRANCH=${CEILOMETER_BRANCH:-master}
@@ -309,6 +306,10 @@
GITREPO["cliff"]=${CLIFF_REPO:-${GIT_BASE}/openstack/cliff.git}
GITBRANCH["cliff"]=${CLIFF_BRANCH:-master}
+# debtcollector deprecation framework/helpers
+GITREPO["debtcollector"]=${DEBTCOLLECTOR_REPO:-${GIT_BASE}/openstack/debtcollector.git}
+GITBRANCH["debtcollector"]=${DEBTCOLLECTOR_BRANCH:-master}
+
# oslo.concurrency
GITREPO["oslo.concurrency"]=${OSLOCON_REPO:-${GIT_BASE}/openstack/oslo.concurrency.git}
GITBRANCH["oslo.concurrency"]=${OSLOCON_BRANCH:-master}
@@ -341,6 +342,10 @@
GITREPO["oslo.middleware"]=${OSLOMID_REPO:-${GIT_BASE}/openstack/oslo.middleware.git}
GITBRANCH["oslo.middleware"]=${OSLOMID_BRANCH:-master}
+# oslo.policy
+GITREPO["oslo.policy"]=${OSLOPOLICY_REPO:-${GIT_BASE}/openstack/oslo.policy.git}
+GITBRANCH["oslo.policy"]=${OSLOPOLICY_BRANCH:-master}
+
# oslo.rootwrap
GITREPO["oslo.rootwrap"]=${OSLORWRAP_REPO:-${GIT_BASE}/openstack/oslo.rootwrap.git}
GITBRANCH["oslo.rootwrap"]=${OSLORWRAP_BRANCH:-master}
@@ -407,6 +412,10 @@
SWIFT3_REPO=${SWIFT3_REPO:-${GIT_BASE}/stackforge/swift3.git}
SWIFT3_BRANCH=${SWIFT3_BRANCH:-master}
+# ceilometer middleware
+GITREPO["ceilometermiddleware"]=${CEILOMETERMIDDLEWARE_REPO:-${GIT_BASE}/openstack/ceilometermiddleware.git}
+GITBRANCH["ceilometermiddleware"]=${CEILOMETERMIDDLEWARE_BRANCH:-master}
+
##################
#
@@ -450,10 +459,6 @@
NOVNC_REPO=${NOVNC_REPO:-https://github.com/kanaka/noVNC.git}
NOVNC_BRANCH=${NOVNC_BRANCH:-master}
-# ryu service
-RYU_REPO=${RYU_REPO:-https://github.com/osrg/ryu.git}
-RYU_BRANCH=${RYU_BRANCH:-master}
-
# a websockets/html5 or flash powered SPICE console for vm instances
SPICE_REPO=${SPICE_REPO:-http://anongit.freedesktop.org/git/spice/spice-html5.git}
SPICE_BRANCH=${SPICE_BRANCH:-master}
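
Two behaviour changes in the ``stackrc`` hunks above may be worth overriding
locally: the wheel cache now defaults to ``$DEST/.wheelhouse`` and
``DATABASE_QUERY_LOGGING`` now defaults to ``False``. A ``local.conf`` sketch
(the wheelhouse path shown is illustrative):

::

    [[local|localrc]]
    # Re-enable MySQL query logging (the default flipped to False above)
    DATABASE_QUERY_LOGGING=True

    # Relocate the wheel cache (the default above is $DEST/.wheelhouse)
    WHEELHOUSE=/opt/stack/cache/wheelhouse
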
diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh
index 6e1b515..472b0ea 100755
--- a/tests/test_libs_from_pypi.sh
+++ b/tests/test_libs_from_pypi.sh
@@ -29,7 +29,17 @@
fi
done
-ALL_LIBS="python-novaclient oslo.config pbr oslo.context python-troveclient python-keystoneclient taskflow oslo.middleware pycadf python-glanceclient python-ironicclient tempest-lib oslo.messaging oslo.log cliff python-heatclient stevedore python-cinderclient glance_store oslo.concurrency oslo.db oslo.vmware keystonemiddleware oslo.serialization python-saharaclient django_openstack_auth python-openstackclient oslo.rootwrap oslo.i18n python-ceilometerclient oslo.utils python-swiftclient python-neutronclient tooz"
+ALL_LIBS="python-novaclient oslo.config pbr oslo.context python-troveclient"
+ALL_LIBS+=" python-keystoneclient taskflow oslo.middleware pycadf"
+ALL_LIBS+=" python-glanceclient python-ironicclient tempest-lib"
+ALL_LIBS+=" oslo.messaging oslo.log cliff python-heatclient stevedore"
+ALL_LIBS+=" python-cinderclient glance_store oslo.concurrency oslo.db"
+ALL_LIBS+=" oslo.vmware keystonemiddleware oslo.serialization"
+ALL_LIBS+=" python-saharaclient django_openstack_auth"
+ALL_LIBS+=" python-openstackclient oslo.rootwrap oslo.i18n"
+ALL_LIBS+=" python-ceilometerclient oslo.utils python-swiftclient"
+ALL_LIBS+=" python-neutronclient tooz ceilometermiddleware oslo.policy"
+ALL_LIBS+=" debtcollector"
# Generate the above list with
# echo ${!GITREPO[@]}
diff --git a/tools/build_venv.sh b/tools/build_venv.sh
new file mode 100755
index 0000000..11d1d35
--- /dev/null
+++ b/tools/build_venv.sh
@@ -0,0 +1,46 @@
+#!/usr/bin/env bash
+#
+# **tools/build_venv.sh** - Build a Python Virtual Environment
+#
+# build_venv.sh venv-path [package [...]]
+#
+# Assumes:
+# - a useful pip is installed
+# - virtualenv will be installed by pip
+# - installs basic common prereq packages that require compilation
+# to allow quick copying of resulting venv as a baseline
+
+
+VENV_DEST=${1:-.venv}
+shift
+
+MORE_PACKAGES="$@"
+
+# If TOP_DIR is set we're being sourced rather than running stand-alone
+# or in a sub-shell
+if [[ -z "$TOP_DIR" ]]; then
+
+ set -o errexit
+ set -o nounset
+
+ # Keep track of the devstack directory
+ TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
+ FILES=$TOP_DIR/files
+
+ # Import common functions
+ source $TOP_DIR/functions
+
+ GetDistro
+
+ source $TOP_DIR/stackrc
+
+fi
+
+# Build new venv
+virtualenv $VENV_DEST
+
+# Install modern pip
+PIP_VIRTUAL_ENV=$VENV_DEST pip_install -U pip
+
+# Install additional packages
+PIP_VIRTUAL_ENV=$VENV_DEST pip_install ${MORE_PACKAGES}
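
``tools/build_venv.sh`` can also be run by hand; a sketch, assuming it is
invoked from a devstack checkout where ``tools/install_pip.sh`` has already
provided a working pip, with an illustrative target path and package list:

::

    # Build a venv at /opt/stack/venvs/demo and pre-install two packages
    ./tools/build_venv.sh /opt/stack/venvs/demo lxml psycopg2
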
diff --git a/tools/build_wheels.sh b/tools/build_wheels.sh
new file mode 100755
index 0000000..31398f9
--- /dev/null
+++ b/tools/build_wheels.sh
@@ -0,0 +1,73 @@
+#!/usr/bin/env bash
+#
+# **tools/build_wheels.sh** - Build a cache of Python wheels
+#
+# build_wheels.sh [package [...]]
+#
+# System package prerequisites listed in files/*/devlibs will be installed
+#
+# Builds wheels for all virtual env requirements listed in
+# ``venv-requirements.txt`` plus any supplied on the command line.
+#
+# Assumes ``tools/install_pip.sh`` has been run and a suitable pip/setuptools is available.
+
+# If TOP_DIR is set we're being sourced rather than running stand-alone
+# or in a sub-shell
+if [[ -z "$TOP_DIR" ]]; then
+
+ set -o errexit
+ set -o nounset
+
+ # Keep track of the devstack directory
+ TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
+ FILES=$TOP_DIR/files
+
+ # Import common functions
+ source $TOP_DIR/functions
+
+ GetDistro
+
+ source $TOP_DIR/stackrc
+
+ trap err_trap ERR
+
+fi
+
+# Get additional packages to build
+MORE_PACKAGES="$@"
+
+# Exit on any errors so that errors don't compound
+function err_trap {
+ local r=$?
+ set +o xtrace
+
+ rm -rf $TMP_VENV_PATH
+
+ exit $r
+}
+
+# Get system prereqs
+install_package $(get_packages devlibs)
+
+# Get a modern ``virtualenv``
+pip_install virtualenv
+
+# Prepare the workspace
+TMP_VENV_PATH=$(mktemp -d tmp-venv-XXXX)
+virtualenv $TMP_VENV_PATH
+
+# Install modern pip and wheel
+$TMP_VENV_PATH/bin/pip install -U pip wheel
+
+# VENV_PACKAGES is a list of packages we want to pre-install
+VENV_PACKAGE_FILE=$FILES/venv-requirements.txt
+if [[ -r $VENV_PACKAGE_FILE ]]; then
+ VENV_PACKAGES=$(grep -v '^#' $VENV_PACKAGE_FILE)
+fi
+
+for pkg in ${VENV_PACKAGES,/ } ${MORE_PACKAGES}; do
+ $TMP_VENV_PATH/bin/pip wheel $pkg
+done
+
+# Clean up wheel workspace
+rm -rf $TMP_VENV_PATH
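
Similarly, ``tools/build_wheels.sh`` pre-builds wheels for everything listed
in ``files/venv-requirements.txt`` plus anything passed on the command line,
dropping them into ``PIP_WHEEL_DIR``. A sketch, assuming the wheelhouse
variables exported in the ``stackrc`` change and an illustrative extra
package:

::

    # Build the default wheel set plus one extra package
    ./tools/build_wheels.sh python-memcached
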
diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh
index cc5275f..f8edd16 100755
--- a/tools/fixup_stuff.sh
+++ b/tools/fixup_stuff.sh
@@ -123,3 +123,9 @@
fi
fi
+
+# The version of pip(1.5.4) supported by python-virtualenv(1.11.4) has
+# connection issues under proxy, hence uninstalling python-virtualenv package
+# and installing the latest version using pip.
+uninstall_package python-virtualenv
+pip_install -U virtualenv
diff --git a/tools/image_list.sh b/tools/image_list.sh
index 0bb49ab..88c1d09 100755
--- a/tools/image_list.sh
+++ b/tools/image_list.sh
@@ -9,6 +9,8 @@
# dummy in the end position to trigger the fall through case.
DRIVERS="openvz ironic libvirt vsphere xenserver dummy"
+CIRROS_ARCHS="x86_64 i386"
+
# Extra variables to trigger getting additional images.
export ENABLED_SERVICES="h-api,tr-api"
HEAT_FETCHED_TEST_IMAGE="Fedora-i386-20-20131211.1-sda"
@@ -17,12 +19,15 @@
# Loop over all the virt drivers and collect all the possible images
ALL_IMAGES=""
for driver in $DRIVERS; do
- VIRT_DRIVER=$driver
- URLS=$(source $TOP_DIR/stackrc && echo $IMAGE_URLS)
- if [[ ! -z "$ALL_IMAGES" ]]; then
- ALL_IMAGES+=,
- fi
- ALL_IMAGES+=$URLS
+ for arch in $CIRROS_ARCHS; do
+ CIRROS_ARCH=$arch
+ VIRT_DRIVER=$driver
+ URLS=$(source $TOP_DIR/stackrc && echo $IMAGE_URLS)
+ if [[ ! -z "$ALL_IMAGES" ]]; then
+ ALL_IMAGES+=,
+ fi
+ ALL_IMAGES+=$URLS
+ done
done
# Make a nice list
diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh
index f28ae97..082c27e 100755
--- a/tools/xen/install_os_domU.sh
+++ b/tools/xen/install_os_domU.sh
@@ -149,12 +149,10 @@
function wait_for_VM_to_halt {
set +x
- echo "Waiting for the VM to halt. Progress in-VM can be checked with vncviewer:"
+ echo "Waiting for the VM to halt. Progress in-VM can be checked with XenCenter or xl console:"
mgmt_ip=$(echo $XENAPI_CONNECTION_URL | tr -d -c '1234567890.')
domid=$(get_domid "$GUEST_NAME")
- sleep 20 # Wait for the vnc-port to be written
- port=$(xenstore-read /local/domain/$domid/console/vnc-port)
- echo "vncviewer -via root@$mgmt_ip localhost:${port:2}"
+ echo "ssh root@$mgmt_ip \"xl console $domid\""
while true; do
state=$(xe_min vm-list name-label="$GUEST_NAME" power-state=halted)
if [ -n "$state" ]; then
diff --git a/unstack.sh b/unstack.sh
index 6deeba2..a6aeec5 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -112,7 +112,7 @@
stop_glance
fi
-if is_service_enabled key; then
+if is_service_enabled keystone; then
stop_keystone
fi