Merge "Fix iniset and its friends"
diff --git a/.gitignore b/.gitignore
index 17cb38c..f9e2644 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,3 +10,5 @@
files/images
stack-screenrc
*.pem
+accrc
+.stackenv
diff --git a/AUTHORS b/AUTHORS
index cd0acac..ba68e32 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -1,6 +1,7 @@
Aaron Lee <aaron.lee@rackspace.com>
Aaron Rosen <arosen@nicira.com>
Adam Gandelman <adamg@canonical.com>
+Akihiro MOTOKI <motoki@da.jp.nec.com>
Andrew Laski <andrew.laski@rackspace.com>
Andy Smith <github@anarkystic.com>
Anthony Young <sleepsonthefloor@gmail.com>
diff --git a/HACKING.rst b/HACKING.rst
index e8f90c7..6ad8c7e 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -15,6 +15,16 @@
tracks the OpenStack trunk branches a separate branch is maintained for all
OpenStack releases starting with Diablo (stable/diablo).
+Contributing code to DevStack follows the usual OpenStack process as described
+in `How To Contribute`__ in the OpenStack wiki. `DevStack's LaunchPad project`__
+contains the usual links for blueprints, bugs, etc.
+
+__ contribute_
+.. _contribute: http://wiki.openstack.org/HowToContribute
+
+__ lp_
+.. _lp: https://launchpad.net/~devstack
+
The primary script in DevStack is ``stack.sh``, which performs the bulk of the
work for DevStack's use cases. There is a subscript ``functions`` that contains
generally useful shell functions and is used by a number of the scripts in
@@ -53,8 +63,8 @@
source $TOP_DIR/openrc
``stack.sh`` is a rather large monolithic script that flows through from beginning
-to end. The process of breaking it down into project-level sub-scripts has begun
-with the introduction of ``lib/cinder`` and ``lib/ceilometer``.
+to end. The process of breaking it down into project-level sub-scripts is nearly
+complete and should make ``stack.sh`` easier to read and manage.
These library sub-scripts have a number of fixed entry points, some of which may
just be stubs. These entry points will be called by ``stack.sh`` in the
@@ -71,6 +81,36 @@
service sub-scripts. The comments in ``<>`` are meta comments describing
how to use the template and should be removed.
+In order to show the dependencies and conditions under which project functions
+are executed, the top-level conditional testing for things like ``is_service_enabled``
+should be done in ``stack.sh``. There may be nested conditionals that need
+to be in the sub-script, such as testing for keystone being enabled in
+``configure_swift()``.
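+
+For example (illustrative only), the top-level gate belongs in ``stack.sh``::
+
+    if is_service_enabled swift; then
+        configure_swift
+    fi
+
+while any nested check, such as whether keystone is enabled, stays inside
+``configure_swift()`` itself.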
+
+
+stackrc
+-------
+
+``stackrc`` is the global configuration file for DevStack. It is responsible for
+sourcing ``localrc`` if it exists so configuration can be overridden by the user.
+
+The criteria for what belongs in ``stackrc`` can be vaguely summarized as
+follows:
+
+* All project repositories and branches (for historical reasons)
+* Global configuration that may be referenced in ``localrc``, e.g. ``DEST``, ``DATA_DIR``
+* Global service configuration like ``ENABLED_SERVICES``
+* Variables used by multiple services that do not have a clear owner, e.g.
+  ``VOLUME_BACKING_FILE_SIZE`` (nova-volumes and cinder) or ``PUBLIC_NETWORK_NAME``
+  (nova-network and quantum)
+* Variables that cannot be cleanly declared in a project file due to
+  dependency ordering, i.e. the order of sourcing the project files can
+  not be changed for other reasons but the earlier file needs to dereference a
+  variable set in the later file. This should be rare.
+
+Also, variable declarations in ``stackrc`` do NOT use the overridable form
+(``FOO=${FOO:-baz}``); if a variable did, it could already be changed in
+``localrc`` and could simply stay in the project file.
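+
+For example, a variable that belongs in a project file uses the overridable
+form (``lib/foo`` and ``FOO_PORT`` are hypothetical here)::
+
+    # lib/foo: the user may still override this in localrc
+    FOO_PORT=${FOO_PORT:-8776}
+
+whereas ``stackrc`` would declare the same variable unconditionally
+(``FOO_PORT=8776``) only when one of the criteria above applies.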
Documentation
-------------
diff --git a/exercise.sh b/exercise.sh
index a0349ce..5b3c56e 100755
--- a/exercise.sh
+++ b/exercise.sh
@@ -28,7 +28,7 @@
# Loop over each possible script (by basename)
for script in $basenames; do
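+    # A comma-delimited match avoids partial hits: with a hypothetical
+    # SKIP_EXERCISES=boot_from_volume, a bare regex match would also skip
+    # a script named "boot"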
- if [[ "$SKIP_EXERCISES" =~ $script ]] ; then
+ if [[ ,$SKIP_EXERCISES, =~ ,$script, ]] ; then
skips="$skips $script"
else
echo "====================================================================="
diff --git a/exercises/euca.sh b/exercises/euca.sh
index 982653e..76df254 100755
--- a/exercises/euca.sh
+++ b/exercises/euca.sh
@@ -165,8 +165,11 @@
euca-terminate-instances $INSTANCE || \
die "Failure terminating instance $INSTANCE"
-# Assure it has terminated within a reasonable time
-if ! timeout $TERMINATE_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | grep -q $INSTANCE; do sleep 1; done"; then
+# Ensure it has terminated within a reasonable time. The behaviour of this
+# case changed with bug/836978. Requesting the status of an invalid instance
+# will now return an error message including the instance id, so we need to
+# filter that out.
+if ! timeout $TERMINATE_TIMEOUT sh -c "while euca-describe-instances $INSTANCE |grep -v \"InstanceNotFound\" | grep -q $INSTANCE; do sleep 1; done"; then
echo "server didn't terminate within $TERMINATE_TIMEOUT seconds"
exit 1
fi
diff --git a/exercises/quantum-adv-test.sh b/exercises/quantum-adv-test.sh
index 493e223..bc33fe8 100755
--- a/exercises/quantum-adv-test.sh
+++ b/exercises/quantum-adv-test.sh
@@ -1,10 +1,9 @@
#!/usr/bin/env bash
#
-# **quantum.sh**
+# **quantum-adv-test.sh**
-# We will use this test to perform integration testing of nova and
-# other components with Quantum.
+# Perform integration testing of Nova and other components with Quantum.
echo "*********************************************************************"
echo "Begin DevStack Exercise: $0"
@@ -14,6 +13,7 @@
 # only the first error that occurred.
set -o errtrace
+
trap failed ERR
failed() {
local r=$?
@@ -30,17 +30,8 @@
 # an error. It is also useful for following along as the install occurs.
set -o xtrace
-#------------------------------------------------------------------------------
-# Quantum config check
-#------------------------------------------------------------------------------
-# Warn if quantum is not enabled
-if [[ ! "$ENABLED_SERVICES" =~ "q-svc" ]]; then
- echo "WARNING: Running quantum test without enabling quantum"
-fi
-
-#------------------------------------------------------------------------------
# Environment
-#------------------------------------------------------------------------------
+# -----------
# Keep track of the current directory
EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
@@ -62,9 +53,8 @@
# Import exercise configuration
source $TOP_DIR/exerciserc
-#------------------------------------------------------------------------------
-# Test settings for quantum
-#------------------------------------------------------------------------------
+# Quantum Settings
+# ----------------
TENANTS="DEMO1"
 # TODO (nati): Test public network
@@ -106,24 +96,17 @@
DEMO1_ROUTER1_NET="demo1-net1"
DEMO2_ROUTER1_NET="demo2-net1"
-#------------------------------------------------------------------------------
-# Keystone settings.
-#------------------------------------------------------------------------------
KEYSTONE="keystone"
-#------------------------------------------------------------------------------
-# Get a token for clients that don't support service catalog
-#------------------------------------------------------------------------------
-
-# manually create a token by querying keystone (sending JSON data). Keystone
+# Manually create a token by querying keystone (sending JSON data). Keystone
# returns a token and catalog of endpoints. We use python to parse the token
# and save it.
TOKEN=`keystone token-get | grep ' id ' | awk '{print $4}'`
-#------------------------------------------------------------------------------
-# Various functions.
-#------------------------------------------------------------------------------
+# Various functions
+# -----------------
+
function foreach_tenant {
COMMAND=$1
for TENANT in ${TENANTS//,/ };do
@@ -192,10 +175,9 @@
function confirm_server_active {
local VM_UUID=$1
if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then
- echo "server '$VM_UUID' did not become active!"
- false
-fi
-
+ echo "server '$VM_UUID' did not become active!"
+ false
+ fi
}
function add_tenant {
@@ -214,23 +196,15 @@
function remove_tenant {
local TENANT=$1
local TENANT_ID=$(get_tenant_id $TENANT)
-
$KEYSTONE tenant-delete $TENANT_ID
}
function remove_user {
local USER=$1
local USER_ID=$(get_user_id $USER)
-
$KEYSTONE user-delete $USER_ID
}
-
-
-#------------------------------------------------------------------------------
-# "Create" functions
-#------------------------------------------------------------------------------
-
function create_tenants {
source $TOP_DIR/openrc admin admin
add_tenant demo1 demo1 demo1
@@ -383,9 +357,9 @@
delete_all
}
-#------------------------------------------------------------------------------
-# Test functions.
-#------------------------------------------------------------------------------
+# Test functions
+# --------------
+
function test_functions {
IMAGE=$(get_image_id)
echo $IMAGE
@@ -400,9 +374,9 @@
echo $NETWORK_ID
}
-#------------------------------------------------------------------------------
-# Usage and main.
-#------------------------------------------------------------------------------
+# Usage and main
+# --------------
+
usage() {
echo "$0: [-h]"
echo " -h, --help Display help message"
@@ -473,10 +447,9 @@
fi
}
+# Kick off script
+# ---------------
-#-------------------------------------------------------------------------------
-# Kick off script.
-#-------------------------------------------------------------------------------
echo $*
main $*
diff --git a/exercises/volumes.sh b/exercises/volumes.sh
index 48a976e..5c5e0e4 100755
--- a/exercises/volumes.sh
+++ b/exercises/volumes.sh
@@ -192,7 +192,7 @@
# Delete volume
start_time=`date +%s`
cinder delete $VOL_ID || die "Failure deleting volume $VOL_NAME"
-if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME; do sleep 1; done"; then
+if ! timeout $ACTIVE_TIMEOUT sh -c "while cinder list | grep $VOL_NAME; do sleep 1; done"; then
echo "Volume $VOL_NAME not deleted"
exit 1
fi
diff --git a/files/apts/baremetal b/files/apts/baremetal
new file mode 100644
index 0000000..54e76e0
--- /dev/null
+++ b/files/apts/baremetal
@@ -0,0 +1,9 @@
+busybox
+dnsmasq
+gcc
+ipmitool
+make
+open-iscsi
+qemu-kvm
+syslinux
+tgt
diff --git a/files/apts/general b/files/apts/general
index 12a92e0..0264066 100644
--- a/files/apts/general
+++ b/files/apts/general
@@ -6,7 +6,7 @@
unzip
wget
psmisc
-git-core
+git
lsof # useful when debugging
openssh-server
vim-nox
diff --git a/files/apts/nova b/files/apts/nova
index c16a708..b7d1e92 100644
--- a/files/apts/nova
+++ b/files/apts/nova
@@ -16,6 +16,7 @@
sudo
kvm
libvirt-bin # NOPRIME
+libjs-jquery-tablesorter # Needed for coverage html reports
vlan
curl
rabbitmq-server # NOPRIME
diff --git a/files/keystone_data.sh b/files/keystone_data.sh
index 71a8e5e..4c76c9b 100755
--- a/files/keystone_data.sh
+++ b/files/keystone_data.sh
@@ -5,7 +5,6 @@
# Tenant User Roles
# ------------------------------------------------------------------
# service glance admin
-# service quantum admin # if enabled
# service swift admin # if enabled
# service heat admin # if enabled
# service ceilometer admin # if enabled
@@ -148,30 +147,6 @@
fi
fi
-if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then
- QUANTUM_USER=$(get_id keystone user-create \
- --name=quantum \
- --pass="$SERVICE_PASSWORD" \
- --tenant_id $SERVICE_TENANT \
- --email=quantum@example.com)
- keystone user-role-add \
- --tenant_id $SERVICE_TENANT \
- --user_id $QUANTUM_USER \
- --role_id $ADMIN_ROLE
- if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
- QUANTUM_SERVICE=$(get_id keystone service-create \
- --name=quantum \
- --type=network \
- --description="Quantum Service")
- keystone endpoint-create \
- --region RegionOne \
- --service_id $QUANTUM_SERVICE \
- --publicurl "http://$SERVICE_HOST:9696/" \
- --adminurl "http://$SERVICE_HOST:9696/" \
- --internalurl "http://$SERVICE_HOST:9696/"
- fi
-fi
-
if [[ "$ENABLED_SERVICES" =~ "ceilometer" ]]; then
CEILOMETER_USER=$(get_id keystone user-create --name=ceilometer \
--pass="$SERVICE_PASSWORD" \
diff --git a/functions b/functions
index 3bf0655..80e1796 100644
--- a/functions
+++ b/functions
@@ -73,7 +73,6 @@
set +o xtrace
local evar=$1; shift
if ! is_set $evar || [ $exitcode != 0 ]; then
- set +o xtrace
echo $@
exit -1
fi
@@ -660,10 +659,8 @@
if [[ "$os_PACKAGE" = "deb" ]]; then
dpkg -l "$@" > /dev/null
- return $?
elif [[ "$os_PACKAGE" = "rpm" ]]; then
rpm --quiet -q "$@"
- return $?
else
exit_distro_not_supported "finding if a package is installed"
fi
@@ -674,10 +671,7 @@
# is_set env-var
function is_set() {
local var=\$"$1"
- if eval "[ -z \"$var\" ]"; then
- return 1
- fi
- return 0
+    eval "[ -n \"$var\" ]" # For example, sh -c "[ -n \"$var\" ]" would be better, but several exercises depend on this
}
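+
+# Illustrative usage (not part of this patch):
+#   is_set SERVICE_HOST || die "SERVICE_HOST must be set"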
@@ -983,11 +977,9 @@
if [[ -z "$DATABASE_BACKENDS" ]]; then
# The backends haven't initialized yet, just save the selection for now
DATABASE_TYPE=$1
- return
+ else
+ use_exclusive_service DATABASE_BACKENDS DATABASE_TYPE $1
fi
- use_exclusive_service DATABASE_BACKENDS DATABASE_TYPE $1 && return 0
- ret=$?
- return $ret
}
# Toggle enable/disable_service for services that must run exclusive of each other
@@ -1006,6 +998,14 @@
return 0
}
+# Wait for an HTTP server to start answering requests
+# wait_for_service timeout url
+function wait_for_service() {
+ local timeout=$1
+ local url=$2
+ timeout $timeout sh -c "while ! http_proxy= https_proxy= curl -s $url >/dev/null; do sleep 1; done"
+}
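+
+# Illustrative usage (see start_nova_api in lib/nova for a real caller):
+#   wait_for_service $SERVICE_TIMEOUT http://$SERVICE_HOST:$service_port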
+
# Wrapper for ``yum`` to set proxy environment variables
 # Uses globals ``OFFLINE``, ``*_proxy``
# yum_install package [package ...]
@@ -1127,9 +1127,9 @@
# get_pip_command
function get_pip_command() {
if is_fedora; then
- echo "/usr/bin/pip-python"
+ which pip-python
else
- echo "/usr/bin/pip"
+ which pip
fi
}
@@ -1143,7 +1143,6 @@
# Qpid was introduced to Ubuntu in precise, disallow it on oneiric; it is
# not in openSUSE either right now.
( ! ([[ "$DISTRO" = "oneiric" ]] || is_suse) )
- return $?
}
# Restore xtrace
diff --git a/lib/baremetal b/lib/baremetal
new file mode 100644
index 0000000..112fd6d
--- /dev/null
+++ b/lib/baremetal
@@ -0,0 +1,435 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+# This file provides devstack with the environment and utilities to
+# control nova-compute's baremetal driver.
+# It sets reasonable defaults to run within a single host,
+# using virtual machines in place of physical hardware.
+# However, by changing just a few options, devstack+baremetal can in fact
+# control physical hardware resources on the same network, if you know
+# the MAC address(es) and IPMI credentials.
+#
+# At a minimum, to enable the baremetal driver, you must set these in localrc:
+# VIRT_DRIVER=baremetal
+# ENABLED_SERVICES="$ENABLED_SERVICES,baremetal"
+#
+#
+# We utilize diskimage-builder to create a ramdisk, and then
+# baremetal driver uses that to push a disk image onto the node(s).
+#
+# Below we define various defaults which control the behavior of the
+# baremetal compute service, and inform it of the hardware it will control.
+#
+# Below that, various functions are defined, which are called by devstack
+# in the following order:
+#
+# before nova-cpu starts:
+# - prepare_baremetal_toolchain
+# - configure_baremetal_nova_dirs
+#
+# after nova and glance have started:
+#  - upload_baremetal_deploy $token
+# - create_baremetal_flavor $BM_DEPLOY_KERNEL_ID $BM_DEPLOY_RAMDISK_ID
+# - upload_baremetal_image $url $token
+# - add_baremetal_node <first_mac> <second_mac>
+
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+# Sub-driver settings
+# -------------------
+
+# sub-driver to use for kernel deployment
+# - nova.virt.baremetal.pxe.PXE
+# - nova.virt.baremetal.tilera.TILERA
+BM_DRIVER=${BM_DRIVER:-nova.virt.baremetal.pxe.PXE}
+
+# sub-driver to use for remote power management
+# - nova.virt.baremetal.fake.FakePowerManager, for manual power control
+# - nova.virt.baremetal.ipmi.Ipmi, for remote IPMI
+# - nova.virt.baremetal.tilera_pdu.Pdu, for TilePro hardware
+BM_POWER_MANAGER=${BM_POWER_MANAGER:-nova.virt.baremetal.fake.FakePowerManager}
+
+
+# These should be customized to your environment and hardware
+# -----------------------------------------------------------
+
+# whether to create a fake environment, e.g. for devstack-gate
+BM_USE_FAKE_ENV=`trueorfalse False $BM_USE_FAKE_ENV`
+
+# Extra options to pass to bm_poseur
+# change the bridge name or IP: --bridge br99 --bridge-ip 192.0.2.1
+# change the virtualization type: --engine qemu
+BM_POSEUR_EXTRA_OPTS=${BM_POSEUR_EXTRA_OPTS:-}
+
+# BM_DNSMASQ_IFACE should match FLAT_NETWORK_BRIDGE
+if [ "$BM_USE_FAKE_ENV" = "True" ]; then
+ BM_DNSMASQ_IFACE=${BM_DNSMASQ_IFACE:-br99}
+ BM_DNSMASQ_RANGE=${BM_DNSMASQ_RANGE:-192.0.2.32,192.0.2.48}
+else
+ BM_DNSMASQ_IFACE=${BM_DNSMASQ_IFACE:-eth0}
+ # if testing on a physical network,
+ # BM_DNSMASQ_RANGE must be changed to suit your network
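+    # e.g. BM_DNSMASQ_RANGE=192.168.1.65,192.168.1.80 (illustrative addresses)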
+ BM_DNSMASQ_RANGE=${BM_DNSMASQ_RANGE:-}
+fi
+
+# BM_FIRST_MAC *must* be set to the MAC address of the node you will boot.
+# This is passed to dnsmasq along with the kernel/ramdisk to
+# deploy via PXE.
+BM_FIRST_MAC=${BM_FIRST_MAC:-}
+
+# BM_SECOND_MAC is only important if the host has >1 NIC.
+BM_SECOND_MAC=${BM_SECOND_MAC:-}
+
+# Hostname for the baremetal nova-compute node, if not run on this host
+BM_HOSTNAME=${BM_HOSTNAME:-$(hostname -f)}
+
+# BM_PM_* options are only necessary if BM_POWER_MANAGER=...IPMI
+BM_PM_ADDR=${BM_PM_ADDR:-0.0.0.0}
+BM_PM_USER=${BM_PM_USER:-user}
+BM_PM_PASS=${BM_PM_PASS:-pass}
+
+# BM_FLAVOR_* options are arbitrary and not necessarily related to physical
+# hardware capacity. These can be changed if you are testing
+# BaremetalHostManager with multiple nodes and different flavors.
+BM_CPU_ARCH=${BM_CPU_ARCH:-x86_64}
+BM_FLAVOR_CPU=${BM_FLAVOR_CPU:-1}
+BM_FLAVOR_RAM=${BM_FLAVOR_RAM:-1024}
+BM_FLAVOR_ROOT_DISK=${BM_FLAVOR_ROOT_DISK:-10}
+BM_FLAVOR_EPHEMERAL_DISK=${BM_FLAVOR_EPHEMERAL_DISK:-0}
+BM_FLAVOR_SWAP=${BM_FLAVOR_SWAP:-1}
+BM_FLAVOR_NAME=${BM_FLAVOR_NAME:-bm.small}
+BM_FLAVOR_ID=${BM_FLAVOR_ID:-11}
+BM_FLAVOR_ARCH=${BM_FLAVOR_ARCH:-$BM_CPU_ARCH}
+
+
+# Below this, we set some paths and filenames.
+# Defaults are probably sufficient.
+BM_IMAGE_BUILD_DIR=${BM_IMAGE_BUILD_DIR:-$DEST/diskimage-builder}
+BM_POSEUR_DIR=${BM_POSEUR_DIR:-$DEST/bm_poseur}
+
+BM_HOST_CURRENT_KERNEL=$(uname -r)
+BM_DEPLOY_RAMDISK=${BM_DEPLOY_RAMDISK:-bm-deploy-$BM_HOST_CURRENT_KERNEL-initrd}
+BM_DEPLOY_KERNEL=${BM_DEPLOY_KERNEL:-bm-deploy-$BM_HOST_CURRENT_KERNEL-vmlinuz}
+
+# If you need to add any extra flavors to the deploy ramdisk image
+# eg, specific network drivers, specify them here
+BM_DEPLOY_FLAVOR=${BM_DEPLOY_FLAVOR:-}
+
+# set URL and version for google shell-in-a-box
+BM_SHELL_IN_A_BOX=${BM_SHELL_IN_A_BOX:-http://shellinabox.googlecode.com/files/shellinabox-2.14.tar.gz}
+
+
+# Functions
+# ---------
+
+# Check if baremetal is properly enabled
+# Returns false if VIRT_DRIVER is not baremetal, or if ENABLED_SERVICES
+# does not contain "baremetal"
+function is_baremetal() {
+ if [[ "$ENABLED_SERVICES" =~ 'baremetal' && "$VIRT_DRIVER" = 'baremetal' ]]; then
+ return 0
+ fi
+ return 1
+}
+
+# Install diskimage-builder and shell-in-a-box
+# so that we can build the deployment kernel & ramdisk
+function prepare_baremetal_toolchain() {
+ git_clone $BM_IMAGE_BUILD_REPO $BM_IMAGE_BUILD_DIR $BM_IMAGE_BUILD_BRANCH
+ git_clone $BM_POSEUR_REPO $BM_POSEUR_DIR $BM_POSEUR_BRANCH
+
+ local shellinabox_basename=$(basename $BM_SHELL_IN_A_BOX)
+ if [[ ! -e $DEST/$shellinabox_basename ]]; then
+ cd $DEST
+ wget $BM_SHELL_IN_A_BOX
+ fi
+ if [[ ! -d $DEST/${shellinabox_basename%%.tar.gz} ]]; then
+ cd $DEST
+ tar xzf $shellinabox_basename
+ fi
+ if [[ ! $(which shellinaboxd) ]]; then
+ cd $DEST/${shellinabox_basename%%.tar.gz}
+ ./configure
+ make
+ sudo make install
+ fi
+}
+
+# set up virtualized environment for devstack-gate testing
+function create_fake_baremetal_env() {
+ local bm_poseur="$BM_POSEUR_DIR/bm_poseur"
+ # TODO(deva): add support for >1 VM
+ sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS create-bridge
+ sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS create-vm
+ BM_FIRST_MAC=$(sudo $bm_poseur get-macs)
+
+ # NOTE: there is currently a limitation in baremetal driver
+ # that requires second MAC even if it is not used.
+ # Passing a fake value allows this to work.
+ # TODO(deva): remove this after driver issue is fixed.
+ BM_SECOND_MAC='12:34:56:78:90:12'
+}
+
+function cleanup_fake_baremetal_env() {
+ local bm_poseur="$BM_POSEUR_DIR/bm_poseur"
+ sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS destroy-vm
+ sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS destroy-bridge
+}
+
+# prepare various directories needed by baremetal hypervisor
+function configure_baremetal_nova_dirs() {
+ # ensure /tftpboot is prepared
+ sudo mkdir -p /tftpboot
+ sudo mkdir -p /tftpboot/pxelinux.cfg
+ sudo cp /usr/lib/syslinux/pxelinux.0 /tftpboot/
+ sudo chown -R `whoami`:libvirtd /tftpboot
+
+ # ensure $NOVA_STATE_PATH/baremetal is prepared
+ sudo mkdir -p $NOVA_STATE_PATH/baremetal
+ sudo mkdir -p $NOVA_STATE_PATH/baremetal/console
+ sudo mkdir -p $NOVA_STATE_PATH/baremetal/dnsmasq
+ sudo touch $NOVA_STATE_PATH/baremetal/dnsmasq/dnsmasq-dhcp.host
+ sudo chown -R `whoami` $NOVA_STATE_PATH/baremetal
+
+ # ensure dnsmasq is installed but not running
+ # because baremetal driver will reconfigure and restart this as needed
+ is_package_installed dnsmasq || install_package dnsmasq
+ stop_service dnsmasq
+}
+
+# build deploy kernel+ramdisk, then upload them to glance
+# this function sets BM_DEPLOY_KERNEL_ID and BM_DEPLOY_RAMDISK_ID
+function upload_baremetal_deploy() {
+ token=$1
+
+ if [ ! -e $TOP_DIR/files/$BM_DEPLOY_KERNEL -a -e /boot/vmlinuz-$BM_HOST_CURRENT_KERNEL ]; then
+ sudo cp /boot/vmlinuz-$BM_HOST_CURRENT_KERNEL $TOP_DIR/files/$BM_DEPLOY_KERNEL
+ sudo chmod a+r $TOP_DIR/files/$BM_DEPLOY_KERNEL
+ fi
+ if [ ! -e $TOP_DIR/files/$BM_DEPLOY_RAMDISK ]; then
+ $BM_IMAGE_BUILD_DIR/bin/ramdisk-image-create $BM_DEPLOY_FLAVOR deploy \
+ -o $TOP_DIR/files/$BM_DEPLOY_RAMDISK -k $BM_HOST_CURRENT_KERNEL
+ fi
+
+ # load them into glance
+ BM_DEPLOY_KERNEL_ID=$(glance \
+ --os-auth-token $token \
+ --os-image-url http://$GLANCE_HOSTPORT \
+ image-create \
+ --name $BM_DEPLOY_KERNEL \
+ --public --disk-format=aki \
+ < $TOP_DIR/files/$BM_DEPLOY_KERNEL | grep ' id ' | get_field 2)
+ BM_DEPLOY_RAMDISK_ID=$(glance \
+ --os-auth-token $token \
+ --os-image-url http://$GLANCE_HOSTPORT \
+ image-create \
+ --name $BM_DEPLOY_RAMDISK \
+ --public --disk-format=ari \
+ < $TOP_DIR/files/$BM_DEPLOY_RAMDISK | grep ' id ' | get_field 2)
+}
+
+# create a basic baremetal flavor, associated with deploy kernel & ramdisk
+#
+# Usage: create_baremetal_flavor <aki_uuid> <ari_uuid>
+function create_baremetal_flavor() {
+ aki=$1
+ ari=$2
+ nova flavor-create $BM_FLAVOR_NAME $BM_FLAVOR_ID \
+ $BM_FLAVOR_RAM $BM_FLAVOR_ROOT_DISK $BM_FLAVOR_CPU
+ nova flavor-key $BM_FLAVOR_NAME set \
+ cpu_arch=$BM_FLAVOR_ARCH \
+ deploy_kernel_id=$aki \
+ deploy_ramdisk_id=$ari
+}
+
+# pull run-time kernel/ramdisk out of disk image and load into glance
+# note that $file is currently expected to be in qcow2 format
+# Sets KERNEL_ID and RAMDISK_ID
+#
+# Usage: extract_and_upload_k_and_r_from_image $token $file
+function extract_and_upload_k_and_r_from_image() {
+ token=$1
+ file=$2
+ image_name=$(basename "$file" ".qcow2")
+
+ # this call returns the file names as "$kernel,$ramdisk"
+ out=$($BM_IMAGE_BUILD_DIR/bin/disk-image-get-kernel \
+ -x -d $TOP_DIR/files -o bm-deploy -i $file)
+ if [ $? -ne 0 ]; then
+ die "Failed to get kernel and ramdisk from $file"
+ fi
+ XTRACE=$(set +o | grep xtrace)
+ set +o xtrace
+ out=$(echo "$out" | tail -1)
+ $XTRACE
+ OUT_KERNEL=${out%%,*}
+ OUT_RAMDISK=${out##*,}
+
+ # load them into glance
+ KERNEL_ID=$(glance \
+ --os-auth-token $token \
+ --os-image-url http://$GLANCE_HOSTPORT \
+ image-create \
+ --name $image_name-kernel \
+ --public --disk-format=aki \
+ < $TOP_DIR/files/$OUT_KERNEL | grep ' id ' | get_field 2)
+ RAMDISK_ID=$(glance \
+ --os-auth-token $token \
+ --os-image-url http://$GLANCE_HOSTPORT \
+ image-create \
+ --name $image_name-initrd \
+ --public --disk-format=ari \
+ < $TOP_DIR/files/$OUT_RAMDISK | grep ' id ' | get_field 2)
+}
+
+
+# Re-implementation of devstack's "upload_image" function
+#
+# Takes the same parameters, but has some peculiarities which made it
+# easier to create a separate method, rather than complicate the logic
+# of the existing function.
+function upload_baremetal_image() {
+ local image_url=$1
+ local token=$2
+
+ # Create a directory for the downloaded image tarballs.
+ mkdir -p $FILES/images
+
+ # Downloads the image (uec ami+aki style), then extracts it.
+ IMAGE_FNAME=`basename "$image_url"`
+ if [[ ! -f $FILES/$IMAGE_FNAME || \
+ "$(stat -c "%s" $FILES/$IMAGE_FNAME)" = "0" ]]; then
+ wget -c $image_url -O $FILES/$IMAGE_FNAME
+ if [[ $? -ne 0 ]]; then
+ echo "Not found: $image_url"
+ return
+ fi
+ fi
+
+ local KERNEL=""
+ local RAMDISK=""
+ local DISK_FORMAT=""
+ local CONTAINER_FORMAT=""
+ case "$IMAGE_FNAME" in
+ *.tar.gz|*.tgz)
+ # Extract ami and aki files
+ [ "${IMAGE_FNAME%.tar.gz}" != "$IMAGE_FNAME" ] &&
+ IMAGE_NAME="${IMAGE_FNAME%.tar.gz}" ||
+ IMAGE_NAME="${IMAGE_FNAME%.tgz}"
+ xdir="$FILES/images/$IMAGE_NAME"
+ rm -Rf "$xdir";
+ mkdir "$xdir"
+ tar -zxf $FILES/$IMAGE_FNAME -C "$xdir"
+ KERNEL=$(for f in "$xdir/"*-vmlinuz* "$xdir/"aki-*/image; do
+ [ -f "$f" ] && echo "$f" && break; done; true)
+ RAMDISK=$(for f in "$xdir/"*-initrd* "$xdir/"ari-*/image; do
+ [ -f "$f" ] && echo "$f" && break; done; true)
+ IMAGE=$(for f in "$xdir/"*.img "$xdir/"ami-*/image; do
+ [ -f "$f" ] && echo "$f" && break; done; true)
+ if [[ -z "$IMAGE_NAME" ]]; then
+ IMAGE_NAME=$(basename "$IMAGE" ".img")
+ fi
+ DISK_FORMAT=ami
+ CONTAINER_FORMAT=ami
+ ;;
+ *.qcow2)
+ IMAGE="$FILES/${IMAGE_FNAME}"
+ IMAGE_NAME=$(basename "$IMAGE" ".qcow2")
+ DISK_FORMAT=qcow2
+ CONTAINER_FORMAT=bare
+ ;;
+ *) echo "Do not know what to do with $IMAGE_FNAME"; false;;
+ esac
+
+ if [ "$CONTAINER_FORMAT" = "bare" ]; then
+ extract_and_upload_k_and_r_from_image $token $IMAGE
+ elif [ "$CONTAINER_FORMAT" = "ami" ]; then
+ KERNEL_ID=$(glance \
+ --os-auth-token $token \
+ --os-image-url http://$GLANCE_HOSTPORT \
+ image-create \
+ --name "$IMAGE_NAME-kernel" --public \
+ --container-format aki \
+ --disk-format aki < "$KERNEL" | grep ' id ' | get_field 2)
+ RAMDISK_ID=$(glance \
+ --os-auth-token $token \
+ --os-image-url http://$GLANCE_HOSTPORT \
+ image-create \
+ --name "$IMAGE_NAME-ramdisk" --public \
+ --container-format ari \
+ --disk-format ari < "$RAMDISK" | grep ' id ' | get_field 2)
+ else
+ # TODO(deva): add support for other image types
+ return
+ fi
+
+ glance \
+ --os-auth-token $token \
+ --os-image-url http://$GLANCE_HOSTPORT \
+ image-create \
+ --name "${IMAGE_NAME%.img}" --public \
+ --container-format $CONTAINER_FORMAT \
+ --disk-format $DISK_FORMAT \
+ ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} \
+ ${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}"
+
+ # override DEFAULT_IMAGE_NAME so that tempest can find the image
+ # that we just uploaded in glance
+ DEFAULT_IMAGE_NAME="${IMAGE_NAME%.img}"
+}
+
+function clear_baremetal_of_all_nodes() {
+ list=$(nova-baremetal-manage node list | tail -n +2 | awk '{print $1}' )
+ for node in $list
+ do
+ nova-baremetal-manage node delete $node
+ done
+ list=$(nova-baremetal-manage interface list | tail -n +2 | awk '{print $1}' )
+ for iface in $list
+ do
+ nova-baremetal-manage interface delete $iface
+ done
+}
+
+# inform nova-baremetal about nodes, MACs, etc
+# Defaults to using BM_FIRST_MAC and BM_SECOND_MAC if parameters not specified
+#
+# Usage: add_baremetal_node <first_mac> <second_mac>
+function add_baremetal_node() {
+ mac_1=${1:-$BM_FIRST_MAC}
+ mac_2=${2:-$BM_SECOND_MAC}
+
+ id=$(nova-baremetal-manage node create \
+ --host=$BM_HOSTNAME --prov_mac=$mac_1 \
+ --cpus=$BM_FLAVOR_CPU --memory_mb=$BM_FLAVOR_RAM \
+ --local_gb=$BM_FLAVOR_ROOT_DISK --terminal_port=0 \
+ --pm_address=$BM_PM_ADDR --pm_user=$BM_PM_USER --pm_password=$BM_PM_PASS \
+ )
+ [ $? -eq 0 ] || [ "$id" ] || die "Error adding baremetal node"
+ id2=$(nova-baremetal-manage interface create \
+ --node_id=$id --mac_address=$mac_2 --datapath_id=0 --port_no=0 \
+ )
+    [ $? -eq 0 ] || [ "$id2" ] || die "Error adding interface to baremetal node $id"
+}
+
+
+# Restore xtrace
+$XTRACE
diff --git a/lib/ceilometer b/lib/ceilometer
index aa1b396..749e785 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -1,9 +1,9 @@
# lib/ceilometer
-# Install and start Ceilometer service
+# Install and start **Ceilometer** service
+
# To enable, add the following to localrc
 # ENABLED_SERVICES+=,ceilometer-acompute,ceilometer-acentral,ceilometer-collector,ceilometer-api
-
# Dependencies:
# - functions
# - OS_AUTH_URL for auth in api
@@ -12,12 +12,12 @@
# stack.sh
# ---------
-# install_XXX
-# configure_XXX
-# init_XXX
-# start_XXX
-# stop_XXX
-# cleanup_XXX
+# install_ceilometer
+# configure_ceilometer
+# init_ceilometer
+# start_ceilometer
+# stop_ceilometer
+# cleanup_ceilometer
# Save trace setting
XTRACE=$(set +o | grep xtrace)
@@ -27,17 +27,20 @@
# Defaults
# --------
-# set up default directories
+# Set up default directories
CEILOMETER_DIR=$DEST/ceilometer
+CEILOMETERCLIENT_DIR=$DEST/python-ceilometerclient
+CEILOMETER_CONF_DIR=/etc/ceilometer
+CEILOMETER_CONF=$CEILOMETER_CONF_DIR/ceilometer.conf
+CEILOMETER_API_LOG_DIR=/var/log/ceilometer-api
+CEILOMETER_AUTH_CACHE_DIR=${CEILOMETER_AUTH_CACHE_DIR:-/var/cache/ceilometer}
+
# Support potential entry-points console scripts
if [ -d $CEILOMETER_DIR/bin ] ; then
CEILOMETER_BIN_DIR=$CEILOMETER_DIR/bin
else
CEILOMETER_BIN_DIR=/usr/local/bin
fi
-CEILOMETER_CONF_DIR=/etc/ceilometer
-CEILOMETER_CONF=$CEILOMETER_CONF_DIR/ceilometer.conf
-CEILOMETER_API_LOG_DIR=/var/log/ceilometer-api
# cleanup_ceilometer() - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up
@@ -45,6 +48,11 @@
mongo ceilometer --eval "db.dropDatabase();"
}
+# configure_ceilometerclient() - Set config files, create data dirs, etc
+function configure_ceilometerclient() {
+ setup_develop $CEILOMETERCLIENT_DIR
+}
+
# configure_ceilometer() - Set config files, create data dirs, etc
function configure_ceilometer() {
setup_develop $CEILOMETER_DIR
@@ -77,15 +85,29 @@
iniset $CEILOMETER_CONF keystone_authtoken admin_user ceilometer
iniset $CEILOMETER_CONF keystone_authtoken admin_password $SERVICE_PASSWORD
iniset $CEILOMETER_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
+ iniset $CEILOMETER_CONF keystone_authtoken signing_dir $CEILOMETER_AUTH_CACHE_DIR
cleanup_ceilometer
}
+# init_ceilometer() - Initialize etc.
+function init_ceilometer() {
+ # Create cache dir
+ sudo mkdir -p $CEILOMETER_AUTH_CACHE_DIR
+ sudo chown `whoami` $CEILOMETER_AUTH_CACHE_DIR
+ rm -f $CEILOMETER_AUTH_CACHE_DIR/*
+}
+
# install_ceilometer() - Collect source and prepare
function install_ceilometer() {
git_clone $CEILOMETER_REPO $CEILOMETER_DIR $CEILOMETER_BRANCH
}
+# install_ceilometerclient() - Collect source and prepare
+function install_ceilometerclient() {
+ git_clone $CEILOMETERCLIENT_REPO $CEILOMETERCLIENT_DIR $CEILOMETERCLIENT_BRANCH
+}
+
# start_ceilometer() - Start running processes, including screen
function start_ceilometer() {
screen_it ceilometer-acompute "cd $CEILOMETER_DIR && sg libvirtd \"$CEILOMETER_BIN_DIR/ceilometer-agent-compute --config-file $CEILOMETER_CONF\""
diff --git a/lib/cinder b/lib/cinder
index 2b2f8f1..4aaea5d 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -1,5 +1,5 @@
# lib/cinder
-# Install and start Cinder volume service
+# Install and start **Cinder** volume service
# Dependencies:
# - functions
@@ -48,11 +48,58 @@
VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes}
VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-}
+# _clean_volume_group removes all cinder volumes from the specified volume group
+# _clean_volume_group $VOLUME_GROUP $VOLUME_NAME_PREFIX
+function _clean_volume_group() {
+ local vg=$1
+ local vg_prefix=$2
+ # Clean out existing volumes
+ for lv in `sudo lvs --noheadings -o lv_name $vg`; do
+ # vg_prefix prefixes the LVs we want
+ if [[ "${lv#$vg_prefix}" != "$lv" ]]; then
+ sudo lvremove -f $vg/$lv
+ fi
+ done
+}
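+
+# Illustrative call (with the default group and prefix above):
+#   _clean_volume_group stack-volumes volume-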
+
# cleanup_cinder() - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up
function cleanup_cinder() {
- # This function intentionally left blank
- :
+    # ensure the volume group is cleared up because failed runs might
+    # leave dead volumes in the group
+ TARGETS=$(sudo tgtadm --op show --mode target)
+ if [ $? -ne 0 ]; then
+        # If the tgt driver isn't running this obviously won't work,
+        # so check the response and restart if need be
+ echo "tgtd seems to be in a bad state, restarting..."
+ if is_ubuntu; then
+ restart_service tgt
+ else
+ restart_service tgtd
+ fi
+ TARGETS=$(sudo tgtadm --op show --mode target)
+ fi
+
+ if [[ -n "$TARGETS" ]]; then
+ iqn_list=( $(grep --no-filename -r iqn $SCSI_PERSIST_DIR | sed 's/<target //' | sed 's/>//') )
+ for i in "${iqn_list[@]}"; do
+ echo removing iSCSI target: $i
+ sudo tgt-admin --delete $i
+ done
+ fi
+
+ if is_service_enabled cinder; then
+ sudo rm -rf $CINDER_STATE_PATH/volumes/*
+ fi
+
+ if is_ubuntu; then
+ stop_service tgt
+ else
+ stop_service tgtd
+ fi
+
+ # Campsite rule: leave behind a volume group at least as clean as we found it
+ _clean_volume_group $VOLUME_GROUP $VOLUME_NAME_PREFIX
}
# configure_cinder() - Set config files, create data dirs, etc
@@ -137,14 +184,15 @@
if [[ "$CINDER_SECURE_DELETE" == "False" ]]; then
iniset $CINDER_CONF DEFAULT secure_delete False
+ iniset $CINDER_CONF DEFAULT volume_clear none
fi
if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then
# Add color to logging output
- iniset $CINDER_CONF DEFAULT logging_context_format_string "%(asctime)s %(color)s%(levelname)s %(name)s [[01;36m%(request_id)s [00;36m%(user_id)s %(project_id)s%(color)s] [01;35m%(instance)s%(color)s%(message)s[00m"
- iniset $CINDER_CONF DEFAULT logging_default_format_string "%(asctime)s %(color)s%(levelname)s %(name)s [[00;36m-%(color)s] [01;35m%(instance)s%(color)s%(message)s[00m"
+ iniset $CINDER_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)d %(color)s%(levelname)s %(name)s [[01;36m%(request_id)s [00;36m%(user_id)s %(project_id)s%(color)s] [01;35m%(instance)s%(color)s%(message)s[00m"
+ iniset $CINDER_CONF DEFAULT logging_default_format_string "%(asctime)s.%(msecs)d %(color)s%(levelname)s %(name)s [[00;36m-%(color)s] [01;35m%(instance)s%(color)s%(message)s[00m"
iniset $CINDER_CONF DEFAULT logging_debug_format_suffix "[00;33mfrom (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d[00m"
- iniset $CINDER_CONF DEFAULT logging_exception_prefix "%(color)s%(asctime)s TRACE %(name)s [01;35m%(instance)s[00m"
+ iniset $CINDER_CONF DEFAULT logging_exception_prefix "%(color)s%(asctime)s.%(msecs)d TRACE %(name)s [01;35m%(instance)s[00m"
fi
if [ "$CINDER_DRIVER" == "XenAPINFS" ]; then
@@ -240,13 +288,8 @@
# Remove iscsi targets
sudo tgtadm --op show --mode target | grep $VOLUME_NAME_PREFIX | grep Target | cut -f3 -d ' ' | sudo xargs -n1 tgt-admin --delete || true
- # Clean out existing volumes
- for lv in `sudo lvs --noheadings -o lv_name $VOLUME_GROUP`; do
- # VOLUME_NAME_PREFIX prefixes the LVs we want
- if [[ "${lv#$VOLUME_NAME_PREFIX}" != "$lv" ]]; then
- sudo lvremove -f $VOLUME_GROUP/$lv
- fi
- done
+ # Start with a clean volume group
+ _clean_volume_group $VOLUME_GROUP $VOLUME_NAME_PREFIX
fi
fi
diff --git a/lib/databases/mysql b/lib/databases/mysql
index 68e9adc..1c0f5eb 100644
--- a/lib/databases/mysql
+++ b/lib/databases/mysql
@@ -1,5 +1,5 @@
-# lib/mysql
-# Functions to control the configuration and operation of the MySQL database backend
+# lib/databases/mysql
+# Functions to control the configuration and operation of the **MySQL** database backend
# Dependencies:
# DATABASE_{HOST,USER,PASSWORD} must be defined
diff --git a/lib/databases/postgresql b/lib/databases/postgresql
index e1463c5..04db714 100644
--- a/lib/databases/postgresql
+++ b/lib/databases/postgresql
@@ -1,5 +1,5 @@
-# lib/postgresql
-# Functions to control the configuration and operation of the PostgreSQL database backend
+# lib/databases/postgresql
+# Functions to control the configuration and operation of the **PostgreSQL** database backend
# Dependencies:
# DATABASE_{HOST,USER,PASSWORD} must be defined
diff --git a/lib/glance b/lib/glance
index 8ba04b3..dff247a 100644
--- a/lib/glance
+++ b/lib/glance
@@ -1,5 +1,5 @@
# lib/glance
-# Functions to control the configuration and operation of the Glance service
+# Functions to control the configuration and operation of the **Glance** service
# Dependencies:
# ``functions`` file
@@ -25,8 +25,6 @@
# Defaults
# --------
-# <define global variables here that belong to this project>
-
# Set up default directories
GLANCE_DIR=$DEST/glance
GLANCECLIENT_DIR=$DEST/python-glanceclient
@@ -141,7 +139,6 @@
iniset $GLANCE_CACHE_CONF DEFAULT admin_password $SERVICE_PASSWORD
cp -p $GLANCE_DIR/etc/policy.json $GLANCE_POLICY_JSON
-
}
# init_glance() - Initialize databases, etc.
diff --git a/lib/heat b/lib/heat
index 43115cb..a6f7286 100644
--- a/lib/heat
+++ b/lib/heat
@@ -1,5 +1,6 @@
# lib/heat
-# Install and start Heat service
+# Install and start **Heat** service
+
# To enable, add the following to localrc
# ENABLED_SERVICES+=,heat,h-api-cfn,h-api-cw,h-eng
@@ -8,12 +9,14 @@
# stack.sh
# ---------
-# install_XXX
-# configure_XXX
-# init_XXX
-# start_XXX
-# stop_XXX
-# cleanup_XXX
+# install_heatclient
+# install_heat
+# configure_heatclient
+# configure_heat
+# init_heat
+# start_heat
+# stop_heat
+# cleanup_heat
# Save trace setting
XTRACE=$(set +o | grep xtrace)
@@ -57,7 +60,7 @@
HEAT_API_HOST=${HEAT_API_HOST:-$SERVICE_HOST}
HEAT_API_PORT=${HEAT_API_PORT:-8004}
- # cloudformation api
+    # CloudFormation API
HEAT_API_CFN_CONF=$HEAT_CONF_DIR/heat-api-cfn.conf
cp $HEAT_DIR/etc/heat/heat-api-cfn.conf $HEAT_API_CFN_CONF
iniset $HEAT_API_CFN_CONF DEFAULT debug True
@@ -86,7 +89,7 @@
iniset $HEAT_API_CFN_PASTE_INI filter:ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0
iniset $HEAT_API_CFN_PASTE_INI filter:ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens
- # openstack api
+ # OpenStack API
HEAT_API_CONF=$HEAT_CONF_DIR/heat-api.conf
cp $HEAT_DIR/etc/heat/heat-api.conf $HEAT_API_CONF
iniset $HEAT_API_CONF DEFAULT debug True
@@ -139,7 +142,7 @@
iniset $HEAT_ENGINE_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_qpid
fi
- # cloudwatch api
+    # CloudWatch API
HEAT_API_CW_CONF=$HEAT_CONF_DIR/heat-api-cloudwatch.conf
cp $HEAT_DIR/etc/heat/heat-api-cloudwatch.conf $HEAT_API_CW_CONF
iniset $HEAT_API_CW_CONF DEFAULT debug True
diff --git a/lib/keystone b/lib/keystone
index 4dddedb..34f3372 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -8,7 +8,6 @@
# ``SERVICE_TOKEN``
# ``S3_SERVICE_PORT`` (template backend only)
-
# ``stack.sh`` calls the entry points in this order:
#
# install_keystone
@@ -27,8 +26,6 @@
# Defaults
# --------
-# <define global variables here that belong to this project>
-
# Set up default directories
KEYSTONE_DIR=$DEST/keystone
KEYSTONE_CONF_DIR=${KEYSTONE_CONF_DIR:-/etc/keystone}
diff --git a/lib/nova b/lib/nova
index e07e61c..781cc09 100644
--- a/lib/nova
+++ b/lib/nova
@@ -1,5 +1,5 @@
# lib/nova
-# Functions to control the configuration and operation of the XXXX service
+# Functions to control the configuration and operation of the **Nova** service
# Dependencies:
# ``functions`` file
@@ -39,6 +39,12 @@
NOVA_CONF=$NOVA_CONF_DIR/nova.conf
NOVA_API_PASTE_INI=${NOVA_API_PASTE_INI:-$NOVA_CONF_DIR/api-paste.ini}
+# Public facing bits
+NOVA_SERVICE_HOST=${NOVA_SERVICE_HOST:-$SERVICE_HOST}
+NOVA_SERVICE_PORT=${NOVA_SERVICE_PORT:-8774}
+NOVA_SERVICE_PORT_INT=${NOVA_SERVICE_PORT_INT:-18774}
+NOVA_SERVICE_PROTOCOL=${NOVA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
+
# Support entry points installation of console scripts
if [[ -d $NOVA_DIR/bin ]]; then
NOVA_BIN_DIR=$NOVA_DIR/bin
@@ -170,6 +176,10 @@
s,%SERVICE_TENANT_NAME%,$SERVICE_TENANT_NAME,g;
s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g;
" -i $NOVA_API_PASTE_INI
+ iniset $NOVA_API_PASTE_INI filter:authtoken auth_host $SERVICE_HOST
+ if is_service_enabled tls-proxy; then
+ iniset $NOVA_API_PASTE_INI filter:authtoken auth_protocol $SERVICE_PROTOCOL
+ fi
fi
iniset $NOVA_API_PASTE_INI filter:authtoken signing_dir $NOVA_AUTH_CACHE_DIR
@@ -214,6 +224,11 @@
fi
fi
+ # Prepare directories and packages for baremetal driver
+ if is_baremetal; then
+ configure_baremetal_nova_dirs
+ fi
+
if is_service_enabled quantum && is_quantum_ovs_base_plugin "$Q_PLUGIN" && ! sudo grep -q '^cgroup_device_acl' $QEMU_CONF ; then
# Add /dev/net/tun to cgroup_device_acls, needed for type=ethernet interfaces
cat <<EOF | sudo tee -a $QEMU_CONF
@@ -324,9 +339,9 @@
keystone endpoint-create \
--region RegionOne \
--service_id $NOVA_SERVICE \
- --publicurl "http://$SERVICE_HOST:\$(compute_port)s/v2/\$(tenant_id)s" \
- --adminurl "http://$SERVICE_HOST:\$(compute_port)s/v2/\$(tenant_id)s" \
- --internalurl "http://$SERVICE_HOST:\$(compute_port)s/v2/\$(tenant_id)s"
+ --publicurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" \
+ --adminurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" \
+ --internalurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s"
fi
fi
}
@@ -339,64 +354,73 @@
# (Re)create ``nova.conf``
rm -f $NOVA_CONF
add_nova_opt "[DEFAULT]"
- add_nova_opt "verbose=True"
- add_nova_opt "auth_strategy=keystone"
- add_nova_opt "allow_resize_to_same_host=True"
- add_nova_opt "api_paste_config=$NOVA_API_PASTE_INI"
- add_nova_opt "rootwrap_config=$NOVA_CONF_DIR/rootwrap.conf"
- add_nova_opt "compute_scheduler_driver=$SCHEDULER"
- add_nova_opt "dhcpbridge_flagfile=$NOVA_CONF"
- add_nova_opt "force_dhcp_release=True"
- add_nova_opt "fixed_range=$FIXED_RANGE"
- add_nova_opt "s3_host=$SERVICE_HOST"
- add_nova_opt "s3_port=$S3_SERVICE_PORT"
- add_nova_opt "osapi_compute_extension=nova.api.openstack.compute.contrib.standard_extensions"
- add_nova_opt "my_ip=$HOST_IP"
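+    # ``iniset file section option value`` sets the option under the given
+    # section of the target config file (here replacing the append-only
+    # add_nova_opt helper)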
+ iniset $NOVA_CONF DEFAULT verbose "True"
+ iniset $NOVA_CONF DEFAULT auth_strategy "keystone"
+ iniset $NOVA_CONF DEFAULT allow_resize_to_same_host "True"
+ iniset $NOVA_CONF DEFAULT api_paste_config "$NOVA_API_PASTE_INI"
+ iniset $NOVA_CONF DEFAULT rootwrap_config "$NOVA_CONF_DIR/rootwrap.conf"
+ iniset $NOVA_CONF DEFAULT compute_scheduler_driver "$SCHEDULER"
+ iniset $NOVA_CONF DEFAULT dhcpbridge_flagfile "$NOVA_CONF"
+ iniset $NOVA_CONF DEFAULT force_dhcp_release "True"
+ iniset $NOVA_CONF DEFAULT fixed_range "$FIXED_RANGE"
+ iniset $NOVA_CONF DEFAULT default_floating_pool "$PUBLIC_NETWORK_NAME"
+ iniset $NOVA_CONF DEFAULT s3_host "$SERVICE_HOST"
+ iniset $NOVA_CONF DEFAULT s3_port "$S3_SERVICE_PORT"
+ iniset $NOVA_CONF DEFAULT osapi_compute_extension "nova.api.openstack.compute.contrib.standard_extensions"
+ iniset $NOVA_CONF DEFAULT my_ip "$HOST_IP"
local dburl
database_connection_url dburl nova
- add_nova_opt "sql_connection=$dburl"
- add_nova_opt "libvirt_type=$LIBVIRT_TYPE"
- add_nova_opt "libvirt_cpu_mode=none"
- add_nova_opt "instance_name_template=${INSTANCE_NAME_PREFIX}%08x"
+ iniset $NOVA_CONF DEFAULT sql_connection "$dburl"
+ if is_baremetal; then
+ database_connection_url dburl nova_bm
+ iniset $NOVA_CONF baremetal sql_connection $dburl
+ fi
+ iniset $NOVA_CONF DEFAULT libvirt_type "$LIBVIRT_TYPE"
+ iniset $NOVA_CONF DEFAULT libvirt_cpu_mode "none"
+ iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x"
if is_service_enabled n-api; then
- add_nova_opt "enabled_apis=$NOVA_ENABLED_APIS"
+ iniset $NOVA_CONF DEFAULT enabled_apis "$NOVA_ENABLED_APIS"
+ if is_service_enabled tls-proxy; then
+ # Set the service port for a proxy to take the original
+ iniset $NOVA_CONF DEFAULT osapi_compute_listen_port "$NOVA_SERVICE_PORT_INT"
+ fi
fi
if is_service_enabled cinder; then
- add_nova_opt "volume_api_class=nova.volume.cinder.API"
+ iniset $NOVA_CONF DEFAULT volume_api_class "nova.volume.cinder.API"
fi
if [ -n "$NOVA_STATE_PATH" ]; then
- add_nova_opt "state_path=$NOVA_STATE_PATH"
- add_nova_opt "lock_path=$NOVA_STATE_PATH"
+ iniset $NOVA_CONF DEFAULT state_path "$NOVA_STATE_PATH"
+ iniset $NOVA_CONF DEFAULT lock_path "$NOVA_STATE_PATH"
fi
if [ -n "$NOVA_INSTANCES_PATH" ]; then
- add_nova_opt "instances_path=$NOVA_INSTANCES_PATH"
+ iniset $NOVA_CONF DEFAULT instances_path "$NOVA_INSTANCES_PATH"
fi
if [ "$MULTI_HOST" != "False" ]; then
- add_nova_opt "multi_host=True"
- add_nova_opt "send_arp_for_ha=True"
+ iniset $NOVA_CONF DEFAULT multi_host "True"
+ iniset $NOVA_CONF DEFAULT send_arp_for_ha "True"
fi
if [ "$SYSLOG" != "False" ]; then
- add_nova_opt "use_syslog=True"
+ iniset $NOVA_CONF DEFAULT use_syslog "True"
fi
if [ "$API_RATE_LIMIT" != "True" ]; then
- add_nova_opt "api_rate_limit=False"
+ iniset $NOVA_CONF DEFAULT api_rate_limit "False"
fi
if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then
# Add color to logging output
- add_nova_opt "logging_context_format_string=%(asctime)s %(color)s%(levelname)s %(name)s [[01;36m%(request_id)s [00;36m%(user_name)s %(project_name)s%(color)s] [01;35m%(instance)s%(color)s%(message)s[00m"
- add_nova_opt "logging_default_format_string=%(asctime)s %(color)s%(levelname)s %(name)s [[00;36m-%(color)s] [01;35m%(instance)s%(color)s%(message)s[00m"
- add_nova_opt "logging_debug_format_suffix=[00;33mfrom (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d[00m"
- add_nova_opt "logging_exception_prefix=%(color)s%(asctime)s TRACE %(name)s [01;35m%(instance)s[00m"
+ iniset $NOVA_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)d %(color)s%(levelname)s %(name)s [[01;36m%(request_id)s [00;36m%(user_name)s %(project_name)s%(color)s] [01;35m%(instance)s%(color)s%(message)s[00m"
+ iniset $NOVA_CONF DEFAULT logging_default_format_string "%(asctime)s.%(msecs)d %(color)s%(levelname)s %(name)s [[00;36m-%(color)s] [01;35m%(instance)s%(color)s%(message)s[00m"
+ iniset $NOVA_CONF DEFAULT logging_debug_format_suffix "[00;33mfrom (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d[00m"
+ iniset $NOVA_CONF DEFAULT logging_exception_prefix "%(color)s%(asctime)s.%(msecs)d TRACE %(name)s [01;35m%(instance)s[00m"
else
# Show user_name and project_name instead of user_id and project_id
- add_nova_opt "logging_context_format_string=%(asctime)s %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s"
+ iniset $NOVA_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)d %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s"
fi
if is_service_enabled ceilometer; then
- add_nova_opt "instance_usage_audit=True"
- add_nova_opt "instance_usage_audit_period=hour"
- add_nova_opt "notification_driver=nova.openstack.common.notifier.rabbit_notifier"
- add_nova_opt "notification_driver=ceilometer.compute.nova_notifier"
+ iniset $NOVA_CONF DEFAULT instance_usage_audit "True"
+ iniset $NOVA_CONF DEFAULT instance_usage_audit_period "hour"
+ iniset $NOVA_CONF DEFAULT notification_driver "nova.openstack.common.notifier.rpc_notifier"
+ iniset $NOVA_CONF DEFAULT notification_driver "ceilometer.compute.nova_notifier"
fi
@@ -409,10 +433,20 @@
# For Example: ``EXTRA_OPTS=(foo=true bar=2)``
for I in "${EXTRA_OPTS[@]}"; do
# Attempt to convert flags to options
- add_nova_opt ${I//--}
+ iniset $NOVA_CONF DEFAULT ${I//=/ }
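+        # e.g. an entry foo=true becomes: iniset $NOVA_CONF DEFAULT foo true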
done
}
+function create_nova_conf_nova_network() {
+ iniset $NOVA_CONF DEFAULT network_manager "nova.network.manager.$NET_MAN"
+ iniset $NOVA_CONF DEFAULT public_interface "$PUBLIC_INTERFACE"
+ iniset $NOVA_CONF DEFAULT vlan_interface "$VLAN_INTERFACE"
+ iniset $NOVA_CONF DEFAULT flat_network_bridge "$FLAT_NETWORK_BRIDGE"
+ if [ -n "$FLAT_INTERFACE" ]; then
+ iniset $NOVA_CONF DEFAULT flat_interface "$FLAT_INTERFACE"
+ fi
+}
+
# init_nova() - Initialize databases, etc.
function init_nova() {
# Nova Database
@@ -421,7 +455,7 @@
# All nova components talk to a central database. We will need to do this step
# only once for an entire cluster.
- if is_service_enabled $DATABASE_BACKENDS && is_service_enabled nova; then
+ if is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-api; then
# (Re)create nova database
# Explicitly use latin1: to avoid lp#829209, nova expects the database to
# use latin1 by default, and then upgrades the database to utf8 (see the
@@ -430,12 +464,23 @@
# (Re)create nova database
$NOVA_BIN_DIR/nova-manage db sync
+
+ # (Re)create nova baremetal database
+ if is_baremetal; then
+ recreate_database nova_bm latin1
+ $NOVA_BIN_DIR/nova-baremetal-manage db sync
+ fi
fi
# Create cache dir
sudo mkdir -p $NOVA_AUTH_CACHE_DIR
sudo chown `whoami` $NOVA_AUTH_CACHE_DIR
rm -f $NOVA_AUTH_CACHE_DIR/*
+
+ # Create the keys folder
+ sudo mkdir -p ${NOVA_STATE_PATH}/keys
+ # make sure we own NOVA_STATE_PATH and all subdirs
+ sudo chown -R `whoami` ${NOVA_STATE_PATH}
}
# install_novaclient() - Collect source and prepare
@@ -473,6 +518,27 @@
git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH
}
+# start_nova_api() - Start the API process ahead of other things
+function start_nova_api() {
+ # Get right service port for testing
+ local service_port=$NOVA_SERVICE_PORT
+ if is_service_enabled tls-proxy; then
+ service_port=$NOVA_SERVICE_PORT_INT
+ fi
+
+ screen_it n-api "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api"
+ echo "Waiting for nova-api to start..."
+ if ! wait_for_service $SERVICE_TIMEOUT http://$SERVICE_HOST:$service_port; then
+ echo "nova-api did not start"
+ exit 1
+ fi
+
+ # Start proxies if enabled
+ if is_service_enabled tls-proxy; then
+ start_tls_proxy '*' $NOVA_SERVICE_PORT $NOVA_SERVICE_HOST $NOVA_SERVICE_PORT_INT &
+ fi
+}
+
# start_nova() - Start running processes, including screen
function start_nova() {
# The group **libvirtd** is added to the current user in this script.
diff --git a/lib/quantum b/lib/quantum
index 480aaa1..f74eead 100644
--- a/lib/quantum
+++ b/lib/quantum
@@ -5,6 +5,36 @@
# ``functions`` file
# ``DEST`` must be defined
+# ``stack.sh`` calls the entry points in this order:
+#
+# install_quantum
+# install_quantumclient
+# install_quantum_agent_packages
+# install_quantum_third_party
+# setup_quantum
+# setup_quantumclient
+# configure_quantum
+# init_quantum
+# configure_quantum_third_party
+# init_quantum_third_party
+# start_quantum_third_party
+# create_nova_conf_quantum
+# start_quantum_service_and_check
+# create_quantum_initial_network
+# setup_quantum_debug
+# start_quantum_agents
+#
+# ``unstack.sh`` calls the entry points in this order:
+#
+# stop_quantum
+
+# Functions in lib/quantum are classified into the following categories:
+#
+# - entry points (called from stack.sh or unstack.sh)
+# - internal functions
+# - quantum exercises
+# - 3rd party programs
+
# Quantum Networking
# ------------------
@@ -31,8 +61,8 @@
set +o xtrace
-# Defaults
-# --------
+# Quantum Network Configuration
+# -----------------------------
# Set up default directories
QUANTUM_DIR=$DEST/quantum
@@ -49,7 +79,6 @@
Q_PORT=${Q_PORT:-9696}
# Default Quantum Host
Q_HOST=${Q_HOST:-$HOST_IP}
-# Which Quantum API nova should use
# Default admin username
Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-quantum}
# Default auth strategy
@@ -59,6 +88,8 @@
Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP:-True}
# Meta data IP
Q_META_DATA_IP=${Q_META_DATA_IP:-$HOST_IP}
+# Allow Overlapping IP among subnets
+Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-False}
# Use quantum-debug command
Q_USE_DEBUG_COMMAND=${Q_USE_DEBUG_COMMAND:-False}
@@ -70,14 +101,587 @@
QUANTUM_ROOTWRAP=$(get_rootwrap_location quantum)
Q_RR_COMMAND="sudo $QUANTUM_ROOTWRAP $Q_RR_CONF_FILE"
fi
-fi
+ # Provider Network Configurations
+ # --------------------------------
+
+ # The following variables control the Quantum openvswitch and
+ # linuxbridge plugins' allocation of tenant networks and
+ # availability of provider networks. If these are not configured
+ # in localrc, tenant networks will be local to the host (with no
+ # remote connectivity), and no physical resources will be
+ # available for the allocation of provider networks.
+
+ # To use GRE tunnels for tenant networks, set to True in
+ # localrc. GRE tunnels are only supported by the openvswitch
+ # plugin, and currently only on Ubuntu.
+ ENABLE_TENANT_TUNNELS=${ENABLE_TENANT_TUNNELS:-False}
+
+ # If using GRE tunnels for tenant networks, specify the range of
+ # tunnel IDs from which tenant networks are allocated. Can be
+    # overridden in localrc if necessary.
+    TENANT_TUNNEL_RANGES=${TENANT_TUNNEL_RANGES:-1:1000}
+
+ # To use VLANs for tenant networks, set to True in localrc. VLANs
+ # are supported by the openvswitch and linuxbridge plugins, each
+ # requiring additional configuration described below.
+ ENABLE_TENANT_VLANS=${ENABLE_TENANT_VLANS:-False}
+
+ # If using VLANs for tenant networks, set in localrc to specify
+ # the range of VLAN VIDs from which tenant networks are
+ # allocated. An external network switch must be configured to
+ # trunk these VLANs between hosts for multi-host connectivity.
+ #
+ # Example: ``TENANT_VLAN_RANGE=1000:1999``
+ TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-}
+
+ # If using VLANs for tenant networks, or if using flat or VLAN
+ # provider networks, set in localrc to the name of the physical
+ # network, and also configure OVS_PHYSICAL_BRIDGE for the
+ # openvswitch agent or LB_PHYSICAL_INTERFACE for the linuxbridge
+ # agent, as described below.
+ #
+ # Example: ``PHYSICAL_NETWORK=default``
+ PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-}
+
+ # With the openvswitch plugin, if using VLANs for tenant networks,
+ # or if using flat or VLAN provider networks, set in localrc to
+ # the name of the OVS bridge to use for the physical network. The
+ # bridge will be created if it does not already exist, but a
+ # physical interface must be manually added to the bridge as a
+ # port for external connectivity.
+ #
+ # Example: ``OVS_PHYSICAL_BRIDGE=br-eth1``
+ OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-}
+
+ # With the linuxbridge plugin, if using VLANs for tenant networks,
+ # or if using flat or VLAN provider networks, set in localrc to
+ # the name of the network interface to use for the physical
+ # network.
+ #
+ # Example: ``LB_PHYSICAL_INTERFACE=eth1``
+ LB_PHYSICAL_INTERFACE=${LB_PHYSICAL_INTERFACE:-}
+
+ # With the openvswitch plugin, set to True in localrc to enable
+ # provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False.
+ #
+ # Example: ``OVS_ENABLE_TUNNELING=True``
+ OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-$ENABLE_TENANT_TUNNELS}
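+
+    # A hypothetical localrc for VLAN tenant networks, using the example
+    # values from the comments above:
+    #   ENABLE_TENANT_VLANS=True
+    #   TENANT_VLAN_RANGE=1000:1999
+    #   PHYSICAL_NETWORK=default
+    #   OVS_PHYSICAL_BRIDGE=br-eth1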
+fi
# Entry Points
# ------------
-# configure_quantum_rootwrap() - configure Quantum's rootwrap
-function configure_quantum_rootwrap() {
+# configure_quantum()
+# Set common config for all quantum server and agents.
+function configure_quantum() {
+ _configure_quantum_common
+ _configure_quantum_rpc
+
+ if is_service_enabled q-svc; then
+ _configure_quantum_service
+ fi
+ if is_service_enabled q-agt; then
+ _configure_quantum_plugin_agent
+ fi
+ if is_service_enabled q-dhcp; then
+ _configure_quantum_dhcp_agent
+ fi
+ if is_service_enabled q-l3; then
+ _configure_quantum_l3_agent
+ fi
+ if is_service_enabled q-meta; then
+ _configure_quantum_metadata_agent
+ fi
+
+ _configure_quantum_debug_command
+
+ _cleanup_quantum
+}
+
+function create_nova_conf_quantum() {
+ iniset $NOVA_CONF DEFAULT network_api_class "nova.network.quantumv2.api.API"
+ iniset $NOVA_CONF DEFAULT quantum_admin_username "$Q_ADMIN_USERNAME"
+ iniset $NOVA_CONF DEFAULT quantum_admin_password "$SERVICE_PASSWORD"
+ iniset $NOVA_CONF DEFAULT quantum_admin_auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0"
+ iniset $NOVA_CONF DEFAULT quantum_auth_strategy "$Q_AUTH_STRATEGY"
+ iniset $NOVA_CONF DEFAULT quantum_admin_tenant_name "$SERVICE_TENANT_NAME"
+ iniset $NOVA_CONF DEFAULT quantum_url "http://$Q_HOST:$Q_PORT"
+
+ if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
+ NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver"}
+ elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
+ NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.QuantumLinuxBridgeVIFDriver"}
+ elif [[ "$Q_PLUGIN" = "ryu" ]]; then
+ NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"quantum.plugins.ryu.nova.vif.LibvirtOpenVswitchOFPRyuDriver"}
+ iniset $NOVA_CONF DEFAULT libvirt_ovs_integration_bridge "$OVS_BRIDGE"
+ iniset $NOVA_CONF DEFAULT linuxnet_ovs_ryu_api_host "$RYU_API_HOST:$RYU_API_PORT"
+ iniset $NOVA_CONF DEFAULT libvirt_ovs_ryu_api_host "$RYU_API_HOST:$RYU_API_PORT"
+ fi
+ iniset $NOVA_CONF DEFAULT libvirt_vif_driver "$NOVA_VIF_DRIVER"
+ iniset $NOVA_CONF DEFAULT linuxnet_interface_driver "$LINUXNET_VIF_DRIVER"
+ if is_service_enabled q-meta; then
+ iniset $NOVA_CONF DEFAULT service_quantum_metadata_proxy "True"
+ fi
+}
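For reference, assuming stock defaults (``Q_ADMIN_USERNAME=quantum``, ``SERVICE_TENANT_NAME=service``, keystone and quantum on the local host), these ``iniset`` calls leave ``[DEFAULT]`` entries in ``nova.conf`` roughly like:

    # network_api_class = nova.network.quantumv2.api.API
    # quantum_admin_username = quantum
    # quantum_admin_tenant_name = service
    # quantum_admin_auth_url = http://127.0.0.1:35357/v2.0
    # quantum_url = http://127.0.0.1:9696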
+
+# create_quantum_accounts() - Set up common required quantum accounts
+
+# Tenant User Roles
+# ------------------------------------------------------------------
+# service quantum admin # if enabled
+
+# Migrated from keystone_data.sh
+function create_quantum_accounts() {
+
+ SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+ ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }")
+
+ if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then
+ QUANTUM_USER=$(keystone user-create \
+ --name=quantum \
+ --pass="$SERVICE_PASSWORD" \
+ --tenant_id $SERVICE_TENANT \
+ --email=quantum@example.com \
+ | grep " id " | get_field 2)
+ keystone user-role-add \
+ --tenant_id $SERVICE_TENANT \
+ --user_id $QUANTUM_USER \
+ --role_id $ADMIN_ROLE
+ if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+ QUANTUM_SERVICE=$(keystone service-create \
+ --name=quantum \
+ --type=network \
+ --description="Quantum Service" \
+ | grep " id " | get_field 2)
+ keystone endpoint-create \
+ --region RegionOne \
+ --service_id $QUANTUM_SERVICE \
+ --publicurl "http://$SERVICE_HOST:9696/" \
+ --adminurl "http://$SERVICE_HOST:9696/" \
+ --internalurl "http://$SERVICE_HOST:9696/"
+ fi
+ fi
+}
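The table parsing above leans on DevStack's ``get_field`` helper from ``functions``, which prints the n-th ``|``-delimited column of a table row. A sketch with an invented id value:

    # '| id | 3a7c9f |' split on '|' gives columns: id, 3a7c9f
    echo '| id | 3a7c9f |' | grep " id " | get_field 2    # prints 3a7c9f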
+
+function create_quantum_initial_network() {
+ TENANT_ID=$(keystone tenant-list | grep " demo " | get_field 1)
+
+ # Create a small network
+ # Since the quantum command is executed in the admin context at this
+ # point, ``--tenant_id`` needs to be specified.
+ NET_ID=$(quantum net-create --tenant_id $TENANT_ID "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2)
+ SUBNET_ID=$(quantum subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2)
+
+ if is_service_enabled q-l3; then
+ # Create a router, and add the private subnet as one of its interfaces
+ ROUTER_ID=$(quantum router-create --tenant_id $TENANT_ID router1 | grep ' id ' | get_field 2)
+ quantum router-interface-add $ROUTER_ID $SUBNET_ID
+ # Create an external network, and a subnet. Configure the external network as router gw
+ EXT_NET_ID=$(quantum net-create "$PUBLIC_NETWORK_NAME" -- --router:external=True | grep ' id ' | get_field 2)
+ EXT_GW_IP=$(quantum subnet-create --ip_version 4 $EXT_NET_ID $FLOATING_RANGE -- --enable_dhcp=False | grep 'gateway_ip' | get_field 2)
+ quantum router-gateway-set $ROUTER_ID $EXT_NET_ID
+
+ if is_quantum_ovs_base_plugin "$Q_PLUGIN" && [[ "$Q_USE_NAMESPACE" = "True" ]]; then
+ CIDR_LEN=${FLOATING_RANGE#*/}
+ sudo ip addr add $EXT_GW_IP/$CIDR_LEN dev $PUBLIC_BRIDGE
+ sudo ip link set $PUBLIC_BRIDGE up
+ ROUTER_GW_IP=`quantum port-list -c fixed_ips -c device_owner | grep router_gateway | awk -F '"' '{ print $8; }'`
+ sudo route add -net $FIXED_RANGE gw $ROUTER_GW_IP
+ fi
+ if [[ "$Q_USE_NAMESPACE" == "False" ]]; then
+ # Explicitly set router id in l3 agent configuration
+ iniset $Q_L3_CONF_FILE DEFAULT router_id $ROUTER_ID
+ fi
+ fi
+}
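The gateway-IP extraction above depends on the column layout of ``quantum port-list -c fixed_ips -c device_owner``: splitting the row on double quotes makes the IP address the eighth field. A sketch on an invented row:

    # fixed_ips column holds: {"subnet_id": "...", "ip_address": "..."}
    echo '| {"subnet_id": "f0a5", "ip_address": "172.24.4.226"} | network:router_gateway |' \
        | grep router_gateway | awk -F '"' '{ print $8; }'    # prints 172.24.4.226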
+
+# init_quantum() - Initialize databases, etc.
+function init_quantum() {
+ :
+}
+
+# install_quantum() - Collect source and prepare
+function install_quantum() {
+ git_clone $QUANTUM_REPO $QUANTUM_DIR $QUANTUM_BRANCH
+}
+
+# install_quantumclient() - Collect source and prepare
+function install_quantumclient() {
+ git_clone $QUANTUMCLIENT_REPO $QUANTUMCLIENT_DIR $QUANTUMCLIENT_BRANCH
+}
+
+# install_quantum_agent_packages() - Collect source and prepare
+function install_quantum_agent_packages() {
+ if is_quantum_ovs_base_plugin "$Q_PLUGIN"; then
+ # Install deps
+ # FIXME add to ``files/apts/quantum``, but don't install if not needed!
+ if is_ubuntu; then
+ kernel_version=`cat /proc/version | cut -d " " -f3`
+ install_package make fakeroot dkms openvswitch-switch openvswitch-datapath-dkms linux-headers-$kernel_version
+ else
+ ### FIXME(dtroyer): Find RPMs for OpenVSwitch
+ echo "OpenVSwitch packages need to be located"
+ # Fedora does not start OVS by default
+ restart_service openvswitch
+ fi
+ elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
+ install_package bridge-utils
+ fi
+}
+
+function is_quantum_ovs_base_plugin() {
+ local plugin=$1
+ if [[ ",openvswitch,ryu," =~ ,${plugin}, ]]; then
+ return 0
+ fi
+ return 1
+}
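The comma-wrapping in ``is_quantum_ovs_base_plugin`` is a membership-test idiom: surrounding both the list and the candidate with commas prevents partial matches. For example:

    is_quantum_ovs_base_plugin "ryu"        # returns 0: ",ryu," occurs in ",openvswitch,ryu,"
    is_quantum_ovs_base_plugin "vswitch"    # returns 1: ",vswitch," does not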
+
+function setup_quantum() {
+ setup_develop $QUANTUM_DIR
+}
+
+function setup_quantumclient() {
+ setup_develop $QUANTUMCLIENT_DIR
+}
+
+# Start running processes, including screen
+function start_quantum_service_and_check() {
+ # Start the Quantum service
+ screen_it q-svc "cd $QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server --config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE"
+ echo "Waiting for Quantum to start..."
+ if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://127.0.0.1:9696; do sleep 1; done"; then
+ echo "Quantum did not start"
+ exit 1
+ fi
+}
+
+# Start running processes, including screen
+function start_quantum_agents() {
+ # Start up the quantum agents if enabled
+ screen_it q-agt "python $AGENT_BINARY --config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE"
+ screen_it q-dhcp "python $AGENT_DHCP_BINARY --config-file $QUANTUM_CONF --config-file=$Q_DHCP_CONF_FILE"
+ screen_it q-meta "python $AGENT_META_BINARY --config-file $QUANTUM_CONF --config-file=$Q_META_CONF_FILE"
+ screen_it q-l3 "python $AGENT_L3_BINARY --config-file $QUANTUM_CONF --config-file=$Q_L3_CONF_FILE"
+}
+
+# stop_quantum() - Stop running processes (non-screen)
+function stop_quantum() {
+ if is_service_enabled q-dhcp; then
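+ # The [d] in the pattern keeps the awk process itself (whose command
+ # line contains the literal string) out of the match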
+ pid=$(ps aux | awk '/[d]nsmasq.+interface=(tap|ns-)/ { print $2 }')
+ [ ! -z "$pid" ] && sudo kill -9 $pid
+ fi
+}
+
+# _cleanup_quantum() - Remove residual data files, anything left over from previous
+# runs that a clean run would need to clean up
+function _cleanup_quantum() {
+ :
+}
+
+# _configure_quantum_common()
+# Set common config for all quantum server and agents.
+# This MUST be called before other _configure_quantum_* functions.
+function _configure_quantum_common() {
+ # Put config files in ``QUANTUM_CONF_DIR`` for everyone to find
+ if [[ ! -d $QUANTUM_CONF_DIR ]]; then
+ sudo mkdir -p $QUANTUM_CONF_DIR
+ fi
+ sudo chown `whoami` $QUANTUM_CONF_DIR
+
+ cp $QUANTUM_DIR/etc/quantum.conf $QUANTUM_CONF
+
+ if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
+ Q_PLUGIN_CONF_PATH=etc/quantum/plugins/openvswitch
+ Q_PLUGIN_CONF_FILENAME=ovs_quantum_plugin.ini
+ Q_DB_NAME="ovs_quantum"
+ Q_PLUGIN_CLASS="quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPluginV2"
+ elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
+ Q_PLUGIN_CONF_PATH=etc/quantum/plugins/linuxbridge
+ Q_PLUGIN_CONF_FILENAME=linuxbridge_conf.ini
+ Q_DB_NAME="quantum_linux_bridge"
+ Q_PLUGIN_CLASS="quantum.plugins.linuxbridge.lb_quantum_plugin.LinuxBridgePluginV2"
+ elif [[ "$Q_PLUGIN" = "ryu" ]]; then
+ Q_PLUGIN_CONF_PATH=etc/quantum/plugins/ryu
+ Q_PLUGIN_CONF_FILENAME=ryu.ini
+ Q_DB_NAME="ovs_quantum"
+ Q_PLUGIN_CLASS="quantum.plugins.ryu.ryu_quantum_plugin.RyuQuantumPluginV2"
+ fi
+
+ if [[ $Q_PLUGIN_CONF_PATH == '' || $Q_PLUGIN_CONF_FILENAME == '' || $Q_PLUGIN_CLASS == '' ]]; then
+ echo "Quantum plugin not set.. exiting"
+ exit 1
+ fi
+
+ # Copy the plugin config file from ``$QUANTUM_DIR/etc/quantum`` to ``/$Q_PLUGIN_CONF_PATH``
+ mkdir -p /$Q_PLUGIN_CONF_PATH
+ Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME
+ cp $QUANTUM_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE
+
+ database_connection_url dburl $Q_DB_NAME
+ iniset /$Q_PLUGIN_CONF_FILE DATABASE sql_connection $dburl
+ unset dburl
+
+ _quantum_setup_rootwrap
+}
+
+function _configure_quantum_debug_command() {
+ if [[ "$Q_USE_DEBUG_COMMAND" != "True" ]]; then
+ return
+ fi
+
+ cp $QUANTUM_DIR/etc/l3_agent.ini $QUANTUM_TEST_CONFIG_FILE
+
+ iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT verbose False
+ iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT debug False
+ iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE
+ iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT root_helper "$Q_RR_COMMAND"
+
+ _quantum_setup_keystone $QUANTUM_TEST_CONFIG_FILE DEFAULT set_auth_url
+ _quantum_setup_interface_driver $QUANTUM_TEST_CONFIG_FILE
+
+ if is_quantum_ovs_base_plugin "$Q_PLUGIN"; then
+ iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE
+ elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
+ iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT external_network_bridge ''
+ fi
+
+ if [[ "$Q_PLUGIN" = "ryu" ]]; then
+ iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT
+ fi
+}
+
+function _configure_quantum_dhcp_agent() {
+ AGENT_DHCP_BINARY="$QUANTUM_DIR/bin/quantum-dhcp-agent"
+ Q_DHCP_CONF_FILE=$QUANTUM_CONF_DIR/dhcp_agent.ini
+
+ cp $QUANTUM_DIR/etc/dhcp_agent.ini $Q_DHCP_CONF_FILE
+
+ iniset $Q_DHCP_CONF_FILE DEFAULT verbose True
+ iniset $Q_DHCP_CONF_FILE DEFAULT debug True
+ iniset $Q_DHCP_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE
+ iniset $Q_DHCP_CONF_FILE DEFAULT state_path $DATA_DIR/quantum
+ iniset $Q_DHCP_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND"
+
+ _quantum_setup_keystone $Q_DHCP_CONF_FILE DEFAULT set_auth_url
+ _quantum_setup_interface_driver $Q_DHCP_CONF_FILE
+
+ if [[ "$Q_PLUGIN" = "ryu" ]]; then
+ iniset $Q_DHCP_CONF_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT
+ fi
+}
+
+function _configure_quantum_l3_agent() {
+ AGENT_L3_BINARY="$QUANTUM_DIR/bin/quantum-l3-agent"
+ PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex}
+ Q_L3_CONF_FILE=$QUANTUM_CONF_DIR/l3_agent.ini
+
+ cp $QUANTUM_DIR/etc/l3_agent.ini $Q_L3_CONF_FILE
+
+ iniset $Q_L3_CONF_FILE DEFAULT verbose True
+ iniset $Q_L3_CONF_FILE DEFAULT debug True
+ iniset $Q_L3_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE
+ iniset $Q_L3_CONF_FILE DEFAULT state_path $DATA_DIR/quantum
+ iniset $Q_L3_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND"
+
+ _quantum_setup_keystone $Q_L3_CONF_FILE DEFAULT set_auth_url
+ _quantum_setup_interface_driver $Q_L3_CONF_FILE
+
+ if is_quantum_ovs_base_plugin "$Q_PLUGIN"; then
+ iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE
+ _quantum_setup_external_bridge $PUBLIC_BRIDGE
+ elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
+ iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge ''
+ fi
+
+ if [[ "$Q_PLUGIN" = "ryu" ]]; then
+ iniset $Q_L3_CONF_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT
+ fi
+}
+
+function _configure_quantum_metadata_agent() {
+ AGENT_META_BINARY="$QUANTUM_DIR/bin/quantum-metadata-agent"
+ Q_META_CONF_FILE=$QUANTUM_CONF_DIR/metadata_agent.ini
+
+ cp $QUANTUM_DIR/etc/metadata_agent.ini $Q_META_CONF_FILE
+
+ iniset $Q_META_CONF_FILE DEFAULT verbose True
+ iniset $Q_META_CONF_FILE DEFAULT debug True
+ iniset $Q_META_CONF_FILE DEFAULT state_path $DATA_DIR/quantum
+ iniset $Q_META_CONF_FILE DEFAULT nova_metadata_ip $Q_META_DATA_IP
+ iniset $Q_META_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND"
+
+ _quantum_setup_keystone $Q_META_CONF_FILE DEFAULT set_auth_url
+}
+
+# _configure_quantum_plugin_agent() - Set config files for quantum plugin agent
+# It is called when q-agt is enabled.
+function _configure_quantum_plugin_agent() {
+ # Configure agent for plugin
+ if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
+ _configure_quantum_plugin_agent_openvswitch
+ elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
+ _configure_quantum_plugin_agent_linuxbridge
+ elif [[ "$Q_PLUGIN" = "ryu" ]]; then
+ _configure_quantum_plugin_agent_ryu
+ fi
+
+ iniset /$Q_PLUGIN_CONF_FILE AGENT root_helper "$Q_RR_COMMAND"
+}
+
+function _configure_quantum_plugin_agent_linuxbridge() {
+ # Setup physical network interface mappings. Override
+ # ``LB_VLAN_RANGES`` and ``LB_INTERFACE_MAPPINGS`` in ``localrc`` for more
+ # complex physical network configurations.
+ if [[ "$LB_INTERFACE_MAPPINGS" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$LB_PHYSICAL_INTERFACE" != "" ]]; then
+ LB_INTERFACE_MAPPINGS=$PHYSICAL_NETWORK:$LB_PHYSICAL_INTERFACE
+ fi
+ if [[ "$LB_INTERFACE_MAPPINGS" != "" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE LINUX_BRIDGE physical_interface_mappings $LB_INTERFACE_MAPPINGS
+ fi
+ AGENT_BINARY="$QUANTUM_DIR/bin/quantum-linuxbridge-agent"
+}
+
+function _configure_quantum_plugin_agent_openvswitch() {
+ # Setup integration bridge
+ OVS_BRIDGE=${OVS_BRIDGE:-br-int}
+ _quantum_setup_ovs_bridge $OVS_BRIDGE
+
+ # Setup agent for tunneling
+ if [[ "$OVS_ENABLE_TUNNELING" = "True" ]]; then
+ # Verify tunnels are supported
+ # REVISIT - also check kernel module support for GRE and patch ports
+ OVS_VERSION=`ovs-vsctl --version | head -n 1 | awk '{print $4;}'`
+ if [ $OVS_VERSION \< "1.4" ] && ! is_service_enabled q-svc ; then
+ echo "You are running OVS version $OVS_VERSION."
+ echo "OVS 1.4+ is required for tunneling between multiple hosts."
+ exit 1
+ fi
+ iniset /$Q_PLUGIN_CONF_FILE OVS enable_tunneling True
+ iniset /$Q_PLUGIN_CONF_FILE OVS local_ip $HOST_IP
+ fi
+
+ # Setup physical network bridge mappings. Override
+ # ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc`` for more
+ # complex physical network configurations.
+ if [[ "$OVS_BRIDGE_MAPPINGS" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$OVS_PHYSICAL_BRIDGE" != "" ]]; then
+ OVS_BRIDGE_MAPPINGS=$PHYSICAL_NETWORK:$OVS_PHYSICAL_BRIDGE
+
+ # Configure bridge manually with physical interface as port for multi-node
+ sudo ovs-vsctl --no-wait -- --may-exist add-br $OVS_PHYSICAL_BRIDGE
+ fi
+ if [[ "$OVS_BRIDGE_MAPPINGS" != "" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE OVS bridge_mappings $OVS_BRIDGE_MAPPINGS
+ fi
+ AGENT_BINARY="$QUANTUM_DIR/bin/quantum-openvswitch-agent"
+}
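With the example values from the variable comments above (``PHYSICAL_NETWORK=default``, ``OVS_PHYSICAL_BRIDGE=br-eth1``), the agent would end up with this mapping in the plugin ini:

    # [OVS]
    # bridge_mappings = default:br-eth1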
+
+function _configure_quantum_plugin_agent_ryu() {
+ # Set up integration bridge
+ OVS_BRIDGE=${OVS_BRIDGE:-br-int}
+ _quantum_setup_ovs_bridge $OVS_BRIDGE
+ if [ -n "$RYU_INTERNAL_INTERFACE" ]; then
+ sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_BRIDGE $RYU_INTERNAL_INTERFACE
+ fi
+ AGENT_BINARY="$QUANTUM_DIR/quantum/plugins/ryu/agent/ryu_quantum_agent.py"
+}
+
+# Quantum RPC support - must be updated prior to starting any of the services
+function _configure_quantum_rpc() {
+ iniset $QUANTUM_CONF DEFAULT control_exchange quantum
+ if is_service_enabled qpid ; then
+ iniset $QUANTUM_CONF DEFAULT rpc_backend quantum.openstack.common.rpc.impl_qpid
+ elif is_service_enabled zeromq; then
+ iniset $QUANTUM_CONF DEFAULT rpc_backend quantum.openstack.common.rpc.impl_zmq
+ elif [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then
+ iniset $QUANTUM_CONF DEFAULT rabbit_host $RABBIT_HOST
+ iniset $QUANTUM_CONF DEFAULT rabbit_password $RABBIT_PASSWORD
+ fi
+}
+
+# _configure_quantum_service() - Set config files for quantum service
+# It is called when q-svc is enabled.
+function _configure_quantum_service() {
+ Q_API_PASTE_FILE=$QUANTUM_CONF_DIR/api-paste.ini
+ Q_POLICY_FILE=$QUANTUM_CONF_DIR/policy.json
+
+ cp $QUANTUM_DIR/etc/api-paste.ini $Q_API_PASTE_FILE
+ cp $QUANTUM_DIR/etc/policy.json $Q_POLICY_FILE
+
+ if is_service_enabled $DATABASE_BACKENDS; then
+ recreate_database $Q_DB_NAME utf8
+ else
+ echo "A database must be enabled in order to use the $Q_PLUGIN Quantum plugin."
+ exit 1
+ fi
+
+ # Set the core plugin in the quantum configuration file
+ iniset $QUANTUM_CONF DEFAULT core_plugin $Q_PLUGIN_CLASS
+
+ iniset $QUANTUM_CONF DEFAULT verbose True
+ iniset $QUANTUM_CONF DEFAULT debug True
+ iniset $QUANTUM_CONF DEFAULT allow_overlapping_ips $Q_ALLOW_OVERLAPPING_IP
+
+ iniset $QUANTUM_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY
+ _quantum_setup_keystone $Q_API_PASTE_FILE filter:authtoken
+
+ # Configure plugin
+ if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
+ if [[ "$ENABLE_TENANT_TUNNELS" = "True" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE OVS tenant_network_type gre
+ iniset /$Q_PLUGIN_CONF_FILE OVS tunnel_id_ranges $TENANT_TUNNEL_RANGES
+ elif [[ "$ENABLE_TENANT_VLANS" = "True" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE OVS tenant_network_type vlan
+ else
+ echo "WARNING - The openvswitch plugin is using local tenant networks, with no connectivity between hosts."
+ fi
+
+ # Override ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc``
+ # for more complex physical network configurations.
+ if [[ "$OVS_VLAN_RANGES" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]]; then
+ OVS_VLAN_RANGES=$PHYSICAL_NETWORK
+ if [[ "$TENANT_VLAN_RANGE" != "" ]]; then
+ OVS_VLAN_RANGES=$OVS_VLAN_RANGES:$TENANT_VLAN_RANGE
+ fi
+ fi
+ if [[ "$OVS_VLAN_RANGES" != "" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE OVS network_vlan_ranges $OVS_VLAN_RANGES
+ fi
+
+ # Enable tunnel networks if selected
+ if [[ $OVS_ENABLE_TUNNELING = "True" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE OVS enable_tunneling True
+ fi
+ elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
+ if [[ "$ENABLE_TENANT_VLANS" = "True" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE VLANS tenant_network_type vlan
+ else
+ echo "WARNING - The linuxbridge plugin is using local tenant networks, with no connectivity between hosts."
+ fi
+
+ # Override ``LB_VLAN_RANGES`` and ``LB_INTERFACE_MAPPINGS`` in ``localrc``
+ # for more complex physical network configurations.
+ if [[ "$LB_VLAN_RANGES" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]]; then
+ LB_VLAN_RANGES=$PHYSICAL_NETWORK
+ if [[ "$TENANT_VLAN_RANGE" != "" ]]; then
+ LB_VLAN_RANGES=$LB_VLAN_RANGES:$TENANT_VLAN_RANGE
+ fi
+ fi
+ if [[ "$LB_VLAN_RANGES" != "" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE VLANS network_vlan_ranges $LB_VLAN_RANGES
+ fi
+ elif [[ "$Q_PLUGIN" = "ryu" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE OVS openflow_controller $RYU_OFP_HOST:$RYU_OFP_PORT
+ iniset /$Q_PLUGIN_CONF_FILE OVS openflow_rest_api $RYU_API_HOST:$RYU_API_PORT
+ fi
+}
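As a sketch, enabling GRE tenant networks (``ENABLE_TENANT_TUNNELS=True`` with the default tunnel ID range) leaves the openvswitch plugin ini with roughly:

    # [OVS]
    # tenant_network_type = gre
    # tunnel_id_ranges = 1:1000
    # enable_tunneling = True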
+
+# Utility Functions
+#------------------
+
+# _quantum_setup_rootwrap() - configure Quantum's rootwrap
+function _quantum_setup_rootwrap() {
if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then
return
fi
@@ -109,7 +713,7 @@
}
# Configures keystone integration for quantum service and agents
-function quantum_setup_keystone() {
+function _quantum_setup_keystone() {
local conf_file=$1
local section=$2
local use_auth_url=$3
@@ -130,39 +734,54 @@
rm -f $QUANTUM_AUTH_CACHE_DIR/*
}
-function quantum_setup_ovs_bridge() {
+function _quantum_setup_ovs_bridge() {
local bridge=$1
- for PORT in `sudo ovs-vsctl --no-wait list-ports $bridge`; do
- if [[ "$PORT" =~ tap* ]]; then echo `sudo ip link delete $PORT` > /dev/null; fi
- sudo ovs-vsctl --no-wait del-port $bridge $PORT
- done
- sudo ovs-vsctl --no-wait -- --if-exists del-br $bridge
- sudo ovs-vsctl --no-wait add-br $bridge
+ quantum-ovs-cleanup --ovs_integration_bridge $bridge
+ sudo ovs-vsctl --no-wait -- --may-exist add-br $bridge
sudo ovs-vsctl --no-wait br-set-external-id $bridge bridge-id $bridge
}
-function quantum_setup_external_bridge() {
+function _quantum_setup_interface_driver() {
+ local conf_file=$1
+ if [[ "$Q_PLUGIN" == "openvswitch" ]]; then
+ iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver
+ elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
+ iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver
+ elif [[ "$Q_PLUGIN" = "ryu" ]]; then
+ iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.RyuInterfaceDriver
+ fi
+}
+
+function _quantum_setup_external_bridge() {
local bridge=$1
- # Create it if it does not exist
+ quantum-ovs-cleanup --external_network_bridge $bridge
sudo ovs-vsctl --no-wait -- --may-exist add-br $bridge
- # remove internal ports
- for PORT in `sudo ovs-vsctl --no-wait list-ports $bridge`; do
- TYPE=$(sudo ovs-vsctl get interface $PORT type)
- if [[ "$TYPE" == "internal" ]]; then
- echo `sudo ip link delete $PORT` > /dev/null
- sudo ovs-vsctl --no-wait del-port $bridge $PORT
- fi
- done
# ensure no IP is configured on the public bridge
sudo ip addr flush dev $bridge
}
-function is_quantum_ovs_base_plugin() {
- local plugin=$1
- if [[ ",openvswitch,ryu," =~ ,${plugin}, ]]; then
- return 0
+# Functions for Quantum Exercises
+#--------------------------------
+
+function delete_probe() {
+ local from_net="$1"
+ net_id=`_get_net_id $from_net`
+ probe_id=`quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}'`
+ quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-delete $probe_id
+}
+
+function setup_quantum_debug() {
+ if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then
+ public_net_id=`_get_net_id $PUBLIC_NETWORK_NAME`
+ quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create $public_net_id
+ private_net_id=`_get_net_id $PRIVATE_NETWORK_NAME`
+ quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create $private_net_id
fi
- return 1
+}
+
+function teardown_quantum_debug() {
+ delete_probe $PUBLIC_NETWORK_NAME
+ delete_probe $PRIVATE_NETWORK_NAME
}
function _get_net_id() {
@@ -176,13 +795,6 @@
echo "$Q_RR_COMMAND ip netns exec qprobe-$probe_id"
}
-function delete_probe() {
- local from_net="$1"
- net_id=`_get_net_id $from_net`
- probe_id=`quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}'`
- quantum-debug --os-tenant-name admin --os-username admin probe-delete $probe_id
-}
-
function _ping_check_quantum() {
local from_net=$1
local ip=$2
@@ -220,17 +832,59 @@
fi
}
-function setup_quantum() {
- public_net_id=`_get_net_id $PUBLIC_NETWORK_NAME`
- quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create $public_net_id
- private_net_id=`_get_net_id $PRIVATE_NETWORK_NAME`
- quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create $private_net_id
+# Quantum 3rd party programs
+#---------------------------
+# A comma-separated list of 3rd party programs
+QUANTUM_THIRD_PARTIES="ryu"
+for third_party in ${QUANTUM_THIRD_PARTIES//,/ }; do
+ source lib/$third_party
+done
+
+# configure_quantum_third_party() - Set config files, create data dirs, etc
+function configure_quantum_third_party() {
+ for third_party in ${QUANTUM_THIRD_PARTIES//,/ }; do
+ if is_service_enabled $third_party; then
+ configure_${third_party}
+ fi
+ done
}
-function teardown_quantum() {
- delete_probe $PUBLIC_NETWORK_NAME
- delete_probe $PRIVATE_NETWORK_NAME
+# init_quantum_third_party() - Initialize databases, etc.
+function init_quantum_third_party() {
+ for third_party in ${QUANTUM_THIRD_PARTIES//,/ }; do
+ if is_service_enabled $third_party; then
+ init_${third_party}
+ fi
+ done
}
+# install_quantum_third_party() - Collect source and prepare
+function install_quantum_third_party() {
+ for third_party in ${QUANTUM_THIRD_PARTIES//,/ }; do
+ if is_service_enabled $third_party; then
+ install_${third_party}
+ fi
+ done
+}
+
+# start_quantum_third_party() - Start running processes, including screen
+function start_quantum_third_party() {
+ for third_party in ${QUANTUM_THIRD_PARTIES//,/ }; do
+ if is_service_enabled $third_party; then
+ start_${third_party}
+ fi
+ done
+}
+
+# stop_quantum_third_party() - Stop running processes (non-screen)
+function stop_quantum_third_party() {
+ for third_party in ${QUANTUM_THIRD_PARTIES//,/ }; do
+ if is_service_enabled $third_party; then
+ stop_${third_party}
+ fi
+ done
+}
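The dispatch loops above assume every entry in ``QUANTUM_THIRD_PARTIES`` has a ``lib/<name>`` file following the same naming contract as ``lib/ryu``. A hypothetical ``lib/foo`` would need to define:

    # Hypothetical lib/foo -- function names are dictated by the dispatchers
    function configure_foo() { :; }
    function init_foo() { :; }
    function install_foo() { :; }
    function start_foo() { :; }
    function stop_foo() { :; }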
+
+
# Restore xtrace
$XTRACE
diff --git a/lib/ryu b/lib/ryu
new file mode 100644
index 0000000..ac3462b
--- /dev/null
+++ b/lib/ryu
@@ -0,0 +1,63 @@
+# Ryu OpenFlow Controller
+# -----------------------
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+RYU_DIR=$DEST/ryu
+# Ryu API Host
+RYU_API_HOST=${RYU_API_HOST:-127.0.0.1}
+# Ryu API Port
+RYU_API_PORT=${RYU_API_PORT:-8080}
+# Ryu OFP Host
+RYU_OFP_HOST=${RYU_OFP_HOST:-127.0.0.1}
+# Ryu OFP Port
+RYU_OFP_PORT=${RYU_OFP_PORT:-6633}
+# Ryu Applications
+RYU_APPS=${RYU_APPS:-ryu.app.simple_isolation,ryu.app.rest}
+
+function configure_ryu() {
+ setup_develop $RYU_DIR
+}
+
+function init_ryu() {
+ RYU_CONF_DIR=/etc/ryu
+ if [[ ! -d $RYU_CONF_DIR ]]; then
+ sudo mkdir -p $RYU_CONF_DIR
+ fi
+ sudo chown `whoami` $RYU_CONF_DIR
+ RYU_CONF=$RYU_CONF_DIR/ryu.conf
+ sudo rm -rf $RYU_CONF
+
+ cat <<EOF > $RYU_CONF
+--app_lists=$RYU_APPS
+--wsapi_host=$RYU_API_HOST
+--wsapi_port=$RYU_API_PORT
+--ofp_listen_host=$RYU_OFP_HOST
+--ofp_tcp_listen_port=$RYU_OFP_PORT
+EOF
+}
+
+function install_ryu() {
+ git_clone $RYU_REPO $RYU_DIR $RYU_BRANCH
+}
+
+function is_ryu_required() {
+ if is_service_enabled ryu || (is_service_enabled quantum && [[ "$Q_PLUGIN" = "ryu" ]]); then
+ return 0
+ fi
+ return 1
+}
+
+function start_ryu() {
+ screen_it ryu "cd $RYU_DIR && $RYU_DIR/bin/ryu-manager --flagfile $RYU_CONF"
+}
+
+function stop_ryu() {
+ :
+}
+
+# Restore xtrace
+$XTRACE
diff --git a/lib/swift b/lib/swift
index 140e5e9..b418eda 100644
--- a/lib/swift
+++ b/lib/swift
@@ -1,5 +1,5 @@
# lib/swift
-# Functions to control the configuration and operation of the swift service
+# Functions to control the configuration and operation of the **Swift** service
# Dependencies:
# ``functions`` file
@@ -23,12 +23,10 @@
# Defaults
# --------
-# <define global variables here that belong to this project>
-
# Set up default directories
-
SWIFT_DIR=$DEST/swift
SWIFTCLIENT_DIR=$DEST/python-swiftclient
+SWIFT_AUTH_CACHE_DIR=${SWIFT_AUTH_CACHE_DIR:-/var/cache/swift}
# TODO: add logging to different location.
@@ -41,7 +39,8 @@
SWIFT_CONFIG_DIR=${SWIFT_CONFIG_DIR:-/etc/swift}
# DevStack will create a loop-back disk formatted as XFS to store the
-# swift data. Set ``SWIFT_LOOPBACK_DISK_SIZE`` to the disk size in bytes.
+# swift data. Set ``SWIFT_LOOPBACK_DISK_SIZE`` to the disk size in
+# kilobytes.
# Default is 1 gigabyte.
SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-1000000}
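For example, a roughly 2 GB backing file would be requested in ``localrc`` as:

    SWIFT_LOOPBACK_DISK_SIZE=2000000    # value is in kilobytes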
@@ -70,6 +69,7 @@
CONTAINER_PORT_BASE=6011
ACCOUNT_PORT_BASE=6012
+
# Entry Points
# ------------
@@ -100,7 +100,7 @@
# changing the permissions so we can run it as our user.
USER_GROUP=$(id -g)
- sudo mkdir -p ${SWIFT_DATA_DIR}/drives
+ sudo mkdir -p ${SWIFT_DATA_DIR}/{drives,cache}
sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_DIR}
# Create a loopback disk and format it to XFS.
@@ -212,6 +212,7 @@
iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME
iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_user swift
iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_password $SERVICE_PASSWORD
+ iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken signing_dir $SWIFT_AUTH_CACHE_DIR
iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth use
iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth operator_roles
@@ -273,16 +274,22 @@
swift_node_config=${SWIFT_CONFIG_DIR}/object-server/${node_number}.conf
cp ${SWIFT_DIR}/etc/object-server.conf-sample ${swift_node_config}
generate_swift_config ${swift_node_config} ${node_number} $[OBJECT_PORT_BASE + 10 * (node_number - 1)]
+ iniset ${swift_node_config} filter:recon recon_cache_path ${SWIFT_DATA_DIR}/cache
+ # Use sed rather than iniset/iniuncomment because we want a global
+ # modification that also works for new sections.
+ sed -i -e "s,#[ ]*recon_cache_path .*,recon_cache_path = ${SWIFT_DATA_DIR}/cache," ${swift_node_config}
swift_node_config=${SWIFT_CONFIG_DIR}/container-server/${node_number}.conf
cp ${SWIFT_DIR}/etc/container-server.conf-sample ${swift_node_config}
generate_swift_config ${swift_node_config} ${node_number} $[CONTAINER_PORT_BASE + 10 * (node_number - 1)]
iniuncomment ${swift_node_config} app:container-server allow_versions
iniset ${swift_node_config} app:container-server allow_versions "true"
+ sed -i -e "s,#[ ]*recon_cache_path .*,recon_cache_path = ${SWIFT_DATA_DIR}/cache," ${swift_node_config}
swift_node_config=${SWIFT_CONFIG_DIR}/account-server/${node_number}.conf
cp ${SWIFT_DIR}/etc/account-server.conf-sample ${swift_node_config}
generate_swift_config ${swift_node_config} ${node_number} $[ACCOUNT_PORT_BASE + 10 * (node_number - 1)]
+ sed -i -e "s,#[ ]*recon_cache_path .*,recon_cache_path = ${SWIFT_DATA_DIR}/cache," ${swift_node_config}
done
swift_log_dir=${SWIFT_DATA_DIR}/logs
@@ -291,7 +298,6 @@
sudo chown -R $USER:adm ${swift_log_dir}
sed "s,%SWIFT_LOGDIR%,${swift_log_dir}," $FILES/swift/rsyslog.conf | sudo \
tee /etc/rsyslog.d/10-swift.conf
-
}
# configure_swiftclient() - Set config files, create data dirs, etc
@@ -325,6 +331,10 @@
swift-ring-builder account.builder rebalance
} && popd >/dev/null
+ # Create cache dir
+ sudo mkdir -p $SWIFT_AUTH_CACHE_DIR
+ sudo chown `whoami` $SWIFT_AUTH_CACHE_DIR
+ rm -f $SWIFT_AUTH_CACHE_DIR/*
}
function install_swift() {
diff --git a/lib/tempest b/lib/tempest
index 1859921..fa637c1 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -1,4 +1,5 @@
# lib/tempest
+# Install and configure Tempest
# Dependencies:
# ``functions`` file
@@ -23,33 +24,29 @@
#
# install_tempest
# configure_tempest
-# init_tempest
-## start_tempest
-## stop_tempest
-## cleanup_tempest
# Save trace setting
XTRACE=$(set +o | grep xtrace)
set +o xtrace
+
# Defaults
# --------
-# <define global variables here that belong to this project>
-
# Set up default directories
-NOVA_SOURCE_DIR=$DEST/nova
TEMPEST_DIR=$DEST/tempest
TEMPEST_CONF_DIR=$TEMPEST_DIR/etc
TEMPEST_CONF=$TEMPEST_CONF_DIR/tempest.conf
+NOVA_SOURCE_DIR=$DEST/nova
+
BUILD_INTERVAL=3
BUILD_TIMEOUT=400
+
# Entry Points
# ------------
-
# configure_tempest() - Set config files, create data dirs, etc
function configure_tempest() {
local image_lines
@@ -66,7 +63,7 @@
local public_network_id
local tenant_networks_reachable
- #TODO(afazekas):
+ # TODO(afazekas):
# sudo python setup.py deploy
# This function exits on an error so that errors don't compound and you see
@@ -74,7 +71,7 @@
errexit=$(set +o | grep errexit)
set -o errexit
- #Save IFS
+ # Save IFS
ifs=$IFS
# Glance should already contain images to be used in tempest
@@ -85,30 +82,34 @@
# first image returned and set ``image_uuid_alt`` to the second,
# if there is more than one returned...
# ... Also ensure we only take active images, so we don't get snapshots in process
- image_lines=`glance image-list`
- IFS=$'\n\r'
- images=""
- for line in $image_lines; do
- if [ -z $DEFAULT_IMAGE_NAME ]; then
- images="$images `echo $line | grep -v "^\(ID\|+--\)" | grep -v "\(aki\|ari\)" | grep 'active' | cut -d' ' -f2`"
- else
- images="$images `echo $line | grep -v "^\(ID\|+--\)" | grep -v "\(aki\|ari\)" | grep 'active' | grep "$DEFAULT_IMAGE_NAME" | cut -d' ' -f2`"
+ declare -a images
+
+ while read -r IMAGE_NAME IMAGE_UUID; do
+ if [ "$IMAGE_NAME" = "$DEFAULT_IMAGE_NAME" ]; then
+ image_uuid="$IMAGE_UUID"
+ image_uuid_alt="$IMAGE_UUID"
fi
- done
- # Create array of image UUIDs...
- IFS=" "
- images=($images)
- num_images=${#images[*]}
- echo "Found $num_images images"
- if [[ $num_images -eq 0 ]]; then
- echo "Found no valid images to use!"
- exit 1
- fi
- image_uuid=${images[0]}
- image_uuid_alt=$image_uuid
- if [[ $num_images -gt 1 ]]; then
- image_uuid_alt=${images[1]}
- fi
+ images+=($IMAGE_UUID)
+ done < <(glance image-list --status=active | awk -F'|' '!/^(+--)|ID|aki|ari/ { print $3,$2 }')
+
+ case "${#images[*]}" in
+ 0)
+ echo "Found no valid images to use!"
+ exit 1
+ ;;
+ 1)
+ if [ -z "$image_uuid" ]; then
+ image_uuid=${images[0]}
+ image_uuid_alt=${images[0]}
+ fi
+ ;;
+ *)
+ if [ -z "$image_uuid" ]; then
+ image_uuid=${images[0]}
+ image_uuid_alt=${images[1]}
+ fi
+ ;;
+ esac
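The rewritten image scan reads name/UUID pairs via process substitution; the awk filter drops the header, separator, and aki/ari rows and prints the name column before the ID column. A sketch on an invented table row:

    # | 7f3c... | cirros-0.3.0-x86_64-uec | ... | active |
    # awk -F'|' prints "$3 $2" (name, then UUID), which the while-read
    # loop splits back into IMAGE_NAME and IMAGE_UUID.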
# Create tempest.conf from tempest.conf.sample
# copy every time, because the image UUIDS are going to change
@@ -133,12 +134,14 @@
flavor_lines=`nova flavor-list`
IFS=$'\r\n'
flavors=""
- for line in $flavor_lines; do
- if [ -z $DEFAULT_INSTANCE_TYPE ]; then
- flavors="$flavors `echo $line | grep -v "^\(|\s*ID\|+--\)" | cut -d' ' -f2`"
- else
- flavors="$flavors `echo $line | grep -v "^\(|\s*ID\|+--\)" | grep "$DEFAULT_INSTANCE_TYPE" | cut -d' ' -f2`"
+ if [[ -n "$DEFAULT_INSTANCE_TYPE" ]]; then
+ for line in $flavor_lines; do
+ f=$(echo $line | awk "/ $DEFAULT_INSTANCE_TYPE / { print \$2 }")
+ flavors="$flavors $f"
+ done
fi
+ for line in $flavor_lines; do
+ flavors="$flavors `echo $line | grep -v "^\(|\s*ID\|+--\)" | cut -d' ' -f2`"
done
IFS=" "
@@ -187,10 +190,16 @@
iniset $TEMPEST_CONF compute resize_available False
iniset $TEMPEST_CONF compute change_password_available False
iniset $TEMPEST_CONF compute compute_log_level ERROR
+ # Note(nati): the current tempest does not create a network for each
+ # tenant, so reuse the same tenant for now
+ if is_service_enabled quantum; then
+ TEMPEST_ALLOW_TENANT_ISOLATION=${TEMPEST_ALLOW_TENANT_ISOLATION:-False}
+ fi
+ iniset $TEMPEST_CONF compute allow_tenant_isolation ${TEMPEST_ALLOW_TENANT_ISOLATION:-True}
#Skip until #1074039 is fixed
iniset $TEMPEST_CONF compute run_ssh False
iniset $TEMPEST_CONF compute ssh_user ${DEFAULT_INSTANCE_USER:-$OS_USERNAME}
- iniset $TEMPEST_CONF compute network_for_ssh private
+ iniset $TEMPEST_CONF compute network_for_ssh $PRIVATE_NETWORK_NAME
iniset $TEMPEST_CONF compute ip_version_for_ssh 4
iniset $TEMPEST_CONF compute ssh_timeout 4
iniset $TEMPEST_CONF compute image_ref $image_uuid
@@ -199,7 +208,7 @@
iniset $TEMPEST_CONF compute flavor_ref_alt $flavor_ref_alt
iniset $TEMPEST_CONF compute source_dir $NOVA_SOURCE_DIR
iniset $TEMPEST_CONF compute live_migration_available ${LIVE_MIGRATION_AVAILABLE:-False}
- iniset $TEMPEST_CONF compute use_block_migration_for_live_migration ${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False}
+ iniset $TEMPEST_CONF compute use_block_migration_for_live_migration ${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False}
# Inherited behavior, might be wrong
iniset $TEMPEST_CONF compute bin_dir $NOVA_BIN_DIR
# TODO(jaypipes): Create the key file here... right now, no whitebox
@@ -240,7 +249,6 @@
$errexit
}
-
# install_tempest() - Collect source and prepare
function install_tempest() {
git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH
diff --git a/stack.sh b/stack.sh
index cf638e8..da62353 100755
--- a/stack.sh
+++ b/stack.sh
@@ -110,7 +110,7 @@
# Warn users who aren't on an explicitly supported distro, but allow them to
# override check and attempt installation with ``FORCE=yes ./stack``
-if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|f16|f17|f18) ]]; then
+if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|f16|f17|f18|opensuse-12.2) ]]; then
echo "WARNING: this script has not been tested on $DISTRO"
if [[ "$FORCE" != "yes" ]]; then
echo "If you wish to run this script anyway run with FORCE=yes"
@@ -322,6 +322,7 @@
source $TOP_DIR/lib/heat
source $TOP_DIR/lib/quantum
source $TOP_DIR/lib/tempest
+source $TOP_DIR/lib/baremetal
# Set the destination directories for OpenStack projects
HORIZON_DIR=$DEST/horizon
@@ -329,18 +330,6 @@
NOVNC_DIR=$DEST/noVNC
SWIFT3_DIR=$DEST/swift3
-RYU_DIR=$DEST/ryu
-# Ryu API Host
-RYU_API_HOST=${RYU_API_HOST:-127.0.0.1}
-# Ryu API Port
-RYU_API_PORT=${RYU_API_PORT:-8080}
-# Ryu OFP Host
-RYU_OFP_HOST=${RYU_OFP_HOST:-127.0.0.1}
-# Ryu OFP Port
-RYU_OFP_PORT=${RYU_OFP_PORT:-6633}
-# Ryu Applications
-RYU_APPS=${RYU_APPS:-ryu.app.simple_isolation,ryu.app.rest}
-
# Should cinder perform secure deletion of volumes?
# Defaults to true, can be set to False to avoid this bug when testing:
# https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1023755
@@ -405,6 +394,13 @@
# Allow ``build_domU.sh`` to specify the flat network bridge via kernel args
FLAT_NETWORK_BRIDGE_DEFAULT=$(grep -o 'flat_network_bridge=[[:alnum:]]*' /proc/cmdline | cut -d= -f 2 | sort -u)
GUEST_INTERFACE_DEFAULT=eth1
+elif [ "$VIRT_DRIVER" = 'baremetal' ]; then
+ PUBLIC_INTERFACE_DEFAULT=eth0
+ FLAT_NETWORK_BRIDGE_DEFAULT=br100
+ FLAT_INTERFACE=${FLAT_INTERFACE:-eth0}
+ FORCE_DHCP_RELEASE=${FORCE_DHCP_RELEASE:-False}
+ NET_MAN=${NET_MAN:-FlatManager}
+ STUB_NETWORK=${STUB_NETWORK:-False}
else
PUBLIC_INTERFACE_DEFAULT=br100
FLAT_NETWORK_BRIDGE_DEFAULT=br100
@@ -416,6 +412,7 @@
EC2_DMZ_HOST=${EC2_DMZ_HOST:-$SERVICE_HOST}
FLAT_NETWORK_BRIDGE=${FLAT_NETWORK_BRIDGE:-$FLAT_NETWORK_BRIDGE_DEFAULT}
VLAN_INTERFACE=${VLAN_INTERFACE:-$GUEST_INTERFACE_DEFAULT}
+FORCE_DHCP_RELEASE=${FORCE_DHCP_RELEASE:-True}
# Test floating pool and range are used for testing. They are defined
# here until the admin APIs can replace nova-manage
@@ -703,21 +700,7 @@
fi
if is_service_enabled q-agt; then
- if is_quantum_ovs_base_plugin "$Q_PLUGIN"; then
- # Install deps
- # FIXME add to ``files/apts/quantum``, but don't install if not needed!
- if is_ubuntu; then
- kernel_version=`cat /proc/version | cut -d " " -f3`
- install_package make fakeroot dkms openvswitch-switch openvswitch-datapath-dkms linux-headers-$kernel_version
- else
- ### FIXME(dtroyer): Find RPMs for OpenVSwitch
- echo "OpenVSwitch packages need to be located"
- # Fedora does not started OVS by default
- restart_service openvswitch
- fi
- elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
- install_package bridge-utils
- fi
+ install_quantum_agent_packages
fi
TRACK_DEPENDS=${TRACK_DEPENDS:-False}
@@ -778,11 +761,9 @@
install_horizon
fi
if is_service_enabled quantum; then
- git_clone $QUANTUMCLIENT_REPO $QUANTUMCLIENT_DIR $QUANTUMCLIENT_BRANCH
-fi
-if is_service_enabled quantum; then
- # quantum
- git_clone $QUANTUM_REPO $QUANTUM_DIR $QUANTUM_BRANCH
+ install_quantum
+ install_quantumclient
+ install_quantum_third_party
fi
if is_service_enabled heat; then
install_heat
@@ -792,14 +773,12 @@
install_cinder
fi
if is_service_enabled ceilometer; then
+ install_ceilometerclient
install_ceilometer
fi
if is_service_enabled tempest; then
install_tempest
fi
-if is_service_enabled ryu || (is_service_enabled quantum && [[ "$Q_PLUGIN" = "ryu" ]]); then
- git_clone $RYU_REPO $RYU_DIR $RYU_BRANCH
-fi
# Initialization
@@ -837,8 +816,8 @@
configure_horizon
fi
if is_service_enabled quantum; then
- setup_develop $QUANTUMCLIENT_DIR
- setup_develop $QUANTUM_DIR
+ setup_quantumclient
+ setup_quantum
fi
if is_service_enabled heat; then
configure_heat
@@ -847,9 +826,6 @@
if is_service_enabled cinder; then
configure_cinder
fi
-if is_service_enabled ryu || (is_service_enabled quantum && [[ "$Q_PLUGIN" = "ryu" ]]); then
- setup_develop $RYU_DIR
-fi
if [[ $TRACK_DEPENDS = True ]] ; then
$DEST/.venv/bin/pip freeze > $DEST/requires-post-pip
@@ -962,6 +938,7 @@
create_keystone_accounts
create_nova_accounts
create_cinder_accounts
+ create_quantum_accounts
# ``keystone_data.sh`` creates services, admin and demo users, and roles.
ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD \
@@ -1011,392 +988,22 @@
fi
-# Ryu
-# ---
-
-# Ryu is not a part of OpenStack project. Please ignore following block if
-# you are not interested in Ryu.
-# launch ryu manager
-if is_service_enabled ryu; then
- RYU_CONF_DIR=/etc/ryu
- if [[ ! -d $RYU_CONF_DIR ]]; then
- sudo mkdir -p $RYU_CONF_DIR
- fi
- sudo chown `whoami` $RYU_CONF_DIR
- RYU_CONF=$RYU_CONF_DIR/ryu.conf
- sudo rm -rf $RYU_CONF
-
- cat <<EOF > $RYU_CONF
---app_lists=$RYU_APPS
---wsapi_host=$RYU_API_HOST
---wsapi_port=$RYU_API_PORT
---ofp_listen_host=$RYU_OFP_HOST
---ofp_tcp_listen_port=$RYU_OFP_PORT
-EOF
- screen_it ryu "cd $RYU_DIR && $RYU_DIR/bin/ryu-manager --flagfile $RYU_CONF"
-fi
-
-
# Quantum
# -------
-# Quantum Network Configuration
if is_service_enabled quantum; then
echo_summary "Configuring Quantum"
- # The following variables control the Quantum openvswitch and
- # linuxbridge plugins' allocation of tenant networks and
- # availability of provider networks. If these are not configured
- # in localrc, tenant networks will be local to the host (with no
- # remote connectivity), and no physical resources will be
- # available for the allocation of provider networks.
-
- # To use GRE tunnels for tenant networks, set to True in
- # localrc. GRE tunnels are only supported by the openvswitch
- # plugin, and currently only on Ubuntu.
- ENABLE_TENANT_TUNNELS=${ENABLE_TENANT_TUNNELS:-False}
-
- # If using GRE tunnels for tenant networks, specify the range of
- # tunnel IDs from which tenant networks are allocated. Can be
- # overriden in localrc in necesssary.
- TENANT_TUNNEL_RANGES=${TENANT_TUNNEL_RANGE:-1:1000}
-
- # To use VLANs for tenant networks, set to True in localrc. VLANs
- # are supported by the openvswitch and linuxbridge plugins, each
- # requiring additional configuration described below.
- ENABLE_TENANT_VLANS=${ENABLE_TENANT_VLANS:-False}
-
- # If using VLANs for tenant networks, set in localrc to specify
- # the range of VLAN VIDs from which tenant networks are
- # allocated. An external network switch must be configured to
- # trunk these VLANs between hosts for multi-host connectivity.
- #
- # Example: ``TENANT_VLAN_RANGE=1000:1999``
- TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-}
-
- # If using VLANs for tenant networks, or if using flat or VLAN
- # provider networks, set in localrc to the name of the physical
- # network, and also configure OVS_PHYSICAL_BRIDGE for the
- # openvswitch agent or LB_PHYSICAL_INTERFACE for the linuxbridge
- # agent, as described below.
- #
- # Example: ``PHYSICAL_NETWORK=default``
- PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-}
-
- # With the openvswitch plugin, if using VLANs for tenant networks,
- # or if using flat or VLAN provider networks, set in localrc to
- # the name of the OVS bridge to use for the physical network. The
- # bridge will be created if it does not already exist, but a
- # physical interface must be manually added to the bridge as a
- # port for external connectivity.
- #
- # Example: ``OVS_PHYSICAL_BRIDGE=br-eth1``
- OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-}
-
- # With the linuxbridge plugin, if using VLANs for tenant networks,
- # or if using flat or VLAN provider networks, set in localrc to
- # the name of the network interface to use for the physical
- # network.
- #
- # Example: ``LB_PHYSICAL_INTERFACE=eth1``
- LB_PHYSICAL_INTERFACE=${LB_PHYSICAL_INTERFACE:-}
-
- # With the openvswitch plugin, set to True in localrc to enable
- # provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False.
- #
- # Example: ``OVS_ENABLE_TUNNELING=True``
- OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-$ENABLE_TENANT_TUNNELS}
-
- # Put config files in ``QUANTUM_CONF_DIR`` for everyone to find
- if [[ ! -d $QUANTUM_CONF_DIR ]]; then
- sudo mkdir -p $QUANTUM_CONF_DIR
- fi
- sudo chown `whoami` $QUANTUM_CONF_DIR
-
- if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
- Q_PLUGIN_CONF_PATH=etc/quantum/plugins/openvswitch
- Q_PLUGIN_CONF_FILENAME=ovs_quantum_plugin.ini
- Q_DB_NAME="ovs_quantum"
- Q_PLUGIN_CLASS="quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPluginV2"
- elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
- Q_PLUGIN_CONF_PATH=etc/quantum/plugins/linuxbridge
- Q_PLUGIN_CONF_FILENAME=linuxbridge_conf.ini
- Q_DB_NAME="quantum_linux_bridge"
- Q_PLUGIN_CLASS="quantum.plugins.linuxbridge.lb_quantum_plugin.LinuxBridgePluginV2"
- elif [[ "$Q_PLUGIN" = "ryu" ]]; then
- Q_PLUGIN_CONF_PATH=etc/quantum/plugins/ryu
- Q_PLUGIN_CONF_FILENAME=ryu.ini
- Q_DB_NAME="ovs_quantum"
- Q_PLUGIN_CLASS="quantum.plugins.ryu.ryu_quantum_plugin.RyuQuantumPluginV2"
- fi
-
- if [[ $Q_PLUGIN_CONF_PATH == '' || $Q_PLUGIN_CONF_FILENAME == '' || $Q_PLUGIN_CLASS == '' ]]; then
- echo "Quantum plugin not set.. exiting"
- exit 1
- fi
-
- # If needed, move config file from ``$QUANTUM_DIR/etc/quantum`` to ``QUANTUM_CONF_DIR``
- mkdir -p /$Q_PLUGIN_CONF_PATH
- Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME
- cp $QUANTUM_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE
-
- database_connection_url dburl $Q_DB_NAME
- iniset /$Q_PLUGIN_CONF_FILE DATABASE sql_connection $dburl
- unset dburl
-
- cp $QUANTUM_DIR/etc/quantum.conf $QUANTUM_CONF
- configure_quantum_rootwrap
+ configure_quantum
+ init_quantum
fi
-# Quantum service (for controller node)
-if is_service_enabled q-svc; then
- Q_API_PASTE_FILE=$QUANTUM_CONF_DIR/api-paste.ini
- Q_POLICY_FILE=$QUANTUM_CONF_DIR/policy.json
-
- cp $QUANTUM_DIR/etc/api-paste.ini $Q_API_PASTE_FILE
- cp $QUANTUM_DIR/etc/policy.json $Q_POLICY_FILE
-
- if is_service_enabled $DATABASE_BACKENDS; then
- recreate_database $Q_DB_NAME utf8
- else
- echo "A database must be enabled in order to use the $Q_PLUGIN Quantum plugin."
- exit 1
- fi
-
- # Update either configuration file with plugin
- iniset $QUANTUM_CONF DEFAULT core_plugin $Q_PLUGIN_CLASS
-
- iniset $QUANTUM_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY
- quantum_setup_keystone $Q_API_PASTE_FILE filter:authtoken
-
- # Configure plugin
- if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
- if [[ "$ENABLE_TENANT_TUNNELS" = "True" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE OVS tenant_network_type gre
- iniset /$Q_PLUGIN_CONF_FILE OVS tunnel_id_ranges $TENANT_TUNNEL_RANGES
- elif [[ "$ENABLE_TENANT_VLANS" = "True" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE OVS tenant_network_type vlan
- else
- echo "WARNING - The openvswitch plugin is using local tenant networks, with no connectivity between hosts."
- fi
-
- # Override ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc``
- # for more complex physical network configurations.
- if [[ "$OVS_VLAN_RANGES" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]]; then
- OVS_VLAN_RANGES=$PHYSICAL_NETWORK
- if [[ "$TENANT_VLAN_RANGE" != "" ]]; then
- OVS_VLAN_RANGES=$OVS_VLAN_RANGES:$TENANT_VLAN_RANGE
- fi
- fi
- if [[ "$OVS_VLAN_RANGES" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE OVS network_vlan_ranges $OVS_VLAN_RANGES
- fi
-
- # Enable tunnel networks if selected
- if [[ $OVS_ENABLE_TUNNELING = "True" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE OVS enable_tunneling True
- fi
- elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
- if [[ "$ENABLE_TENANT_VLANS" = "True" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE VLANS tenant_network_type vlan
- else
- echo "WARNING - The linuxbridge plugin is using local tenant networks, with no connectivity between hosts."
- fi
-
- # Override ``LB_VLAN_RANGES`` and ``LB_INTERFACE_MAPPINGS`` in ``localrc``
- # for more complex physical network configurations.
- if [[ "$LB_VLAN_RANGES" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]]; then
- LB_VLAN_RANGES=$PHYSICAL_NETWORK
- if [[ "$TENANT_VLAN_RANGE" != "" ]]; then
- LB_VLAN_RANGES=$LB_VLAN_RANGES:$TENANT_VLAN_RANGE
- fi
- fi
- if [[ "$LB_VLAN_RANGES" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE VLANS network_vlan_ranges $LB_VLAN_RANGES
- fi
- elif [[ "$Q_PLUGIN" = "ryu" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE OVS openflow_controller $RYU_OFP_HOST:$RYU_OFP_PORT
- iniset /$Q_PLUGIN_CONF_FILE OVS openflow_rest_api $RYU_API_HOST:$RYU_API_PORT
- fi
-fi
-
-# Quantum agent (for compute nodes)
-if is_service_enabled q-agt; then
- # Configure agent for plugin
- if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
- # Setup integration bridge
- OVS_BRIDGE=${OVS_BRIDGE:-br-int}
- quantum_setup_ovs_bridge $OVS_BRIDGE
-
- # Setup agent for tunneling
- if [[ "$OVS_ENABLE_TUNNELING" = "True" ]]; then
- # Verify tunnels are supported
- # REVISIT - also check kernel module support for GRE and patch ports
- OVS_VERSION=`ovs-vsctl --version | head -n 1 | awk '{print $4;}'`
- if [ $OVS_VERSION \< "1.4" ] && ! is_service_enabled q-svc ; then
- echo "You are running OVS version $OVS_VERSION."
- echo "OVS 1.4+ is required for tunneling between multiple hosts."
- exit 1
- fi
- iniset /$Q_PLUGIN_CONF_FILE OVS enable_tunneling True
- iniset /$Q_PLUGIN_CONF_FILE OVS local_ip $HOST_IP
- fi
-
- # Setup physical network bridge mappings. Override
- # ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc`` for more
- # complex physical network configurations.
- if [[ "$OVS_BRIDGE_MAPPINGS" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$OVS_PHYSICAL_BRIDGE" != "" ]]; then
- OVS_BRIDGE_MAPPINGS=$PHYSICAL_NETWORK:$OVS_PHYSICAL_BRIDGE
-
- # Configure bridge manually with physical interface as port for multi-node
- sudo ovs-vsctl --no-wait -- --may-exist add-br $OVS_PHYSICAL_BRIDGE
- fi
- if [[ "$OVS_BRIDGE_MAPPINGS" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE OVS bridge_mappings $OVS_BRIDGE_MAPPINGS
- fi
- AGENT_BINARY="$QUANTUM_DIR/bin/quantum-openvswitch-agent"
- elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
- # Setup physical network interface mappings. Override
- # ``LB_VLAN_RANGES`` and ``LB_INTERFACE_MAPPINGS`` in ``localrc`` for more
- # complex physical network configurations.
- if [[ "$LB_INTERFACE_MAPPINGS" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$LB_PHYSICAL_INTERFACE" != "" ]]; then
- LB_INTERFACE_MAPPINGS=$PHYSICAL_NETWORK:$LB_PHYSICAL_INTERFACE
- fi
- if [[ "$LB_INTERFACE_MAPPINGS" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE LINUX_BRIDGE physical_interface_mappings $LB_INTERFACE_MAPPINGS
- fi
- AGENT_BINARY="$QUANTUM_DIR/bin/quantum-linuxbridge-agent"
- elif [[ "$Q_PLUGIN" = "ryu" ]]; then
- # Set up integration bridge
- OVS_BRIDGE=${OVS_BRIDGE:-br-int}
- quantum_setup_ovs_bridge $OVS_BRIDGE
- if [ -n "$RYU_INTERNAL_INTERFACE" ]; then
- sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_BRIDGE $RYU_INTERNAL_INTERFACE
- fi
- AGENT_BINARY="$QUANTUM_DIR/quantum/plugins/ryu/agent/ryu_quantum_agent.py"
- fi
- # Update config w/rootwrap
- iniset /$Q_PLUGIN_CONF_FILE AGENT root_helper "$Q_RR_COMMAND"
-fi
-
-# Quantum DHCP
-if is_service_enabled q-dhcp; then
- AGENT_DHCP_BINARY="$QUANTUM_DIR/bin/quantum-dhcp-agent"
-
- Q_DHCP_CONF_FILE=$QUANTUM_CONF_DIR/dhcp_agent.ini
-
- cp $QUANTUM_DIR/etc/dhcp_agent.ini $Q_DHCP_CONF_FILE
-
- # Set verbose
- iniset $Q_DHCP_CONF_FILE DEFAULT verbose True
- # Set debug
- iniset $Q_DHCP_CONF_FILE DEFAULT debug True
- iniset $Q_DHCP_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE
- iniset $Q_DHCP_CONF_FILE DEFAULT state_path $DATA_DIR/quantum
-
- quantum_setup_keystone $Q_DHCP_CONF_FILE DEFAULT set_auth_url
-
- # Update config w/rootwrap
- iniset $Q_DHCP_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND"
-
- if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
- iniset $Q_DHCP_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver
- elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
- iniset $Q_DHCP_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver
- elif [[ "$Q_PLUGIN" = "ryu" ]]; then
- iniset $Q_DHCP_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.RyuInterfaceDriver
- iniset $Q_DHCP_CONF_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT
- fi
-fi
-
-# Quantum L3
-if is_service_enabled q-l3; then
- AGENT_L3_BINARY="$QUANTUM_DIR/bin/quantum-l3-agent"
- PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex}
- Q_L3_CONF_FILE=$QUANTUM_CONF_DIR/l3_agent.ini
-
- cp $QUANTUM_DIR/etc/l3_agent.ini $Q_L3_CONF_FILE
-
- # Set verbose
- iniset $Q_L3_CONF_FILE DEFAULT verbose True
- # Set debug
- iniset $Q_L3_CONF_FILE DEFAULT debug True
-
- iniset $Q_L3_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE
-
- iniset $Q_L3_CONF_FILE DEFAULT state_path $DATA_DIR/quantum
-
- iniset $Q_L3_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND"
-
- quantum_setup_keystone $Q_L3_CONF_FILE DEFAULT set_auth_url
- if [[ "$Q_PLUGIN" == "openvswitch" ]]; then
- iniset $Q_L3_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver
- iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE
- # Set up external bridge
- quantum_setup_external_bridge $PUBLIC_BRIDGE
- elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
- iniset $Q_L3_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver
- iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge ''
- elif [[ "$Q_PLUGIN" = "ryu" ]]; then
- iniset $Q_L3_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.RyuInterfaceDriver
- iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE
- iniset $Q_L3_CONF_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT
- # Set up external bridge
- quantum_setup_external_bridge $PUBLIC_BRIDGE
- fi
-fi
-
-#Quantum Metadata
-if is_service_enabled q-meta; then
- AGENT_META_BINARY="$QUANTUM_DIR/bin/quantum-metadata-agent"
- Q_META_CONF_FILE=$QUANTUM_CONF_DIR/metadata_agent.ini
-
- cp $QUANTUM_DIR/etc/metadata_agent.ini $Q_META_CONF_FILE
-
- # Set verbose
- iniset $Q_META_CONF_FILE DEFAULT verbose True
- # Set debug
- iniset $Q_META_CONF_FILE DEFAULT debug True
-
- iniset $Q_META_CONF_FILE DEFAULT state_path $DATA_DIR/quantum
-
- iniset $Q_META_CONF_FILE DEFAULT nova_metadata_ip $Q_META_DATA_IP
-
- iniset $Q_META_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND"
-
- quantum_setup_keystone $Q_META_CONF_FILE DEFAULT set_auth_url
-fi
-
-# Quantum RPC support - must be updated prior to starting any of the services
+# Some Quantum plugins require network controllers which are not
+# a part of the OpenStack project. Configure and start them.
if is_service_enabled quantum; then
- iniset $QUANTUM_CONF DEFAULT control_exchange quantum
- if is_service_enabled qpid ; then
- iniset $QUANTUM_CONF DEFAULT rpc_backend quantum.openstack.common.rpc.impl_qpid
- elif is_service_enabled zeromq; then
- iniset $QUANTUM_CONF DEFAULT rpc_backend quantum.openstack.common.rpc.impl_zmq
- elif [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then
- iniset $QUANTUM_CONF DEFAULT rabbit_host $RABBIT_HOST
- iniset $QUANTUM_CONF DEFAULT rabbit_password $RABBIT_PASSWORD
- fi
- if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then
- cp $QUANTUM_DIR/etc/l3_agent.ini $QUANTUM_TEST_CONFIG_FILE
- iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT verbose False
- iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT debug False
- iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE
- iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT root_helper "$Q_RR_COMMAND"
- quantum_setup_keystone $QUANTUM_TEST_CONFIG_FILE DEFAULT set_auth_url
- if [[ "$Q_PLUGIN" == "openvswitch" ]]; then
- iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver
- iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE
- elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
- iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver
- iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT external_network_bridge ''
- elif [[ "$Q_PLUGIN" = "ryu" ]]; then
- iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT interface_driver quantum.agent.linux.interface.RyuInterfaceDriver
- iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE
- iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT
- fi
- fi
+ configure_quantum_third_party
+ init_quantum_third_party
+ start_quantum_third_party
fi
@@ -1412,9 +1019,9 @@
# Delete traces of nova networks from prior runs
sudo killall dnsmasq || true
clean_iptables
- rm -rf $NOVA_STATE_PATH/networks
- mkdir -p $NOVA_STATE_PATH/networks
-
+ rm -rf ${NOVA_STATE_PATH}/networks
+ sudo mkdir -p ${NOVA_STATE_PATH}/networks
+ sudo chown -R ${USER} ${NOVA_STATE_PATH}/networks
# Force IP forwarding on, just on case
sudo sysctl -w net.ipv4.ip_forward=1
fi
@@ -1445,45 +1052,17 @@
# Additional Nova configuration that is dependent on other services
if is_service_enabled quantum; then
- add_nova_opt "network_api_class=nova.network.quantumv2.api.API"
- add_nova_opt "quantum_admin_username=$Q_ADMIN_USERNAME"
- add_nova_opt "quantum_admin_password=$SERVICE_PASSWORD"
- add_nova_opt "quantum_admin_auth_url=$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0"
- add_nova_opt "quantum_auth_strategy=$Q_AUTH_STRATEGY"
- add_nova_opt "quantum_admin_tenant_name=$SERVICE_TENANT_NAME"
- add_nova_opt "quantum_url=http://$Q_HOST:$Q_PORT"
-
- if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
- NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver"}
- elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
- NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.QuantumLinuxBridgeVIFDriver"}
- elif [[ "$Q_PLUGIN" = "ryu" ]]; then
- NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"quantum.plugins.ryu.nova.vif.LibvirtOpenVswitchOFPRyuDriver"}
- add_nova_opt "libvirt_ovs_integration_bridge=$OVS_BRIDGE"
- add_nova_opt "linuxnet_ovs_ryu_api_host=$RYU_API_HOST:$RYU_API_PORT"
- add_nova_opt "libvirt_ovs_ryu_api_host=$RYU_API_HOST:$RYU_API_PORT"
- fi
- add_nova_opt "libvirt_vif_driver=$NOVA_VIF_DRIVER"
- add_nova_opt "linuxnet_interface_driver=$LINUXNET_VIF_DRIVER"
- if is_service_enabled q-meta; then
- add_nova_opt "service_quantum_metadata_proxy=True"
- fi
+ create_nova_conf_quantum
elif is_service_enabled n-net; then
- add_nova_opt "network_manager=nova.network.manager.$NET_MAN"
- add_nova_opt "public_interface=$PUBLIC_INTERFACE"
- add_nova_opt "vlan_interface=$VLAN_INTERFACE"
- add_nova_opt "flat_network_bridge=$FLAT_NETWORK_BRIDGE"
- if [ -n "$FLAT_INTERFACE" ]; then
- add_nova_opt "flat_interface=$FLAT_INTERFACE"
- fi
+ create_nova_conf_nova_network
fi
# All nova-compute workers need to know the vnc configuration options
# These settings don't hurt anything if n-xvnc and n-novnc are disabled
if is_service_enabled n-cpu; then
NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_auto.html"}
- add_nova_opt "novncproxy_base_url=$NOVNCPROXY_URL"
+ iniset $NOVA_CONF DEFAULT novncproxy_base_url "$NOVNCPROXY_URL"
XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:6081/console"}
- add_nova_opt "xvpvncproxy_base_url=$XVPVNCPROXY_URL"
+ iniset $NOVA_CONF DEFAULT xvpvncproxy_base_url "$XVPVNCPROXY_URL"
fi
if [ "$VIRT_DRIVER" = 'xenserver' ]; then
VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=169.254.0.1}
@@ -1493,18 +1072,18 @@
# Address on which instance vncservers will listen on compute hosts.
# For multi-host, this should be the management ip of the compute host.
VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1}
- add_nova_opt "vncserver_listen=$VNCSERVER_LISTEN"
- add_nova_opt "vncserver_proxyclient_address=$VNCSERVER_PROXYCLIENT_ADDRESS"
- add_nova_opt "ec2_dmz_host=$EC2_DMZ_HOST"
+ iniset $NOVA_CONF DEFAULT vncserver_listen "$VNCSERVER_LISTEN"
+ iniset $NOVA_CONF DEFAULT vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS"
+ iniset $NOVA_CONF DEFAULT ec2_dmz_host "$EC2_DMZ_HOST"
if is_service_enabled zeromq; then
- add_nova_opt "rpc_backend=nova.openstack.common.rpc.impl_zmq"
+ iniset $NOVA_CONF DEFAULT rpc_backend "nova.openstack.common.rpc.impl_zmq"
elif is_service_enabled qpid; then
- add_nova_opt "rpc_backend=nova.rpc.impl_qpid"
+ iniset $NOVA_CONF DEFAULT rpc_backend "nova.rpc.impl_qpid"
elif [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then
- add_nova_opt "rabbit_host=$RABBIT_HOST"
- add_nova_opt "rabbit_password=$RABBIT_PASSWORD"
+ iniset $NOVA_CONF DEFAULT rabbit_host "$RABBIT_HOST"
+ iniset $NOVA_CONF DEFAULT rabbit_password "$RABBIT_PASSWORD"
fi
- add_nova_opt "glance_api_servers=$GLANCE_HOSTPORT"
+ iniset $NOVA_CONF DEFAULT glance_api_servers "$GLANCE_HOSTPORT"
# XenServer
@@ -1513,32 +1092,70 @@
if [ "$VIRT_DRIVER" = 'xenserver' ]; then
echo_summary "Using XenServer virtualization driver"
read_password XENAPI_PASSWORD "ENTER A PASSWORD TO USE FOR XEN."
- add_nova_opt "compute_driver=xenapi.XenAPIDriver"
+ iniset $NOVA_CONF DEFAULT compute_driver "xenapi.XenAPIDriver"
XENAPI_CONNECTION_URL=${XENAPI_CONNECTION_URL:-"http://169.254.0.1"}
XENAPI_USER=${XENAPI_USER:-"root"}
- add_nova_opt "xenapi_connection_url=$XENAPI_CONNECTION_URL"
- add_nova_opt "xenapi_connection_username=$XENAPI_USER"
- add_nova_opt "xenapi_connection_password=$XENAPI_PASSWORD"
- add_nova_opt "flat_injected=False"
+ iniset $NOVA_CONF DEFAULT xenapi_connection_url "$XENAPI_CONNECTION_URL"
+ iniset $NOVA_CONF DEFAULT xenapi_connection_username "$XENAPI_USER"
+ iniset $NOVA_CONF DEFAULT xenapi_connection_password "$XENAPI_PASSWORD"
+ iniset $NOVA_CONF DEFAULT flat_injected "False"
# Need to avoid crash due to new firewall support
XEN_FIREWALL_DRIVER=${XEN_FIREWALL_DRIVER:-"nova.virt.firewall.IptablesFirewallDriver"}
- add_nova_opt "firewall_driver=$XEN_FIREWALL_DRIVER"
+ iniset $NOVA_CONF DEFAULT firewall_driver "$XEN_FIREWALL_DRIVER"
+
+ # OpenVZ
+ # ------
+
elif [ "$VIRT_DRIVER" = 'openvz' ]; then
echo_summary "Using OpenVZ virtualization driver"
# TODO(deva): OpenVZ driver does not yet work if compute_driver is set here.
# Replace connection_type when this is fixed.
- # add_nova_opt "compute_driver=openvz.connection.OpenVzConnection"
- add_nova_opt "connection_type=openvz"
+ # iniset $NOVA_CONF DEFAULT compute_driver "openvz.connection.OpenVzConnection"
+ iniset $NOVA_CONF DEFAULT connection_type "openvz"
LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"}
- add_nova_opt "firewall_driver=$LIBVIRT_FIREWALL_DRIVER"
+ iniset $NOVA_CONF DEFAULT firewall_driver "$LIBVIRT_FIREWALL_DRIVER"
+
+ # Bare Metal
+ # ----------
+
+ elif [ "$VIRT_DRIVER" = 'baremetal' ]; then
+ echo_summary "Using BareMetal driver"
+ LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.firewall.NoopFirewallDriver"}
+ iniset $NOVA_CONF DEFAULT compute_driver nova.virt.baremetal.driver.BareMetalDriver
+ iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER
+ iniset $NOVA_CONF DEFAULT scheduler_host_manager nova.scheduler.baremetal_host_manager.BaremetalHostManager
+ iniset $NOVA_CONF DEFAULT scheduler_default_filters AllHostsFilter
+ iniset $NOVA_CONF baremetal driver $BM_DRIVER
+ iniset $NOVA_CONF baremetal instance_type_extra_specs cpu_arch:$BM_CPU_ARCH
+ iniset $NOVA_CONF baremetal power_manager $BM_POWER_MANAGER
+ iniset $NOVA_CONF baremetal tftp_root /tftpboot
+
+ # Define extra baremetal nova conf flags by defining the array ``EXTRA_BAREMETAL_OPTS``.
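+    # For example (hypothetical value), setting in ``localrc``:
+    #   EXTRA_BAREMETAL_OPTS=(use_unsafe_iscsi=True)
+    # would be applied by the loop below as:
+    #   iniset $NOVA_CONF baremetal use_unsafe_iscsi True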
+ for I in "${EXTRA_BAREMETAL_OPTS[@]}"; do
+ # Attempt to convert flags to options
+ iniset $NOVA_CONF baremetal ${I//=/ }
+ done
+
+ # Default
+ # -------
+
else
echo_summary "Using libvirt virtualization driver"
- add_nova_opt "compute_driver=libvirt.LibvirtDriver"
+ iniset $NOVA_CONF DEFAULT compute_driver "libvirt.LibvirtDriver"
LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"}
- add_nova_opt "firewall_driver=$LIBVIRT_FIREWALL_DRIVER"
+ iniset $NOVA_CONF DEFAULT firewall_driver "$LIBVIRT_FIREWALL_DRIVER"
fi
fi
+# Extra things to prepare nova for baremetal, before nova starts
+if is_service_enabled nova && is_baremetal; then
+ echo_summary "Preparing for nova baremetal"
+ prepare_baremetal_toolchain
+ configure_baremetal_nova_dirs
+ if [[ "$BM_USE_FAKE_ENV" = "True" ]]; then
+ create_fake_baremetal_env
+ fi
+fi
# Launch Services
# ===============
@@ -1564,9 +1181,9 @@
CREDS=$(keystone ec2-credentials-create --user_id $NOVA_USER_ID --tenant_id $NOVA_TENANT_ID)
ACCESS_KEY=$(echo "$CREDS" | awk '/ access / { print $4 }')
SECRET_KEY=$(echo "$CREDS" | awk '/ secret / { print $4 }')
- add_nova_opt "s3_access_key=$ACCESS_KEY"
- add_nova_opt "s3_secret_key=$SECRET_KEY"
- add_nova_opt "s3_affix_tenant=True"
+ iniset $NOVA_CONF DEFAULT s3_access_key "$ACCESS_KEY"
+ iniset $NOVA_CONF DEFAULT s3_secret_key "$SECRET_KEY"
+ iniset $NOVA_CONF DEFAULT s3_affix_tenant "True"
fi
screen_it zeromq "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-rpc-zmq-receiver"
@@ -1574,74 +1191,29 @@
# Launch the nova-api and wait for it to answer before continuing
if is_service_enabled n-api; then
echo_summary "Starting Nova API"
- screen_it n-api "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api"
- echo "Waiting for nova-api to start..."
- if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://127.0.0.1:8774; do sleep 1; done"; then
- echo "nova-api did not start"
- exit 1
- fi
+ start_nova_api
fi
if is_service_enabled q-svc; then
echo_summary "Starting Quantum"
- # Start the Quantum service
- screen_it q-svc "cd $QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server --config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE"
- echo "Waiting for Quantum to start..."
- if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://127.0.0.1:9696; do sleep 1; done"; then
- echo "Quantum did not start"
- exit 1
- fi
- # Configure Quantum elements
- # Configure internal network & subnet
-
- TENANT_ID=$(keystone tenant-list | grep " demo " | get_field 1)
-
- # Create a small network
- # Since quantum command is executed in admin context at this point,
- # ``--tenant_id`` needs to be specified.
- NET_ID=$(quantum net-create --tenant_id $TENANT_ID "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2)
- SUBNET_ID=$(quantum subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2)
- if is_service_enabled q-l3; then
- # Create a router, and add the private subnet as one of its interfaces
- ROUTER_ID=$(quantum router-create --tenant_id $TENANT_ID router1 | grep ' id ' | get_field 2)
- quantum router-interface-add $ROUTER_ID $SUBNET_ID
- # Create an external network, and a subnet. Configure the external network as router gw
- EXT_NET_ID=$(quantum net-create "$PUBLIC_NETWORK_NAME" -- --router:external=True | grep ' id ' | get_field 2)
- EXT_GW_IP=$(quantum subnet-create --ip_version 4 $EXT_NET_ID $FLOATING_RANGE -- --enable_dhcp=False | grep 'gateway_ip' | get_field 2)
- quantum router-gateway-set $ROUTER_ID $EXT_NET_ID
- if is_quantum_ovs_base_plugin "$Q_PLUGIN" && [[ "$Q_USE_NAMESPACE" = "True" ]]; then
- CIDR_LEN=${FLOATING_RANGE#*/}
- sudo ip addr add $EXT_GW_IP/$CIDR_LEN dev $PUBLIC_BRIDGE
- sudo ip link set $PUBLIC_BRIDGE up
- ROUTER_GW_IP=`quantum port-list -c fixed_ips -c device_owner | grep router_gateway | awk -F '"' '{ print $8; }'`
- sudo route add -net $FIXED_RANGE gw $ROUTER_GW_IP
- fi
- if [[ "$Q_USE_NAMESPACE" == "False" ]]; then
- # Explicitly set router id in l3 agent configuration
- iniset $Q_L3_CONF_FILE DEFAULT router_id $ROUTER_ID
- fi
- fi
- if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then
- setup_quantum
- fi
+ start_quantum_service_and_check
+ create_quantum_initial_network
+ setup_quantum_debug
elif is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-net; then
# Create a small network
$NOVA_BIN_DIR/nova-manage network create "$PRIVATE_NETWORK_NAME" $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS
# Create some floating ips
- $NOVA_BIN_DIR/nova-manage floating create $FLOATING_RANGE --pool=$PUBLIC_NETWORK
+ $NOVA_BIN_DIR/nova-manage floating create $FLOATING_RANGE --pool=$PUBLIC_NETWORK_NAME
# Create a second pool
$NOVA_BIN_DIR/nova-manage floating create --ip_range=$TEST_FLOATING_RANGE --pool=$TEST_FLOATING_POOL
fi
-# Start up the quantum agents if enabled
-screen_it q-agt "python $AGENT_BINARY --config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE"
-screen_it q-dhcp "python $AGENT_DHCP_BINARY --config-file $QUANTUM_CONF --config-file=$Q_DHCP_CONF_FILE"
-screen_it q-meta "python $AGENT_META_BINARY --config-file $QUANTUM_CONF --config-file=$Q_META_CONF_FILE"
-screen_it q-l3 "python $AGENT_L3_BINARY --config-file $QUANTUM_CONF --config-file=$Q_L3_CONF_FILE"
-
+if is_service_enabled quantum; then
+ start_quantum_agents
+fi
if is_service_enabled nova; then
echo_summary "Starting Nova"
start_nova
@@ -1653,7 +1225,9 @@
if is_service_enabled ceilometer; then
echo_summary "Configuring Ceilometer"
configure_ceilometer
+ configure_ceilometerclient
echo_summary "Starting Ceilometer"
+ init_ceilometer
start_ceilometer
fi
@@ -1672,6 +1246,17 @@
start_heat
fi
+# Create account rc files
+# =======================
+
+# Creates sourceable script files for easier user switching.
+# This step also creates certificates for tenants and users,
+# which is helpful in image bundle steps.
+
+if is_service_enabled nova && is_service_enabled key; then
+ $TOP_DIR/tools/create_userrc.sh -PA --target-dir $TOP_DIR/accrc
+fi
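+# A generated rc file can then be sourced to switch identity, e.g. (assuming
+# the default demo tenant and user exist):
+#   source $TOP_DIR/accrc/demo/demo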
+
# Install Images
# ==============
@@ -1687,19 +1272,56 @@
# * **precise**: http://uec-images.ubuntu.com/precise/current/precise-server-cloudimg-amd64.tar.gz
if is_service_enabled g-reg; then
- echo_summary "Uploading images"
TOKEN=$(keystone token-get | grep ' id ' | get_field 2)
- # Option to upload legacy ami-tty, which works with xenserver
- if [[ -n "$UPLOAD_LEGACY_TTY" ]]; then
- IMAGE_URLS="${IMAGE_URLS:+${IMAGE_URLS},}https://github.com/downloads/citrix-openstack/warehouse/tty.tgz"
- fi
+ if is_baremetal; then
+ echo_summary "Creating and uploading baremetal images"
- for image_url in ${IMAGE_URLS//,/ }; do
- upload_image $image_url $TOKEN
- done
+ # build and upload separate deploy kernel & ramdisk
+ upload_baremetal_deploy $TOKEN
+
+ # upload images, separating out the kernel & ramdisk for PXE boot
+ for image_url in ${IMAGE_URLS//,/ }; do
+ upload_baremetal_image $image_url $TOKEN
+ done
+ else
+ echo_summary "Uploading images"
+
+ # Option to upload legacy ami-tty, which works with xenserver
+ if [[ -n "$UPLOAD_LEGACY_TTY" ]]; then
+ IMAGE_URLS="${IMAGE_URLS:+${IMAGE_URLS},}https://github.com/downloads/citrix-openstack/warehouse/tty.tgz"
+ fi
+
+ for image_url in ${IMAGE_URLS//,/ }; do
+ upload_image $image_url $TOKEN
+ done
+ fi
fi
+# If we are running nova with the baremetal driver, there are a few
+# last-mile configuration bits to attend to, which must happen
+# after n-api and n-sch have started.
+# Also, creating the baremetal flavor must happen after images
+# are loaded into glance, though just knowing the IDs is sufficient here
+if is_service_enabled nova && is_baremetal; then
+ # create special flavor for baremetal if we know what images to associate
+ [[ -n "$BM_DEPLOY_KERNEL_ID" ]] && [[ -n "$BM_DEPLOY_RAMDISK_ID" ]] && \
+ create_baremetal_flavor $BM_DEPLOY_KERNEL_ID $BM_DEPLOY_RAMDISK_ID
+
+    # otherwise the user can manually add it later by calling nova-baremetal-manage
+ [[ -n "$BM_FIRST_MAC" ]] && add_baremetal_node
+
+ # NOTE: we do this here to ensure that our copy of dnsmasq is running
+ sudo pkill dnsmasq || true
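+    # An empty --conf-file skips the system dnsmasq config and --port=0
+    # disables its DNS function, leaving only a TFTP/DHCP PXE responder
+    # on $BM_DNSMASQ_IFACE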
+ sudo dnsmasq --conf-file= --port=0 --enable-tftp --tftp-root=/tftpboot \
+ --dhcp-boot=pxelinux.0 --bind-interfaces --pid-file=/var/run/dnsmasq.pid \
+ --interface=$BM_DNSMASQ_IFACE --dhcp-range=$BM_DNSMASQ_RANGE
+
+ # ensure callback daemon is running
+ sudo pkill nova-baremetal-deploy-helper || true
+ screen_it baremetal "nova-baremetal-deploy-helper"
+fi
# Configure Tempest last to ensure that the runtime configuration of
# the various OpenStack services can be queried.
diff --git a/stackrc b/stackrc
index 9e06028..4e03a2f 100644
--- a/stackrc
+++ b/stackrc
@@ -33,6 +33,10 @@
CEILOMETER_REPO=${GIT_BASE}/openstack/ceilometer.git
CEILOMETER_BRANCH=master
+# ceilometer client library
+CEILOMETERCLIENT_REPO=${GIT_BASE}/openstack/python-ceilometerclient
+CEILOMETERCLIENT_BRANCH=master
+
# volume service
CINDER_REPO=${GIT_BASE}/openstack/cinder
CINDER_BRANCH=master
@@ -111,6 +115,17 @@
RYU_REPO=https://github.com/osrg/ryu.git
RYU_BRANCH=master
+# diskimage-builder
+BM_IMAGE_BUILD_REPO=https://github.com/stackforge/diskimage-builder.git
+BM_IMAGE_BUILD_BRANCH=master
+
+# bm_poseur
+# Used to simulate a hardware environment for baremetal
+# Only used if BM_USE_FAKE_ENV is set
+BM_POSEUR_REPO=https://github.com/tripleo/bm_poseur.git
+BM_POSEUR_BRANCH=master
+
+
# Nova hypervisor configuration. We default to libvirt with **kvm** but will
# drop back to **qemu** if we are unable to load the kvm module. ``stack.sh`` can
# also install an **LXC** or **OpenVZ** based system.
diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh
new file mode 100755
index 0000000..55cb8fa
--- /dev/null
+++ b/tools/create_userrc.sh
@@ -0,0 +1,254 @@
+#!/usr/bin/env bash
+
+# Warning: This script is just for development purposes
+
+ACCOUNT_DIR=./accrc
+
+display_help()
+{
+cat <<EOF
+
+usage: $0 <options..>
+
+This script creates certificates and sourceable rc files per tenant/user.
+
+Target account directory hierarchy:
+target_dir-|
+ |-cacert.pem
+ |-tenant1-name|
+ | |- user1
+ | |- user1-cert.pem
+ | |- user1-pk.pem
+ | |- user2
+ | ..
+ |-tenant2-name..
+ ..
+
+Optional Arguments
+-P include the password in the rc files; with -A it assumes all users' passwords are the same
+-A try with all users
+-u <username> create files just for the specified user
+-C <tenant_name> create the user and tenant; the specified tenant will be the user's tenant
+-r <name> when combined with -C and the (-u) user exists, it will be the user's role in the (-C) tenant (default: Member)
+-p <userpass> password for the user
+--os-username <username>
+--os-password <admin password>
+--os-tenant-name <tenant_name>
+--os-tenant-id <tenant_id>
+--os-auth-url <auth_url>
+--target-dir <target_directory>
+--skip-tenant <tenant-name>
+--debug
+
+Example:
+$0 -AP
+$0 -P -C mytenant -u myuser -p mypass
+EOF
+}
+
+if ! options=$(getopt -o hPAp:u:r:C: -l os-username:,os-password:,os-tenant-name:,os-tenant-id:,os-auth-url:,target-dir:,skip-tenant:,help,debug -- "$@")
+then
+ #parse error
+ display_help
+ exit 1
+fi
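+# Replace the positional parameters with getopt's normalized output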
+eval set -- $options
+ADDPASS=""
+
+# The service users are usually in the service tenant.
+# Creating rc files for service users is out of scope,
+# as is supporting a different tenant for services.
+SKIP_TENANT=",service," # tenant names are delimited by commas (,)
+MODE=""
+ROLE=Member
+USER_NAME=""
+USER_PASS=""
+while [ $# -gt 0 ]
+do
+ case "$1" in
+ -h|--help) display_help; exit 0 ;;
+ --os-username) export OS_USERNAME=$2; shift ;;
+ --os-password) export OS_PASSWORD=$2; shift ;;
+ --os-tenant-name) export OS_TENANT_NAME=$2; shift ;;
+ --os-tenant-id) export OS_TENANT_ID=$2; shift ;;
+ --skip-tenant) SKIP_TENANT="$SKIP_TENANT$2,"; shift ;;
+ --os-auth-url) export OS_AUTH_URL=$2; shift ;;
+ --target-dir) ACCOUNT_DIR=$2; shift ;;
+ --debug) set -o xtrace ;;
+ -u) MODE=${MODE:-one}; USER_NAME=$2; shift ;;
+ -p) USER_PASS=$2; shift ;;
+ -A) MODE=all; ;;
+ -P) ADDPASS="yes" ;;
+ -C) MODE=create; TENANT=$2; shift ;;
+ -r) ROLE=$2; shift ;;
+ (--) shift; break ;;
+ (-*) echo "$0: error - unrecognized option $1" >&2; display_help; exit 1 ;;
+ (*) echo "$0: error - unexpected argument $1" >&2; display_help; exit 1 ;;
+ esac
+ shift
+done
+
+if [ -z "$OS_PASSWORD" ]; then
+ if [ -z "$ADMIN_PASSWORD" ];then
+ echo "The admin password is required option!" >&2
+ exit 2
+ else
+ OS_PASSWORD=$ADMIN_PASSWORD
+ fi
+fi
+
+if [ -z "$OS_TENANT_NAME" -a -z "$OS_TENANT_ID" ]; then
+ export OS_TENANT_NAME=admin
+fi
+
+if [ -z "$OS_USERNAME" ]; then
+ export OS_USERNAME=admin
+fi
+
+if [ -z "$OS_AUTH_URL" ]; then
+ export OS_AUTH_URL=http://localhost:5000/v2.0/
+fi
+
+USER_PASS=${USER_PASS:-$OS_PASSWORD}
+USER_NAME=${USER_NAME:-$OS_USERNAME}
+
+if [ -z "$MODE" ]; then
+ echo "You must specify at least -A or -u parameter!" >&2
+ echo
+ display_help
+ exit 3
+fi
+
+export -n SERVICE_TOKEN SERVICE_ENDPOINT OS_SERVICE_TOKEN OS_SERVICE_ENDPOINT
+
+EC2_URL=http://localhost:8773/service/Cloud
+S3_URL=http://localhost:3333
+
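+# Prefer the endpoints published in the keystone catalog over the defaults
+# above; `keystone endpoint-get` prints an ASCII table, so awk extracts the
+# URL column from the matching publicURL row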
+ec2=`keystone endpoint-get --service ec2 | awk '/\|[[:space:]]*ec2.publicURL/ {print $4}'`
+[ -n "$ec2" ] && EC2_URL=$ec2
+
+s3=`keystone endpoint-get --service s3 | awk '/\|[[:space:]]*s3.publicURL/ {print $4}'`
+[ -n "$s3" ] && S3_URL=$s3
+
+
+mkdir -p "$ACCOUNT_DIR"
+ACCOUNT_DIR=`readlink -f "$ACCOUNT_DIR"`
+EUCALYPTUS_CERT=$ACCOUNT_DIR/cacert.pem
+mv "$EUCALYPTUS_CERT" "$EUCALYPTUS_CERT.old" &>/dev/null
+if ! nova x509-get-root-cert "$EUCALYPTUS_CERT"; then
+ echo "Failed to update the root certificate: $EUCALYPTUS_CERT" >&2
+ mv "$EUCALYPTUS_CERT.old" "$EUCALYPTUS_CERT" &>/dev/null
+fi
+
+
+function add_entry(){
+ local user_id=$1
+ local user_name=$2
+ local tenant_id=$3
+ local tenant_name=$4
+ local user_passwd=$5
+
+    # The admin user can see all users' secret AWS keys, which does not look good
+ local line=`keystone ec2-credentials-list --user_id $user_id | grep -E "^\\|[[:space:]]*($tenant_name|$tenant_id)[[:space:]]*\\|" | head -n 1`
+ if [ -z "$line" ]; then
+ keystone ec2-credentials-create --user-id $user_id --tenant-id $tenant_id 1>&2
+ line=`keystone ec2-credentials-list --user_id $user_id | grep -E "^\\|[[:space:]]*($tenant_name|$tenant_id)[[:space:]]*\\|" | head -n 1`
+ fi
+ local ec2_access_key ec2_secret_key
+ read ec2_access_key ec2_secret_key <<< `echo $line | awk '{print $4 " " $6 }'`
+ mkdir -p "$ACCOUNT_DIR/$tenant_name"
+ local rcfile="$ACCOUNT_DIR/$tenant_name/$user_name"
+    # The cert's subject is the tenant ID "dash" user ID, but the CN should be the first part of the DN
+    # Generally the subject DN parts should be in reverse order, like the Issuer's
+    # The Serial does not seem to be set correctly either
+ local ec2_cert="$rcfile-cert.pem"
+ local ec2_private_key="$rcfile-pk.pem"
+ # Try to preserve the original file on fail (best effort)
+ mv "$ec2_private_key" "$ec2_private_key.old" &>/dev/null
+ mv "$ec2_cert" "$ec2_cert.old" &>/dev/null
+ # It will not create certs when the password is incorrect
+ if ! nova --os-password "$user_passwd" --os-username "$user_name" --os-tenant-name "$tenant_name" x509-create-cert "$ec2_private_key" "$ec2_cert"; then
+ mv "$ec2_private_key.old" "$ec2_private_key" &>/dev/null
+ mv "$ec2_cert.old" "$ec2_cert" &>/dev/null
+ fi
+ cat >"$rcfile" <<EOF
+# you can source this file
+export EC2_ACCESS_KEY="$ec2_access_key"
+export EC2_SECRET_KEY="$ec2_secret_key"
+export EC2_URL="$EC2_URL"
+export S3_URL="$S3_URL"
+# OpenStack USER ID = $user_id
+export OS_USERNAME="$user_name"
+# Openstack Tenant ID = $tenant_id
+export OS_TENANT_NAME="$tenant_name"
+export OS_AUTH_URL="$OS_AUTH_URL"
+export EC2_CERT="$ec2_cert"
+export EC2_PRIVATE_KEY="$ec2_private_key"
+export EC2_USER_ID=42 #not checked by nova (can be a 12-digit id)
+export EUCALYPTUS_CERT="$ACCOUNT_DIR/cacert.pem"
+export NOVA_CERT="$ACCOUNT_DIR/cacert.pem"
+EOF
+ if [ -n "$ADDPASS" ]; then
+ echo "export OS_PASSWORD=\"$user_passwd\"" >>"$rcfile"
+ fi
+}
+
+# An admin user is expected
+function create_or_get_tenant(){
+ local tenant_name=$1
+ local tenant_id=`keystone tenant-list | awk '/\|[[:space:]]*'"$tenant_name"'[[:space:]]*\|.*\|/ {print $2}'`
+ if [ -n "$tenant_id" ]; then
+ echo $tenant_id
+ else
+ keystone tenant-create --name "$tenant_name" | awk '/\|[[:space:]]*id[[:space:]]*\|.*\|/ {print $4}'
+ fi
+}
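+# Usage sketch: tenant_id=`create_or_get_tenant demo` prints the existing
+# tenant's id, or creates the tenant and prints the new id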
+
+function create_or_get_role(){
+ local role_name=$1
+ local role_id=`keystone role-list| awk '/\|[[:space:]]*'"$role_name"'[[:space:]]*\|/ {print $2}'`
+ if [ -n "$role_id" ]; then
+ echo $role_id
+ else
+ keystone role-create --name "$role_name" |awk '/\|[[:space:]]*id[[:space:]]*\|.*\|/ {print $4}'
+ fi
+}
+
+# Prints an empty string when the user does not exist
+function get_user_id(){
+ local user_name=$1
+ keystone user-list | awk '/^\|[^|]*\|[[:space:]]*'"$user_name"'[[:space:]]*\|.*\|/ {print $2}'
+}
+
+if [ $MODE != "create" ]; then
+    # It seems we cannot ask for all tenants related to a specified user
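+    # `keystone tenant-list` prints an ASCII table; a row looks roughly like
+    #   | 1a2b3c4d | demo | True |
+    # so the awk below pairs each enabled tenant's id and name as "id@name"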
+ for tenant_id_at_name in `keystone tenant-list | awk 'BEGIN {IGNORECASE = 1} /true[[:space:]]*\|$/ {print $2 "@" $4}'`; do
+ read tenant_id tenant_name <<< `echo "$tenant_id_at_name" | sed 's/@/ /'`
+ if echo $SKIP_TENANT| grep -q ",$tenant_name,"; then
+ continue;
+ fi
+ for user_id_at_name in `keystone user-list --tenant-id $tenant_id | awk 'BEGIN {IGNORECASE = 1} /true[[:space:]]*\|[^|]*\|$/ {print $2 "@" $4}'`; do
+ read user_id user_name <<< `echo "$user_id_at_name" | sed 's/@/ /'`
+ if [ $MODE = one -a "$user_name" != "$USER_NAME" ]; then
+ continue;
+ fi
+ add_entry "$user_id" "$user_name" "$tenant_id" "$tenant_name" "$USER_PASS"
+ done
+ done
+else
+ tenant_name=$TENANT
+ tenant_id=`create_or_get_tenant "$TENANT"`
+ user_name=$USER_NAME
+ user_id=`get_user_id $user_name`
+ if [ -z "$user_id" ]; then
+ #new user
+ user_id=`keystone user-create --name "$user_name" --tenant-id "$tenant_id" --pass "$USER_PASS" --email "$user_name@example.com" | awk '/\|[[:space:]]*id[[:space:]]*\|.*\|/ {print $4}'`
+        # The password is passed on the command line, which is not a good thing
+ add_entry "$user_id" "$user_name" "$tenant_id" "$tenant_name" "$USER_PASS"
+ else
+ #new role
+ role_id=`create_or_get_role "$ROLE"`
+ keystone user-role-add --user-id "$user_id" --tenant-id "$tenant_id" --role-id "$role_id"
+ add_entry "$user_id" "$user_name" "$tenant_id" "$tenant_name" "$USER_PASS"
+ fi
+fi
diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh
index c78c6f2..e270e59 100755
--- a/tools/xen/install_os_domU.sh
+++ b/tools/xen/install_os_domU.sh
@@ -376,35 +376,22 @@
sleep 10
done
- # output the run.sh.log
- ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no stack@$DOMU_IP 'tail -f run.sh.log' &
- TAIL_PID=$!
-
- function kill_tail() {
- kill -9 $TAIL_PID
- exit 1
- }
- # Let Ctrl-c kill tail and exit
- trap kill_tail SIGINT
-
- # ensure we kill off the tail if we exit the script early
- # for other reasons
- add_on_exit "kill -9 $TAIL_PID || true"
-
- # wait silently until stack.sh has finished
- set +o xtrace
- while ! ssh_no_check -q stack@$DOMU_IP "tail run.sh.log | grep -q 'stack.sh completed in'"; do
+ set +x
+ echo -n "Waiting for startup script to finish"
+ while [ `ssh_no_check -q stack@$DOMU_IP pgrep -c run.sh` -ge 1 ]
+ do
sleep 10
+ echo -n "."
done
- set -o xtrace
+ echo "done!"
+ set -x
- # kill the tail process now stack.sh has finished
- kill -9 $TAIL_PID
+ # output the run.sh.log
+ ssh_no_check -q stack@$DOMU_IP 'cat run.sh.log'
- # check for a failure
- if ssh_no_check -q stack@$DOMU_IP "grep -q 'stack.sh failed' run.sh.log"; then
- exit 1
- fi
+ # Fail if the expected text is not found
+ ssh_no_check -q stack@$DOMU_IP 'cat run.sh.log' | grep -q 'stack.sh completed in'
+
echo "################################################################################"
echo ""
echo "All Finished!"
diff --git a/unstack.sh b/unstack.sh
index 09e0de6..fd70916 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -25,9 +25,11 @@
DATA_DIR=${DATA_DIR:-${DEST}/data}
# Get project function libraries
+source $TOP_DIR/lib/baremetal
source $TOP_DIR/lib/cinder
source $TOP_DIR/lib/horizon
source $TOP_DIR/lib/swift
+source $TOP_DIR/lib/quantum
# Determine what system we are running on. This provides ``os_VENDOR``,
# ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME``
@@ -39,8 +41,7 @@
if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then
source $TOP_DIR/openrc
- source $TOP_DIR/lib/quantum
- teardown_quantum
+ teardown_quantum_debug
fi
# Shut down devstack's screen to get the bulk of OpenStack services in one shot
@@ -67,40 +68,16 @@
killall stud
fi
+# baremetal might have created a fake environment
+if is_service_enabled baremetal && [[ "$BM_USE_FAKE_ENV" = "True" ]]; then
+ cleanup_fake_baremetal_env
+fi
+
SCSI_PERSIST_DIR=$CINDER_STATE_PATH/volumes/*
# Get the iSCSI volumes
if is_service_enabled cinder; then
- TARGETS=$(sudo tgtadm --op show --mode target)
- if [ $? -ne 0 ]; then
- # If tgt driver isn't running this won't work obviously
- # So check the response and restart if need be
- echo "tgtd seems to be in a bad state, restarting..."
- if is_ubuntu; then
- restart_service tgt
- else
- restart_service tgtd
- fi
- TARGETS=$(sudo tgtadm --op show --mode target)
- fi
-
- if [[ -n "$TARGETS" ]]; then
- iqn_list=( $(grep --no-filename -r iqn $SCSI_PERSIST_DIR | sed 's/<target //' | sed 's/>//') )
- for i in "${iqn_list[@]}"; do
- echo removing iSCSI target: $i
- sudo tgt-admin --delete $i
- done
- fi
-
- if is_service_enabled cinder; then
- sudo rm -rf $CINDER_STATE_PATH/volumes/*
- fi
-
- if is_ubuntu; then
- stop_service tgt
- else
- stop_service tgtd
- fi
+ cleanup_cinder
fi
if [[ -n "$UNSTACK_ALL" ]]; then
@@ -119,8 +96,7 @@
fi
fi
-# Quantum dhcp agent runs dnsmasq
-if is_service_enabled q-dhcp; then
- pid=$(ps aux | awk '/[d]nsmasq.+interface=tap/ { print $2 }')
- [ ! -z "$pid" ] && sudo kill -9 $pid
+if is_service_enabled quantum; then
+ stop_quantum
+ stop_quantum_third_party
fi