Merge "Create tools/install_prereqs.sh"
diff --git a/.gitignore b/.gitignore
index 17cb38c..5e770c8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,3 +10,4 @@
files/images
stack-screenrc
*.pem
+accrc
diff --git a/AUTHORS b/AUTHORS
index cd0acac..ba68e32 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -1,6 +1,7 @@
Aaron Lee <aaron.lee@rackspace.com>
Aaron Rosen <arosen@nicira.com>
Adam Gandelman <adamg@canonical.com>
+Akihiro MOTOKI <motoki@da.jp.nec.com>
Andrew Laski <andrew.laski@rackspace.com>
Andy Smith <github@anarkystic.com>
Anthony Young <sleepsonthefloor@gmail.com>
diff --git a/HACKING.rst b/HACKING.rst
index e8f90c7..c4641fa 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -15,6 +15,16 @@
tracks the OpenStack trunk branches a separate branch is maintained for all
OpenStack releases starting with Diablo (stable/diablo).
+Contributing code to DevStack follows the usual OpenStack process as described
+in `How To Contribute`__ in the OpenStack wiki. `DevStack's LaunchPad project`__
+contains the usual links for blueprints, bugs, etc.
+
+__ contribute_
+.. _contribute: http://wiki.openstack.org/HowToContribute
+
+__ lp_
+.. _lp: https://launchpad.net/~devstack
+
The primary script in DevStack is ``stack.sh``, which performs the bulk of the
work for DevStack's use cases. There is a subscript ``functions`` that contains
generally useful shell functions and is used by a number of the scripts in
@@ -53,8 +63,8 @@
source $TOP_DIR/openrc
``stack.sh`` is a rather large monolithic script that flows through from beginning
-to end. The process of breaking it down into project-level sub-scripts has begun
-with the introduction of ``lib/cinder`` and ``lib/ceilometer``.
+to end. The process of breaking it down into project-level sub-scripts is nearly
+complete and should make ``stack.sh`` easier to read and manage.
These library sub-scripts have a number of fixed entry points, some of which may
just be stubs. These entry points will be called by ``stack.sh`` in the
@@ -71,6 +81,12 @@
service sub-scripts. The comments in ``<>`` are meta comments describing
how to use the template and should be removed.
+In order to show the dependencies and conditions under which project functions
+are executed the top-level conditional testing for things like ``is_service_enabled``
+should be done in ``stack.sh``. There may be nested conditionals that need
+to be in the sub-script, such as testing for keystone being enabled in
+``configure_swift()``.
+
Documentation
-------------
diff --git a/exerciserc b/exerciserc
index 82c74b7..c26ec2c 100644
--- a/exerciserc
+++ b/exerciserc
@@ -26,3 +26,7 @@
# Max time to wait for a euca-delete command to propogate
export VOLUME_DELETE_TIMEOUT=${SNAPSHOT_DELETE_TIMEOUT:-60}
+
+# The size of the volume we want to boot from; some storage back-ends
+# do not allow a disk resize, so it's important that this can be tuned
+export DEFAULT_VOLUME_SIZE=${DEFAULT_VOLUME_SIZE:-1}
diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh
index adc3393..deb1a03 100755
--- a/exercises/aggregates.sh
+++ b/exercises/aggregates.sh
@@ -99,8 +99,8 @@
META_DATA_2_KEY=foo
META_DATA_3_KEY=bar
-#ensure no metadata is set
-nova aggregate-details $AGGREGATE_ID | grep {}
+#ensure no additional metadata is set
+nova aggregate-details $AGGREGATE_ID | egrep "{u'availability_zone': u'$AGGREGATE_A_ZONE'}|{}"
nova aggregate-set-metadata $AGGREGATE_ID ${META_DATA_1_KEY}=123
nova aggregate-details $AGGREGATE_ID | grep $META_DATA_1_KEY
@@ -117,7 +117,7 @@
nova aggregate-details $AGGREGATE_ID | grep $META_DATA_2_KEY && die "ERROR metadata was not cleared"
nova aggregate-set-metadata $AGGREGATE_ID $META_DATA_3_KEY $META_DATA_1_KEY
-nova aggregate-details $AGGREGATE_ID | grep {}
+nova aggregate-details $AGGREGATE_ID | egrep "{u'availability_zone': u'$AGGREGATE_A_ZONE'}|{}"
# Test aggregate-add/remove-host
diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh
index 5ebdecc..5ada237 100755
--- a/exercises/boot_from_volume.sh
+++ b/exercises/boot_from_volume.sh
@@ -117,7 +117,7 @@
fi
# Create the bootable volume
-cinder create --display_name=$VOL_NAME --image-id $IMAGE 1
+cinder create --display_name=$VOL_NAME --image-id $IMAGE $DEFAULT_VOLUME_SIZE
# Wait for volume to activate
if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then
diff --git a/exercises/bundle.sh b/exercises/bundle.sh
index daff5f9..12f2732 100755
--- a/exercises/bundle.sh
+++ b/exercises/bundle.sh
@@ -51,7 +51,7 @@
truncate -s 5M /tmp/$IMAGE
euca-bundle-image -i /tmp/$IMAGE || die "Failure bundling image $IMAGE"
-euca-upload-bundle -b $BUCKET -m /tmp/$IMAGE.manifest.xml || die "Failure uploading bundle $IMAGE to $BUCKET"
+euca-upload-bundle --debug -b $BUCKET -m /tmp/$IMAGE.manifest.xml || die "Failure uploading bundle $IMAGE to $BUCKET"
AMI=`euca-register $BUCKET/$IMAGE.manifest.xml | cut -f2`
die_if_not_set AMI "Failure registering $BUCKET/$IMAGE"
diff --git a/exercises/euca.sh b/exercises/euca.sh
index 982653e..76df254 100755
--- a/exercises/euca.sh
+++ b/exercises/euca.sh
@@ -165,8 +165,11 @@
euca-terminate-instances $INSTANCE || \
die "Failure terminating instance $INSTANCE"
-# Assure it has terminated within a reasonable time
-if ! timeout $TERMINATE_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | grep -q $INSTANCE; do sleep 1; done"; then
+# Assure it has terminated within a reasonable time. The behaviour of this
+# case changed with bug/836978. Requesting the status of an invalid instance
+# will now return an error message including the instance id, so we need to
+# filter that out.
+if ! timeout $TERMINATE_TIMEOUT sh -c "while euca-describe-instances $INSTANCE |grep -v \"InstanceNotFound\" | grep -q $INSTANCE; do sleep 1; done"; then
echo "server didn't terminate within $TERMINATE_TIMEOUT seconds"
exit 1
fi
diff --git a/exercises/quantum-adv-test.sh b/exercises/quantum-adv-test.sh
index 493e223..bc33fe8 100755
--- a/exercises/quantum-adv-test.sh
+++ b/exercises/quantum-adv-test.sh
@@ -1,10 +1,9 @@
#!/usr/bin/env bash
#
-# **quantum.sh**
+# **quantum-adv-test.sh**
-# We will use this test to perform integration testing of nova and
-# other components with Quantum.
+# Perform integration testing of Nova and other components with Quantum.
echo "*********************************************************************"
echo "Begin DevStack Exercise: $0"
@@ -14,6 +13,7 @@
# only the first error that occured.
set -o errtrace
+
trap failed ERR
failed() {
local r=$?
@@ -30,17 +30,8 @@
# an error. It is also useful for following allowing as the install occurs.
set -o xtrace
-#------------------------------------------------------------------------------
-# Quantum config check
-#------------------------------------------------------------------------------
-# Warn if quantum is not enabled
-if [[ ! "$ENABLED_SERVICES" =~ "q-svc" ]]; then
- echo "WARNING: Running quantum test without enabling quantum"
-fi
-
-#------------------------------------------------------------------------------
# Environment
-#------------------------------------------------------------------------------
+# -----------
# Keep track of the current directory
EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
@@ -62,9 +53,8 @@
# Import exercise configuration
source $TOP_DIR/exerciserc
-#------------------------------------------------------------------------------
-# Test settings for quantum
-#------------------------------------------------------------------------------
+# Quantum Settings
+# ----------------
TENANTS="DEMO1"
# TODO (nati)_Test public network
@@ -106,24 +96,17 @@
DEMO1_ROUTER1_NET="demo1-net1"
DEMO2_ROUTER1_NET="demo2-net1"
-#------------------------------------------------------------------------------
-# Keystone settings.
-#------------------------------------------------------------------------------
KEYSTONE="keystone"
-#------------------------------------------------------------------------------
-# Get a token for clients that don't support service catalog
-#------------------------------------------------------------------------------
-
-# manually create a token by querying keystone (sending JSON data). Keystone
+# Manually create a token by querying keystone (sending JSON data). Keystone
# returns a token and catalog of endpoints. We use python to parse the token
# and save it.
TOKEN=`keystone token-get | grep ' id ' | awk '{print $4}'`
-#------------------------------------------------------------------------------
-# Various functions.
-#------------------------------------------------------------------------------
+# Various functions
+# -----------------
+
function foreach_tenant {
COMMAND=$1
for TENANT in ${TENANTS//,/ };do
@@ -192,10 +175,9 @@
function confirm_server_active {
local VM_UUID=$1
if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then
- echo "server '$VM_UUID' did not become active!"
- false
-fi
-
+ echo "server '$VM_UUID' did not become active!"
+ false
+ fi
}
function add_tenant {
@@ -214,23 +196,15 @@
function remove_tenant {
local TENANT=$1
local TENANT_ID=$(get_tenant_id $TENANT)
-
$KEYSTONE tenant-delete $TENANT_ID
}
function remove_user {
local USER=$1
local USER_ID=$(get_user_id $USER)
-
$KEYSTONE user-delete $USER_ID
}
-
-
-#------------------------------------------------------------------------------
-# "Create" functions
-#------------------------------------------------------------------------------
-
function create_tenants {
source $TOP_DIR/openrc admin admin
add_tenant demo1 demo1 demo1
@@ -383,9 +357,9 @@
delete_all
}
-#------------------------------------------------------------------------------
-# Test functions.
-#------------------------------------------------------------------------------
+# Test functions
+# --------------
+
function test_functions {
IMAGE=$(get_image_id)
echo $IMAGE
@@ -400,9 +374,9 @@
echo $NETWORK_ID
}
-#------------------------------------------------------------------------------
-# Usage and main.
-#------------------------------------------------------------------------------
+# Usage and main
+# --------------
+
usage() {
echo "$0: [-h]"
echo " -h, --help Display help message"
@@ -473,10 +447,9 @@
fi
}
+# Kick off script
+# ---------------
-#-------------------------------------------------------------------------------
-# Kick off script.
-#-------------------------------------------------------------------------------
echo $*
main $*
diff --git a/exercises/volumes.sh b/exercises/volumes.sh
index 42f9cb4..48a976e 100755
--- a/exercises/volumes.sh
+++ b/exercises/volumes.sh
@@ -142,7 +142,7 @@
fi
# Create a new volume
-cinder create --display_name $VOL_NAME --display_description "test volume: $VOL_NAME" 1
+cinder create --display_name $VOL_NAME --display_description "test volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE
if [[ $? != 0 ]]; then
echo "Failure creating volume $VOL_NAME"
exit 1
diff --git a/files/apts/baremetal b/files/apts/baremetal
new file mode 100644
index 0000000..54e76e0
--- /dev/null
+++ b/files/apts/baremetal
@@ -0,0 +1,9 @@
+busybox
+dnsmasq
+gcc
+ipmitool
+make
+open-iscsi
+qemu-kvm
+syslinux
+tgt
diff --git a/files/apts/tls-proxy b/files/apts/tls-proxy
new file mode 100644
index 0000000..0a44015
--- /dev/null
+++ b/files/apts/tls-proxy
@@ -0,0 +1 @@
+stud # only available in dist:precise,quantal
diff --git a/files/keystone_data.sh b/files/keystone_data.sh
index 32d4e1a..4c76c9b 100755
--- a/files/keystone_data.sh
+++ b/files/keystone_data.sh
@@ -5,9 +5,7 @@
# Tenant User Roles
# ------------------------------------------------------------------
# service glance admin
-# service quantum admin # if enabled
# service swift admin # if enabled
-# service cinder admin # if enabled
# service heat admin # if enabled
# service ceilometer admin # if enabled
# Tempest Only:
@@ -38,6 +36,7 @@
# Lookups
SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }")
+MEMBER_ROLE=$(keystone role-list | awk "/ Member / { print \$2 }")
# Roles
@@ -49,6 +48,7 @@
# role is also configurable in swift-proxy.conf
RESELLER_ROLE=$(get_id keystone role-create --name=ResellerAdmin)
+
# Services
# --------
@@ -71,6 +71,8 @@
keystone user-role-add --tenant_id $SERVICE_TENANT \
--user_id $HEAT_USER \
--role_id $ADMIN_ROLE
+ # heat_stack_user role is for users created by Heat
+ keystone role-create --name heat_stack_user
if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
HEAT_CFN_SERVICE=$(get_id keystone service-create \
--name=heat-cfn \
@@ -145,30 +147,6 @@
fi
fi
-if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then
- QUANTUM_USER=$(get_id keystone user-create \
- --name=quantum \
- --pass="$SERVICE_PASSWORD" \
- --tenant_id $SERVICE_TENANT \
- --email=quantum@example.com)
- keystone user-role-add \
- --tenant_id $SERVICE_TENANT \
- --user_id $QUANTUM_USER \
- --role_id $ADMIN_ROLE
- if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
- QUANTUM_SERVICE=$(get_id keystone service-create \
- --name=quantum \
- --type=network \
- --description="Quantum Service")
- keystone endpoint-create \
- --region RegionOne \
- --service_id $QUANTUM_SERVICE \
- --publicurl "http://$SERVICE_HOST:9696/" \
- --adminurl "http://$SERVICE_HOST:9696/" \
- --internalurl "http://$SERVICE_HOST:9696/"
- fi
-fi
-
if [[ "$ENABLED_SERVICES" =~ "ceilometer" ]]; then
CEILOMETER_USER=$(get_id keystone user-create --name=ceilometer \
--pass="$SERVICE_PASSWORD" \
@@ -241,25 +219,3 @@
--user_id $ALT_DEMO_USER \
--role_id $MEMBER_ROLE
fi
-
-if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then
- CINDER_USER=$(get_id keystone user-create --name=cinder \
- --pass="$SERVICE_PASSWORD" \
- --tenant_id $SERVICE_TENANT \
- --email=cinder@example.com)
- keystone user-role-add --tenant_id $SERVICE_TENANT \
- --user_id $CINDER_USER \
- --role_id $ADMIN_ROLE
- if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
- CINDER_SERVICE=$(get_id keystone service-create \
- --name=cinder \
- --type=volume \
- --description="Cinder Service")
- keystone endpoint-create \
- --region RegionOne \
- --service_id $CINDER_SERVICE \
- --publicurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s" \
- --adminurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s" \
- --internalurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s"
- fi
-fi
diff --git a/functions b/functions
index 7de5a44..23aee93 100644
--- a/functions
+++ b/functions
@@ -736,6 +736,8 @@
function screen_it {
NL=`echo -ne '\015'`
SCREEN_NAME=${SCREEN_NAME:-stack}
+ SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
+
if is_service_enabled $1; then
# Append the service to the screen rc file
screen_rc "$1" "$2"
@@ -751,7 +753,7 @@
screen -S $SCREEN_NAME -p $1 -X log on
ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log
fi
- screen -S $SCREEN_NAME -p $1 -X stuff "$2$NL"
+ screen -S $SCREEN_NAME -p $1 -X stuff "$2 || touch \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\"$NL"
fi
}
@@ -776,6 +778,47 @@
fi
}
+# Helper to remove the *.failure files under $SERVICE_DIR/$SCREEN_NAME
+# This is used by service_check once all the screen_it calls have finished
+# init_service_check
+function init_service_check() {
+ SCREEN_NAME=${SCREEN_NAME:-stack}
+ SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
+
+ if [[ ! -d "$SERVICE_DIR/$SCREEN_NAME" ]]; then
+ mkdir -p "$SERVICE_DIR/$SCREEN_NAME"
+ fi
+
+ rm -f "$SERVICE_DIR/$SCREEN_NAME"/*.failure
+}
+
+# Helper to get the status of each running service
+# service_check
+function service_check() {
+ local service
+ local failures
+ SCREEN_NAME=${SCREEN_NAME:-stack}
+ SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
+
+
+ if [[ ! -d "$SERVICE_DIR/$SCREEN_NAME" ]]; then
+ echo "No service status directory found"
+ return
+ fi
+
+ # Check if there is any failure flag file under $SERVICE_DIR/$SCREEN_NAME
+ failures=`ls "$SERVICE_DIR/$SCREEN_NAME"/*.failure 2>/dev/null`
+
+ for service in $failures; do
+ service=`basename $service`
+ service=${service::-8}
+ echo "Error: Service $service is not running"
+ done
+
+ if [ -n "$failures" ]; then
+ echo "More details about the above errors can be found with screen, with ./rejoin-stack.sh"
+ fi
+}
# ``pip install`` the dependencies of the package before ``setup.py develop``
# so pip and not distutils processes the dependency chain
@@ -974,6 +1017,14 @@
return 0
}
+# Wait for an HTTP server to start answering requests
+# wait_for_service timeout url
+function wait_for_service() {
+ local timeout=$1
+ local url=$2
+ timeout $timeout sh -c "while ! http_proxy= https_proxy= curl -s $url >/dev/null; do sleep 1; done"
+}
+
# Wrapper for ``yum`` to set proxy environment variables
# Uses globals ``OFFLINE``, ``*_proxy`
# yum_install package [package ...]
diff --git a/lib/baremetal b/lib/baremetal
new file mode 100644
index 0000000..62605fb
--- /dev/null
+++ b/lib/baremetal
@@ -0,0 +1,439 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+# This file provides devstack with the environment and utilities to
+# control nova-compute's baremetal driver.
+# It sets reasonable defaults to run within a single host,
+# using virtual machines in place of physical hardware.
+# However, by changing just a few options, devstack+baremetal can in fact
+# control physical hardware resources on the same network, if you know
+# the MAC address(es) and IPMI credentials.
+#
+# At a minimum, to enable the baremetal driver, you must set these in localrc:
+# VIRT_DRIVER=baremetal
+# ENABLED_SERVICES="$ENABLED_SERVICES,baremetal"
+#
+#
+# We utilize diskimage-builder to create a ramdisk, and then
+# baremetal driver uses that to push a disk image onto the node(s).
+#
+# Below we define various defaults which control the behavior of the
+# baremetal compute service, and inform it of the hardware it will control.
+#
+# Below that, various functions are defined, which are called by devstack
+# in the following order:
+#
+# before nova-cpu starts:
+# - prepare_baremetal_toolchain
+# - configure_baremetal_nova_dirs
+#
+# after nova and glance have started:
+# - build_and_upload_baremetal_deploy_k_and_r $token
+# - create_baremetal_flavor $BM_DEPLOY_KERNEL_ID $BM_DEPLOY_RAMDISK_ID
+# - upload_baremetal_image $url $token
+# - add_baremetal_node <first_mac> <second_mac>
+
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+# Sub-driver settings
+# -------------------
+
+# sub-driver to use for kernel deployment
+# - nova.virt.baremetal.pxe.PXE
+# - nova.virt.baremetal.tilera.TILERA
+BM_DRIVER=${BM_DRIVER:-nova.virt.baremetal.pxe.PXE}
+
+# sub-driver to use for remote power management
+# - nova.virt.baremetal.fake.FakePowerManager, for manual power control
+# - nova.virt.baremetal.ipmi.Ipmi, for remote IPMI
+# - nova.virt.baremetal.tilera_pdu.Pdu, for TilePro hardware
+BM_POWER_MANAGER=${BM_POWER_MANAGER:-nova.virt.baremetal.fake.FakePowerManager}
+
+
+# These should be customized to your environment and hardware
+# -----------------------------------------------------------
+
+# whether to create a fake environment, eg. for devstack-gate
+BM_USE_FAKE_ENV=`trueorfalse False $BM_USE_FAKE_ENV`
+
+# Extra options to pass to bm_poseur
+# change the bridge name or IP: --bridge br99 --bridge-ip 192.0.2.1
+# change the virtualization type: --engine qemu
+BM_POSEUR_EXTRA_OPTS=${BM_POSEUR_EXTRA_OPTS:-}
+
+# BM_DNSMASQ_IFACE should match FLAT_NETWORK_BRIDGE
+if [ "$BM_USE_FAKE_ENV" ]; then
+ BM_DNSMASQ_IFACE=${BM_DNSMASQ_IFACE:-br99}
+ BM_DNSMASQ_RANGE=${BM_DNSMASQ_RANGE:-192.0.2.32,192.0.2.48}
+else
+ BM_DNSMASQ_IFACE=${BM_DNSMASQ_IFACE:-eth0}
+ # if testing on a physical network,
+ # BM_DNSMASQ_RANGE must be changed to suit your network
+ BM_DNSMASQ_RANGE=${BM_DNSMASQ_RANGE:-}
+fi
+
+# BM_FIRST_MAC *must* be set to the MAC address of the node you will boot.
+# This is passed to dnsmasq along with the kernel/ramdisk to
+# deploy via PXE.
+BM_FIRST_MAC=${BM_FIRST_MAC:-}
+
+# BM_SECOND_MAC is only important if the host has >1 NIC.
+BM_SECOND_MAC=${BM_SECOND_MAC:-}
+
+# Hostname for the baremetal nova-compute node, if not run on this host
+BM_HOSTNAME=${BM_HOSTNAME:-$(hostname -f)}
+
+# BM_PM_* options are only necessary if BM_POWER_MANAGER=...IPMI
+BM_PM_ADDR=${BM_PM_ADDR:-0.0.0.0}
+BM_PM_USER=${BM_PM_USER:-user}
+BM_PM_PASS=${BM_PM_PASS:-pass}
+
+# BM_FLAVOR_* options are arbitrary and not necessarily related to physical
+# hardware capacity. These can be changed if you are testing
+# BaremetalHostManager with multiple nodes and different flavors.
+BM_CPU_ARCH=${BM_CPU_ARCH:-x86_64}
+BM_FLAVOR_CPU=${BM_FLAVOR_CPU:-1}
+BM_FLAVOR_RAM=${BM_FLAVOR_RAM:-1024}
+BM_FLAVOR_ROOT_DISK=${BM_FLAVOR_ROOT_DISK:-10}
+BM_FLAVOR_EPHEMERAL_DISK=${BM_FLAVOR_EPHEMERAL_DISK:-0}
+BM_FLAVOR_SWAP=${BM_FLAVOR_SWAP:-1}
+BM_FLAVOR_NAME=${BM_FLAVOR_NAME:-bm.small}
+BM_FLAVOR_ID=${BM_FLAVOR_ID:-11}
+BM_FLAVOR_ARCH=${BM_FLAVOR_ARCH:-$BM_CPU_ARCH}
+
+
+# Below this, we set some path and filenames.
+# Defaults are probably sufficient.
+BM_IMAGE_BUILD_DIR=${BM_IMAGE_BUILD_DIR:-$DEST/diskimage-builder}
+BM_POSEUR_DIR=${BM_POSEUR_DIR:-$DEST/bm_poseur}
+
+BM_HOST_CURRENT_KERNEL=$(uname -r)
+BM_DEPLOY_RAMDISK=${BM_DEPLOY_RAMDISK:-bm-deploy-$BM_HOST_CURRENT_KERNEL-initrd}
+BM_DEPLOY_KERNEL=${BM_DEPLOY_KERNEL:-bm-deploy-$BM_HOST_CURRENT_KERNEL-vmlinuz}
+
+# If you need to add any extra flavors to the deploy ramdisk image
+# eg, specific network drivers, specify them here
+BM_DEPLOY_FLAVOR=${BM_DEPLOY_FLAVOR:-}
+
+# set URL and version for google shell-in-a-box
+BM_SHELL_IN_A_BOX=${BM_SHELL_IN_A_BOX:-http://shellinabox.googlecode.com/files/shellinabox-2.14.tar.gz}
+
+
+# Functions
+# ---------
+
+# Check if baremetal is properly enabled
+# Returns false if VIRT_DRIVER is not baremetal, or if ENABLED_SERVICES
+# does not contain "baremetal"
+function is_baremetal() {
+ if [[ "$ENABLED_SERVICES" =~ 'baremetal' && "$VIRT_DRIVER" = 'baremetal' ]]; then
+ return 0
+ fi
+ return 1
+}
+
+# Install diskimage-builder and shell-in-a-box
+# so that we can build the deployment kernel & ramdisk
+function prepare_baremetal_toolchain() {
+ git_clone $BM_IMAGE_BUILD_REPO $BM_IMAGE_BUILD_DIR $BM_IMAGE_BUILD_BRANCH
+ git_clone $BM_POSEUR_REPO $BM_POSEUR_DIR $BM_POSEUR_BRANCH
+
+ local shellinabox_basename=$(basename $BM_SHELL_IN_A_BOX)
+ if [[ ! -e $DEST/$shellinabox_basename ]]; then
+ cd $DEST
+ wget $BM_SHELL_IN_A_BOX
+ fi
+ if [[ ! -d $DEST/${shellinabox_basename%%.tar.gz} ]]; then
+ cd $DEST
+ tar xzf $shellinabox_basename
+ fi
+ if [[ ! $(which shellinaboxd) ]]; then
+ cd $DEST/${shellinabox_basename%%.tar.gz}
+ ./configure
+ make
+ sudo make install
+ fi
+}
+
+# set up virtualized environment for devstack-gate testing
+function create_fake_baremetal_env() {
+ local bm_poseur="$BM_POSEUR_DIR/bm_poseur"
+ # TODO(deva): add support for >1 VM
+ sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS create-bridge
+ sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS create-vm
+ BM_FIRST_MAC=$(sudo $bm_poseur get-macs)
+
+ # NOTE: there is currently a limitation in baremetal driver
+ # that requires second MAC even if it is not used.
+ # Passing a fake value allows this to work.
+ # TODO(deva): remove this after driver issue is fixed.
+ BM_SECOND_MAC='12:34:56:78:90:12'
+}
+
+function cleanup_fake_baremetal_env() {
+ local bm_poseur="$BM_POSEUR_DIR/bm_poseur"
+ sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS destroy-vm
+ sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS destroy-bridge
+}
+
+# prepare various directories needed by baremetal hypervisor
+function configure_baremetal_nova_dirs() {
+ # ensure /tftpboot is prepared
+ sudo mkdir -p /tftpboot
+ sudo mkdir -p /tftpboot/pxelinux.cfg
+ sudo cp /usr/lib/syslinux/pxelinux.0 /tftpboot/
+ sudo chown -R `whoami`:libvirtd /tftpboot
+
+ # ensure $NOVA_STATE_PATH/baremetal is prepared
+ sudo mkdir -p $NOVA_STATE_PATH/baremetal
+ sudo mkdir -p $NOVA_STATE_PATH/baremetal/console
+ sudo mkdir -p $NOVA_STATE_PATH/baremetal/dnsmasq
+ sudo touch $NOVA_STATE_PATH/baremetal/dnsmasq/dnsmasq-dhcp.host
+ sudo chown -R `whoami` $NOVA_STATE_PATH/baremetal
+
+ # ensure dnsmasq is installed but not running
+ # because baremetal driver will reconfigure and restart this as needed
+ if [ ! is_package_installed dnsmasq ]; then
+ install_package dnsmasq
+ fi
+ stop_service dnsmasq
+}
+
+# build deploy kernel+ramdisk, then upload them to glance
+# this function sets BM_DEPLOY_KERNEL_ID and BM_DEPLOY_RAMDISK_ID
+function upload_baremetal_deploy() {
+ token=$1
+
+ if [ ! -e $TOP_DIR/files/$BM_DEPLOY_KERNEL -a -e /boot/vmlinuz-$BM_HOST_CURRENT_KERNEL ]; then
+ sudo cp /boot/vmlinuz-$BM_HOST_CURRENT_KERNEL $TOP_DIR/files/$BM_DEPLOY_KERNEL
+ sudo chmod a+r $TOP_DIR/files/$BM_DEPLOY_KERNEL
+ fi
+ if [ ! -e $TOP_DIR/files/$BM_DEPLOY_RAMDISK ]; then
+ $BM_IMAGE_BUILD_DIR/bin/ramdisk-image-create $BM_DEPLOY_FLAVOR deploy \
+ -o $TOP_DIR/files/$BM_DEPLOY_RAMDISK -k $BM_HOST_CURRENT_KERNEL
+ fi
+
+ # load them into glance
+ BM_DEPLOY_KERNEL_ID=$(glance \
+ --os-auth-token $token \
+ --os-image-url http://$GLANCE_HOSTPORT \
+ image-create \
+ --name $BM_DEPLOY_KERNEL \
+ --public --disk-format=aki \
+ < $TOP_DIR/files/$BM_DEPLOY_KERNEL | grep ' id ' | get_field 2)
+ BM_DEPLOY_RAMDISK_ID=$(glance \
+ --os-auth-token $token \
+ --os-image-url http://$GLANCE_HOSTPORT \
+ image-create \
+ --name $BM_DEPLOY_RAMDISK \
+ --public --disk-format=ari \
+ < $TOP_DIR/files/$BM_DEPLOY_RAMDISK | grep ' id ' | get_field 2)
+}
+
+# create a basic baremetal flavor, associated with deploy kernel & ramdisk
+#
+# Usage: create_baremetal_flavor <aki_uuid> <ari_uuid>
+function create_baremetal_flavor() {
+ aki=$1
+ ari=$2
+ nova flavor-create $BM_FLAVOR_NAME $BM_FLAVOR_ID \
+ $BM_FLAVOR_RAM $BM_FLAVOR_ROOT_DISK $BM_FLAVOR_CPU
+ nova-manage instance_type set_key \
+ --name=$BM_FLAVOR_NAME --key cpu_arch --value $BM_FLAVOR_ARCH
+ nova-manage instance_type set_key \
+ --name=$BM_FLAVOR_NAME --key deploy_kernel_id --value $aki
+ nova-manage instance_type set_key \
+ --name=$BM_FLAVOR_NAME --key deploy_ramdisk_id --value $ari
+}
+
+# pull run-time kernel/ramdisk out of disk image and load into glance
+# note that $file is currently expected to be in qcow2 format
+# Sets KERNEL_ID and RAMDISK_ID
+#
+# Usage: extract_and_upload_k_and_r_from_image $token $file
+function extract_and_upload_k_and_r_from_image() {
+ token=$1
+ file=$2
+ image_name=$(basename "$file" ".qcow2")
+
+ # this call returns the file names as "$kernel,$ramdisk"
+ out=$($BM_IMAGE_BUILD_DIR/bin/disk-image-get-kernel \
+ -x -d $TOP_DIR/files -o bm-deploy -i $file)
+ if [ $? -ne 0 ]; then
+ die "Failed to get kernel and ramdisk from $file"
+ fi
+ XTRACE=$(set +o | grep xtrace)
+ set +o xtrace
+ out=$(echo "$out" | tail -1)
+ $XTRACE
+ OUT_KERNEL=${out%%,*}
+ OUT_RAMDISK=${out##*,}
+
+ # load them into glance
+ KERNEL_ID=$(glance \
+ --os-auth-token $token \
+ --os-image-url http://$GLANCE_HOSTPORT \
+ image-create \
+ --name $image_name-kernel \
+ --public --disk-format=aki \
+ < $TOP_DIR/files/$OUT_KERNEL | grep ' id ' | get_field 2)
+ RAMDISK_ID=$(glance \
+ --os-auth-token $token \
+ --os-image-url http://$GLANCE_HOSTPORT \
+ image-create \
+ --name $image_name-initrd \
+ --public --disk-format=ari \
+ < $TOP_DIR/files/$OUT_RAMDISK | grep ' id ' | get_field 2)
+}
+
+
+# Re-implementation of devstack's "upload_image" function
+#
+# Takes the same parameters, but has some peculiarities which made it
+# easier to create a separate method, rather than complicate the logic
+# of the existing function.
+function upload_baremetal_image() {
+ local image_url=$1
+ local token=$2
+
+ # Create a directory for the downloaded image tarballs.
+ mkdir -p $FILES/images
+
+ # Downloads the image (uec ami+aki style), then extracts it.
+ IMAGE_FNAME=`basename "$image_url"`
+ if [[ ! -f $FILES/$IMAGE_FNAME || \
+ "$(stat -c "%s" $FILES/$IMAGE_FNAME)" = "0" ]]; then
+ wget -c $image_url -O $FILES/$IMAGE_FNAME
+ if [[ $? -ne 0 ]]; then
+ echo "Not found: $image_url"
+ return
+ fi
+ fi
+
+ local KERNEL=""
+ local RAMDISK=""
+ local DISK_FORMAT=""
+ local CONTAINER_FORMAT=""
+ case "$IMAGE_FNAME" in
+ *.tar.gz|*.tgz)
+ # Extract ami and aki files
+ [ "${IMAGE_FNAME%.tar.gz}" != "$IMAGE_FNAME" ] &&
+ IMAGE_NAME="${IMAGE_FNAME%.tar.gz}" ||
+ IMAGE_NAME="${IMAGE_FNAME%.tgz}"
+ xdir="$FILES/images/$IMAGE_NAME"
+ rm -Rf "$xdir";
+ mkdir "$xdir"
+ tar -zxf $FILES/$IMAGE_FNAME -C "$xdir"
+ KERNEL=$(for f in "$xdir/"*-vmlinuz* "$xdir/"aki-*/image; do
+ [ -f "$f" ] && echo "$f" && break; done; true)
+ RAMDISK=$(for f in "$xdir/"*-initrd* "$xdir/"ari-*/image; do
+ [ -f "$f" ] && echo "$f" && break; done; true)
+ IMAGE=$(for f in "$xdir/"*.img "$xdir/"ami-*/image; do
+ [ -f "$f" ] && echo "$f" && break; done; true)
+ if [[ -z "$IMAGE_NAME" ]]; then
+ IMAGE_NAME=$(basename "$IMAGE" ".img")
+ fi
+ DISK_FORMAT=ami
+ CONTAINER_FORMAT=ami
+ ;;
+ *.qcow2)
+ IMAGE="$FILES/${IMAGE_FNAME}"
+ IMAGE_NAME=$(basename "$IMAGE" ".qcow2")
+ DISK_FORMAT=qcow2
+ CONTAINER_FORMAT=bare
+ ;;
+ *) echo "Do not know what to do with $IMAGE_FNAME"; false;;
+ esac
+
+ if [ "$CONTAINER_FORMAT" = "bare" ]; then
+ extract_and_upload_k_and_r_from_image $token $IMAGE
+ elif [ "$CONTAINER_FORMAT" = "ami" ]; then
+ KERNEL_ID=$(glance \
+ --os-auth-token $token \
+ --os-image-url http://$GLANCE_HOSTPORT \
+ image-create \
+ --name "$IMAGE_NAME-kernel" --public \
+ --container-format aki \
+ --disk-format aki < "$KERNEL" | grep ' id ' | get_field 2)
+ RAMDISK_ID=$(glance \
+ --os-auth-token $token \
+ --os-image-url http://$GLANCE_HOSTPORT \
+ image-create \
+ --name "$IMAGE_NAME-ramdisk" --public \
+ --container-format ari \
+ --disk-format ari < "$RAMDISK" | grep ' id ' | get_field 2)
+ else
+ # TODO(deva): add support for other image types
+ return
+ fi
+
+ glance \
+ --os-auth-token $token \
+ --os-image-url http://$GLANCE_HOSTPORT \
+ image-create \
+ --name "${IMAGE_NAME%.img}" --public \
+ --container-format $CONTAINER_FORMAT \
+ --disk-format $DISK_FORMAT \
+ ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} \
+ ${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}"
+
+ # override DEFAULT_IMAGE_NAME so that tempest can find the image
+ # that we just uploaded in glance
+ DEFAULT_IMAGE_NAME="${IMAGE_NAME%.img}"
+}
+
+function clear_baremetal_of_all_nodes() {
+ list=$(nova-baremetal-manage node list | tail -n +2 | awk '{print $1}' )
+ for node in $list
+ do
+ nova-baremetal-manage node delete $node
+ done
+ list=$(nova-baremetal-manage interface list | tail -n +2 | awk '{print $1}' )
+ for iface in $list
+ do
+ nova-baremetal-manage interface delete $iface
+ done
+}
+
+# inform nova-baremetal about nodes, MACs, etc
+# Defaults to using BM_FIRST_MAC and BM_SECOND_MAC if parameters not specified
+#
+# Usage: add_baremetal_node <first_mac> <second_mac>
+function add_baremetal_node() {
+ mac_1=${1:-$BM_FIRST_MAC}
+ mac_2=${2:-$BM_SECOND_MAC}
+
+ id=$(nova-baremetal-manage node create \
+ --host=$BM_HOSTNAME --prov_mac=$mac_1 \
+ --cpus=$BM_FLAVOR_CPU --memory_mb=$BM_FLAVOR_RAM \
+ --local_gb=$BM_FLAVOR_ROOT_DISK --terminal_port=0 \
+ --pm_address=$BM_PM_ADDR --pm_user=$BM_PM_USER --pm_password=$BM_PM_PASS \
+ )
+ [ $? -eq 0 ] || [ "$id" ] || die "Error adding baremetal node"
+ id2=$(nova-baremetal-manage interface create \
+ --node_id=$id --mac_address=$mac_2 --datapath_id=0 --port_no=0 \
+ )
+ [ $? -eq 0 ] || [ "$id2" ] || die "Error adding interface to baremetal node $id"
+}
+
+
+# Restore xtrace
+$XTRACE
diff --git a/lib/ceilometer b/lib/ceilometer
index aa1b396..76ab254 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -1,9 +1,9 @@
# lib/ceilometer
-# Install and start Ceilometer service
+# Install and start **Ceilometer** service
+
# To enable, add the following to localrc
# ENABLED_SERVICES+=ceilometer-acompute,ceilometer-acentral,ceilometer-collector,ceilometer-api
-
# Dependencies:
# - functions
# - OS_AUTH_URL for auth in api
@@ -12,12 +12,12 @@
# stack.sh
# ---------
-# install_XXX
-# configure_XXX
-# init_XXX
-# start_XXX
-# stop_XXX
-# cleanup_XXX
+# install_ceilometer
+# configure_ceilometer
+# init_ceilometer
+# start_ceilometer
+# stop_ceilometer
+# cleanup_ceilometer
# Save trace setting
XTRACE=$(set +o | grep xtrace)
@@ -27,17 +27,18 @@
# Defaults
# --------
-# set up default directories
+# Set up default directories
CEILOMETER_DIR=$DEST/ceilometer
+CEILOMETER_CONF_DIR=/etc/ceilometer
+CEILOMETER_CONF=$CEILOMETER_CONF_DIR/ceilometer.conf
+CEILOMETER_API_LOG_DIR=/var/log/ceilometer-api
+
# Support potential entry-points console scripts
if [ -d $CEILOMETER_DIR/bin ] ; then
CEILOMETER_BIN_DIR=$CEILOMETER_DIR/bin
else
CEILOMETER_BIN_DIR=/usr/local/bin
fi
-CEILOMETER_CONF_DIR=/etc/ceilometer
-CEILOMETER_CONF=$CEILOMETER_CONF_DIR/ceilometer.conf
-CEILOMETER_API_LOG_DIR=/var/log/ceilometer-api
# cleanup_ceilometer() - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up
diff --git a/lib/cinder b/lib/cinder
index a43f0a1..701effd 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -1,5 +1,5 @@
# lib/cinder
-# Install and start Cinder volume service
+# Install and start **Cinder** volume service
# Dependencies:
# - functions
@@ -31,9 +31,11 @@
CINDER_DIR=$DEST/cinder
CINDERCLIENT_DIR=$DEST/python-cinderclient
CINDER_STATE_PATH=${CINDER_STATE_PATH:=$DATA_DIR/cinder}
+CINDER_AUTH_CACHE_DIR=${CINDER_AUTH_CACHE_DIR:-/var/cache/cinder}
+
CINDER_CONF_DIR=/etc/cinder
CINDER_CONF=$CINDER_CONF_DIR/cinder.conf
-CINDER_AUTH_CACHE_DIR=${CINDER_AUTH_CACHE_DIR:-/var/cache/cinder}
+CINDER_API_PASTE_INI=$CINDER_CONF_DIR/api-paste.ini
# Support entry points installation of console scripts
if [[ -d $CINDER_DIR/bin ]]; then
@@ -49,8 +51,40 @@
# cleanup_cinder() - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up
function cleanup_cinder() {
- # This function intentionally left blank
- :
+ # ensure the volume group is cleared up because failures might
+ # leave dead volumes in the group
+ TARGETS=$(sudo tgtadm --op show --mode target)
+ if [ $? -ne 0 ]; then
+ # If tgt driver isn't running this won't work obviously
+ # So check the response and restart if need be
+ echo "tgtd seems to be in a bad state, restarting..."
+ if is_ubuntu; then
+ restart_service tgt
+ else
+ restart_service tgtd
+ fi
+ TARGETS=$(sudo tgtadm --op show --mode target)
+ fi
+
+ if [[ -n "$TARGETS" ]]; then
+ iqn_list=( $(grep --no-filename -r iqn $SCSI_PERSIST_DIR | sed 's/<target //' | sed 's/>//') )
+ for i in "${iqn_list[@]}"; do
+ echo removing iSCSI target: $i
+ sudo tgt-admin --delete $i
+ done
+ fi
+
+ if is_service_enabled cinder; then
+ sudo rm -rf $CINDER_STATE_PATH/volumes/*
+ fi
+
+ if is_ubuntu; then
+ stop_service tgt
+ else
+ stop_service tgtd
+ fi
+
+ sudo vgremove -f $VOLUME_GROUP
}
# configure_cinder() - Set config files, create data dirs, etc
@@ -97,7 +131,6 @@
sudo chown root:root $TEMPFILE
sudo mv $TEMPFILE /etc/sudoers.d/cinder-rootwrap
- CINDER_API_PASTE_INI=$CINDER_CONF_DIR/api-paste.ini
cp $CINDER_DIR/etc/cinder/api-paste.ini $CINDER_API_PASTE_INI
iniset $CINDER_API_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST
iniset $CINDER_API_PASTE_INI filter:authtoken auth_port $KEYSTONE_AUTH_PORT
@@ -105,10 +138,7 @@
iniset $CINDER_API_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME
iniset $CINDER_API_PASTE_INI filter:authtoken admin_user cinder
iniset $CINDER_API_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD
-
- if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then
- iniset $CINDER_API_PASTE_INI filter:authtoken signing_dir $CINDER_AUTH_CACHE_DIR
- fi
+ iniset $CINDER_API_PASTE_INI filter:authtoken signing_dir $CINDER_AUTH_CACHE_DIR
cp $CINDER_DIR/etc/cinder/cinder.conf.sample $CINDER_CONF
iniset $CINDER_CONF DEFAULT auth_strategy keystone
@@ -162,6 +192,46 @@
fi
}
+# create_cinder_accounts() - Set up common required cinder accounts
+
+# Tenant User Roles
+# ------------------------------------------------------------------
+# service cinder admin # if enabled
+
+# Migrated from keystone_data.sh
+create_cinder_accounts() {
+
+ SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+ ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }")
+
+ # Cinder
+ if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then
+ CINDER_USER=$(keystone user-create \
+ --name=cinder \
+ --pass="$SERVICE_PASSWORD" \
+ --tenant_id $SERVICE_TENANT \
+ --email=cinder@example.com \
+ | grep " id " | get_field 2)
+ keystone user-role-add \
+ --tenant_id $SERVICE_TENANT \
+ --user_id $CINDER_USER \
+ --role_id $ADMIN_ROLE
+ if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+ CINDER_SERVICE=$(keystone service-create \
+ --name=cinder \
+ --type=volume \
+ --description="Cinder Volume Service" \
+ | grep " id " | get_field 2)
+ keystone endpoint-create \
+ --region RegionOne \
+ --service_id $CINDER_SERVICE \
+ --publicurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s" \
+ --adminurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s" \
+ --internalurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s"
+ fi
+ fi
+}
+
# init_cinder() - Initialize database and volume group
function init_cinder() {
# Force nova volumes off
@@ -212,11 +282,10 @@
fi
fi
- if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then
- # Create cache dir
- sudo mkdir -p $CINDER_AUTH_CACHE_DIR
- sudo chown `whoami` $CINDER_AUTH_CACHE_DIR
- fi
+ # Create cache dir
+ sudo mkdir -p $CINDER_AUTH_CACHE_DIR
+ sudo chown `whoami` $CINDER_AUTH_CACHE_DIR
+ rm -f $CINDER_AUTH_CACHE_DIR/*
}
# install_cinder() - Collect source and prepare
@@ -270,7 +339,11 @@
done
if is_service_enabled c-vol; then
- stop_service tgt
+ if is_ubuntu; then
+ stop_service tgt
+ else
+ stop_service tgtd
+ fi
fi
}
diff --git a/lib/databases/mysql b/lib/databases/mysql
index 68e9adc..1c0f5eb 100644
--- a/lib/databases/mysql
+++ b/lib/databases/mysql
@@ -1,5 +1,5 @@
-# lib/mysql
-# Functions to control the configuration and operation of the MySQL database backend
+# lib/databases/mysql
+# Functions to control the configuration and operation of the **MySQL** database backend
# Dependencies:
# DATABASE_{HOST,USER,PASSWORD} must be defined
diff --git a/lib/databases/postgresql b/lib/databases/postgresql
index 20ade85..04db714 100644
--- a/lib/databases/postgresql
+++ b/lib/databases/postgresql
@@ -1,5 +1,5 @@
-# lib/postgresql
-# Functions to control the configuration and operation of the PostgreSQL database backend
+# lib/databases/postgresql
+# Functions to control the configuration and operation of the **PostgreSQL** database backend
# Dependencies:
# DATABASE_{HOST,USER,PASSWORD} must be defined
@@ -20,14 +20,21 @@
function configure_database_postgresql {
echo_summary "Configuring and starting PostgreSQL"
- if is_fedora || is_suse; then
+ if is_fedora; then
PG_HBA=/var/lib/pgsql/data/pg_hba.conf
PG_CONF=/var/lib/pgsql/data/postgresql.conf
sudo [ -e $PG_HBA ] || sudo postgresql-setup initdb
- else
+ elif is_ubuntu; then
PG_DIR=`find /etc/postgresql -name pg_hba.conf|xargs dirname`
PG_HBA=$PG_DIR/pg_hba.conf
PG_CONF=$PG_DIR/postgresql.conf
+ elif is_suse; then
+ PG_HBA=/var/lib/pgsql/data/pg_hba.conf
+ PG_CONF=/var/lib/pgsql/data/postgresql.conf
+ # initdb is called when postgresql is first started
+ sudo [ -e $PG_HBA ] || start_service postgresql
+ else
+ exit_distro_not_supported "postgresql configuration"
fi
# Listen on all addresses
sudo sed -i "/listen_addresses/s/.*/listen_addresses = '*'/" $PG_CONF
@@ -35,7 +42,7 @@
sudo sed -i "/^host/s/all\s\+127.0.0.1\/32\s\+ident/$DATABASE_USER\t0.0.0.0\/0\tpassword/" $PG_HBA
# Do password auth for all IPv6 clients
sudo sed -i "/^host/s/all\s\+::1\/128\s\+ident/$DATABASE_USER\t::0\/0\tpassword/" $PG_HBA
- start_service postgresql
+ restart_service postgresql
# If creating the role fails, chances are it already existed. Try to alter it.
sudo -u root sudo -u postgres -i psql -c "CREATE ROLE $DATABASE_USER WITH SUPERUSER LOGIN PASSWORD '$DATABASE_PASSWORD'" || \
diff --git a/lib/glance b/lib/glance
index b02a4b6..dff247a 100644
--- a/lib/glance
+++ b/lib/glance
@@ -1,5 +1,5 @@
# lib/glance
-# Functions to control the configuration and operation of the Glance service
+# Functions to control the configuration and operation of the **Glance** service
# Dependencies:
# ``functions`` file
@@ -25,8 +25,6 @@
# Defaults
# --------
-# <define global variables here that belong to this project>
-
# Set up default directories
GLANCE_DIR=$DEST/glance
GLANCECLIENT_DIR=$DEST/python-glanceclient
@@ -95,9 +93,7 @@
iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_user glance
iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_password $SERVICE_PASSWORD
- if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then
- iniset $GLANCE_REGISTRY_CONF keystone_authtoken signing_dir $GLANCE_AUTH_CACHE_DIR/registry
- fi
+ iniset $GLANCE_REGISTRY_CONF keystone_authtoken signing_dir $GLANCE_AUTH_CACHE_DIR/registry
cp $GLANCE_DIR/etc/glance-api.conf $GLANCE_API_CONF
iniset $GLANCE_API_CONF DEFAULT debug True
@@ -121,9 +117,7 @@
iniset $GLANCE_API_CONF DEFAULT rabbit_host $RABBIT_HOST
iniset $GLANCE_API_CONF DEFAULT rabbit_password $RABBIT_PASSWORD
fi
- if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then
- iniset $GLANCE_API_CONF keystone_authtoken signing_dir $GLANCE_AUTH_CACHE_DIR/api
- fi
+ iniset $GLANCE_API_CONF keystone_authtoken signing_dir $GLANCE_AUTH_CACHE_DIR/api
cp -p $GLANCE_DIR/etc/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI
@@ -145,7 +139,6 @@
iniset $GLANCE_CACHE_CONF DEFAULT admin_password $SERVICE_PASSWORD
cp -p $GLANCE_DIR/etc/policy.json $GLANCE_POLICY_JSON
-
}
# init_glance() - Initialize databases, etc.
@@ -163,13 +156,13 @@
$GLANCE_BIN_DIR/glance-manage db_sync
- if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then
- # Create cache dir
- sudo mkdir -p $GLANCE_AUTH_CACHE_DIR/api
- sudo chown `whoami` $GLANCE_AUTH_CACHE_DIR/api
- sudo mkdir -p $GLANCE_AUTH_CACHE_DIR/registry
- sudo chown `whoami` $GLANCE_AUTH_CACHE_DIR/registry
- fi
+ # Create cache dir
+ sudo mkdir -p $GLANCE_AUTH_CACHE_DIR/api
+ sudo chown `whoami` $GLANCE_AUTH_CACHE_DIR/api
+ rm -f $GLANCE_AUTH_CACHE_DIR/api/*
+ sudo mkdir -p $GLANCE_AUTH_CACHE_DIR/registry
+ sudo chown `whoami` $GLANCE_AUTH_CACHE_DIR/registry
+ rm -f $GLANCE_AUTH_CACHE_DIR/registry/*
}
# install_glanceclient() - Collect source and prepare
diff --git a/lib/heat b/lib/heat
index feaadec..a6f7286 100644
--- a/lib/heat
+++ b/lib/heat
@@ -1,5 +1,6 @@
# lib/heat
-# Install and start Heat service
+# Install and start **Heat** service
+
# To enable, add the following to localrc
# ENABLED_SERVICES+=,heat,h-api-cfn,h-api-cw,h-eng
@@ -8,12 +9,14 @@
# stack.sh
# ---------
-# install_XXX
-# configure_XXX
-# init_XXX
-# start_XXX
-# stop_XXX
-# cleanup_XXX
+# install_heatclient
+# install_heat
+# configure_heatclient
+# configure_heat
+# init_heat
+# start_heat
+# stop_heat
+# cleanup_heat
# Save trace setting
XTRACE=$(set +o | grep xtrace)
@@ -57,7 +60,7 @@
HEAT_API_HOST=${HEAT_API_HOST:-$SERVICE_HOST}
HEAT_API_PORT=${HEAT_API_PORT:-8004}
- # cloudformation api
+ # Cloudformation API
HEAT_API_CFN_CONF=$HEAT_CONF_DIR/heat-api-cfn.conf
cp $HEAT_DIR/etc/heat/heat-api-cfn.conf $HEAT_API_CFN_CONF
iniset $HEAT_API_CFN_CONF DEFAULT debug True
@@ -86,7 +89,7 @@
iniset $HEAT_API_CFN_PASTE_INI filter:ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0
iniset $HEAT_API_CFN_PASTE_INI filter:ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens
- # openstack api
+ # OpenStack API
HEAT_API_CONF=$HEAT_CONF_DIR/heat-api.conf
cp $HEAT_DIR/etc/heat/heat-api.conf $HEAT_API_CONF
iniset $HEAT_API_CONF DEFAULT debug True
@@ -139,7 +142,7 @@
iniset $HEAT_ENGINE_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_qpid
fi
- # cloudwatch api
+ # Cloudwatch API
HEAT_API_CW_CONF=$HEAT_CONF_DIR/heat-api-cloudwatch.conf
cp $HEAT_DIR/etc/heat/heat-api-cloudwatch.conf $HEAT_API_CW_CONF
iniset $HEAT_API_CW_CONF DEFAULT debug True
@@ -175,7 +178,7 @@
# (re)create heat database
recreate_database heat utf8
- $HEAT_DIR/bin/heat-db-setup $os_PACKAGE -r $MYSQL_PASSWORD
+ $HEAT_DIR/bin/heat-db-setup $os_PACKAGE -r $DATABASE_PASSWORD
$HEAT_DIR/tools/nova_create_flavors.sh
}
diff --git a/lib/horizon b/lib/horizon
index 68337ab..5d479d5 100644
--- a/lib/horizon
+++ b/lib/horizon
@@ -79,7 +79,7 @@
# Be a good citizen and use the distro tools here
sudo touch /etc/$APACHE_NAME/$APACHE_CONF
sudo a2ensite horizon
- # WSGI doesn't enable by default, enable it
+ # WSGI isn't enabled by default, enable it
sudo a2enmod wsgi
elif is_fedora; then
APACHE_NAME=httpd
@@ -88,9 +88,8 @@
elif is_suse; then
APACHE_NAME=apache2
APACHE_CONF=vhosts.d/horizon.conf
- # Append wsgi to the list of modules to load
- grep -q "^APACHE_MODULES=.*wsgi" /etc/sysconfig/apache2 ||
- sudo sed '/^APACHE_MODULES=/s/^\(.*\)"$/\1 wsgi"/' -i /etc/sysconfig/apache2
+ # WSGI isn't enabled by default, enable it
+ sudo a2enmod wsgi
else
exit_distro_not_supported "apache configuration"
fi
diff --git a/lib/keystone b/lib/keystone
index f6a6d66..34f3372 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -4,11 +4,10 @@
# Dependencies:
# ``functions`` file
# ``BASE_SQL_CONN``
-# ``SERVICE_HOST``
+# ``SERVICE_HOST``, ``SERVICE_PROTOCOL``
# ``SERVICE_TOKEN``
# ``S3_SERVICE_PORT`` (template backend only)
-
# ``stack.sh`` calls the entry points in this order:
#
# install_keystone
@@ -27,8 +26,6 @@
# Defaults
# --------
-# <define global variables here that belong to this project>
-
# Set up default directories
KEYSTONE_DIR=$DEST/keystone
KEYSTONE_CONF_DIR=${KEYSTONE_CONF_DIR:-/etc/keystone}
@@ -48,10 +45,14 @@
# Set Keystone interface configuration
KEYSTONE_AUTH_HOST=${KEYSTONE_AUTH_HOST:-$SERVICE_HOST}
KEYSTONE_AUTH_PORT=${KEYSTONE_AUTH_PORT:-35357}
-KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-http}
+KEYSTONE_AUTH_PORT_INT=${KEYSTONE_AUTH_PORT_INT:-35358}
+KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-$SERVICE_PROTOCOL}
+
+# Public facing bits
KEYSTONE_SERVICE_HOST=${KEYSTONE_SERVICE_HOST:-$SERVICE_HOST}
KEYSTONE_SERVICE_PORT=${KEYSTONE_SERVICE_PORT:-5000}
-KEYSTONE_SERVICE_PROTOCOL=${KEYSTONE_SERVICE_PROTOCOL:-http}
+KEYSTONE_SERVICE_PORT_INT=${KEYSTONE_SERVICE_PORT_INT:-5001}
+KEYSTONE_SERVICE_PROTOCOL=${KEYSTONE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
# Entry Points
@@ -77,8 +78,8 @@
if [[ ! -d $KEYSTONE_CONF_DIR ]]; then
sudo mkdir -p $KEYSTONE_CONF_DIR
- sudo chown `whoami` $KEYSTONE_CONF_DIR
fi
+ sudo chown `whoami` $KEYSTONE_CONF_DIR
if [[ "$KEYSTONE_CONF_DIR" != "$KEYSTONE_DIR/etc" ]]; then
cp -p $KEYSTONE_DIR/etc/keystone.conf.sample $KEYSTONE_CONF
@@ -88,6 +89,13 @@
# Rewrite stock ``keystone.conf``
local dburl
database_connection_url dburl keystone
+
+ if is_service_enabled tls-proxy; then
+ # Set the service ports for a proxy to take the originals
+ iniset $KEYSTONE_CONF DEFAULT public_port $KEYSTONE_SERVICE_PORT_INT
+ iniset $KEYSTONE_CONF DEFAULT admin_port $KEYSTONE_AUTH_PORT_INT
+ fi
+
iniset $KEYSTONE_CONF DEFAULT admin_token "$SERVICE_TOKEN"
iniset $KEYSTONE_CONF signing token_format "$KEYSTONE_TOKEN_FORMAT"
iniset $KEYSTONE_CONF sql connection $dburl
@@ -122,7 +130,7 @@
echo "catalog.RegionOne.network.name = Quantum Service" >> $KEYSTONE_CATALOG
fi
- sudo sed -e "
+ sed -e "
s,%SERVICE_HOST%,$SERVICE_HOST,g;
s,%S3_SERVICE_PORT%,$S3_SERVICE_PORT,g;
" -i $KEYSTONE_CATALOG
@@ -213,9 +221,9 @@
keystone endpoint-create \
--region RegionOne \
--service_id $KEYSTONE_SERVICE \
- --publicurl "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:\$(public_port)s/v2.0" \
- --adminurl "$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:\$(admin_port)s/v2.0" \
- --internalurl "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:\$(public_port)s/v2.0"
+ --publicurl "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0" \
+ --adminurl "$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0" \
+ --internalurl "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0"
fi
# TODO(dtroyer): This is part of a series of changes...remove these when
@@ -248,11 +256,13 @@
if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then
# Set up certificates
+ rm -rf $KEYSTONE_CONF_DIR/ssl
$KEYSTONE_DIR/bin/keystone-manage pki_setup
# Create cache dir
sudo mkdir -p $KEYSTONE_AUTH_CACHE_DIR
sudo chown `whoami` $KEYSTONE_AUTH_CACHE_DIR
+ rm -f $KEYSTONE_AUTH_CACHE_DIR/*
fi
}
@@ -268,13 +278,25 @@
# start_keystone() - Start running processes, including screen
function start_keystone() {
+ # Get right service port for testing
+ local service_port=$KEYSTONE_SERVICE_PORT
+ if is_service_enabled tls-proxy; then
+ service_port=$KEYSTONE_SERVICE_PORT_INT
+ fi
+
# Start Keystone in a screen window
screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d --debug"
echo "Waiting for keystone to start..."
- if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= curl -s $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ >/dev/null; do sleep 1; done"; then
+ if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= curl -s http://$SERVICE_HOST:$service_port/v2.0/ >/dev/null; do sleep 1; done"; then
echo "keystone did not start"
exit 1
fi
+
+ # Start proxies if enabled
+ if is_service_enabled tls-proxy; then
+ start_tls_proxy '*' $KEYSTONE_SERVICE_PORT $KEYSTONE_SERVICE_HOST $KEYSTONE_SERVICE_PORT_INT &
+ start_tls_proxy '*' $KEYSTONE_AUTH_PORT $KEYSTONE_AUTH_HOST $KEYSTONE_AUTH_PORT_INT &
+ fi
}
# stop_keystone() - Stop running processes
diff --git a/lib/nova b/lib/nova
index 86db561..594195e 100644
--- a/lib/nova
+++ b/lib/nova
@@ -1,5 +1,5 @@
# lib/nova
-# Functions to control the configuration and operation of the XXXX service
+# Functions to control the configuration and operation of the **Nova** service
# Dependencies:
# ``functions`` file
@@ -39,6 +39,12 @@
NOVA_CONF=$NOVA_CONF_DIR/nova.conf
NOVA_API_PASTE_INI=${NOVA_API_PASTE_INI:-$NOVA_CONF_DIR/api-paste.ini}
+# Public facing bits
+NOVA_SERVICE_HOST=${NOVA_SERVICE_HOST:-$SERVICE_HOST}
+NOVA_SERVICE_PORT=${NOVA_SERVICE_PORT:-8774}
+NOVA_SERVICE_PORT_INT=${NOVA_SERVICE_PORT_INT:-18774}
+NOVA_SERVICE_PROTOCOL=${NOVA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
+
# Support entry points installation of console scripts
if [[ -d $NOVA_DIR/bin ]]; then
NOVA_BIN_DIR=$NOVA_DIR/bin
@@ -170,11 +176,13 @@
s,%SERVICE_TENANT_NAME%,$SERVICE_TENANT_NAME,g;
s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g;
" -i $NOVA_API_PASTE_INI
+ iniset $NOVA_API_PASTE_INI filter:authtoken auth_host $SERVICE_HOST
+ if is_service_enabled tls-proxy; then
+ iniset $NOVA_API_PASTE_INI filter:authtoken auth_protocol $SERVICE_PROTOCOL
+ fi
fi
- if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then
- iniset $NOVA_API_PASTE_INI filter:authtoken signing_dir $NOVA_AUTH_CACHE_DIR
- fi
+ iniset $NOVA_API_PASTE_INI filter:authtoken signing_dir $NOVA_AUTH_CACHE_DIR
if is_service_enabled n-cpu; then
# Force IP forwarding on, just on case
@@ -216,6 +224,11 @@
fi
fi
+ # Prepare directories and packages for baremetal driver
+ if is_baremetal; then
+ configure_baremetal_nova_dirs
+ fi
+
if is_service_enabled quantum && is_quantum_ovs_base_plugin "$Q_PLUGIN" && ! sudo grep -q '^cgroup_device_acl' $QEMU_CONF ; then
# Add /dev/net/tun to cgroup_device_acls, needed for type=ethernet interfaces
cat <<EOF | sudo tee -a $QEMU_CONF
@@ -231,10 +244,13 @@
if is_ubuntu; then
LIBVIRT_DAEMON=libvirt-bin
else
- # http://wiki.libvirt.org/page/SSHPolicyKitSetup
- if ! getent group libvirtd >/dev/null; then
- sudo groupadd libvirtd
- fi
+ LIBVIRT_DAEMON=libvirtd
+ fi
+
+ # For distributions using polkit to authorize access to libvirt,
+ # configure polkit accordingly.
+ # Based on http://wiki.libvirt.org/page/SSHPolicyKitSetup
+ if is_fedora; then
sudo bash -c 'cat <<EOF >/etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla
[libvirt Management Access]
Identity=unix-group:libvirtd
@@ -243,11 +259,24 @@
ResultInactive=yes
ResultActive=yes
EOF'
- LIBVIRT_DAEMON=libvirtd
+ elif is_suse; then
+ # Work around the fact that polkit-default-privs overrules pklas
+ # with 'unix-group:$group'.
+ sudo bash -c "cat <<EOF >/etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla
+[libvirt Management Access]
+Identity=unix-user:$USER
+Action=org.libvirt.unix.manage
+ResultAny=yes
+ResultInactive=yes
+ResultActive=yes
+EOF"
fi
# The user that nova runs as needs to be member of **libvirtd** group otherwise
# nova-compute will be unable to use libvirt.
+ if ! getent group libvirtd >/dev/null; then
+ sudo groupadd libvirtd
+ fi
add_user_to_group `whoami` libvirtd
# libvirt detects various settings on startup, as we potentially changed
@@ -310,9 +339,9 @@
keystone endpoint-create \
--region RegionOne \
--service_id $NOVA_SERVICE \
- --publicurl "http://$SERVICE_HOST:\$(compute_port)s/v2/\$(tenant_id)s" \
- --adminurl "http://$SERVICE_HOST:\$(compute_port)s/v2/\$(tenant_id)s" \
- --internalurl "http://$SERVICE_HOST:\$(compute_port)s/v2/\$(tenant_id)s"
+ --publicurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" \
+ --adminurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" \
+ --internalurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s"
fi
fi
}
@@ -334,6 +363,7 @@
add_nova_opt "dhcpbridge_flagfile=$NOVA_CONF"
add_nova_opt "force_dhcp_release=True"
add_nova_opt "fixed_range=$FIXED_RANGE"
+ add_nova_opt "default_floating_pool=$PUBLIC_NETWORK_NAME"
add_nova_opt "s3_host=$SERVICE_HOST"
add_nova_opt "s3_port=$S3_SERVICE_PORT"
add_nova_opt "osapi_compute_extension=nova.api.openstack.compute.contrib.standard_extensions"
@@ -341,12 +371,20 @@
local dburl
database_connection_url dburl nova
add_nova_opt "sql_connection=$dburl"
+ if is_baremetal; then
+ database_connection_url dburl nova_bm
+ add_nova_opt "baremetal_sql_connection=$dburl"
+ fi
add_nova_opt "libvirt_type=$LIBVIRT_TYPE"
add_nova_opt "libvirt_cpu_mode=none"
add_nova_opt "instance_name_template=${INSTANCE_NAME_PREFIX}%08x"
if is_service_enabled n-api; then
add_nova_opt "enabled_apis=$NOVA_ENABLED_APIS"
+ if is_service_enabled tls-proxy; then
+ # Set the service port for a proxy to take the original
+ add_nova_opt "osapi_compute_listen_port=$NOVA_SERVICE_PORT_INT"
+ fi
fi
if is_service_enabled cinder; then
add_nova_opt "volume_api_class=nova.volume.cinder.API"
@@ -399,6 +437,16 @@
done
}
+function create_nova_conf_nova_network() {
+ add_nova_opt "network_manager=nova.network.manager.$NET_MAN"
+ add_nova_opt "public_interface=$PUBLIC_INTERFACE"
+ add_nova_opt "vlan_interface=$VLAN_INTERFACE"
+ add_nova_opt "flat_network_bridge=$FLAT_NETWORK_BRIDGE"
+ if [ -n "$FLAT_INTERFACE" ]; then
+ add_nova_opt "flat_interface=$FLAT_INTERFACE"
+ fi
+}
+
# init_nova() - Initialize databases, etc.
function init_nova() {
# Nova Database
@@ -407,7 +455,7 @@
# All nova components talk to a central database. We will need to do this step
# only once for an entire cluster.
- if is_service_enabled $DATABASE_BACKENDS && is_service_enabled nova; then
+ if is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-api; then
# (Re)create nova database
# Explicitly use latin1: to avoid lp#829209, nova expects the database to
# use latin1 by default, and then upgrades the database to utf8 (see the
@@ -416,13 +464,23 @@
# (Re)create nova database
$NOVA_BIN_DIR/nova-manage db sync
+
+ # (Re)create nova baremetal database
+ if is_baremetal; then
+ recreate_database nova_bm latin1
+ $NOVA_BIN_DIR/nova-baremetal-manage db sync
+ fi
fi
- if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then
- # Create cache dir
- sudo mkdir -p $NOVA_AUTH_CACHE_DIR
- sudo chown `whoami` $NOVA_AUTH_CACHE_DIR
- fi
+ # Create cache dir
+ sudo mkdir -p $NOVA_AUTH_CACHE_DIR
+ sudo chown `whoami` $NOVA_AUTH_CACHE_DIR
+ rm -f $NOVA_AUTH_CACHE_DIR/*
+
+ # Create the keys folder
+ sudo mkdir -p ${NOVA_STATE_PATH}/keys
+ # make sure we own NOVA_STATE_PATH and all subdirs
+ sudo chown -R `whoami` ${NOVA_STATE_PATH}
}
# install_novaclient() - Collect source and prepare
@@ -460,6 +518,27 @@
git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH
}
+# start_nova_api() - Start the API process ahead of other things
+function start_nova_api() {
+ # Get right service port for testing
+ local service_port=$NOVA_SERVICE_PORT
+ if is_service_enabled tls-proxy; then
+ service_port=$NOVA_SERVICE_PORT_INT
+ fi
+
+ screen_it n-api "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api"
+ echo "Waiting for nova-api to start..."
+ if ! wait_for_service $SERVICE_TIMEOUT http://$SERVICE_HOST:$service_port; then
+ echo "nova-api did not start"
+ exit 1
+ fi
+
+ # Start proxies if enabled
+ if is_service_enabled tls-proxy; then
+ start_tls_proxy '*' $NOVA_SERVICE_PORT $NOVA_SERVICE_HOST $NOVA_SERVICE_PORT_INT &
+ fi
+}
+
# start_nova() - Start running processes, including screen
function start_nova() {
# The group **libvirtd** is added to the current user in this script.
diff --git a/lib/quantum b/lib/quantum
index 4e9f298..ea0e311 100644
--- a/lib/quantum
+++ b/lib/quantum
@@ -5,6 +5,36 @@
# ``functions`` file
# ``DEST`` must be defined
+# ``stack.sh`` calls the entry points in this order:
+#
+# install_quantum
+# install_quantumclient
+# install_quantum_agent_packages
+# install_quantum_third_party
+# setup_quantum
+# setup_quantumclient
+# configure_quantum
+# init_quantum
+# configure_quantum_third_party
+# init_quantum_third_party
+# start_quantum_third_party
+# create_nova_conf_quantum
+# start_quantum_service_and_check
+# create_quantum_initial_network
+# setup_quantum_debug
+# start_quantum_agents
+#
+# ``unstack.sh`` calls the entry points in this order:
+#
+# stop_quantum
+
+# Functions in lib/quantum are classified into the following categories:
+#
+# - entry points (called from stack.sh or unstack.sh)
+# - internal functions
+# - quantum exercises
+# - 3rd party programs
+
# Quantum Networking
# ------------------
@@ -31,8 +61,8 @@
set +o xtrace
-# Defaults
-# --------
+# Quantum Network Configuration
+# -----------------------------
# Set up default directories
QUANTUM_DIR=$DEST/quantum
@@ -49,7 +79,6 @@
Q_PORT=${Q_PORT:-9696}
# Default Quantum Host
Q_HOST=${Q_HOST:-$HOST_IP}
-# Which Quantum API nova should use
# Default admin username
Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-quantum}
# Default auth strategy
@@ -59,6 +88,8 @@
Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP:-True}
# Meta data IP
Q_META_DATA_IP=${Q_META_DATA_IP:-$HOST_IP}
+# Allow Overlapping IP among subnets
+Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-False}
# Use quantum-debug command
Q_USE_DEBUG_COMMAND=${Q_USE_DEBUG_COMMAND:-False}
@@ -70,14 +101,587 @@
QUANTUM_ROOTWRAP=$(get_rootwrap_location quantum)
Q_RR_COMMAND="sudo $QUANTUM_ROOTWRAP $Q_RR_CONF_FILE"
fi
-fi
+ # Provider Network Configurations
+ # --------------------------------
+
+ # The following variables control the Quantum openvswitch and
+ # linuxbridge plugins' allocation of tenant networks and
+ # availability of provider networks. If these are not configured
+ # in localrc, tenant networks will be local to the host (with no
+ # remote connectivity), and no physical resources will be
+ # available for the allocation of provider networks.
+
+ # To use GRE tunnels for tenant networks, set to True in
+ # localrc. GRE tunnels are only supported by the openvswitch
+ # plugin, and currently only on Ubuntu.
+ ENABLE_TENANT_TUNNELS=${ENABLE_TENANT_TUNNELS:-False}
+
+ # If using GRE tunnels for tenant networks, specify the range of
+ # tunnel IDs from which tenant networks are allocated. Can be
+ # overridden in localrc if necessary.
+ TENANT_TUNNEL_RANGES=${TENANT_TUNNEL_RANGES:-1:1000}
+
+ # To use VLANs for tenant networks, set to True in localrc. VLANs
+ # are supported by the openvswitch and linuxbridge plugins, each
+ # requiring additional configuration described below.
+ ENABLE_TENANT_VLANS=${ENABLE_TENANT_VLANS:-False}
+
+ # If using VLANs for tenant networks, set in localrc to specify
+ # the range of VLAN VIDs from which tenant networks are
+ # allocated. An external network switch must be configured to
+ # trunk these VLANs between hosts for multi-host connectivity.
+ #
+ # Example: ``TENANT_VLAN_RANGE=1000:1999``
+ TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-}
+
+ # If using VLANs for tenant networks, or if using flat or VLAN
+ # provider networks, set in localrc to the name of the physical
+ # network, and also configure OVS_PHYSICAL_BRIDGE for the
+ # openvswitch agent or LB_PHYSICAL_INTERFACE for the linuxbridge
+ # agent, as described below.
+ #
+ # Example: ``PHYSICAL_NETWORK=default``
+ PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-}
+
+ # With the openvswitch plugin, if using VLANs for tenant networks,
+ # or if using flat or VLAN provider networks, set in localrc to
+ # the name of the OVS bridge to use for the physical network. The
+ # bridge will be created if it does not already exist, but a
+ # physical interface must be manually added to the bridge as a
+ # port for external connectivity.
+ #
+ # Example: ``OVS_PHYSICAL_BRIDGE=br-eth1``
+ OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-}
+
+ # With the linuxbridge plugin, if using VLANs for tenant networks,
+ # or if using flat or VLAN provider networks, set in localrc to
+ # the name of the network interface to use for the physical
+ # network.
+ #
+ # Example: ``LB_PHYSICAL_INTERFACE=eth1``
+ LB_PHYSICAL_INTERFACE=${LB_PHYSICAL_INTERFACE:-}
+
+ # With the openvswitch plugin, set to True in localrc to enable
+ # provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False.
+ #
+ # Example: ``OVS_ENABLE_TUNNELING=True``
+ OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-$ENABLE_TENANT_TUNNELS}
+fi
# Entry Points
# ------------
-# configure_quantum_rootwrap() - configure Quantum's rootwrap
-function configure_quantum_rootwrap() {
+# configure_quantum()
+# Set common config for all quantum server and agents.
+# Per-service config below is only written for services enabled
+# in ENABLED_SERVICES.
+function configure_quantum() {
+ # Settings shared by the server and every agent
+ _configure_quantum_common
+ _configure_quantum_rpc
+
+ if is_service_enabled q-svc; then
+ _configure_quantum_service
+ fi
+ if is_service_enabled q-agt; then
+ _configure_quantum_plugin_agent
+ fi
+ if is_service_enabled q-dhcp; then
+ _configure_quantum_dhcp_agent
+ fi
+ if is_service_enabled q-l3; then
+ _configure_quantum_l3_agent
+ fi
+ if is_service_enabled q-meta; then
+ _configure_quantum_metadata_agent
+ fi
+
+ # No-op unless Q_USE_DEBUG_COMMAND is True
+ _configure_quantum_debug_command
+
+ # Remove residual data from previous runs (currently a no-op stub)
+ _cleanup_quantum
+}
+
+# create_nova_conf_quantum() - Add the nova.conf settings (via add_nova_opt)
+# that point nova at Quantum for networking, and select the libvirt VIF
+# driver matching ``Q_PLUGIN``.
+function create_nova_conf_quantum() {
+ add_nova_opt "network_api_class=nova.network.quantumv2.api.API"
+ add_nova_opt "quantum_admin_username=$Q_ADMIN_USERNAME"
+ add_nova_opt "quantum_admin_password=$SERVICE_PASSWORD"
+ add_nova_opt "quantum_admin_auth_url=$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0"
+ add_nova_opt "quantum_auth_strategy=$Q_AUTH_STRATEGY"
+ add_nova_opt "quantum_admin_tenant_name=$SERVICE_TENANT_NAME"
+ add_nova_opt "quantum_url=http://$Q_HOST:$Q_PORT"
+
+ # Pick the VIF driver for the active plugin; NOVA_VIF_DRIVER may be
+ # pre-set in localrc to override these defaults.
+ if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
+ NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver"}
+ elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
+ NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.QuantumLinuxBridgeVIFDriver"}
+ elif [[ "$Q_PLUGIN" = "ryu" ]]; then
+ NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"quantum.plugins.ryu.nova.vif.LibvirtOpenVswitchOFPRyuDriver"}
+ add_nova_opt "libvirt_ovs_integration_bridge=$OVS_BRIDGE"
+ add_nova_opt "linuxnet_ovs_ryu_api_host=$RYU_API_HOST:$RYU_API_PORT"
+ add_nova_opt "libvirt_ovs_ryu_api_host=$RYU_API_HOST:$RYU_API_PORT"
+ fi
+ add_nova_opt "libvirt_vif_driver=$NOVA_VIF_DRIVER"
+ add_nova_opt "linuxnet_interface_driver=$LINUXNET_VIF_DRIVER"
+ # Proxy instance metadata requests through the quantum metadata agent
+ if is_service_enabled q-meta; then
+ add_nova_opt "service_quantum_metadata_proxy=True"
+ fi
+}
+
+# create_quantum_accounts() - Set up common required quantum accounts
+
+# Tenant User Roles
+# ------------------------------------------------------------------
+# service quantum admin # if enabled
+
+# Migrated from keystone_data.sh
+function create_quantum_accounts() {
+
+ # Look up IDs for the service tenant and the admin role
+ SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+ ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }")
+
+ if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then
+ # Create the quantum service user and grant it admin in the service tenant
+ QUANTUM_USER=$(keystone user-create \
+ --name=quantum \
+ --pass="$SERVICE_PASSWORD" \
+ --tenant_id $SERVICE_TENANT \
+ --email=quantum@example.com \
+ | grep " id " | get_field 2)
+ keystone user-role-add \
+ --tenant_id $SERVICE_TENANT \
+ --user_id $QUANTUM_USER \
+ --role_id $ADMIN_ROLE
+ # Register the network service and its endpoint in the catalog
+ # (only needed for the SQL catalog backend)
+ if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+ QUANTUM_SERVICE=$(keystone service-create \
+ --name=quantum \
+ --type=network \
+ --description="Quantum Service" \
+ | grep " id " | get_field 2)
+ keystone endpoint-create \
+ --region RegionOne \
+ --service_id $QUANTUM_SERVICE \
+ --publicurl "http://$SERVICE_HOST:9696/" \
+ --adminurl "http://$SERVICE_HOST:9696/" \
+ --internalurl "http://$SERVICE_HOST:9696/"
+ fi
+ fi
+}
+
+# create_quantum_initial_network() - Create the demo tenant's private
+# network/subnet and, when q-l3 is enabled, a router plus an external
+# network wired up as the router gateway.
+function create_quantum_initial_network() {
+ TENANT_ID=$(keystone tenant-list | grep " demo " | get_field 1)
+
+ # Create a small network
+ # Since quantum command is executed in admin context at this point,
+ # ``--tenant_id`` needs to be specified.
+ NET_ID=$(quantum net-create --tenant_id $TENANT_ID "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2)
+ SUBNET_ID=$(quantum subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2)
+
+ if is_service_enabled q-l3; then
+ # Create a router, and add the private subnet as one of its interfaces
+ ROUTER_ID=$(quantum router-create --tenant_id $TENANT_ID router1 | grep ' id ' | get_field 2)
+ quantum router-interface-add $ROUTER_ID $SUBNET_ID
+ # Create an external network, and a subnet. Configure the external network as router gw
+ EXT_NET_ID=$(quantum net-create "$PUBLIC_NETWORK_NAME" -- --router:external=True | grep ' id ' | get_field 2)
+ EXT_GW_IP=$(quantum subnet-create --ip_version 4 $EXT_NET_ID $FLOATING_RANGE -- --enable_dhcp=False | grep 'gateway_ip' | get_field 2)
+ quantum router-gateway-set $ROUTER_ID $EXT_NET_ID
+
+ if is_quantum_ovs_base_plugin "$Q_PLUGIN" && [[ "$Q_USE_NAMESPACE" = "True" ]]; then
+ # Give the public bridge the external gateway IP and route the
+ # fixed range through the router's gateway port
+ CIDR_LEN=${FLOATING_RANGE#*/}
+ sudo ip addr add $EXT_GW_IP/$CIDR_LEN dev $PUBLIC_BRIDGE
+ sudo ip link set $PUBLIC_BRIDGE up
+ # NOTE(review): assumes exactly one router_gateway port exists and
+ # that its IP is field 8 of the quoted output -- confirm if the
+ # port-list format or router count changes
+ ROUTER_GW_IP=`quantum port-list -c fixed_ips -c device_owner | grep router_gateway | awk -F '"' '{ print $8; }'`
+ sudo route add -net $FIXED_RANGE gw $ROUTER_GW_IP
+ fi
+ if [[ "$Q_USE_NAMESPACE" == "False" ]]; then
+ # Explicitly set router id in l3 agent configuration
+ iniset $Q_L3_CONF_FILE DEFAULT router_id $ROUTER_ID
+ fi
+ fi
+}
+
+# init_quantum() - Initialize databases, etc.
+function init_quantum() {
+ # Stub: the plugin database is (re)created in _configure_quantum_service
+ :
+}
+
+# install_quantum() - Collect source and prepare
+function install_quantum() {
+ git_clone $QUANTUM_REPO $QUANTUM_DIR $QUANTUM_BRANCH
+}
+
+# install_quantumclient() - Collect source and prepare
+function install_quantumclient() {
+ git_clone $QUANTUMCLIENT_REPO $QUANTUMCLIENT_DIR $QUANTUMCLIENT_BRANCH
+}
+
+# install_quantum_agent_packages() - Collect source and prepare
+# Install the distro packages the plugin agent needs (Open vSwitch for
+# OVS-based plugins, bridge-utils for linuxbridge).
+function install_quantum_agent_packages() {
+ if is_quantum_ovs_base_plugin "$Q_PLUGIN"; then
+ # Install deps
+ # FIXME add to ``files/apts/quantum``, but don't install if not needed!
+ if is_ubuntu; then
+ kernel_version=`cat /proc/version | cut -d " " -f3`
+ install_package make fakeroot dkms openvswitch-switch openvswitch-datapath-dkms linux-headers-$kernel_version
+ else
+ ### FIXME(dtroyer): Find RPMs for OpenVSwitch
+ echo "OpenVSwitch packages need to be located"
+ # Fedora does not start OVS by default, so make sure it is running
+ restart_service openvswitch
+ fi
+ elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
+ install_package bridge-utils
+ fi
+}
+
+# is_quantum_ovs_base_plugin() - Return 0 (true) when the given plugin is
+# built on Open vSwitch (i.e. openvswitch or ryu), 1 otherwise.
+function is_quantum_ovs_base_plugin() {
+ local plugin=$1
+ if [[ ",openvswitch,ryu," =~ ,${plugin}, ]]; then
+ return 0
+ fi
+ return 1
+}
+
+# setup_quantum() - Install quantum from the cloned source in develop mode
+function setup_quantum() {
+ setup_develop $QUANTUM_DIR
+}
+
+# setup_quantumclient() - Install python-quantumclient from source in develop mode
+function setup_quantumclient() {
+ setup_develop $QUANTUMCLIENT_DIR
+}
+
+# Start running processes, including screen
+# start_quantum_service_and_check() - Launch quantum-server in screen and
+# wait until its API port answers (or fail after SERVICE_TIMEOUT).
+function start_quantum_service_and_check() {
+ # Start the Quantum service
+ screen_it q-svc "cd $QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server --config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE"
+ echo "Waiting for Quantum to start..."
+ # Poll the API port (9696) once a second until it responds
+ if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://127.0.0.1:9696; do sleep 1; done"; then
+ echo "Quantum did not start"
+ exit 1
+ fi
+}
+
+# Start running processes, including screen
+# start_quantum_agents() - Launch the enabled quantum agents in screen.
+# The AGENT_*_BINARY and *_CONF_FILE variables are set by the
+# corresponding _configure_quantum_* functions.
+function start_quantum_agents() {
+ # Start up the quantum agents if enabled
+ screen_it q-agt "python $AGENT_BINARY --config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE"
+ screen_it q-dhcp "python $AGENT_DHCP_BINARY --config-file $QUANTUM_CONF --config-file=$Q_DHCP_CONF_FILE"
+ screen_it q-meta "python $AGENT_META_BINARY --config-file $QUANTUM_CONF --config-file=$Q_META_CONF_FILE"
+ screen_it q-l3 "python $AGENT_L3_BINARY --config-file $QUANTUM_CONF --config-file=$Q_L3_CONF_FILE"
+}
+
+# stop_quantum() - Stop running processes (non-screen)
+function stop_quantum() {
+ if is_service_enabled q-dhcp; then
+ # Kill any dnsmasq instances spawned by the DHCP agent (they run on
+ # tap/ns- interfaces and are not managed by screen)
+ pid=$(ps aux | awk '/[d]nsmasq.+interface=(tap|ns-)/ { print $2 }')
+ [ ! -z "$pid" ] && sudo kill -9 $pid
+ fi
+}
+
+# _cleanup_quantum() - Remove residual data files, anything left over from previous
+# runs that a clean run would need to clean up
+function _cleanup_quantum() {
+ # Stub: nothing to clean up yet
+ :
+}
+
+# _configure_quantum_common()
+# Set common config for all quantum server and agents.
+# This MUST be called before other _configure_quantum_* functions.
+function _configure_quantum_common() {
+ # Put config files in ``QUANTUM_CONF_DIR`` for everyone to find
+ if [[ ! -d $QUANTUM_CONF_DIR ]]; then
+ sudo mkdir -p $QUANTUM_CONF_DIR
+ fi
+ sudo chown `whoami` $QUANTUM_CONF_DIR
+
+ cp $QUANTUM_DIR/etc/quantum.conf $QUANTUM_CONF
+
+ # Select the plugin config path/filename, database name and core
+ # plugin class for the configured Q_PLUGIN
+ if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
+ Q_PLUGIN_CONF_PATH=etc/quantum/plugins/openvswitch
+ Q_PLUGIN_CONF_FILENAME=ovs_quantum_plugin.ini
+ Q_DB_NAME="ovs_quantum"
+ Q_PLUGIN_CLASS="quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPluginV2"
+ elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
+ Q_PLUGIN_CONF_PATH=etc/quantum/plugins/linuxbridge
+ Q_PLUGIN_CONF_FILENAME=linuxbridge_conf.ini
+ Q_DB_NAME="quantum_linux_bridge"
+ Q_PLUGIN_CLASS="quantum.plugins.linuxbridge.lb_quantum_plugin.LinuxBridgePluginV2"
+ elif [[ "$Q_PLUGIN" = "ryu" ]]; then
+ Q_PLUGIN_CONF_PATH=etc/quantum/plugins/ryu
+ Q_PLUGIN_CONF_FILENAME=ryu.ini
+ Q_DB_NAME="ovs_quantum"
+ Q_PLUGIN_CLASS="quantum.plugins.ryu.ryu_quantum_plugin.RyuQuantumPluginV2"
+ fi
+
+ if [[ $Q_PLUGIN_CONF_PATH == '' || $Q_PLUGIN_CONF_FILENAME == '' || $Q_PLUGIN_CLASS == '' ]]; then
+ echo "Quantum plugin not set.. exiting"
+ exit 1
+ fi
+
+ # If needed, move config file from ``$QUANTUM_DIR/etc/quantum`` to ``QUANTUM_CONF_DIR``
+ # Note: Q_PLUGIN_CONF_FILE is stored relative; callers prepend '/'
+ mkdir -p /$Q_PLUGIN_CONF_PATH
+ Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME
+ cp $QUANTUM_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE
+
+ # Point the plugin at its database
+ database_connection_url dburl $Q_DB_NAME
+ iniset /$Q_PLUGIN_CONF_FILE DATABASE sql_connection $dburl
+ unset dburl
+
+ _quantum_setup_rootwrap
+}
+
+# _configure_quantum_debug_command() - Write the config file used by the
+# quantum-debug command; no-op unless Q_USE_DEBUG_COMMAND is True.
+function _configure_quantum_debug_command() {
+ if [[ "$Q_USE_DEBUG_COMMAND" != "True" ]]; then
+ return
+ fi
+
+ # The debug config is derived from the stock l3_agent.ini template
+ cp $QUANTUM_DIR/etc/l3_agent.ini $QUANTUM_TEST_CONFIG_FILE
+
+ iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT verbose False
+ iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT debug False
+ iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE
+ iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT root_helper "$Q_RR_COMMAND"
+
+ _quantum_setup_keystone $QUANTUM_TEST_CONFIG_FILE DEFAULT set_auth_url
+ _quantum_setup_interface_driver $QUANTUM_TEST_CONFIG_FILE
+
+ # OVS-based plugins use the public bridge; linuxbridge has no ext bridge
+ if is_quantum_ovs_base_plugin "$Q_PLUGIN"; then
+ iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE
+ elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
+ iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT external_network_bridge ''
+ fi
+
+ if [[ "$Q_PLUGIN" = "ryu" ]]; then
+ iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT
+ fi
+}
+
+# _configure_quantum_dhcp_agent() - Set up dhcp_agent.ini and the agent
+# binary path. Called when q-dhcp is enabled.
+function _configure_quantum_dhcp_agent() {
+ AGENT_DHCP_BINARY="$QUANTUM_DIR/bin/quantum-dhcp-agent"
+ Q_DHCP_CONF_FILE=$QUANTUM_CONF_DIR/dhcp_agent.ini
+
+ cp $QUANTUM_DIR/etc/dhcp_agent.ini $Q_DHCP_CONF_FILE
+
+ iniset $Q_DHCP_CONF_FILE DEFAULT verbose True
+ iniset $Q_DHCP_CONF_FILE DEFAULT debug True
+ iniset $Q_DHCP_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE
+ iniset $Q_DHCP_CONF_FILE DEFAULT state_path $DATA_DIR/quantum
+ iniset $Q_DHCP_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND"
+
+ _quantum_setup_keystone $Q_DHCP_CONF_FILE DEFAULT set_auth_url
+ _quantum_setup_interface_driver $Q_DHCP_CONF_FILE
+
+ if [[ "$Q_PLUGIN" = "ryu" ]]; then
+ iniset $Q_DHCP_CONF_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT
+ fi
+}
+
+# _configure_quantum_l3_agent() - Set up l3_agent.ini, the agent binary
+# path and (for OVS-based plugins) the external bridge. Called when q-l3
+# is enabled.
+function _configure_quantum_l3_agent() {
+ AGENT_L3_BINARY="$QUANTUM_DIR/bin/quantum-l3-agent"
+ PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex}
+ Q_L3_CONF_FILE=$QUANTUM_CONF_DIR/l3_agent.ini
+
+ cp $QUANTUM_DIR/etc/l3_agent.ini $Q_L3_CONF_FILE
+
+ iniset $Q_L3_CONF_FILE DEFAULT verbose True
+ iniset $Q_L3_CONF_FILE DEFAULT debug True
+ iniset $Q_L3_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE
+ iniset $Q_L3_CONF_FILE DEFAULT state_path $DATA_DIR/quantum
+ iniset $Q_L3_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND"
+
+ _quantum_setup_keystone $Q_L3_CONF_FILE DEFAULT set_auth_url
+ _quantum_setup_interface_driver $Q_L3_CONF_FILE
+
+ # OVS-based plugins get an external bridge; linuxbridge uses none
+ if is_quantum_ovs_base_plugin "$Q_PLUGIN"; then
+ iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE
+ _quantum_setup_external_bridge $PUBLIC_BRIDGE
+ elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
+ iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge ''
+ fi
+
+ if [[ "$Q_PLUGIN" = "ryu" ]]; then
+ iniset $Q_L3_CONF_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT
+ fi
+}
+
+# _configure_quantum_metadata_agent() - Set up metadata_agent.ini and the
+# agent binary path. Called when q-meta is enabled.
+function _configure_quantum_metadata_agent() {
+ AGENT_META_BINARY="$QUANTUM_DIR/bin/quantum-metadata-agent"
+ Q_META_CONF_FILE=$QUANTUM_CONF_DIR/metadata_agent.ini
+
+ cp $QUANTUM_DIR/etc/metadata_agent.ini $Q_META_CONF_FILE
+
+ iniset $Q_META_CONF_FILE DEFAULT verbose True
+ iniset $Q_META_CONF_FILE DEFAULT debug True
+ iniset $Q_META_CONF_FILE DEFAULT state_path $DATA_DIR/quantum
+ # Forward metadata requests to the nova metadata service at Q_META_DATA_IP
+ iniset $Q_META_CONF_FILE DEFAULT nova_metadata_ip $Q_META_DATA_IP
+ iniset $Q_META_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND"
+
+ _quantum_setup_keystone $Q_META_CONF_FILE DEFAULT set_auth_url
+}
+
+# _configure_quantum_plugin_agent() - Set config files for quantum plugin agent
+# It is called when q-agt is enabled.
+function _configure_quantum_plugin_agent() {
+ # Configure agent for plugin
+ if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
+ _configure_quantum_plugin_agent_openvswitch
+ elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
+ _configure_quantum_plugin_agent_linuxbridge
+ elif [[ "$Q_PLUGIN" = "ryu" ]]; then
+ _configure_quantum_plugin_agent_ryu
+ fi
+
+ # All agents share the rootwrap helper configured earlier
+ iniset /$Q_PLUGIN_CONF_FILE AGENT root_helper "$Q_RR_COMMAND"
+}
+
+# _configure_quantum_plugin_agent_linuxbridge() - Set the interface
+# mappings and agent binary for the linuxbridge plugin agent.
+function _configure_quantum_plugin_agent_linuxbridge() {
+ # Setup physical network interface mappings. Override
+ # ``LB_VLAN_RANGES`` and ``LB_INTERFACE_MAPPINGS`` in ``localrc`` for more
+ # complex physical network configurations.
+ if [[ "$LB_INTERFACE_MAPPINGS" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$LB_PHYSICAL_INTERFACE" != "" ]]; then
+ LB_INTERFACE_MAPPINGS=$PHYSICAL_NETWORK:$LB_PHYSICAL_INTERFACE
+ fi
+ if [[ "$LB_INTERFACE_MAPPINGS" != "" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE LINUX_BRIDGE physical_interface_mappings $LB_INTERFACE_MAPPINGS
+ fi
+ AGENT_BINARY="$QUANTUM_DIR/bin/quantum-linuxbridge-agent"
+}
+
+# _configure_quantum_plugin_agent_openvswitch() - Set up the integration
+# bridge, optional GRE tunneling, bridge mappings and the agent binary
+# for the openvswitch plugin agent.
+function _configure_quantum_plugin_agent_openvswitch() {
+ # Setup integration bridge
+ OVS_BRIDGE=${OVS_BRIDGE:-br-int}
+ _quantum_setup_ovs_bridge $OVS_BRIDGE
+
+ # Setup agent for tunneling
+ if [[ "$OVS_ENABLE_TUNNELING" = "True" ]]; then
+ # Verify tunnels are supported
+ # REVISIT - also check kernel module support for GRE and patch ports
+ OVS_VERSION=`ovs-vsctl --version | head -n 1 | awk '{print $4;}'`
+ # NOTE(review): this only aborts when q-svc is NOT co-located (i.e.
+ # a separate compute node) -- confirm that is the intended condition
+ if [ $OVS_VERSION \< "1.4" ] && ! is_service_enabled q-svc ; then
+ echo "You are running OVS version $OVS_VERSION."
+ echo "OVS 1.4+ is required for tunneling between multiple hosts."
+ exit 1
+ fi
+ iniset /$Q_PLUGIN_CONF_FILE OVS enable_tunneling True
+ iniset /$Q_PLUGIN_CONF_FILE OVS local_ip $HOST_IP
+ fi
+
+ # Setup physical network bridge mappings. Override
+ # ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc`` for more
+ # complex physical network configurations.
+ if [[ "$OVS_BRIDGE_MAPPINGS" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$OVS_PHYSICAL_BRIDGE" != "" ]]; then
+ OVS_BRIDGE_MAPPINGS=$PHYSICAL_NETWORK:$OVS_PHYSICAL_BRIDGE
+
+ # Configure bridge manually with physical interface as port for multi-node
+ sudo ovs-vsctl --no-wait -- --may-exist add-br $OVS_PHYSICAL_BRIDGE
+ fi
+ if [[ "$OVS_BRIDGE_MAPPINGS" != "" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE OVS bridge_mappings $OVS_BRIDGE_MAPPINGS
+ fi
+ AGENT_BINARY="$QUANTUM_DIR/bin/quantum-openvswitch-agent"
+}
+
+# _configure_quantum_plugin_agent_ryu() - Set up the integration bridge
+# (plus optional internal port) and agent binary for the ryu plugin agent.
+function _configure_quantum_plugin_agent_ryu() {
+ # Set up integration bridge
+ OVS_BRIDGE=${OVS_BRIDGE:-br-int}
+ _quantum_setup_ovs_bridge $OVS_BRIDGE
+ if [ -n "$RYU_INTERNAL_INTERFACE" ]; then
+ sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_BRIDGE $RYU_INTERNAL_INTERFACE
+ fi
+ # Note: the ryu agent runs from the source tree, not bin/
+ AGENT_BINARY="$QUANTUM_DIR/quantum/plugins/ryu/agent/ryu_quantum_agent.py"
+}
+
+# Quantum RPC support - must be updated prior to starting any of the services
+function _configure_quantum_rpc() {
+ iniset $QUANTUM_CONF DEFAULT control_exchange quantum
+ # Backend precedence: qpid, then zeromq, then rabbit (when RABBIT_HOST
+ # and RABBIT_PASSWORD are both set)
+ if is_service_enabled qpid ; then
+ iniset $QUANTUM_CONF DEFAULT rpc_backend quantum.openstack.common.rpc.impl_qpid
+ elif is_service_enabled zeromq; then
+ iniset $QUANTUM_CONF DEFAULT rpc_backend quantum.openstack.common.rpc.impl_zmq
+ elif [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then
+ iniset $QUANTUM_CONF DEFAULT rabbit_host $RABBIT_HOST
+ iniset $QUANTUM_CONF DEFAULT rabbit_password $RABBIT_PASSWORD
+ fi
+}
+
+# _configure_quantum_service() - Set config files for quantum service
+# It is called when q-svc is enabled.
+function _configure_quantum_service() {
+ Q_API_PASTE_FILE=$QUANTUM_CONF_DIR/api-paste.ini
+ Q_POLICY_FILE=$QUANTUM_CONF_DIR/policy.json
+
+ cp $QUANTUM_DIR/etc/api-paste.ini $Q_API_PASTE_FILE
+ cp $QUANTUM_DIR/etc/policy.json $Q_POLICY_FILE
+
+ # (Re)create the plugin database; a database backend is mandatory
+ if is_service_enabled $DATABASE_BACKENDS; then
+ recreate_database $Q_DB_NAME utf8
+ else
+ echo "A database must be enabled in order to use the $Q_PLUGIN Quantum plugin."
+ exit 1
+ fi
+
+ # Update either configuration file with plugin
+ iniset $QUANTUM_CONF DEFAULT core_plugin $Q_PLUGIN_CLASS
+
+ iniset $QUANTUM_CONF DEFAULT verbose True
+ iniset $QUANTUM_CONF DEFAULT debug True
+ iniset $QUANTUM_CONF DEFAULT allow_overlapping_ips $Q_ALLOW_OVERLAPPING_IP
+
+ iniset $QUANTUM_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY
+ _quantum_setup_keystone $Q_API_PASTE_FILE filter:authtoken
+
+ # Configure plugin
+ # Plugin settings go to /$Q_PLUGIN_CONF_FILE, which the server and the
+ # agents both read
+ if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
+ if [[ "$ENABLE_TENANT_TUNNELS" = "True" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE OVS tenant_network_type gre
+ iniset /$Q_PLUGIN_CONF_FILE OVS tunnel_id_ranges $TENANT_TUNNEL_RANGES
+ elif [[ "$ENABLE_TENANT_VLANS" = "True" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE OVS tenant_network_type vlan
+ else
+ echo "WARNING - The openvswitch plugin is using local tenant networks, with no connectivity between hosts."
+ fi
+
+ # Override ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc``
+ # for more complex physical network configurations.
+ if [[ "$OVS_VLAN_RANGES" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]]; then
+ OVS_VLAN_RANGES=$PHYSICAL_NETWORK
+ if [[ "$TENANT_VLAN_RANGE" != "" ]]; then
+ OVS_VLAN_RANGES=$OVS_VLAN_RANGES:$TENANT_VLAN_RANGE
+ fi
+ fi
+ if [[ "$OVS_VLAN_RANGES" != "" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE OVS network_vlan_ranges $OVS_VLAN_RANGES
+ fi
+
+ # Enable tunnel networks if selected
+ if [[ $OVS_ENABLE_TUNNELING = "True" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE OVS enable_tunneling True
+ fi
+ elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
+ if [[ "$ENABLE_TENANT_VLANS" = "True" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE VLANS tenant_network_type vlan
+ else
+ echo "WARNING - The linuxbridge plugin is using local tenant networks, with no connectivity between hosts."
+ fi
+
+ # Override ``LB_VLAN_RANGES`` and ``LB_INTERFACE_MAPPINGS`` in ``localrc``
+ # for more complex physical network configurations.
+ if [[ "$LB_VLAN_RANGES" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]]; then
+ LB_VLAN_RANGES=$PHYSICAL_NETWORK
+ if [[ "$TENANT_VLAN_RANGE" != "" ]]; then
+ LB_VLAN_RANGES=$LB_VLAN_RANGES:$TENANT_VLAN_RANGE
+ fi
+ fi
+ if [[ "$LB_VLAN_RANGES" != "" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE VLANS network_vlan_ranges $LB_VLAN_RANGES
+ fi
+ elif [[ "$Q_PLUGIN" = "ryu" ]]; then
+ # Point the plugin at the Ryu OpenFlow controller and REST API
+ iniset /$Q_PLUGIN_CONF_FILE OVS openflow_controller $RYU_OFP_HOST:$RYU_OFP_PORT
+ iniset /$Q_PLUGIN_CONF_FILE OVS openflow_rest_api $RYU_API_HOST:$RYU_API_PORT
+ fi
+}
+
+# Utility Functions
+#------------------
+
+# _quantum_setup_rootwrap() - configure Quantum's rootwrap
+function _quantum_setup_rootwrap() {
if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then
return
fi
@@ -109,7 +713,7 @@
}
# Configures keystone integration for quantum service and agents
-function quantum_setup_keystone() {
+function _quantum_setup_keystone() {
local conf_file=$1
local section=$2
local use_auth_url=$3
@@ -123,47 +727,61 @@
iniset $conf_file $section admin_tenant_name $SERVICE_TENANT_NAME
iniset $conf_file $section admin_user $Q_ADMIN_USERNAME
iniset $conf_file $section admin_password $SERVICE_PASSWORD
- if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then
- iniset $conf_file $section signing_dir $QUANTUM_AUTH_CACHE_DIR
- # Create cache dir
- sudo mkdir -p $QUANTUM_AUTH_CACHE_DIR
- sudo chown `whoami` $QUANTUM_AUTH_CACHE_DIR
- fi
+ iniset $conf_file $section signing_dir $QUANTUM_AUTH_CACHE_DIR
+ # Create cache dir
+ sudo mkdir -p $QUANTUM_AUTH_CACHE_DIR
+ sudo chown `whoami` $QUANTUM_AUTH_CACHE_DIR
+ rm -f $QUANTUM_AUTH_CACHE_DIR/*
}
-function quantum_setup_ovs_bridge() {
+function _quantum_setup_ovs_bridge() {
local bridge=$1
- for PORT in `sudo ovs-vsctl --no-wait list-ports $bridge`; do
- if [[ "$PORT" =~ tap* ]]; then echo `sudo ip link delete $PORT` > /dev/null; fi
- sudo ovs-vsctl --no-wait del-port $bridge $PORT
- done
- sudo ovs-vsctl --no-wait -- --if-exists del-br $bridge
- sudo ovs-vsctl --no-wait add-br $bridge
+ quantum-ovs-cleanup --ovs_integration_bridge $bridge
+ sudo ovs-vsctl --no-wait -- --may-exist add-br $bridge
sudo ovs-vsctl --no-wait br-set-external-id $bridge bridge-id $bridge
}
-function quantum_setup_external_bridge() {
+function _quantum_setup_interface_driver() {
+ local conf_file=$1
+ if [[ "$Q_PLUGIN" == "openvswitch" ]]; then
+ iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver
+ elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
+ iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver
+ elif [[ "$Q_PLUGIN" = "ryu" ]]; then
+ iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.RyuInterfaceDriver
+ fi
+}
+
+function _quantum_setup_external_bridge() {
local bridge=$1
- # Create it if it does not exist
+ quantum-ovs-cleanup --external_network_bridge $bridge
sudo ovs-vsctl --no-wait -- --may-exist add-br $bridge
- # remove internal ports
- for PORT in `sudo ovs-vsctl --no-wait list-ports $bridge`; do
- TYPE=$(sudo ovs-vsctl get interface $PORT type)
- if [[ "$TYPE" == "internal" ]]; then
- echo `sudo ip link delete $PORT` > /dev/null
- sudo ovs-vsctl --no-wait del-port $bridge $PORT
- fi
- done
# ensure no IP is configured on the public bridge
sudo ip addr flush dev $bridge
}
-function is_quantum_ovs_base_plugin() {
- local plugin=$1
- if [[ ",openvswitch,ryu," =~ ,${plugin}, ]]; then
- return 0
+# Functions for Quantum Exercises
+#--------------------------------
+
+function delete_probe() {
+ local from_net="$1"
+ net_id=`_get_net_id $from_net`
+ probe_id=`quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}'`
+ quantum-debug --os-tenant-name admin --os-username admin probe-delete $probe_id
+}
+
+function setup_quantum_debug() {
+ if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then
+ public_net_id=`_get_net_id $PUBLIC_NETWORK_NAME`
+ quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create $public_net_id
+ private_net_id=`_get_net_id $PRIVATE_NETWORK_NAME`
+ quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create $private_net_id
fi
- return 1
+}
+
+function teardown_quantum_debug() {
+ delete_probe $PUBLIC_NETWORK_NAME
+ delete_probe $PRIVATE_NETWORK_NAME
}
function _get_net_id() {
@@ -177,13 +795,6 @@
echo "$Q_RR_COMMAND ip netns exec qprobe-$probe_id"
}
-function delete_probe() {
- local from_net="$1"
- net_id=`_get_net_id $from_net`
- probe_id=`quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}'`
- quantum-debug --os-tenant-name admin --os-username admin probe-delete $probe_id
-}
-
function _ping_check_quantum() {
local from_net=$1
local ip=$2
@@ -221,17 +832,59 @@
fi
}
-function setup_quantum() {
- public_net_id=`_get_net_id $PUBLIC_NETWORK_NAME`
- quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create $public_net_id
- private_net_id=`_get_net_id $PRIVATE_NETWORK_NAME`
- quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create $private_net_id
+# Quantum 3rd party programs
+#---------------------------
+# A comma-separated list of 3rd party programs
+QUANTUM_THIRD_PARTIES="ryu"
+for third_party in ${QUANTUM_THIRD_PARTIES//,/ }; do
+ source lib/$third_party
+done
+
+# configure_quantum_third_party() - Set config files, create data dirs, etc
+function configure_quantum_third_party() {
+ for third_party in ${QUANTUM_THIRD_PARTIES//,/ }; do
+ if is_service_enabled $third_party; then
+ configure_${third_party}
+ fi
+ done
}
-function teardown_quantum() {
- delete_probe $PUBLIC_NETWORK_NAME
- delete_probe $PRIVATE_NETWORK_NAME
+# init_quantum_third_party() - Initialize databases, etc.
+function init_quantum_third_party() {
+ for third_party in ${QUANTUM_THIRD_PARTIES//,/ }; do
+ if is_service_enabled $third_party; then
+ init_${third_party}
+ fi
+ done
}
+# install_quantum_third_party() - Collect source and prepare
+function install_quantum_third_party() {
+ # Dispatch to install_<name> for each enabled entry in QUANTUM_THIRD_PARTIES
+ for third_party in ${QUANTUM_THIRD_PARTIES//,/ }; do
+ if is_service_enabled $third_party; then
+ install_${third_party}
+ fi
+ done
+}
+
+# start_quantum_third_party() - Start running processes, including screen
+function start_quantum_third_party() {
+ # Dispatch to start_<name> for each enabled entry in QUANTUM_THIRD_PARTIES
+ for third_party in ${QUANTUM_THIRD_PARTIES//,/ }; do
+ if is_service_enabled $third_party; then
+ start_${third_party}
+ fi
+ done
+}
+
+# stop_quantum_third_party - Stop running processes (non-screen)
+function stop_quantum_third_party() {
+ # Dispatch to stop_<name> for each enabled entry in QUANTUM_THIRD_PARTIES
+ for third_party in ${QUANTUM_THIRD_PARTIES//,/ }; do
+ if is_service_enabled $third_party; then
+ stop_${third_party}
+ fi
+ done
+}
+
+
# Restore xtrace
$XTRACE
diff --git a/lib/ryu b/lib/ryu
new file mode 100644
index 0000000..ac3462b
--- /dev/null
+++ b/lib/ryu
@@ -0,0 +1,63 @@
+# Ryu OpenFlow Controller
+# -----------------------
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+RYU_DIR=$DEST/ryu
+# Ryu API Host
+RYU_API_HOST=${RYU_API_HOST:-127.0.0.1}
+# Ryu API Port
+RYU_API_PORT=${RYU_API_PORT:-8080}
+# Ryu OFP Host
+RYU_OFP_HOST=${RYU_OFP_HOST:-127.0.0.1}
+# Ryu OFP Port
+RYU_OFP_PORT=${RYU_OFP_PORT:-6633}
+# Ryu Applications
+RYU_APPS=${RYU_APPS:-ryu.app.simple_isolation,ryu.app.rest}
+
+# configure_ryu() - Install Ryu from the cloned source in develop mode
+function configure_ryu() {
+ setup_develop $RYU_DIR
+}
+
+# init_ryu() - Generate /etc/ryu/ryu.conf from the RYU_* settings above
+function init_ryu() {
+ RYU_CONF_DIR=/etc/ryu
+ if [[ ! -d $RYU_CONF_DIR ]]; then
+ sudo mkdir -p $RYU_CONF_DIR
+ fi
+ sudo chown `whoami` $RYU_CONF_DIR
+ RYU_CONF=$RYU_CONF_DIR/ryu.conf
+ sudo rm -rf $RYU_CONF
+
+ # Write the flagfile consumed by ryu-manager (see start_ryu)
+ cat <<EOF > $RYU_CONF
+--app_lists=$RYU_APPS
+--wsapi_host=$RYU_API_HOST
+--wsapi_port=$RYU_API_PORT
+--ofp_listen_host=$RYU_OFP_HOST
+--ofp_tcp_listen_port=$RYU_OFP_PORT
+EOF
+}
+
+# install_ryu() - Collect source and prepare
+function install_ryu() {
+ git_clone $RYU_REPO $RYU_DIR $RYU_BRANCH
+}
+
+# is_ryu_required() - Return 0 (true) when the ryu service is enabled or
+# quantum is enabled with the ryu plugin, 1 otherwise.
+function is_ryu_required() {
+ if is_service_enabled ryu || (is_service_enabled quantum && [[ "$Q_PLUGIN" = "ryu" ]]); then
+ return 0
+ fi
+ return 1
+}
+
+# start_ryu() - Launch ryu-manager in screen with the generated flagfile
+function start_ryu() {
+ screen_it ryu "cd $RYU_DIR && $RYU_DIR/bin/ryu-manager --flagfile $RYU_CONF"
+}
+
+# stop_ryu() - Stub; screen windows are torn down elsewhere
+function stop_ryu() {
+ :
+}
+
+# Restore xtrace
+$XTRACE
diff --git a/lib/swift b/lib/swift
index 140e5e9..8934264 100644
--- a/lib/swift
+++ b/lib/swift
@@ -1,5 +1,5 @@
# lib/swift
-# Functions to control the configuration and operation of the swift service
+# Functions to control the configuration and operation of the **Swift** service
# Dependencies:
# ``functions`` file
@@ -23,12 +23,10 @@
# Defaults
# --------
-# <define global variables here that belong to this project>
-
# Set up default directories
-
SWIFT_DIR=$DEST/swift
SWIFTCLIENT_DIR=$DEST/python-swiftclient
+SWIFT_AUTH_CACHE_DIR=${SWIFT_AUTH_CACHE_DIR:-/var/cache/swift}
# TODO: add logging to different location.
@@ -70,6 +68,7 @@
CONTAINER_PORT_BASE=6011
ACCOUNT_PORT_BASE=6012
+
# Entry Points
# ------------
@@ -212,6 +211,7 @@
iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME
iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_user swift
iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_password $SERVICE_PASSWORD
+ iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken signing_dir $SWIFT_AUTH_CACHE_DIR
iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth use
iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth operator_roles
@@ -291,7 +291,6 @@
sudo chown -R $USER:adm ${swift_log_dir}
sed "s,%SWIFT_LOGDIR%,${swift_log_dir}," $FILES/swift/rsyslog.conf | sudo \
tee /etc/rsyslog.d/10-swift.conf
-
}
# configure_swiftclient() - Set config files, create data dirs, etc
@@ -325,6 +324,10 @@
swift-ring-builder account.builder rebalance
} && popd >/dev/null
+ # Create cache dir
+ sudo mkdir -p $SWIFT_AUTH_CACHE_DIR
+ sudo chown `whoami` $SWIFT_AUTH_CACHE_DIR
+ rm -f $SWIFT_AUTH_CACHE_DIR/*
}
function install_swift() {
diff --git a/lib/tempest b/lib/tempest
index 7fa15df..190d77f 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -1,4 +1,5 @@
# lib/tempest
+# Install and configure Tempest
# Dependencies:
# ``functions`` file
@@ -23,33 +24,29 @@
#
# install_tempest
# configure_tempest
-# init_tempest
-## start_tempest
-## stop_tempest
-## cleanup_tempest
# Save trace setting
XTRACE=$(set +o | grep xtrace)
set +o xtrace
+
# Defaults
# --------
-# <define global variables here that belong to this project>
-
# Set up default directories
-NOVA_SOURCE_DIR=$DEST/nova
TEMPEST_DIR=$DEST/tempest
TEMPEST_CONF_DIR=$TEMPEST_DIR/etc
TEMPEST_CONF=$TEMPEST_CONF_DIR/tempest.conf
+NOVA_SOURCE_DIR=$DEST/nova
+
BUILD_INTERVAL=3
BUILD_TIMEOUT=400
+
# Entry Points
# ------------
-
# configure_tempest() - Set config files, create data dirs, etc
function configure_tempest() {
local image_lines
@@ -63,8 +60,10 @@
local flavors
local flavors_ref
local flavor_lines
+ local public_network_id
+ local tenant_networks_reachable
- #TODO(afazekas):
+ # TODO(afazekas):
# sudo python setup.py deploy
# This function exits on an error so that errors don't compound and you see
@@ -72,7 +71,7 @@
errexit=$(set +o | grep errexit)
set -o errexit
- #Save IFS
+ # Save IFS
ifs=$IFS
# Glance should already contain images to be used in tempest
@@ -153,6 +152,17 @@
flavor_ref_alt=${flavors[1]}
fi
+ if [ "$Q_USE_NAMESPACE" != "False" ]; then
+ tenant_networks_reachable=false
+ else
+ tenant_networks_reachable=true
+ fi
+
+ if is_service_enabled q-l3; then
+ public_network_id=$(quantum net-list | grep $PUBLIC_NETWORK_NAME | \
+ awk '{print $2}')
+ fi
+
# Timeouts
iniset $TEMPEST_CONF compute build_timeout $BUILD_TIMEOUT
iniset $TEMPEST_CONF volume build_timeout $BUILD_TIMEOUT
@@ -177,7 +187,7 @@
#Skip until #1074039 is fixed
iniset $TEMPEST_CONF compute run_ssh False
iniset $TEMPEST_CONF compute ssh_user ${DEFAULT_INSTANCE_USER:-$OS_USERNAME}
- iniset $TEMPEST_CONF compute network_for_ssh private
+ iniset $TEMPEST_CONF compute network_for_ssh $PRIVATE_NETWORK_NAME
iniset $TEMPEST_CONF compute ip_version_for_ssh 4
iniset $TEMPEST_CONF compute ssh_timeout 4
iniset $TEMPEST_CONF compute image_ref $image_uuid
@@ -186,7 +196,7 @@
iniset $TEMPEST_CONF compute flavor_ref_alt $flavor_ref_alt
iniset $TEMPEST_CONF compute source_dir $NOVA_SOURCE_DIR
iniset $TEMPEST_CONF compute live_migration_available ${LIVE_MIGRATION_AVAILABLE:-False}
- iniset $TEMPEST_CONF compute use_block_migration_for_live_migration ${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False}
+ iniset $TEMPEST_CONF compute use_block_migration_for_live_migration ${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False}
# Inherited behavior, might be wrong
iniset $TEMPEST_CONF compute bin_dir $NOVA_BIN_DIR
# TODO(jaypipes): Create the key file here... right now, no whitebox
@@ -205,8 +215,14 @@
# compute admin
iniset $TEMPEST_CONF "compute-admin" password "$password"
+ # network admin
+ iniset $TEMPEST_CONF "network-admin" password "$password"
+
# network
iniset $TEMPEST_CONF network api_version 2.0
+ iniset $TEMPEST_CONF network password "$password"
+ iniset $TEMPEST_CONF network tenant_networks_reachable "$tenant_networks_reachable"
+ iniset $TEMPEST_CONF network public_network_id "$public_network_id"
#boto
iniset $TEMPEST_CONF boto ec2_url "http://$SERVICE_HOST:8773/services/Cloud"
@@ -221,7 +237,6 @@
$errexit
}
-
# install_tempest() - Collect source and prepare
function install_tempest() {
git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH
diff --git a/lib/tls b/lib/tls
new file mode 100644
index 0000000..1e2a899
--- /dev/null
+++ b/lib/tls
@@ -0,0 +1,314 @@
+# lib/tls
+# Functions to control the configuration and operation of the TLS proxy service
+
+# Dependencies:
+# !! source _before_ any services that use ``SERVICE_HOST``
+# ``functions`` file
+# ``DEST``, ``DATA_DIR`` must be defined
+# ``HOST_IP``, ``SERVICE_HOST``
+# ``KEYSTONE_TOKEN_FORMAT`` must be defined
+
+# Entry points:
+# configure_CA
+# init_CA
+
+# configure_proxy
+# start_tls_proxy
+
+# make_root_CA
+# make_int_CA
+# make_cert $INT_CA_DIR int-server "abc"
+# start_tls_proxy HOST_IP 5000 localhost 5000
+
+
+if is_service_enabled tls-proxy; then
+ # TODO(dtroyer): revisit this below after the search for HOST_IP has been done
+ TLS_IP=${TLS_IP:-$SERVICE_IP}
+
+ # Set the default ``SERVICE_PROTOCOL`` for TLS
+ SERVICE_PROTOCOL=https
+fi
+
+# Make up a hostname for cert purposes
+# will be added to /etc/hosts?
+DEVSTACK_HOSTNAME=secure.devstack.org
+DEVSTACK_CERT_NAME=devstack-cert
+DEVSTACK_CERT=$DATA_DIR/$DEVSTACK_CERT_NAME.pem
+
+# CA configuration
+ROOT_CA_DIR=${ROOT_CA_DIR:-$DATA_DIR/CA/root-ca}
+INT_CA_DIR=${INT_CA_DIR:-$DATA_DIR/CA/int-ca}
+
+ORG_NAME="OpenStack"
+ORG_UNIT_NAME="DevStack"
+
+# Stud configuration
+STUD_PROTO="--tls"
+STUD_CIPHERS='TLSv1+HIGH:!DES:!aNULL:!eNULL:@STRENGTH'
+
+
+# CA Functions
+# ============
+
+# There may be more than one, get specific
+OPENSSL=${OPENSSL:-/usr/bin/openssl}
+
+# Do primary CA configuration
+function configure_CA() {
+ # build common config file
+
+ # Verify ``TLS_IP`` is good
+ if [[ -n "$HOST_IP" && "$HOST_IP" != "$TLS_IP" ]]; then
+ # auto-discover has changed the IP
+ TLS_IP=$HOST_IP
+ fi
+}
+
+# Creates a new CA directory structure
+# create_CA_base ca-dir
+function create_CA_base() {
+ local ca_dir=$1
+
+ if [[ -d $ca_dir ]]; then
+        # Bail out if it already exists
+ return 0
+ fi
+
+ for i in certs crl newcerts private; do
+ mkdir -p $ca_dir/$i
+ done
+ chmod 710 $ca_dir/private
+ echo "01" >$ca_dir/serial
+ cp /dev/null $ca_dir/index.txt
+}
+
+
+# Create a new CA configuration file
+# create_CA_config ca-dir common-name
+function create_CA_config() {
+ local ca_dir=$1
+ local common_name=$2
+
+ echo "
+[ ca ]
+default_ca = CA_default
+
+[ CA_default ]
+dir = $ca_dir
+policy = policy_match
+database = \$dir/index.txt
+serial = \$dir/serial
+certs = \$dir/certs
+crl_dir = \$dir/crl
+new_certs_dir = \$dir/newcerts
+certificate = \$dir/cacert.pem
+private_key = \$dir/private/cacert.key
+RANDFILE = \$dir/private/.rand
+default_md = default
+
+[ req ]
+default_bits = 1024
+default_md = sha1
+
+prompt = no
+distinguished_name = ca_distinguished_name
+
+x509_extensions = ca_extensions
+
+[ ca_distinguished_name ]
+organizationName = $ORG_NAME
+organizationalUnitName = $ORG_UNIT_NAME Certificate Authority
+commonName = $common_name
+
+[ policy_match ]
+countryName = optional
+stateOrProvinceName = optional
+organizationName = match
+organizationalUnitName = optional
+commonName = supplied
+
+[ ca_extensions ]
+basicConstraints = critical,CA:true
+subjectKeyIdentifier = hash
+authorityKeyIdentifier = keyid:always, issuer
+keyUsage = cRLSign, keyCertSign
+
+" >$ca_dir/ca.conf
+}
+
+# Create a new signing configuration file
+# create_signing_config ca-dir
+function create_signing_config() {
+ local ca_dir=$1
+
+ echo "
+[ ca ]
+default_ca = CA_default
+
+[ CA_default ]
+dir = $ca_dir
+policy = policy_match
+database = \$dir/index.txt
+serial = \$dir/serial
+certs = \$dir/certs
+crl_dir = \$dir/crl
+new_certs_dir = \$dir/newcerts
+certificate = \$dir/cacert.pem
+private_key = \$dir/private/cacert.key
+RANDFILE = \$dir/private/.rand
+default_md = default
+
+[ req ]
+default_bits = 1024
+default_md = sha1
+
+prompt = no
+distinguished_name = req_distinguished_name
+
+x509_extensions = req_extensions
+
+[ req_distinguished_name ]
+organizationName = $ORG_NAME
+organizationalUnitName = $ORG_UNIT_NAME Server Farm
+
+[ policy_match ]
+countryName = optional
+stateOrProvinceName = optional
+organizationName = match
+organizationalUnitName = optional
+commonName = supplied
+
+[ req_extensions ]
+basicConstraints = CA:false
+subjectKeyIdentifier = hash
+authorityKeyIdentifier = keyid:always, issuer
+keyUsage = digitalSignature, keyEncipherment, keyAgreement
+extendedKeyUsage = serverAuth, clientAuth
+subjectAltName = \$ENV::SUBJECT_ALT_NAME
+
+" >$ca_dir/signing.conf
+}
+
+# Create root and intermediate CAs and an initial server cert
+# init_CA
+function init_CA {
+ # Ensure CAs are built
+ make_root_CA $ROOT_CA_DIR
+ make_int_CA $INT_CA_DIR $ROOT_CA_DIR
+
+ # Create the CA bundle
+ cat $ROOT_CA_DIR/cacert.pem $INT_CA_DIR/cacert.pem >>$INT_CA_DIR/ca-chain.pem
+
+ if [[ ! -r $DEVSTACK_CERT ]]; then
+ if [[ -n "$TLS_IP" ]]; then
+ # Lie to let incomplete match routines work
+ TLS_IP="DNS:$TLS_IP"
+ fi
+ make_cert $INT_CA_DIR $DEVSTACK_CERT_NAME $DEVSTACK_HOSTNAME "$TLS_IP"
+
+ # Create a cert bundle
+ cat $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/cacert.pem >$DEVSTACK_CERT
+ fi
+}
+
+
+# make_cert creates and signs a new certificate with the given commonName and CA
+# make_cert ca-dir cert-name "common-name" ["alt-name" ...]
+function make_cert() {
+ local ca_dir=$1
+ local cert_name=$2
+ local common_name=$3
+ local alt_names=$4
+
+ # Generate a signing request
+ $OPENSSL req \
+ -sha1 \
+ -newkey rsa \
+ -nodes \
+ -keyout $ca_dir/private/$cert_name.key \
+ -out $ca_dir/$cert_name.csr \
+ -subj "/O=${ORG_NAME}/OU=${ORG_UNIT_NAME} Servers/CN=${common_name}"
+
+ if [[ -z "$alt_names" ]]; then
+ alt_names="DNS:${common_name}"
+ else
+ alt_names="DNS:${common_name},${alt_names}"
+ fi
+
+ # Sign the request valid for 1 year
+ SUBJECT_ALT_NAME="$alt_names" \
+ $OPENSSL ca -config $ca_dir/signing.conf \
+ -extensions req_extensions \
+ -days 365 \
+ -notext \
+ -in $ca_dir/$cert_name.csr \
+ -out $ca_dir/$cert_name.crt \
+ -subj "/O=${ORG_NAME}/OU=${ORG_UNIT_NAME} Servers/CN=${common_name}" \
+ -batch
+}
+
+
+# Make an intermediate CA to sign everything else
+# make_int_CA ca-dir signing-ca-dir
+function make_int_CA() {
+ local ca_dir=$1
+ local signing_ca_dir=$2
+
+    # Create the intermediate CA
+ create_CA_base $ca_dir
+ create_CA_config $ca_dir 'Intermediate CA'
+ create_signing_config $ca_dir
+
+ # Create a signing certificate request
+ $OPENSSL req -config $ca_dir/ca.conf \
+ -sha1 \
+ -newkey rsa \
+ -nodes \
+ -keyout $ca_dir/private/cacert.key \
+ -out $ca_dir/cacert.csr \
+ -outform PEM
+
+ # Sign the intermediate request valid for 1 year
+ $OPENSSL ca -config $signing_ca_dir/ca.conf \
+ -extensions ca_extensions \
+ -days 365 \
+ -notext \
+ -in $ca_dir/cacert.csr \
+ -out $ca_dir/cacert.pem \
+ -batch
+}
+
+# Make a root CA to sign other CAs
+# make_root_CA ca-dir
+function make_root_CA() {
+ local ca_dir=$1
+
+ # Create the root CA
+ create_CA_base $ca_dir
+ create_CA_config $ca_dir 'Root CA'
+
+    # Create a self-signed certificate valid for ~58 years (21360 days)
+ $OPENSSL req -config $ca_dir/ca.conf \
+ -x509 \
+ -nodes \
+ -newkey rsa \
+ -days 21360 \
+ -keyout $ca_dir/private/cacert.key \
+ -out $ca_dir/cacert.pem \
+ -outform PEM
+}
+
+
+# Proxy Functions
+# ===============
+
+# Starts the TLS proxy for the given IP/ports
+# start_tls_proxy front-host front-port back-host back-port
+function start_tls_proxy() {
+ local f_host=$1
+ local f_port=$2
+ local b_host=$3
+ local b_port=$4
+
+ stud $STUD_PROTO -f $f_host,$f_port -b $b_host,$b_port $DEVSTACK_CERT 2>/dev/null
+}
diff --git a/openrc b/openrc
index 08ef98b..3ef44fd 100644
--- a/openrc
+++ b/openrc
@@ -26,6 +26,14 @@
# Load local configuration
source $RC_DIR/stackrc
+# Load the last env variables if available
+if [[ -r $TOP_DIR/.stackenv ]]; then
+ source $TOP_DIR/.stackenv
+fi
+
+# Get some necessary configuration
+source $RC_DIR/lib/tls
+
# The introduction of Keystone to the OpenStack ecosystem has standardized the
# term **tenant** as the entity that owns resources. In some places references
# still exist to the original Nova term **project** for this use. Also,
@@ -49,6 +57,7 @@
# which is convenient for some localrc configurations.
HOST_IP=${HOST_IP:-127.0.0.1}
SERVICE_HOST=${SERVICE_HOST:-$HOST_IP}
+SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http}
# Some exercises call glance directly. On a single-node installation, Glance
# should be listening on HOST_IP. If its running elsewhere, it can be set here
@@ -61,7 +70,10 @@
#
# *NOTE*: Using the 2.0 *identity api* does not mean that compute api is 2.0. We
# will use the 1.1 *compute api*
-export OS_AUTH_URL=http://$SERVICE_HOST:5000/v2.0
+export OS_AUTH_URL=$SERVICE_PROTOCOL://$SERVICE_HOST:5000/v2.0
+
+# Set the pointer to our CA certificate chain. Harmless if TLS is not used.
+export OS_CACERT=$INT_CA_DIR/ca-chain.pem
# Currently novaclient needs you to specify the *compute api* version. This
# needs to match the config of your catalog returned by Keystone.
diff --git a/stack.sh b/stack.sh
index c8b8db4..7306b58 100755
--- a/stack.sh
+++ b/stack.sh
@@ -90,6 +90,11 @@
# Sanity Check
# ============
+# Clean up last environment var cache
+if [[ -r $TOP_DIR/.stackenv ]]; then
+ rm $TOP_DIR/.stackenv
+fi
+
# Import database configuration
source $TOP_DIR/lib/database
@@ -105,7 +110,7 @@
# Warn users who aren't on an explicitly supported distro, but allow them to
# override check and attempt installation with ``FORCE=yes ./stack``
-if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|f16|f17|f18) ]]; then
+if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|f16|f17|f18|opensuse-12.2) ]]; then
echo "WARNING: this script has not been tested on $DISTRO"
if [[ "$FORCE" != "yes" ]]; then
echo "If you wish to run this script anyway run with FORCE=yes"
@@ -288,6 +293,7 @@
# Allow the use of an alternate hostname (such as localhost/127.0.0.1) for service endpoints.
SERVICE_HOST=${SERVICE_HOST:-$HOST_IP}
+SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http}
# Configure services to use syslog instead of writing to individual log files
SYSLOG=`trueorfalse False $SYSLOG`
@@ -305,6 +311,7 @@
# ==================
# Get project function libraries
+source $TOP_DIR/lib/tls
source $TOP_DIR/lib/horizon
source $TOP_DIR/lib/keystone
source $TOP_DIR/lib/glance
@@ -315,6 +322,7 @@
source $TOP_DIR/lib/heat
source $TOP_DIR/lib/quantum
source $TOP_DIR/lib/tempest
+source $TOP_DIR/lib/baremetal
# Set the destination directories for OpenStack projects
HORIZON_DIR=$DEST/horizon
@@ -322,18 +330,6 @@
NOVNC_DIR=$DEST/noVNC
SWIFT3_DIR=$DEST/swift3
-RYU_DIR=$DEST/ryu
-# Ryu API Host
-RYU_API_HOST=${RYU_API_HOST:-127.0.0.1}
-# Ryu API Port
-RYU_API_PORT=${RYU_API_PORT:-8080}
-# Ryu OFP Host
-RYU_OFP_HOST=${RYU_OFP_HOST:-127.0.0.1}
-# Ryu OFP Port
-RYU_OFP_PORT=${RYU_OFP_PORT:-6633}
-# Ryu Applications
-RYU_APPS=${RYU_APPS:-ryu.app.simple_isolation,ryu.app.rest}
-
# Should cinder perform secure deletion of volumes?
# Defaults to true, can be set to False to avoid this bug when testing:
# https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1023755
@@ -398,6 +394,13 @@
# Allow ``build_domU.sh`` to specify the flat network bridge via kernel args
FLAT_NETWORK_BRIDGE_DEFAULT=$(grep -o 'flat_network_bridge=[[:alnum:]]*' /proc/cmdline | cut -d= -f 2 | sort -u)
GUEST_INTERFACE_DEFAULT=eth1
+elif [ "$VIRT_DRIVER" = 'baremetal' ]; then
+ PUBLIC_INTERFACE_DEFAULT=eth0
+ FLAT_NETWORK_BRIDGE_DEFAULT=br100
+ FLAT_INTERFACE=${FLAT_INTERFACE:-eth0}
+ FORCE_DHCP_RELEASE=${FORCE_DHCP_RELEASE:-False}
+ NET_MAN=${NET_MAN:-FlatManager}
+ STUB_NETWORK=${STUB_NETWORK:-False}
else
PUBLIC_INTERFACE_DEFAULT=br100
FLAT_NETWORK_BRIDGE_DEFAULT=br100
@@ -409,6 +412,7 @@
EC2_DMZ_HOST=${EC2_DMZ_HOST:-$SERVICE_HOST}
FLAT_NETWORK_BRIDGE=${FLAT_NETWORK_BRIDGE:-$FLAT_NETWORK_BRIDGE_DEFAULT}
VLAN_INTERFACE=${VLAN_INTERFACE:-$GUEST_INTERFACE_DEFAULT}
+FORCE_DHCP_RELEASE=${FORCE_DHCP_RELEASE:-True}
# Test floating pool and range are used for testing. They are defined
# here until the admin APIs can replace nova-manage
@@ -535,9 +539,9 @@
# Set ``LOGFILE`` to turn on logging
# Append '.xxxxxxxx' to the given name to maintain history
# where 'xxxxxxxx' is a representation of the date the file was created
+TIMESTAMP_FORMAT=${TIMESTAMP_FORMAT:-"%F-%H%M%S"}
if [[ -n "$LOGFILE" || -n "$SCREEN_LOGDIR" ]]; then
LOGDAYS=${LOGDAYS:-7}
- TIMESTAMP_FORMAT=${TIMESTAMP_FORMAT:-"%F-%H%M%S"}
CURRENT_LOG_TIME=$(date "+$TIMESTAMP_FORMAT")
fi
@@ -678,21 +682,7 @@
fi
if is_service_enabled q-agt; then
- if is_quantum_ovs_base_plugin "$Q_PLUGIN"; then
- # Install deps
- # FIXME add to ``files/apts/quantum``, but don't install if not needed!
- if is_ubuntu; then
- kernel_version=`cat /proc/version | cut -d " " -f3`
- install_package make fakeroot dkms openvswitch-switch openvswitch-datapath-dkms linux-headers-$kernel_version
- else
- ### FIXME(dtroyer): Find RPMs for OpenVSwitch
- echo "OpenVSwitch packages need to be located"
- # Fedora does not started OVS by default
- restart_service openvswitch
- fi
- elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
- install_package bridge-utils
- fi
+ install_quantum_agent_packages
fi
TRACK_DEPENDS=${TRACK_DEPENDS:-False}
@@ -753,11 +743,9 @@
install_horizon
fi
if is_service_enabled quantum; then
- git_clone $QUANTUMCLIENT_REPO $QUANTUMCLIENT_DIR $QUANTUMCLIENT_BRANCH
-fi
-if is_service_enabled quantum; then
- # quantum
- git_clone $QUANTUM_REPO $QUANTUM_DIR $QUANTUM_BRANCH
+ install_quantum
+ install_quantumclient
+ install_quantum_third_party
fi
if is_service_enabled heat; then
install_heat
@@ -772,9 +760,6 @@
if is_service_enabled tempest; then
install_tempest
fi
-if is_service_enabled ryu || (is_service_enabled quantum && [[ "$Q_PLUGIN" = "ryu" ]]); then
- git_clone $RYU_REPO $RYU_DIR $RYU_BRANCH
-fi
# Initialization
@@ -812,8 +797,8 @@
configure_horizon
fi
if is_service_enabled quantum; then
- setup_develop $QUANTUMCLIENT_DIR
- setup_develop $QUANTUM_DIR
+ setup_quantumclient
+ setup_quantum
fi
if is_service_enabled heat; then
configure_heat
@@ -822,9 +807,6 @@
if is_service_enabled cinder; then
configure_cinder
fi
-if is_service_enabled ryu || (is_service_enabled quantum && [[ "$Q_PLUGIN" = "ryu" ]]); then
- setup_develop $RYU_DIR
-fi
if [[ $TRACK_DEPENDS = True ]] ; then
$DEST/.venv/bin/pip freeze > $DEST/requires-post-pip
@@ -835,6 +817,12 @@
exit 0
fi
+if is_service_enabled tls-proxy; then
+ configure_CA
+ init_CA
+ # Add name to /etc/hosts
+ # don't be naive and add to existing line!
+fi
# Syslog
# ------
@@ -905,24 +893,33 @@
# Set a reasonable status bar
screen -r $SCREEN_NAME -X hardstatus alwayslastline "$SCREEN_HARDSTATUS"
+# Initialize the directory for service status check
+init_service_check
# Keystone
# --------
if is_service_enabled key; then
echo_summary "Starting Keystone"
- configure_keystone
init_keystone
start_keystone
# Set up a temporary admin URI for Keystone
- SERVICE_ENDPOINT=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0
+ SERVICE_ENDPOINT=$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0
+
+ if is_service_enabled tls-proxy; then
+ export OS_CACERT=$INT_CA_DIR/ca-chain.pem
+ # Until the client support is fixed, just use the internal endpoint
+ SERVICE_ENDPOINT=http://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT_INT/v2.0
+ fi
# Do the keystone-specific bits from keystone_data.sh
export OS_SERVICE_TOKEN=$SERVICE_TOKEN
export OS_SERVICE_ENDPOINT=$SERVICE_ENDPOINT
create_keystone_accounts
create_nova_accounts
+ create_cinder_accounts
+ create_quantum_accounts
# ``keystone_data.sh`` creates services, admin and demo users, and roles.
ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD \
@@ -972,392 +969,22 @@
fi
-# Ryu
-# ---
-
-# Ryu is not a part of OpenStack project. Please ignore following block if
-# you are not interested in Ryu.
-# launch ryu manager
-if is_service_enabled ryu; then
- RYU_CONF_DIR=/etc/ryu
- if [[ ! -d $RYU_CONF_DIR ]]; then
- sudo mkdir -p $RYU_CONF_DIR
- fi
- sudo chown `whoami` $RYU_CONF_DIR
- RYU_CONF=$RYU_CONF_DIR/ryu.conf
- sudo rm -rf $RYU_CONF
-
- cat <<EOF > $RYU_CONF
---app_lists=$RYU_APPS
---wsapi_host=$RYU_API_HOST
---wsapi_port=$RYU_API_PORT
---ofp_listen_host=$RYU_OFP_HOST
---ofp_tcp_listen_port=$RYU_OFP_PORT
-EOF
- screen_it ryu "cd $RYU_DIR && $RYU_DIR/bin/ryu-manager --flagfile $RYU_CONF"
-fi
-
-
# Quantum
# -------
-# Quantum Network Configuration
if is_service_enabled quantum; then
echo_summary "Configuring Quantum"
- # The following variables control the Quantum openvswitch and
- # linuxbridge plugins' allocation of tenant networks and
- # availability of provider networks. If these are not configured
- # in localrc, tenant networks will be local to the host (with no
- # remote connectivity), and no physical resources will be
- # available for the allocation of provider networks.
-
- # To use GRE tunnels for tenant networks, set to True in
- # localrc. GRE tunnels are only supported by the openvswitch
- # plugin, and currently only on Ubuntu.
- ENABLE_TENANT_TUNNELS=${ENABLE_TENANT_TUNNELS:-False}
-
- # If using GRE tunnels for tenant networks, specify the range of
- # tunnel IDs from which tenant networks are allocated. Can be
- # overriden in localrc in necesssary.
- TENANT_TUNNEL_RANGES=${TENANT_TUNNEL_RANGE:-1:1000}
-
- # To use VLANs for tenant networks, set to True in localrc. VLANs
- # are supported by the openvswitch and linuxbridge plugins, each
- # requiring additional configuration described below.
- ENABLE_TENANT_VLANS=${ENABLE_TENANT_VLANS:-False}
-
- # If using VLANs for tenant networks, set in localrc to specify
- # the range of VLAN VIDs from which tenant networks are
- # allocated. An external network switch must be configured to
- # trunk these VLANs between hosts for multi-host connectivity.
- #
- # Example: ``TENANT_VLAN_RANGE=1000:1999``
- TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-}
-
- # If using VLANs for tenant networks, or if using flat or VLAN
- # provider networks, set in localrc to the name of the physical
- # network, and also configure OVS_PHYSICAL_BRIDGE for the
- # openvswitch agent or LB_PHYSICAL_INTERFACE for the linuxbridge
- # agent, as described below.
- #
- # Example: ``PHYSICAL_NETWORK=default``
- PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-}
-
- # With the openvswitch plugin, if using VLANs for tenant networks,
- # or if using flat or VLAN provider networks, set in localrc to
- # the name of the OVS bridge to use for the physical network. The
- # bridge will be created if it does not already exist, but a
- # physical interface must be manually added to the bridge as a
- # port for external connectivity.
- #
- # Example: ``OVS_PHYSICAL_BRIDGE=br-eth1``
- OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-}
-
- # With the linuxbridge plugin, if using VLANs for tenant networks,
- # or if using flat or VLAN provider networks, set in localrc to
- # the name of the network interface to use for the physical
- # network.
- #
- # Example: ``LB_PHYSICAL_INTERFACE=eth1``
- LB_PHYSICAL_INTERFACE=${LB_PHYSICAL_INTERFACE:-}
-
- # With the openvswitch plugin, set to True in localrc to enable
- # provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False.
- #
- # Example: ``OVS_ENABLE_TUNNELING=True``
- OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-$ENABLE_TENANT_TUNNELS}
-
- # Put config files in ``QUANTUM_CONF_DIR`` for everyone to find
- if [[ ! -d $QUANTUM_CONF_DIR ]]; then
- sudo mkdir -p $QUANTUM_CONF_DIR
- fi
- sudo chown `whoami` $QUANTUM_CONF_DIR
-
- if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
- Q_PLUGIN_CONF_PATH=etc/quantum/plugins/openvswitch
- Q_PLUGIN_CONF_FILENAME=ovs_quantum_plugin.ini
- Q_DB_NAME="ovs_quantum"
- Q_PLUGIN_CLASS="quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPluginV2"
- elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
- Q_PLUGIN_CONF_PATH=etc/quantum/plugins/linuxbridge
- Q_PLUGIN_CONF_FILENAME=linuxbridge_conf.ini
- Q_DB_NAME="quantum_linux_bridge"
- Q_PLUGIN_CLASS="quantum.plugins.linuxbridge.lb_quantum_plugin.LinuxBridgePluginV2"
- elif [[ "$Q_PLUGIN" = "ryu" ]]; then
- Q_PLUGIN_CONF_PATH=etc/quantum/plugins/ryu
- Q_PLUGIN_CONF_FILENAME=ryu.ini
- Q_DB_NAME="ovs_quantum"
- Q_PLUGIN_CLASS="quantum.plugins.ryu.ryu_quantum_plugin.RyuQuantumPluginV2"
- fi
-
- if [[ $Q_PLUGIN_CONF_PATH == '' || $Q_PLUGIN_CONF_FILENAME == '' || $Q_PLUGIN_CLASS == '' ]]; then
- echo "Quantum plugin not set.. exiting"
- exit 1
- fi
-
- # If needed, move config file from ``$QUANTUM_DIR/etc/quantum`` to ``QUANTUM_CONF_DIR``
- mkdir -p /$Q_PLUGIN_CONF_PATH
- Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME
- cp $QUANTUM_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE
-
- database_connection_url dburl $Q_DB_NAME
- iniset /$Q_PLUGIN_CONF_FILE DATABASE sql_connection $dburl
- unset dburl
-
- cp $QUANTUM_DIR/etc/quantum.conf $QUANTUM_CONF
- configure_quantum_rootwrap
+ configure_quantum
+ init_quantum
fi
-# Quantum service (for controller node)
-if is_service_enabled q-svc; then
- Q_API_PASTE_FILE=$QUANTUM_CONF_DIR/api-paste.ini
- Q_POLICY_FILE=$QUANTUM_CONF_DIR/policy.json
-
- cp $QUANTUM_DIR/etc/api-paste.ini $Q_API_PASTE_FILE
- cp $QUANTUM_DIR/etc/policy.json $Q_POLICY_FILE
-
- if is_service_enabled $DATABASE_BACKENDS; then
- recreate_database $Q_DB_NAME utf8
- else
- echo "A database must be enabled in order to use the $Q_PLUGIN Quantum plugin."
- exit 1
- fi
-
- # Update either configuration file with plugin
- iniset $QUANTUM_CONF DEFAULT core_plugin $Q_PLUGIN_CLASS
-
- iniset $QUANTUM_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY
- quantum_setup_keystone $Q_API_PASTE_FILE filter:authtoken
-
- # Configure plugin
- if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
- if [[ "$ENABLE_TENANT_TUNNELS" = "True" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE OVS tenant_network_type gre
- iniset /$Q_PLUGIN_CONF_FILE OVS tunnel_id_ranges $TENANT_TUNNEL_RANGES
- elif [[ "$ENABLE_TENANT_VLANS" = "True" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE OVS tenant_network_type vlan
- else
- echo "WARNING - The openvswitch plugin is using local tenant networks, with no connectivity between hosts."
- fi
-
- # Override ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc``
- # for more complex physical network configurations.
- if [[ "$OVS_VLAN_RANGES" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]]; then
- OVS_VLAN_RANGES=$PHYSICAL_NETWORK
- if [[ "$TENANT_VLAN_RANGE" != "" ]]; then
- OVS_VLAN_RANGES=$OVS_VLAN_RANGES:$TENANT_VLAN_RANGE
- fi
- fi
- if [[ "$OVS_VLAN_RANGES" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE OVS network_vlan_ranges $OVS_VLAN_RANGES
- fi
-
- # Enable tunnel networks if selected
- if [[ $OVS_ENABLE_TUNNELING = "True" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE OVS enable_tunneling True
- fi
- elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
- if [[ "$ENABLE_TENANT_VLANS" = "True" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE VLANS tenant_network_type vlan
- else
- echo "WARNING - The linuxbridge plugin is using local tenant networks, with no connectivity between hosts."
- fi
-
- # Override ``LB_VLAN_RANGES`` and ``LB_INTERFACE_MAPPINGS`` in ``localrc``
- # for more complex physical network configurations.
- if [[ "$LB_VLAN_RANGES" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]]; then
- LB_VLAN_RANGES=$PHYSICAL_NETWORK
- if [[ "$TENANT_VLAN_RANGE" != "" ]]; then
- LB_VLAN_RANGES=$LB_VLAN_RANGES:$TENANT_VLAN_RANGE
- fi
- fi
- if [[ "$LB_VLAN_RANGES" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE VLANS network_vlan_ranges $LB_VLAN_RANGES
- fi
- elif [[ "$Q_PLUGIN" = "ryu" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE OVS openflow_controller $RYU_OFP_HOST:$RYU_OFP_PORT
- iniset /$Q_PLUGIN_CONF_FILE OVS openflow_rest_api $RYU_API_HOST:$RYU_API_PORT
- fi
-fi
-
-# Quantum agent (for compute nodes)
-if is_service_enabled q-agt; then
- # Configure agent for plugin
- if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
- # Setup integration bridge
- OVS_BRIDGE=${OVS_BRIDGE:-br-int}
- quantum_setup_ovs_bridge $OVS_BRIDGE
-
- # Setup agent for tunneling
- if [[ "$OVS_ENABLE_TUNNELING" = "True" ]]; then
- # Verify tunnels are supported
- # REVISIT - also check kernel module support for GRE and patch ports
- OVS_VERSION=`ovs-vsctl --version | head -n 1 | awk '{print $4;}'`
- if [ $OVS_VERSION \< "1.4" ] && ! is_service_enabled q-svc ; then
- echo "You are running OVS version $OVS_VERSION."
- echo "OVS 1.4+ is required for tunneling between multiple hosts."
- exit 1
- fi
- iniset /$Q_PLUGIN_CONF_FILE OVS enable_tunneling True
- iniset /$Q_PLUGIN_CONF_FILE OVS local_ip $HOST_IP
- fi
-
- # Setup physical network bridge mappings. Override
- # ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc`` for more
- # complex physical network configurations.
- if [[ "$OVS_BRIDGE_MAPPINGS" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$OVS_PHYSICAL_BRIDGE" != "" ]]; then
- OVS_BRIDGE_MAPPINGS=$PHYSICAL_NETWORK:$OVS_PHYSICAL_BRIDGE
-
- # Configure bridge manually with physical interface as port for multi-node
- sudo ovs-vsctl --no-wait -- --may-exist add-br $OVS_PHYSICAL_BRIDGE
- fi
- if [[ "$OVS_BRIDGE_MAPPINGS" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE OVS bridge_mappings $OVS_BRIDGE_MAPPINGS
- fi
- AGENT_BINARY="$QUANTUM_DIR/bin/quantum-openvswitch-agent"
- elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
- # Setup physical network interface mappings. Override
- # ``LB_VLAN_RANGES`` and ``LB_INTERFACE_MAPPINGS`` in ``localrc`` for more
- # complex physical network configurations.
- if [[ "$LB_INTERFACE_MAPPINGS" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$LB_PHYSICAL_INTERFACE" != "" ]]; then
- LB_INTERFACE_MAPPINGS=$PHYSICAL_NETWORK:$LB_PHYSICAL_INTERFACE
- fi
- if [[ "$LB_INTERFACE_MAPPINGS" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE LINUX_BRIDGE physical_interface_mappings $LB_INTERFACE_MAPPINGS
- fi
- AGENT_BINARY="$QUANTUM_DIR/bin/quantum-linuxbridge-agent"
- elif [[ "$Q_PLUGIN" = "ryu" ]]; then
- # Set up integration bridge
- OVS_BRIDGE=${OVS_BRIDGE:-br-int}
- quantum_setup_ovs_bridge $OVS_BRIDGE
- if [ -n "$RYU_INTERNAL_INTERFACE" ]; then
- sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_BRIDGE $RYU_INTERNAL_INTERFACE
- fi
- AGENT_BINARY="$QUANTUM_DIR/quantum/plugins/ryu/agent/ryu_quantum_agent.py"
- fi
- # Update config w/rootwrap
- iniset /$Q_PLUGIN_CONF_FILE AGENT root_helper "$Q_RR_COMMAND"
-fi
-
-# Quantum DHCP
-if is_service_enabled q-dhcp; then
- AGENT_DHCP_BINARY="$QUANTUM_DIR/bin/quantum-dhcp-agent"
-
- Q_DHCP_CONF_FILE=$QUANTUM_CONF_DIR/dhcp_agent.ini
-
- cp $QUANTUM_DIR/etc/dhcp_agent.ini $Q_DHCP_CONF_FILE
-
- # Set verbose
- iniset $Q_DHCP_CONF_FILE DEFAULT verbose True
- # Set debug
- iniset $Q_DHCP_CONF_FILE DEFAULT debug True
- iniset $Q_DHCP_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE
- iniset $Q_DHCP_CONF_FILE DEFAULT state_path $DATA_DIR/quantum
-
- quantum_setup_keystone $Q_DHCP_CONF_FILE DEFAULT set_auth_url
-
- # Update config w/rootwrap
- iniset $Q_DHCP_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND"
-
- if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
- iniset $Q_DHCP_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver
- elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
- iniset $Q_DHCP_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver
- elif [[ "$Q_PLUGIN" = "ryu" ]]; then
- iniset $Q_DHCP_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.RyuInterfaceDriver
- iniset $Q_DHCP_CONF_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT
- fi
-fi
-
-# Quantum L3
-if is_service_enabled q-l3; then
- AGENT_L3_BINARY="$QUANTUM_DIR/bin/quantum-l3-agent"
- PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex}
- Q_L3_CONF_FILE=$QUANTUM_CONF_DIR/l3_agent.ini
-
- cp $QUANTUM_DIR/etc/l3_agent.ini $Q_L3_CONF_FILE
-
- # Set verbose
- iniset $Q_L3_CONF_FILE DEFAULT verbose True
- # Set debug
- iniset $Q_L3_CONF_FILE DEFAULT debug True
-
- iniset $Q_L3_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE
-
- iniset $Q_L3_CONF_FILE DEFAULT state_path $DATA_DIR/quantum
-
- iniset $Q_L3_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND"
-
- quantum_setup_keystone $Q_L3_CONF_FILE DEFAULT set_auth_url
- if [[ "$Q_PLUGIN" == "openvswitch" ]]; then
- iniset $Q_L3_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver
- iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE
- # Set up external bridge
- quantum_setup_external_bridge $PUBLIC_BRIDGE
- elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
- iniset $Q_L3_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver
- iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge ''
- elif [[ "$Q_PLUGIN" = "ryu" ]]; then
- iniset $Q_L3_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.RyuInterfaceDriver
- iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE
- iniset $Q_L3_CONF_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT
- # Set up external bridge
- quantum_setup_external_bridge $PUBLIC_BRIDGE
- fi
-fi
-
-#Quantum Metadata
-if is_service_enabled q-meta; then
- AGENT_META_BINARY="$QUANTUM_DIR/bin/quantum-metadata-agent"
- Q_META_CONF_FILE=$QUANTUM_CONF_DIR/metadata_agent.ini
-
- cp $QUANTUM_DIR/etc/metadata_agent.ini $Q_META_CONF_FILE
-
- # Set verbose
- iniset $Q_META_CONF_FILE DEFAULT verbose True
- # Set debug
- iniset $Q_META_CONF_FILE DEFAULT debug True
-
- iniset $Q_META_CONF_FILE DEFAULT state_path $DATA_DIR/quantum
-
- iniset $Q_META_CONF_FILE DEFAULT nova_metadata_ip $Q_META_DATA_IP
-
- iniset $Q_META_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND"
-
- quantum_setup_keystone $Q_META_CONF_FILE DEFAULT set_auth_url
-fi
-
-# Quantum RPC support - must be updated prior to starting any of the services
+# Some Quantum plugins require network controllers which are not
+# a part of the OpenStack project. Configure and start them.
if is_service_enabled quantum; then
- iniset $QUANTUM_CONF DEFAULT control_exchange quantum
- if is_service_enabled qpid ; then
- iniset $QUANTUM_CONF DEFAULT rpc_backend quantum.openstack.common.rpc.impl_qpid
- elif is_service_enabled zeromq; then
- iniset $QUANTUM_CONF DEFAULT rpc_backend quantum.openstack.common.rpc.impl_zmq
- elif [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then
- iniset $QUANTUM_CONF DEFAULT rabbit_host $RABBIT_HOST
- iniset $QUANTUM_CONF DEFAULT rabbit_password $RABBIT_PASSWORD
- fi
- if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then
- cp $QUANTUM_DIR/etc/l3_agent.ini $QUANTUM_TEST_CONFIG_FILE
- iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT verbose False
- iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT debug False
- iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE
- iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT root_helper "$Q_RR_COMMAND"
- quantum_setup_keystone $QUANTUM_TEST_CONFIG_FILE DEFAULT set_auth_url
- if [[ "$Q_PLUGIN" == "openvswitch" ]]; then
- iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver
- iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE
- elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
- iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver
- iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT external_network_bridge ''
- elif [[ "$Q_PLUGIN" = "ryu" ]]; then
- iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT interface_driver quantum.agent.linux.interface.RyuInterfaceDriver
- iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE
- iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT
- fi
- fi
+ configure_quantum_third_party
+ init_quantum_third_party
+ start_quantum_third_party
fi
@@ -1373,9 +1000,9 @@
# Delete traces of nova networks from prior runs
sudo killall dnsmasq || true
clean_iptables
- rm -rf $NOVA_STATE_PATH/networks
- mkdir -p $NOVA_STATE_PATH/networks
-
+ rm -rf ${NOVA_STATE_PATH}/networks
+ sudo mkdir -p ${NOVA_STATE_PATH}/networks
+ sudo chown -R ${USER} ${NOVA_STATE_PATH}/networks
# Force IP forwarding on, just on case
sudo sysctl -w net.ipv4.ip_forward=1
fi
@@ -1406,37 +1033,9 @@
# Additional Nova configuration that is dependent on other services
if is_service_enabled quantum; then
- add_nova_opt "network_api_class=nova.network.quantumv2.api.API"
- add_nova_opt "quantum_admin_username=$Q_ADMIN_USERNAME"
- add_nova_opt "quantum_admin_password=$SERVICE_PASSWORD"
- add_nova_opt "quantum_admin_auth_url=$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0"
- add_nova_opt "quantum_auth_strategy=$Q_AUTH_STRATEGY"
- add_nova_opt "quantum_admin_tenant_name=$SERVICE_TENANT_NAME"
- add_nova_opt "quantum_url=http://$Q_HOST:$Q_PORT"
-
- if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
- NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver"}
- elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
- NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.QuantumLinuxBridgeVIFDriver"}
- elif [[ "$Q_PLUGIN" = "ryu" ]]; then
- NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"quantum.plugins.ryu.nova.vif.LibvirtOpenVswitchOFPRyuDriver"}
- add_nova_opt "libvirt_ovs_integration_bridge=$OVS_BRIDGE"
- add_nova_opt "linuxnet_ovs_ryu_api_host=$RYU_API_HOST:$RYU_API_PORT"
- add_nova_opt "libvirt_ovs_ryu_api_host=$RYU_API_HOST:$RYU_API_PORT"
- fi
- add_nova_opt "libvirt_vif_driver=$NOVA_VIF_DRIVER"
- add_nova_opt "linuxnet_interface_driver=$LINUXNET_VIF_DRIVER"
- if is_service_enabled q-meta; then
- add_nova_opt "service_quantum_metadata_proxy=True"
- fi
+ create_nova_conf_quantum
elif is_service_enabled n-net; then
- add_nova_opt "network_manager=nova.network.manager.$NET_MAN"
- add_nova_opt "public_interface=$PUBLIC_INTERFACE"
- add_nova_opt "vlan_interface=$VLAN_INTERFACE"
- add_nova_opt "flat_network_bridge=$FLAT_NETWORK_BRIDGE"
- if [ -n "$FLAT_INTERFACE" ]; then
- add_nova_opt "flat_interface=$FLAT_INTERFACE"
- fi
+ create_nova_conf_nova_network
fi
# All nova-compute workers need to know the vnc configuration options
# These settings don't hurt anything if n-xvnc and n-novnc are disabled
@@ -1484,6 +1083,10 @@
# Need to avoid crash due to new firewall support
XEN_FIREWALL_DRIVER=${XEN_FIREWALL_DRIVER:-"nova.virt.firewall.IptablesFirewallDriver"}
add_nova_opt "firewall_driver=$XEN_FIREWALL_DRIVER"
+
+ # OpenVZ
+ # ------
+
elif [ "$VIRT_DRIVER" = 'openvz' ]; then
echo_summary "Using OpenVZ virtualization driver"
# TODO(deva): OpenVZ driver does not yet work if compute_driver is set here.
@@ -1492,6 +1095,25 @@
add_nova_opt "connection_type=openvz"
LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"}
add_nova_opt "firewall_driver=$LIBVIRT_FIREWALL_DRIVER"
+
+ # Bare Metal
+ # ----------
+
+ elif [ "$VIRT_DRIVER" = 'baremetal' ]; then
+ echo_summary "Using BareMetal driver"
+ add_nova_opt "compute_driver=nova.virt.baremetal.driver.BareMetalDriver"
+ LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.firewall.NoopFirewallDriver"}
+ add_nova_opt "firewall_driver=$LIBVIRT_FIREWALL_DRIVER"
+ add_nova_opt "baremetal_driver=$BM_DRIVER"
+ add_nova_opt "baremetal_tftp_root=/tftpboot"
+ add_nova_opt "baremetal_instance_type_extra_specs=cpu_arch:$BM_CPU_ARCH"
+ add_nova_opt "baremetal_power_manager=$BM_POWER_MANAGER"
+ add_nova_opt "scheduler_host_manager=nova.scheduler.baremetal_host_manager.BaremetalHostManager"
+ add_nova_opt "scheduler_default_filters=AllHostsFilter"
+
+ # Default
+ # -------
+
else
echo_summary "Using libvirt virtualization driver"
add_nova_opt "compute_driver=libvirt.LibvirtDriver"
@@ -1500,6 +1122,15 @@
fi
fi
+# Extra things to prepare nova for baremetal, before nova starts
+if is_service_enabled nova && is_baremetal; then
+ echo_summary "Preparing for nova baremetal"
+ prepare_baremetal_toolchain
+ configure_baremetal_nova_dirs
+ if [[ "$BM_USE_FAKE_ENV" = "True" ]]; then
+ create_fake_baremetal_env
+ fi
+fi
# Launch Services
# ===============
@@ -1535,74 +1166,29 @@
# Launch the nova-api and wait for it to answer before continuing
if is_service_enabled n-api; then
echo_summary "Starting Nova API"
- screen_it n-api "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api"
- echo "Waiting for nova-api to start..."
- if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://127.0.0.1:8774; do sleep 1; done"; then
- echo "nova-api did not start"
- exit 1
- fi
+ start_nova_api
fi
if is_service_enabled q-svc; then
echo_summary "Starting Quantum"
- # Start the Quantum service
- screen_it q-svc "cd $QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server --config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE"
- echo "Waiting for Quantum to start..."
- if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://127.0.0.1:9696; do sleep 1; done"; then
- echo "Quantum did not start"
- exit 1
- fi
- # Configure Quantum elements
- # Configure internal network & subnet
-
- TENANT_ID=$(keystone tenant-list | grep " demo " | get_field 1)
-
- # Create a small network
- # Since quantum command is executed in admin context at this point,
- # ``--tenant_id`` needs to be specified.
- NET_ID=$(quantum net-create --tenant_id $TENANT_ID "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2)
- SUBNET_ID=$(quantum subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2)
- if is_service_enabled q-l3; then
- # Create a router, and add the private subnet as one of its interfaces
- ROUTER_ID=$(quantum router-create --tenant_id $TENANT_ID router1 | grep ' id ' | get_field 2)
- quantum router-interface-add $ROUTER_ID $SUBNET_ID
- # Create an external network, and a subnet. Configure the external network as router gw
- EXT_NET_ID=$(quantum net-create "$PUBLIC_NETWORK_NAME" -- --router:external=True | grep ' id ' | get_field 2)
- EXT_GW_IP=$(quantum subnet-create --ip_version 4 $EXT_NET_ID $FLOATING_RANGE -- --enable_dhcp=False | grep 'gateway_ip' | get_field 2)
- quantum router-gateway-set $ROUTER_ID $EXT_NET_ID
- if is_quantum_ovs_base_plugin "$Q_PLUGIN" && [[ "$Q_USE_NAMESPACE" = "True" ]]; then
- CIDR_LEN=${FLOATING_RANGE#*/}
- sudo ip addr add $EXT_GW_IP/$CIDR_LEN dev $PUBLIC_BRIDGE
- sudo ip link set $PUBLIC_BRIDGE up
- ROUTER_GW_IP=`quantum port-list -c fixed_ips -c device_owner | grep router_gateway | awk -F '"' '{ print $8; }'`
- sudo route add -net $FIXED_RANGE gw $ROUTER_GW_IP
- fi
- if [[ "$Q_USE_NAMESPACE" == "False" ]]; then
- # Explicitly set router id in l3 agent configuration
- iniset $Q_L3_CONF_FILE DEFAULT router_id $ROUTER_ID
- fi
- fi
- if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then
- setup_quantum
- fi
+ start_quantum_service_and_check
+ create_quantum_initial_network
+ setup_quantum_debug
elif is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-net; then
# Create a small network
$NOVA_BIN_DIR/nova-manage network create "$PRIVATE_NETWORK_NAME" $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS
# Create some floating ips
- $NOVA_BIN_DIR/nova-manage floating create $FLOATING_RANGE --pool=$PUBLIC_NETWORK
+ $NOVA_BIN_DIR/nova-manage floating create $FLOATING_RANGE --pool=$PUBLIC_NETWORK_NAME
# Create a second pool
$NOVA_BIN_DIR/nova-manage floating create --ip_range=$TEST_FLOATING_RANGE --pool=$TEST_FLOATING_POOL
fi
-# Start up the quantum agents if enabled
-screen_it q-agt "python $AGENT_BINARY --config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE"
-screen_it q-dhcp "python $AGENT_DHCP_BINARY --config-file $QUANTUM_CONF --config-file=$Q_DHCP_CONF_FILE"
-screen_it q-meta "python $AGENT_META_BINARY --config-file $QUANTUM_CONF --config-file=$Q_META_CONF_FILE"
-screen_it q-l3 "python $AGENT_L3_BINARY --config-file $QUANTUM_CONF --config-file=$Q_L3_CONF_FILE"
-
+if is_service_enabled quantum; then
+ start_quantum_agents
+fi
if is_service_enabled nova; then
echo_summary "Starting Nova"
start_nova
@@ -1633,6 +1219,17 @@
start_heat
fi
+# Create account rc files
+# =======================
+
+# Creates sourceable script files for easier user switching.
+# This step also creates certificates for tenants and users,
+# which is helpful in image bundle steps.
+
+if is_service_enabled nova && is_service_enabled key; then
+ $TOP_DIR/tools/create_userrc.sh -PA --target-dir $TOP_DIR/accrc
+fi
+
# Install Images
# ==============
@@ -1648,19 +1245,56 @@
# * **precise**: http://uec-images.ubuntu.com/precise/current/precise-server-cloudimg-amd64.tar.gz
if is_service_enabled g-reg; then
- echo_summary "Uploading images"
TOKEN=$(keystone token-get | grep ' id ' | get_field 2)
- # Option to upload legacy ami-tty, which works with xenserver
- if [[ -n "$UPLOAD_LEGACY_TTY" ]]; then
- IMAGE_URLS="${IMAGE_URLS:+${IMAGE_URLS},}https://github.com/downloads/citrix-openstack/warehouse/tty.tgz"
- fi
+ if is_baremetal; then
+ echo_summary "Creating and uploading baremetal images"
- for image_url in ${IMAGE_URLS//,/ }; do
- upload_image $image_url $TOKEN
- done
+ # build and upload separate deploy kernel & ramdisk
+ upload_baremetal_deploy $TOKEN
+
+ # upload images, separating out the kernel & ramdisk for PXE boot
+ for image_url in ${IMAGE_URLS//,/ }; do
+ upload_baremetal_image $image_url $TOKEN
+ done
+ else
+ echo_summary "Uploading images"
+
+ # Option to upload legacy ami-tty, which works with xenserver
+ if [[ -n "$UPLOAD_LEGACY_TTY" ]]; then
+ IMAGE_URLS="${IMAGE_URLS:+${IMAGE_URLS},}https://github.com/downloads/citrix-openstack/warehouse/tty.tgz"
+ fi
+
+ for image_url in ${IMAGE_URLS//,/ }; do
+ upload_image $image_url $TOKEN
+ done
+ fi
fi
+# If we are running nova with baremetal driver, there are a few
+# last-mile configuration bits to attend to, which must happen
+# after n-api and n-sch have started.
+# Also, creating the baremetal flavor must happen after images
+# are loaded into glance, though just knowing the IDs is sufficient here
+if is_service_enabled nova && is_baremetal; then
+ # create special flavor for baremetal if we know what images to associate
+ [[ -n "$BM_DEPLOY_KERNEL_ID" ]] && [[ -n "$BM_DEPLOY_RAMDISK_ID" ]] && \
+ create_baremetal_flavor $BM_DEPLOY_KERNEL_ID $BM_DEPLOY_RAMDISK_ID
+
+ # otherwise the user can manually add it later by calling nova-baremetal-manage
+ [[ -n "$BM_FIRST_MAC" ]] && add_baremetal_node
+
+ # NOTE: we do this here to ensure that our copy of dnsmasq is running
+ sudo pkill dnsmasq || true
+ sudo dnsmasq --conf-file= --port=0 --enable-tftp --tftp-root=/tftpboot \
+ --dhcp-boot=pxelinux.0 --bind-interfaces --pid-file=/var/run/dnsmasq.pid \
+ --interface=$BM_DNSMASQ_IFACE --dhcp-range=$BM_DNSMASQ_RANGE
+
+ # ensure callback daemon is running
+ sudo pkill nova-baremetal-deploy-helper || true
+ screen_it baremetal "nova-baremetal-deploy-helper"
+fi
# Configure Tempest last to ensure that the runtime configuration of
# the various OpenStack services can be queried.
@@ -1672,6 +1306,14 @@
echo '**************************************************'
fi
+# Save some values we generated for later use
+CURRENT_RUN_TIME=$(date "+$TIMESTAMP_FORMAT")
+echo "# $CURRENT_RUN_TIME" >$TOP_DIR/.stackenv
+for i in BASE_SQL_CONN ENABLED_SERVICES HOST_IP LOGFILE \
+ SERVICE_HOST SERVICE_PROTOCOL TLS_IP; do
+ echo $i=${!i} >>$TOP_DIR/.stackenv
+done
+
# Run local script
# ================
@@ -1682,6 +1324,8 @@
$TOP_DIR/local.sh
fi
+# Check the status of running services
+service_check
# Fin
# ===
diff --git a/stackrc b/stackrc
index 8ac6ec5..0e84db8 100644
--- a/stackrc
+++ b/stackrc
@@ -6,6 +6,9 @@
# Destination path for installation
DEST=/opt/stack
+# Destination for working data
+DATA_DIR=${DEST}/data
+
# Select the default database
DATABASE_TYPE=mysql
@@ -14,7 +17,7 @@
# ``disable_service`` functions in ``localrc``.
# For example, to enable Swift add this to ``localrc``:
# enable_service swift
-ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,cinder,c-sch,c-api,c-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,rabbit,$DATABASE_TYPE
+ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,cinder,c-sch,c-api,c-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,rabbit,tempest,$DATABASE_TYPE
# Set the default Nova APIs to enable
NOVA_ENABLED_APIS=ec2,osapi_compute,metadata
@@ -108,6 +111,17 @@
RYU_REPO=https://github.com/osrg/ryu.git
RYU_BRANCH=master
+# diskimage-builder
+BM_IMAGE_BUILD_REPO=https://github.com/stackforge/diskimage-builder.git
+BM_IMAGE_BUILD_BRANCH=master
+
+# bm_poseur
+# Used to simulate a hardware environment for baremetal
+# Only used if BM_USE_FAKE_ENV is set
+BM_POSEUR_REPO=https://github.com/tripleo/bm_poseur.git
+BM_POSEUR_BRANCH=master
+
+
# Nova hypervisor configuration. We default to libvirt with **kvm** but will
# drop back to **qemu** if we are unable to load the kvm module. ``stack.sh`` can
# also install an **LXC** or **OpenVZ** based system.
diff --git a/tools/configure_tempest.sh b/tools/configure_tempest.sh
deleted file mode 100755
index 0924180..0000000
--- a/tools/configure_tempest.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/usr/bin/env bash
-
-echo "$0 is scheduled for delete!!" >&2
diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh
new file mode 100755
index 0000000..e39c157
--- /dev/null
+++ b/tools/create_userrc.sh
@@ -0,0 +1,254 @@
+#!/usr/bin/env bash
+
+# Warning: This script is just for development purposes
+
+ACCOUNT_DIR=./accrc
+
+display_help()
+{
+cat <<EOF
+
+usage: $0 <options..>
+
+This script creates certificates and sourceable rc files per tenant/user.
+
+Target account directory hierarchy:
+target_dir-|
+ |-cacert.pem
+ |-tenant1-name|
+ | |- user1
+ | |- user1-cert.pem
+ | |- user1-pk.pem
+ | |- user2
+ | ..
+ |-tenant2-name..
+ ..
+
+Optional Arguments
+-P include password in the rc files; with -A it assumes all users' passwords are the same
+-A try with all users
+-u <username> create files just for the specified user
+-C <tenant_name> create user and tenant; the specified tenant will be the user's tenant
+-r <name> when combined with -C and the (-u) user exists, it will be the user's role in the (-C) tenant (default: Member)
+-p <userpass> password for the user
+--os-username <username>
+--os-password <admin password>
+--os-tenant-name <tenant_name>
+--os-tenant-id <tenant_id>
+--os-auth-url <auth_url>
+--target-dir <target_directory>
+--skip-tenant <tenant-name>
+--debug
+
+Example:
+$0 -AP
+$0 -P -C mytenant -u myuser -p mypass
+EOF
+}
+
+if ! options=$(getopt -o hPAp:u:r:C: -l os-username:,os-password:,os-tenant-name:,os-tenant-id:,os-auth-url:,target-dir:,skip-tenant:,help,debug -- "$@")
+then
+ #parse error
+ display_help
+ exit 1
+fi
+eval set -- $options
+ADDPASS=""
+
+# The service users are usually in the service tenant.
+# rc files for service users are out of scope.
+# Supporting a different tenant for services is out of scope.
+SKIP_TENANT=",service," # tenant names are between commas(,)
+MODE=""
+ROLE=Member
+USER_NAME=""
+USER_PASS=""
+while [ $# -gt 0 ]
+do
+ case "$1" in
+ -h|--help) display_help; exit 0 ;;
+ --os-username) export OS_USERNAME=$2; shift ;;
+ --os-password) export OS_PASSWORD=$2; shift ;;
+ --os-tenant-name) export OS_TENANT_NAME=$2; shift ;;
+ --os-tenant-id) export OS_TENANT_ID=$2; shift ;;
+ --skip-tenant) SKIP_TENANT="$SKIP_TENANT$2,"; shift ;;
+ --os-auth-url) export OS_AUTH_URL=$2; shift ;;
+ --target-dir) ACCOUNT_DIR=$2; shift ;;
+ --debug) set -o xtrace ;;
+ -u) MODE=${MODE:-one}; USER_NAME=$2; shift ;;
+ -p) USER_PASS=$2; shift ;;
+ -A) MODE=all; ;;
+ -P) ADDPASS="yes" ;;
+ -C) MODE=create; TENANT=$2; shift ;;
+ -r) ROLE=$2; shift ;;
+ (--) shift; break ;;
+ (-*) echo "$0: error - unrecognized option $1" >&2; display_help; exit 1 ;;
+ (*) echo "$0: error - unexpected argument $1" >&2; display_help; exit 1 ;;
+ esac
+ shift
+done
+
+if [ -z "$OS_PASSWORD" ]; then
+ if [ -z "$ADMIN_PASSWORD" ];then
+ echo "The admin password is required option!" >&2
+ exit 2
+ else
+ OS_PASSWORD=$ADMIN_PASSWORD
+ fi
+fi
+
+if [ -z "$OS_TENANT_NAME" -a -z "$OS_TENANT_ID" ]; then
+ export OS_TENANT_NAME=admin
+fi
+
+if [ -z "$OS_USERNAME" ]; then
+ export OS_USERNAME=admin
+fi
+
+if [ -z "$OS_AUTH_URL" ]; then
+ export OS_AUTH_URL=http://localhost:5000/v2.0/
+fi
+
+USER_PASS=${USER_PASS:-$OS_PASSWORD}
+USER_NAME=${USER_NAME:-$OS_USERNAME}
+
+if [ -z "$MODE" ]; then
+ echo "You must specify at least -A or -u parameter!" >&2
+ echo
+ display_help
+ exit 3
+fi
+
+export -n SERVICE_TOKEN SERVICE_ENDPOINT OS_SERVICE_TOKEN OS_SERVICE_ENDPOINT
+
+EC2_URL=http://localhost:8773/service/Cloud
+S3_URL=http://localhost:3333
+
+ec2=`keystone endpoint-get --service ec2 | awk '/\|[[:space:]]*ec2.publicURL/ {print $4}'`
+[ -n "$ec2" ] && EC2_URL=$ec2
+
+s3=`keystone endpoint-get --service s3 | awk '/\|[[:space:]]*s3.publicURL/ {print $4}'`
+[ -n "$s3" ] && S3_URL=$s3
+
+
+mkdir -p "$ACCOUNT_DIR"
+ACCOUNT_DIR=`readlink -f "$ACCOUNT_DIR"`
+EUCALYPTUS_CERT=$ACCOUNT_DIR/cacert.pem
+mv "$EUCALYPTUS_CERT" "$EUCALYPTUS_CERT.old" &>/dev/null
+if ! nova x509-get-root-cert "$EUCALYPTUS_CERT"; then
+ echo "Failed to update the root certificate: $EUCALYPTUS_CERT" >&2
+ mv "$EUCALYPTUS_CERT.old" "$EUCALYPTUS_CERT" &>/dev/null
+fi
+
+
+function add_entry(){
+ local user_id=$1
+ local user_name=$2
+ local tenant_id=$3
+ local tenant_name=$4
+ local user_passwd=$5
+
+ # The admin user can see all users' secret AWS keys; it does not look good
+ local line=`keystone ec2-credentials-list --user_id $user_id | grep -E "^\\|[[:space:]]*($tenant_name|$tenant_id)[[:space:]]*\\|" | head -n 1`
+ if [ -z "$line" ]; then
+ keystone ec2-credentials-create --user-id $user_id --tenant-id $tenant_id 1>&2
+ line=`keystone ec2-credentials-list --user_id $user_id | grep -E "^\\|[[:space:]]*($tenant_name|$tenant_id)[[:space:]]*\\|" | head -n 1`
+ fi
+ local ec2_access_key ec2_secret_key
+ read ec2_access_key ec2_secret_key <<< `echo $line | awk '{print $4 " " $6 }'`
+ mkdir -p "$ACCOUNT_DIR/$tenant_name"
+ local rcfile="$ACCOUNT_DIR/$tenant_name/$user_name"
+ # The cert's subject is the tenant ID "dash" user ID, but the CN should be the first part of the DN
+ # Generally the subject DN parts should be in reverse order, like the Issuer
+ # The Serial does not seem correctly marked either
+ local ec2_cert="$rcfile-cert.pem"
+ local ec2_private_key="$rcfile-pk.pem"
+ # Try to preserve the original file on fail (best effort)
+ mv "$ec2_private_key" "$ec2_private_key.old" &>/dev/null
+ mv "$ec2_cert" "$ec2_cert.old" &>/dev/null
+ # It will not create certs when the password is incorrect
+ if ! nova --os-password "$user_passwd" --os-username "$user_name" --os-tenant-name "$tenant_name" x509-create-cert "$ec2_private_key" "$ec2_cert"; then
+ mv "$ec2_private_key.old" "$ec2_private_key" &>/dev/null
+ mv "$ec2_cert.old" "$ec2_cert" &>/dev/null
+ fi
+ cat >"$rcfile" <<EOF
+# you can source this file
+export EC2_ACCESS_KEY=$ec2_access_key
+export EC2_SECRET_KEY=$ec2_secret_key
+export EC2_URL=$EC2_URL
+export S3_URL=$S3_URL
+# OpenStack USER ID = $user_id
+export OS_USERNAME="$user_name"
+# Openstack Tenant ID = $tenant_id
+export OS_TENANT_NAME="$tenant_name"
+export OS_AUTH_URL="$OS_AUTH_URL"
+export EC2_CERT="$ec2_cert"
+export EC2_PRIVATE_KEY="$ec2_private_key"
+export EC2_USER_ID=42 #not checked by nova (can be a 12-digit id)
+export EUCALYPTUS_CERT="$ACCOUNT_DIR/cacert.pem"
+export NOVA_CERT="$ACCOUNT_DIR/cacert.pem"
+EOF
+ if [ -n "$ADDPASS" ]; then
+ echo "export OS_PASSWORD=\"$user_passwd\"" >>"$rcfile"
+ fi
+}
+
+# An admin user is expected
+function create_or_get_tenant(){
+ local tenant_name=$1
+ local tenant_id=`keystone tenant-list | awk '/\|[[:space:]]*'"$tenant_name"'[[:space:]]*\|.*\|/ {print $2}'`
+ if [ -n "$tenant_id" ]; then
+ echo $tenant_id
+ else
+ keystone tenant-create --name "$tenant_name" | awk '/\|[[:space:]]*id[[:space:]]*\|.*\|/ {print $4}'
+ fi
+}
+
+function create_or_get_role(){
+ local role_name=$1
+ local role_id=`keystone role-list| awk '/\|[[:space:]]*'"$role_name"'[[:space:]]*\|/ {print $2}'`
+ if [ -n "$role_id" ]; then
+ echo $role_id
+ else
+ keystone tenant-create --name "$role_name" |awk '/\|[[:space:]]*id[[:space:]]*\|.*\|/ {print $4}'
+ fi
+}
+
+# Provides an empty string when the user does not exist
+function get_user_id(){
+ local user_name=$1
+ keystone user-list | awk '/^\|[^|]*\|[[:space:]]*'"$user_name"'[[:space:]]*\|.*\|/ {print $2}'
+}
+
+if [ $MODE != "create" ]; then
+# It looks like we can't ask for all tenants related to a specified user
+ for tenant_id_at_name in `keystone tenant-list | awk 'BEGIN {IGNORECASE = 1} /true[[:space:]]*\|$/ {print $2 "@" $4}'`; do
+ read tenant_id tenant_name <<< `echo "$tenant_id_at_name" | sed 's/@/ /'`
+ if echo $SKIP_TENANT| grep -q ",$tenant_name,"; then
+ continue;
+ fi
+ for user_id_at_name in `keystone user-list --tenant-id $tenant_id | awk 'BEGIN {IGNORECASE = 1} /true[[:space:]]*\|[^|]*\|$/ {print $2 "@" $4}'`; do
+ read user_id user_name <<< `echo "$user_id_at_name" | sed 's/@/ /'`
+ if [ $MODE = one -a "$user_name" != "$USER_NAME" ]; then
+ continue;
+ fi
+ add_entry "$user_id" "$user_name" "$tenant_id" "$tenant_name" "$USER_PASS"
+ done
+ done
+else
+ tenant_name=$TENANT
+ tenant_id=`create_or_get_tenant "$TENANT"`
+ user_name=$USER_NAME
+ user_id=`get_user_id $user_name`
+ if [ -z "$user_id" ]; then
+ #new user
+ user_id=`keystone user-create --name "$user_name" --tenant-id "$tenant_id" --pass "$USER_PASS" --email "$user_name@example.com" | awk '/\|[[:space:]]*id[[:space:]]*\|.*\|/ {print $4}'`
+ # The password is on the command line, which is not a good thing
+ add_entry "$user_id" "$user_name" "$tenant_id" "$tenant_name" "$USER_PASS"
+ else
+ #new role
+ role_id=`create_or_get_role "$ROLE"`
+ keystone user-role-add --user-id "$user_id" --tenant-id "$tenant_id" --role-id "$role_id"
+ add_entry "$user_id" "$user_name" "$tenant_id" "$tenant_name" "$USER_PASS"
+ fi
+fi
diff --git a/tools/info.sh b/tools/info.sh
index f01dbea..ef1f338 100755
--- a/tools/info.sh
+++ b/tools/info.sh
@@ -92,6 +92,8 @@
PKG_DIR=$FILES/apts
elif is_fedora; then
PKG_DIR=$FILES/rpms
+elif is_suse; then
+ PKG_DIR=$FILES/rpms-suse
else
exit_distro_not_supported "list of packages"
fi
diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh
index c78c6f2..e270e59 100755
--- a/tools/xen/install_os_domU.sh
+++ b/tools/xen/install_os_domU.sh
@@ -376,35 +376,22 @@
sleep 10
done
- # output the run.sh.log
- ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no stack@$DOMU_IP 'tail -f run.sh.log' &
- TAIL_PID=$!
-
- function kill_tail() {
- kill -9 $TAIL_PID
- exit 1
- }
- # Let Ctrl-c kill tail and exit
- trap kill_tail SIGINT
-
- # ensure we kill off the tail if we exit the script early
- # for other reasons
- add_on_exit "kill -9 $TAIL_PID || true"
-
- # wait silently until stack.sh has finished
- set +o xtrace
- while ! ssh_no_check -q stack@$DOMU_IP "tail run.sh.log | grep -q 'stack.sh completed in'"; do
+ set +x
+ echo -n "Waiting for startup script to finish"
+ while [ `ssh_no_check -q stack@$DOMU_IP pgrep -c run.sh` -ge 1 ]
+ do
sleep 10
+ echo -n "."
done
- set -o xtrace
+ echo "done!"
+ set -x
- # kill the tail process now stack.sh has finished
- kill -9 $TAIL_PID
+ # output the run.sh.log
+ ssh_no_check -q stack@$DOMU_IP 'cat run.sh.log'
- # check for a failure
- if ssh_no_check -q stack@$DOMU_IP "grep -q 'stack.sh failed' run.sh.log"; then
- exit 1
- fi
+ # Fail if the expected text is not found
+ ssh_no_check -q stack@$DOMU_IP 'cat run.sh.log' | grep -q 'stack.sh completed in'
+
echo "################################################################################"
echo ""
echo "All Finished!"
diff --git a/unstack.sh b/unstack.sh
index 34195c2..fd70916 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -25,9 +25,11 @@
DATA_DIR=${DATA_DIR:-${DEST}/data}
# Get project function libraries
+source $TOP_DIR/lib/baremetal
source $TOP_DIR/lib/cinder
source $TOP_DIR/lib/horizon
source $TOP_DIR/lib/swift
+source $TOP_DIR/lib/quantum
# Determine what system we are running on. This provides ``os_VENDOR``,
# ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME``
@@ -39,8 +41,7 @@
if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then
source $TOP_DIR/openrc
- source $TOP_DIR/lib/quantum
- teardown_quantum
+ teardown_quantum_debug
fi
# Shut down devstack's screen to get the bulk of OpenStack services in one shot
@@ -62,40 +63,21 @@
stop_horizon
fi
+# Kill TLS proxies
+if is_service_enabled tls-proxy; then
+ killall stud
+fi
+
+# baremetal might have created a fake environment
+if is_service_enabled baremetal && [[ "$BM_USE_FAKE_ENV" = "True" ]]; then
+ cleanup_fake_baremetal_env
+fi
+
SCSI_PERSIST_DIR=$CINDER_STATE_PATH/volumes/*
# Get the iSCSI volumes
if is_service_enabled cinder; then
- TARGETS=$(sudo tgtadm --op show --mode target)
- if [ $? -ne 0 ]; then
- # If tgt driver isn't running this won't work obviously
- # So check the response and restart if need be
- echo "tgtd seems to be in a bad state, restarting..."
- if is_ubuntu; then
- restart_service tgt
- else
- restart_service tgtd
- fi
- TARGETS=$(sudo tgtadm --op show --mode target)
- fi
-
- if [[ -n "$TARGETS" ]]; then
- iqn_list=( $(grep --no-filename -r iqn $SCSI_PERSIST_DIR | sed 's/<target //' | sed 's/>//') )
- for i in "${iqn_list[@]}"; do
- echo removing iSCSI target: $i
- sudo tgt-admin --delete $i
- done
- fi
-
- if is_service_enabled cinder; then
- sudo rm -rf $CINDER_STATE_PATH/volumes/*
- fi
-
- if is_ubuntu; then
- stop_service tgt
- else
- stop_service tgtd
- fi
+ cleanup_cinder
fi
if [[ -n "$UNSTACK_ALL" ]]; then
@@ -114,8 +96,7 @@
fi
fi
-# Quantum dhcp agent runs dnsmasq
-if is_service_enabled q-dhcp; then
- pid=$(ps aux | awk '/[d]nsmasq.+interface=tap/ { print $2 }')
- [ ! -z "$pid" ] && sudo kill -9 $pid
+if is_service_enabled quantum; then
+ stop_quantum
+ stop_quantum_third_party
fi