Merge "Multi-node setup: Fix keystone host"
diff --git a/README.md b/README.md
index 483d1b0..a738554 100644
--- a/README.md
+++ b/README.md
@@ -85,19 +85,21 @@
# Swift
-Swift is not installed by default, you can enable easily by adding this to your `localrc`:
+Swift is enabled by default and configured with only one replica, to avoid being IO/memory intensive on a small VM. When running with only one replica, the account, container and object services run directly in screen. The other services, such as the replicators, updaters and auditors, run in the background.
- enable_service swift
+If you would like to disable Swift, you can add this to your `localrc`:
+
+ disable_service s-proxy s-object s-container s-account
If you want a minimal Swift install with only Swift and Keystone you can have this instead in your `localrc`:
disable_all_services
- enable_service key mysql swift
+ enable_service key mysql s-proxy s-object s-container s-account
-If you use Swift with Keystone, Swift will authenticate against it. You will need to make sure to use the Keystone URL to auth against.
+If you want to test a more realistic Swift cluster with multiple replicas, you can do so by customizing the variable `SWIFT_REPLICAS` in your `localrc` (usually to 3).
+
+# Swift S3
If you are enabling `swift3` in `ENABLED_SERVICES` DevStack will install the swift3 middleware emulation. Swift will be configured to act as an S3 endpoint for Keystone, effectively replacing `nova-objectstore`.
Only the Swift proxy server is launched in the screen session; all other services are started in the background and managed by the `swift-init` tool.
-
-By default Swift will configure 3 replicas (and one spare) which could be IO intensive on a small vm, if you only want to do some quick testing of the API you can choose to only have one replica by customizing the variable `SWIFT_REPLICAS` in your `localrc`.
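
Putting the README guidance above together, a `localrc` exercising these Swift options might look like this (an illustrative sketch, not part of the patch):

    # test real replication instead of the single-replica default
    SWIFT_REPLICAS=3
    # serve the S3 API from Swift instead of nova-objectstore
    enable_service swift3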
diff --git a/clean.sh b/clean.sh
new file mode 100755
index 0000000..cf24f27
--- /dev/null
+++ b/clean.sh
@@ -0,0 +1,91 @@
+#!/usr/bin/env bash
+
+# **clean.sh**
+
+# ``clean.sh`` does its best to eradicate traces of a DevStack
+# run except for the following:
+# - both base and target code repos are left alone
+# - packages (system and pip) are left alone
+
+# This means that all data files are removed. More??
+
+# Keep track of the current devstack directory.
+TOP_DIR=$(cd $(dirname "$0") && pwd)
+
+# Import common functions
+source $TOP_DIR/functions
+
+# Load local configuration
+source $TOP_DIR/stackrc
+
+# Get the variables that are set in stack.sh
+source $TOP_DIR/.stackenv
+
+# Determine what system we are running on. This provides ``os_VENDOR``,
+# ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME``
+# and ``DISTRO``
+GetDistro
+
+
+# Import database library
+source $TOP_DIR/lib/database
+source $TOP_DIR/lib/rpc_backend
+
+source $TOP_DIR/lib/tls
+source $TOP_DIR/lib/horizon
+source $TOP_DIR/lib/keystone
+source $TOP_DIR/lib/glance
+source $TOP_DIR/lib/nova
+source $TOP_DIR/lib/cinder
+source $TOP_DIR/lib/swift
+source $TOP_DIR/lib/ceilometer
+source $TOP_DIR/lib/heat
+source $TOP_DIR/lib/quantum
+source $TOP_DIR/lib/baremetal
+source $TOP_DIR/lib/ldap
+
+
+# See if there is anything running...
+# need to adapt when run_service is merged
+SESSION=$(screen -ls | awk '/[0-9].stack/ { print $1 }')
+if [[ -n "$SESSION" ]]; then
+ # Let unstack.sh do its thing first
+ $TOP_DIR/unstack.sh --all
+fi
+
+# Clean projects
+cleanup_cinder
+cleanup_glance
+cleanup_keystone
+cleanup_nova
+cleanup_quantum
+cleanup_swift
+
+# cinder doesn't clean up the volume group as it might be used elsewhere...
+# clean it up if it is a loop device
+VG_DEV=$(sudo losetup -j $DATA_DIR/${VOLUME_GROUP}-backing-file | awk -F':' '/backing-file/ { print $1}')
+if [[ -n "$VG_DEV" ]]; then
+ sudo losetup -d $VG_DEV
+fi
+
+#if mount | grep $DATA_DIR/swift/drives; then
+# sudo umount $DATA_DIR/swift/drives/sdb1
+#fi
+
+
+# Clean out /etc
+sudo rm -rf /etc/keystone /etc/glance /etc/nova /etc/cinder /etc/swift
+
+# Clean out tgt
+sudo rm /etc/tgt/conf.d/*
+
+# Clean up the message queue
+cleanup_rpc_backend
+cleanup_database
+
+# Clean up networking...
+# should this be in nova?
+# FIXED_IP_ADDR in br100
+
+# Clean up files
+#rm -f .stackenv
diff --git a/exercise.sh b/exercise.sh
index 5b3c56e..3516738 100755
--- a/exercise.sh
+++ b/exercise.sh
@@ -17,9 +17,19 @@
# to refrain from exercising euca.sh use SKIP_EXERCISES=euca
SKIP_EXERCISES=${SKIP_EXERCISES:-""}
-# Locate the scripts we should run
-EXERCISE_DIR=$(dirname "$0")/exercises
-basenames=$(for b in `ls $EXERCISE_DIR/*.sh`; do basename $b .sh; done)
+# comma separated list of script basenames to run
+# to run only euca.sh use RUN_EXERCISES=euca
+basenames=${RUN_EXERCISES:-""}
+
+EXERCISE_DIR=$TOP_DIR/exercises
+
+if [ -z "${basenames}" ] ; then
+ # Locate the scripts we should run
+ basenames=$(for b in `ls $EXERCISE_DIR/*.sh`; do basename $b .sh; done)
+else
+ # If RUN_EXERCISES was specified, ignore SKIP_EXERCISES.
+ SKIP_EXERCISES=
+fi
# Track the state of each script
passes=""
diff --git a/exercises/client-args.sh b/exercises/client-args.sh
index 894da74..1e92500 100755
--- a/exercises/client-args.sh
+++ b/exercises/client-args.sh
@@ -136,7 +136,7 @@
# Swift client
# ------------
-if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
+if [[ "$ENABLED_SERVICES" =~ "swift" || "$ENABLED_SERVICES" =~ "s-proxy" ]]; then
if [[ "$SKIP_EXERCISES" =~ "swift" ]] ; then
STATUS_SWIFT="Skipped"
else
diff --git a/exercises/client-env.sh b/exercises/client-env.sh
index c84e84e..dd8e56e 100755
--- a/exercises/client-env.sh
+++ b/exercises/client-env.sh
@@ -144,7 +144,8 @@
# Swift client
# ------------
-if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
+
+if [[ "$ENABLED_SERVICES" =~ "swift" || "$ENABLED_SERVICES" =~ "s-proxy" ]]; then
if [[ "$SKIP_EXERCISES" =~ "swift" ]] ; then
STATUS_SWIFT="Skipped"
else
diff --git a/exercises/quantum-adv-test.sh b/exercises/quantum-adv-test.sh
index 5c4b16e..a1fb2ad 100755
--- a/exercises/quantum-adv-test.sh
+++ b/exercises/quantum-adv-test.sh
@@ -235,7 +235,7 @@
source $TOP_DIR/openrc $TENANT $TENANT
local NET_ID=$(quantum net-create --tenant_id $TENANT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' )
quantum subnet-create --ip_version 4 --tenant_id $TENANT_ID --gateway $GATEWAY $NET_ID $CIDR
- quantum-debug probe-create $NET_ID
+ quantum-debug probe-create --device-owner compute $NET_ID
source $TOP_DIR/openrc demo demo
}
diff --git a/exercises/swift.sh b/exercises/swift.sh
index 46ac2c5..c4ec3e9 100755
--- a/exercises/swift.sh
+++ b/exercises/swift.sh
@@ -35,7 +35,7 @@
# If swift is not enabled we exit with exitcode 55, which means the
# exercise is skipped.
-is_service_enabled swift || exit 55
+is_service_enabled s-proxy || exit 55
# Container name
CONTAINER=ex-swift
diff --git a/files/keystone_data.sh b/files/keystone_data.sh
index a6fab09..72b5b1e 100755
--- a/files/keystone_data.sh
+++ b/files/keystone_data.sh
@@ -52,7 +52,7 @@
# Services
# --------
-if [[ "$ENABLED_SERVICES" =~ "n-api" ]] && [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
+if [[ "$ENABLED_SERVICES" =~ "n-api" ]] && [[ "$ENABLED_SERVICES" =~ "s-proxy" || "$ENABLED_SERVICES" =~ "swift" ]]; then
NOVA_USER=$(keystone user-list | awk "/ nova / { print \$2 }")
# Nova needs ResellerAdmin role to download images when accessing
# swift through the s3 api.
@@ -123,7 +123,8 @@
fi
# Swift
-if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
+
+if [[ "$ENABLED_SERVICES" =~ "swift" || "$ENABLED_SERVICES" =~ "s-proxy" ]]; then
SWIFT_USER=$(get_id keystone user-create \
--name=swift \
--pass="$SERVICE_PASSWORD" \
@@ -190,7 +191,7 @@
fi
# S3
-if [[ "$ENABLED_SERVICES" =~ "n-obj" || "$ENABLED_SERVICES" =~ "swift" ]]; then
+if [[ "$ENABLED_SERVICES" =~ "n-obj" || "$ENABLED_SERVICES" =~ "swift3" ]]; then
if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
S3_SERVICE=$(get_id keystone service-create \
--name=s3 \
diff --git a/functions b/functions
index f5032d5..fe50547 100644
--- a/functions
+++ b/functions
@@ -123,6 +123,37 @@
}
+# Get the default value for HOST_IP
+# get_default_host_ip fixed_range floating_range host_ip_iface host_ip
+function get_default_host_ip() {
+ local fixed_range=$1
+ local floating_range=$2
+ local host_ip_iface=$3
+ local host_ip=$4
+
+ # Find the interface used for the default route
+ host_ip_iface=${host_ip_iface:-$(ip route | sed -n '/^default/{ s/.*dev \(\w\+\)\s\+.*/\1/; p; }' | head -1)}
+ # Search for an IP unless an explicit is set by ``HOST_IP`` environment variable
+ if [ -z "$host_ip" -o "$host_ip" == "dhcp" ]; then
+ host_ip=""
+ host_ips=`LC_ALL=C ip -f inet addr show ${host_ip_iface} | awk '/inet/ {split($2,parts,"/"); print parts[1]}'`
+ for IP in $host_ips; do
+ # Attempt to filter out IP addresses that are part of the fixed and
+ # floating range. Note that this method only works if the ``netaddr``
+ # python library is installed. If it is not installed, an error
+ # will be printed and the first IP from the interface will be used.
+ # If that is not correct set ``HOST_IP`` in ``localrc`` to the correct
+ # address.
+ if ! (address_in_net $IP $fixed_range || address_in_net $IP $floating_range); then
+ host_ip=$IP
+ break;
+ fi
+ done
+ fi
+ echo $host_ip
+}
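
For reference, `stack.sh` later in this patch consumes the new helper like this:

    HOST_IP=$(get_default_host_ip $FIXED_RANGE $FLOATING_RANGE "$HOST_IP_IFACE" "$HOST_IP")
    if [ "$HOST_IP" == "" ]; then
        die $LINENO "Could not determine host ip address. Either localrc specified dhcp on ${HOST_IP_IFACE} or defaulted"
    fi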
+
+
function _get_package_dir() {
local pkg_dir
if is_ubuntu; then
@@ -181,6 +212,10 @@
if [[ ! $file_to_parse =~ ceilometer ]]; then
file_to_parse="${file_to_parse} ceilometer"
fi
+ elif [[ $service == s-* ]]; then
+ if [[ ! $file_to_parse =~ swift ]]; then
+ file_to_parse="${file_to_parse} swift"
+ fi
elif [[ $service == n-* ]]; then
if [[ ! $file_to_parse =~ nova ]]; then
file_to_parse="${file_to_parse} nova"
@@ -618,6 +653,9 @@
# **ceilometer** returns true if any service enabled start with **ceilometer**
# **glance** returns true if any service enabled start with **g-**
# **quantum** returns true if any service enabled start with **q-**
+# **swift** returns true if any service enabled start with **s-**
+# For backward compatibility if we have **swift** in ENABLED_SERVICES all the
+# **s-** services will be enabled. This will be deprecated in the future.
#
# Uses global ``ENABLED_SERVICES``
# is_service_enabled service [service ...]
@@ -630,6 +668,8 @@
[[ ${service} == "ceilometer" && ${ENABLED_SERVICES} =~ "ceilometer-" ]] && return 0
[[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && return 0
[[ ${service} == "quantum" && ${ENABLED_SERVICES} =~ "q-" ]] && return 0
+ [[ ${service} == "swift" && ${ENABLED_SERVICES} =~ "s-" ]] && return 0
+ [[ ${service} == s-* && ${ENABLED_SERVICES} =~ "swift" ]] && return 0
done
return 1
}
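
A quick sketch of the new matching rules (service lists are illustrative):

    ENABLED_SERVICES=g-api,s-proxy,s-object,s-container,s-account
    is_service_enabled swift && echo "any s-* entry satisfies 'swift'"

    ENABLED_SERVICES=g-api,swift
    is_service_enabled s-proxy && echo "legacy 'swift' entry still enables every s-* service"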
@@ -735,6 +775,21 @@
}
+# Distro-agnostic package uninstaller
+# uninstall_package package [package ...]
+function uninstall_package() {
+ if is_ubuntu; then
+ apt_get purge "$@"
+ elif is_fedora; then
+ yum remove -y "$@"
+ elif is_suse; then
+ rpm -e "$@"
+ else
+ exit_distro_not_supported "uninstalling packages"
+ fi
+}
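
Usage mirrors the other package helpers in this file; for example, `cleanup_rpc_backend` further down in this patch removes the broker with:

    uninstall_package rabbitmq-server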
+
+
# Distro-agnostic function to tell if a package is installed
# is_package_installed package [package ...]
function is_package_installed() {
@@ -803,26 +858,69 @@
}
+# _run_process() is designed to be backgrounded by run_process() to simulate a
+# fork. It includes the dirty work of closing extra filehandles and preparing log
+# files to produce the same logs as screen_it(). The log filename is derived
+# from the service name and global-and-now-misnamed SCREEN_LOGDIR
+# _run_process service "command-line"
+function _run_process() {
+ local service=$1
+ local command="$2"
+
+ # Undo logging redirections and close the extra descriptors
+ exec 1>&3
+ exec 2>&3
+ exec 3>&-
+ exec 6>&-
+
+ if [[ -n ${SCREEN_LOGDIR} ]]; then
+ exec 1>&${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log 2>&1
+ ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log
+
+ # TODO(dtroyer): Hack to get stdout from the Python interpreter for the logs.
+ export PYTHONUNBUFFERED=1
+ fi
+
+ exec /bin/bash -c "$command"
+ die "$service exec failure: $command"
+}
+
+
+# run_process() launches a child process that closes all file descriptors and
+# then exec's the passed in command. This is meant to duplicate the semantics
+# of screen_it() without screen. PIDs are written to
+# $SERVICE_DIR/$SCREEN_NAME/$service.pid
+# run_process service "command-line"
+function run_process() {
+ local service=$1
+ local command="$2"
+
+ # Spawn the child process
+ _run_process "$service" "$command" &
+ echo $!
+}
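
`screen_it` (below) captures the returned PID to a status file; a standalone call would look like this hypothetical sketch (service name and command are made up):

    run_process "my-svc" "$DEST/my-svc/bin/my-svc-server --debug" >$SERVICE_DIR/$SCREEN_NAME/my-svc.pid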
+
+
# Helper to launch a service in a named screen
# screen_it service "command-line"
function screen_it {
SCREEN_NAME=${SCREEN_NAME:-stack}
SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
- SCREEN_DEV=`trueorfalse True $SCREEN_DEV`
+ USE_SCREEN=$(trueorfalse True $USE_SCREEN)
if is_service_enabled $1; then
# Append the service to the screen rc file
screen_rc "$1" "$2"
- screen -S $SCREEN_NAME -X screen -t $1
+ if [[ "$USE_SCREEN" = "True" ]]; then
+ screen -S $SCREEN_NAME -X screen -t $1
- if [[ -n ${SCREEN_LOGDIR} ]]; then
- screen -S $SCREEN_NAME -p $1 -X logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log
- screen -S $SCREEN_NAME -p $1 -X log on
- ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log
- fi
+ if [[ -n ${SCREEN_LOGDIR} ]]; then
+ screen -S $SCREEN_NAME -p $1 -X logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log
+ screen -S $SCREEN_NAME -p $1 -X log on
+ ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log
+ fi
- if [[ "$SCREEN_DEV" = "True" ]]; then
# sleep to allow bash to be ready to be sent the command - we are
# creating a new window in screen and then sending characters, so if
# bash isn't running by the time we send the command, nothing happens
@@ -831,7 +929,8 @@
NL=`echo -ne '\015'`
screen -S $SCREEN_NAME -p $1 -X stuff "$2 || touch \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\"$NL"
else
- screen -S $SCREEN_NAME -p $1 -X exec /bin/bash -c "$2 || touch \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\""
+ # Spawn directly without screen
+ run_process "$1" "$2" >$SERVICE_DIR/$SCREEN_NAME/${1}.pid
fi
fi
}
diff --git a/lib/baremetal b/lib/baremetal
index 57048a1..5326dd1 100644
--- a/lib/baremetal
+++ b/lib/baremetal
@@ -33,7 +33,7 @@
# baremetal driver uses that to push a disk image onto the node(s).
#
# Below we define various defaults which control the behavior of the
-# baremetal compute service, and inform it of the hardware it will contorl.
+# baremetal compute service, and inform it of the hardware it will control.
#
# Below that, various functions are defined, which are called by devstack
# in the following order:
@@ -395,7 +395,7 @@
${KERNEL_ID:+--property kernel_id=$KERNEL_ID} \
${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}"
- # override DEFAULT_IMAGE_NAME so that tempest can find the image
+ # override DEFAULT_IMAGE_NAME so that tempest can find the image
# that we just uploaded in glance
DEFAULT_IMAGE_NAME="${IMAGE_NAME%.img}"
}
diff --git a/lib/cinder b/lib/cinder
index b3e1904..7688ad9 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -53,6 +53,11 @@
# Support for multi lvm backend configuration (default is no support)
CINDER_MULTI_LVM_BACKEND=$(trueorfalse False $CINDER_MULTI_LVM_BACKEND)
+# Should cinder perform secure deletion of volumes?
+# Defaults to true, can be set to False to avoid this bug when testing:
+# https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1023755
+CINDER_SECURE_DELETE=`trueorfalse True $CINDER_SECURE_DELETE`
+
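In practice that means a test run on an affected kernel only needs one `localrc` line (illustrative):

    CINDER_SECURE_DELETE=False
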
# Name of the lvm volume groups to use/create for iscsi volumes
# VOLUME_GROUP2 is used only if CINDER_MULTI_LVM_BACKEND = True
VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes}
diff --git a/lib/database b/lib/database
index ebab333..79b77a2 100644
--- a/lib/database
+++ b/lib/database
@@ -42,6 +42,11 @@
# This is not an error as multi-node installs will do this on the compute nodes
+# Get rid of everything enough to cleanly change database backends
+function cleanup_database {
+ cleanup_database_$DATABASE_TYPE
+}
+
# Set the database type based on the configuration
function initialize_database_backends {
for backend in $DATABASE_BACKENDS; do
diff --git a/lib/databases/mysql b/lib/databases/mysql
index ec65c36..0633ab0 100644
--- a/lib/databases/mysql
+++ b/lib/databases/mysql
@@ -10,6 +10,24 @@
register_database mysql
+# Get rid of everything enough to cleanly change database backends
+function cleanup_database_mysql {
+ if is_ubuntu; then
+ # Get ruthless with mysql
+ stop_service $MYSQL
+ sudo aptitude purge -y ~nmysql-server
+ sudo rm -rf /var/lib/mysql
+ return
+ elif is_fedora; then
+ MYSQL=mysqld
+ elif is_suse; then
+ MYSQL=mysql
+ else
+ return
+ fi
+ stop_service $MYSQL
+}
+
function recreate_database_mysql {
local db=$1
local charset=$2
diff --git a/lib/databases/postgresql b/lib/databases/postgresql
index 7d4a6c5..efc206f 100644
--- a/lib/databases/postgresql
+++ b/lib/databases/postgresql
@@ -10,6 +10,20 @@
register_database postgresql
+# Get rid of everything enough to cleanly change database backends
+function cleanup_database_postgresql {
+ stop_service postgresql
+ if is_ubuntu; then
+ # Get ruthless with postgresql
+ sudo aptitude purge -y ~npostgresql
+ return
+ elif is_fedora; then
+ uninstall_package postgresql-server
+ else
+ return
+ fi
+}
+
function recreate_database_postgresql {
local db=$1
local charset=$2
diff --git a/lib/glance b/lib/glance
index 9ec2112..edf6982 100644
--- a/lib/glance
+++ b/lib/glance
@@ -59,8 +59,7 @@
function cleanup_glance() {
# kill instances (nova)
# delete image files (glance)
- # This function intentionally left blank
- :
+ sudo rm -rf $GLANCE_CACHE_DIR $GLANCE_IMAGE_DIR $GLANCE_AUTH_CACHE_DIR
}
# configure_glanceclient() - Set config files, create data dirs, etc
diff --git a/lib/keystone b/lib/keystone
index 26c0277..805cb6f 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -59,6 +59,9 @@
KEYSTONE_SERVICE_PORT_INT=${KEYSTONE_SERVICE_PORT_INT:-5001}
KEYSTONE_SERVICE_PROTOCOL=${KEYSTONE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
+# Set the tenant for service accounts in Keystone
+SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service}
+
# Entry Points
# ------------
@@ -148,7 +151,7 @@
cp -p $FILES/default_catalog.templates $KEYSTONE_CATALOG
# Add swift endpoints to service catalog if swift is enabled
- if is_service_enabled swift; then
+ if is_service_enabled s-proxy; then
echo "catalog.RegionOne.object_store.publicURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG
echo "catalog.RegionOne.object_store.adminURL = http://%SERVICE_HOST%:8080/" >> $KEYSTONE_CATALOG
echo "catalog.RegionOne.object_store.internalURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG
diff --git a/lib/ldap b/lib/ldap
index 0a0d197..9d415c5 100644
--- a/lib/ldap
+++ b/lib/ldap
@@ -37,6 +37,12 @@
#update ldap olcdb
sudo ldapmodify -Y EXTERNAL -H ldapi:/// -f $TMP_MGR_DIFF_FILE
+ # On Fedora we need to manually add the cosine and inetorgperson schemas
+ if is_fedora; then
+ sudo ldapadd -Y EXTERNAL -H ldapi:/// -f /etc/openldap/schema/cosine.ldif
+ sudo ldapadd -Y EXTERNAL -H ldapi:/// -f /etc/openldap/schema/inetorgperson.ldif
+ fi
+
# add our top level ldap nodes
if ldapsearch -x -w $LDAP_PASSWORD -H ldap://localhost -D dc=Manager,dc=openstack,dc=org -x -b dc=openstack,dc=org | grep -q "Success" ; then
printf "LDAP already configured for OpenStack\n"
diff --git a/lib/nova b/lib/nova
index bdf5d22..9632a8c 100644
--- a/lib/nova
+++ b/lib/nova
@@ -65,6 +65,62 @@
QEMU_CONF=/etc/libvirt/qemu.conf
+NOVNC_DIR=$DEST/noVNC
+SPICE_DIR=$DEST/spice-html5
+
+
+# Nova Network Configuration
+# --------------------------
+
+# Set defaults according to the virt driver
+if [ "$VIRT_DRIVER" = 'xenserver' ]; then
+ PUBLIC_INTERFACE_DEFAULT=eth3
+ GUEST_INTERFACE_DEFAULT=eth1
+ # Allow ``build_domU.sh`` to specify the flat network bridge via kernel args
+ FLAT_NETWORK_BRIDGE_DEFAULT=$(grep -o 'flat_network_bridge=[[:alnum:]]*' /proc/cmdline | cut -d= -f 2 | sort -u)
+elif [ "$VIRT_DRIVER" = 'baremetal' ]; then
+ NETWORK_MANAGER=${NETWORK_MANAGER:-FlatManager}
+ PUBLIC_INTERFACE_DEFAULT=eth0
+ FLAT_INTERFACE=${FLAT_INTERFACE:-eth0}
+ FLAT_NETWORK_BRIDGE_DEFAULT=br100
+ STUB_NETWORK=${STUB_NETWORK:-False}
+else
+ PUBLIC_INTERFACE_DEFAULT=br100
+ GUEST_INTERFACE_DEFAULT=eth0
+ FLAT_NETWORK_BRIDGE_DEFAULT=br100
+fi
+
+NETWORK_MANAGER=${NETWORK_MANAGER:-${NET_MAN:-FlatDHCPManager}}
+PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-$PUBLIC_INTERFACE_DEFAULT}
+VLAN_INTERFACE=${VLAN_INTERFACE:-$GUEST_INTERFACE_DEFAULT}
+FLAT_NETWORK_BRIDGE=${FLAT_NETWORK_BRIDGE:-$FLAT_NETWORK_BRIDGE_DEFAULT}
+EC2_DMZ_HOST=${EC2_DMZ_HOST:-$SERVICE_HOST}
+
+# If you are using the FlatDHCP network mode on multiple hosts, set the
+# ``FLAT_INTERFACE`` variable but make sure that the interface doesn't already
+# have an IP or you risk breaking things.
+#
+# **DHCP Warning**: If your flat interface device uses DHCP, there will be a
+# hiccup while the network is moved from the flat interface to the flat network
+# bridge. This will happen when you launch your first instance. Upon launch
+# you will lose all connectivity to the node, and the VM launch will probably
+# fail.
+#
+# If you are running on a single node and don't need to access the VMs from
+# devices other than that node, you can set ``FLAT_INTERFACE=``
+# This will stop nova from bridging any interfaces into ``FLAT_NETWORK_BRIDGE``.
+FLAT_INTERFACE=${FLAT_INTERFACE:-$GUEST_INTERFACE_DEFAULT}
+
+# ``MULTI_HOST`` is a mode where each compute node runs its own network node. This
+# allows network operations and routing for a VM to occur on the server that is
+# running the VM - removing a SPOF and bandwidth bottleneck.
+MULTI_HOST=`trueorfalse False $MULTI_HOST`
+
+# Test floating pool and range are used for testing. They are defined
+# here until the admin APIs can replace nova-manage
+TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test}
+TEST_FLOATING_RANGE=${TEST_FLOATING_RANGE:-192.168.253.0/29}
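
A multi-host `localrc` built from the knobs above might look like this (interface names are purely illustrative):

    MULTI_HOST=True
    # dedicated, address-less interface for the flat network
    FLAT_INTERFACE=eth1
    FLAT_NETWORK_BRIDGE=br100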
+
# Entry Points
# ------------
@@ -106,6 +162,8 @@
# Clean out the instances directory.
sudo rm -rf $NOVA_INSTANCES_PATH/*
fi
+
+ sudo rm -rf $NOVA_STATE_PATH $NOVA_AUTH_CACHE_DIR
}
# configure_novaclient() - Set config files, create data dirs, etc
@@ -308,9 +366,6 @@
sudo chown -R $STACK_USER $NOVA_INSTANCES_PATH
fi
fi
-
- # Clean up old instances
- cleanup_nova
fi
}
@@ -371,7 +426,7 @@
iniset $NOVA_CONF DEFAULT compute_scheduler_driver "$SCHEDULER"
iniset $NOVA_CONF DEFAULT dhcpbridge_flagfile "$NOVA_CONF"
iniset $NOVA_CONF DEFAULT force_dhcp_release "True"
- iniset $NOVA_CONF DEFAULT fixed_range "$FIXED_RANGE"
+ iniset $NOVA_CONF DEFAULT fixed_range ""
iniset $NOVA_CONF DEFAULT default_floating_pool "$PUBLIC_NETWORK_NAME"
iniset $NOVA_CONF DEFAULT s3_host "$SERVICE_HOST"
iniset $NOVA_CONF DEFAULT s3_port "$S3_SERVICE_PORT"
@@ -440,6 +495,49 @@
# Replace the first '=' with ' ' for iniset syntax
iniset $NOVA_CONF DEFAULT ${I/=/ }
done
+
+ # All nova-compute workers need to know the vnc configuration options
+ # These settings don't hurt anything if n-xvnc and n-novnc are disabled
+ if is_service_enabled n-cpu; then
+ NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_auto.html"}
+ iniset $NOVA_CONF DEFAULT novncproxy_base_url "$NOVNCPROXY_URL"
+ XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:6081/console"}
+ iniset $NOVA_CONF DEFAULT xvpvncproxy_base_url "$XVPVNCPROXY_URL"
+ SPICEHTML5PROXY_URL=${SPICEHTML5PROXY_URL:-"http://$SERVICE_HOST:6082/spice_auto.html"}
+ iniset $NOVA_CONF spice html5proxy_base_url "$SPICEHTML5PROXY_URL"
+ fi
+ if [ "$VIRT_DRIVER" = 'xenserver' ]; then
+ VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=169.254.0.1}
+ else
+ VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=127.0.0.1}
+ fi
+
+ if is_service_enabled n-novnc || is_service_enabled n-xvnc ; then
+ # Address on which instance vncservers will listen on compute hosts.
+ # For multi-host, this should be the management ip of the compute host.
+ VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1}
+ iniset $NOVA_CONF DEFAULT vnc_enabled true
+ iniset $NOVA_CONF DEFAULT vncserver_listen "$VNCSERVER_LISTEN"
+ iniset $NOVA_CONF DEFAULT vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS"
+ else
+ iniset $NOVA_CONF DEFAULT vnc_enabled false
+ fi
+
+ if is_service_enabled n-spice; then
+ # Address on which instance spiceservers will listen on compute hosts.
+ # For multi-host, this should be the management ip of the compute host.
+ SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS=127.0.0.1}
+ SPICESERVER_LISTEN=${SPICESERVER_LISTEN=127.0.0.1}
+ iniset $NOVA_CONF spice enabled true
+ iniset $NOVA_CONF spice server_listen "$SPICESERVER_LISTEN"
+ iniset $NOVA_CONF spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS"
+ else
+ iniset $NOVA_CONF spice enabled false
+ fi
+
+ iniset $NOVA_CONF DEFAULT ec2_dmz_host "$EC2_DMZ_HOST"
+ iniset_rpc_backend nova $NOVA_CONF DEFAULT
+ iniset $NOVA_CONF DEFAULT glance_api_servers "$GLANCE_HOSTPORT"
}
# create_nova_cache_dir() - Part of the init_nova() process
@@ -451,7 +549,7 @@
}
function create_nova_conf_nova_network() {
- iniset $NOVA_CONF DEFAULT network_manager "nova.network.manager.$NET_MAN"
+ iniset $NOVA_CONF DEFAULT network_manager "nova.network.manager.$NETWORK_MANAGER"
iniset $NOVA_CONF DEFAULT public_interface "$PUBLIC_INTERFACE"
iniset $NOVA_CONF DEFAULT vlan_interface "$VLAN_INTERFACE"
iniset $NOVA_CONF DEFAULT flat_network_bridge "$FLAT_NETWORK_BRIDGE"
diff --git a/lib/quantum b/lib/quantum
index 66360d4..efdd43d 100644
--- a/lib/quantum
+++ b/lib/quantum
@@ -53,7 +53,7 @@
# that must be set in localrc for connectivity across hosts with
# Quantum.
#
-# With Quantum networking the NET_MAN variable is ignored.
+# With Quantum networking the NETWORK_MANAGER variable is ignored.
# Save trace setting
@@ -181,6 +181,13 @@
# Hardcoding for 1 service plugin for now
source $TOP_DIR/lib/quantum_plugins/agent_loadbalancer
+# Use security group or not
+if has_quantum_plugin_security_group; then
+ Q_USE_SECGROUP=${Q_USE_SECGROUP:-True}
+else
+ Q_USE_SECGROUP=False
+fi
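
When the plugin does support security groups, the detected default can still be overridden from `localrc` (sketch):

    Q_USE_SECGROUP=False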
+
# Entry Points
# ------------
@@ -211,8 +218,6 @@
fi
_configure_quantum_debug_command
-
- _cleanup_quantum
}
function create_nova_conf_quantum() {
@@ -224,6 +229,11 @@
iniset $NOVA_CONF DEFAULT quantum_admin_tenant_name "$SERVICE_TENANT_NAME"
iniset $NOVA_CONF DEFAULT quantum_url "http://$Q_HOST:$Q_PORT"
+ if [[ "$Q_USE_SECGROUP" == "True" ]]; then
+ LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver
+ iniset $NOVA_CONF DEFAULT security_group_api quantum
+ fi
+
# set NOVA_VIF_DRIVER and optionally set options in nova_conf
quantum_plugin_create_nova_conf
@@ -367,13 +377,13 @@
# Start running processes, including screen
function start_quantum_agents() {
# Start up the quantum agents if enabled
- screen_it q-agt "python $AGENT_BINARY --config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE"
- screen_it q-dhcp "python $AGENT_DHCP_BINARY --config-file $QUANTUM_CONF --config-file=$Q_DHCP_CONF_FILE"
- screen_it q-meta "python $AGENT_META_BINARY --config-file $QUANTUM_CONF --config-file=$Q_META_CONF_FILE"
- screen_it q-l3 "python $AGENT_L3_BINARY --config-file $QUANTUM_CONF --config-file=$Q_L3_CONF_FILE"
+ screen_it q-agt "cd $QUANTUM_DIR && python $AGENT_BINARY --config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE"
+ screen_it q-dhcp "cd $QUANTUM_DIR && python $AGENT_DHCP_BINARY --config-file $QUANTUM_CONF --config-file=$Q_DHCP_CONF_FILE"
+ screen_it q-l3 "cd $QUANTUM_DIR && python $AGENT_L3_BINARY --config-file $QUANTUM_CONF --config-file=$Q_L3_CONF_FILE"
+ screen_it q-meta "cd $QUANTUM_DIR && python $AGENT_META_BINARY --config-file $QUANTUM_CONF --config-file=$Q_META_CONF_FILE"
if is_service_enabled q-lbaas; then
- screen_it q-lbaas "python $AGENT_LBAAS_BINARY --config-file $QUANTUM_CONF --config-file=$LBAAS_AGENT_CONF_FILENAME"
+ screen_it q-lbaas "cd $QUANTUM_DIR && python $AGENT_LBAAS_BINARY --config-file $QUANTUM_CONF --config-file=$LBAAS_AGENT_CONF_FILENAME"
fi
}
@@ -385,9 +395,9 @@
fi
}
-# _cleanup_quantum() - Remove residual data files, anything left over from previous
+# cleanup_quantum() - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up
-function _cleanup_quantum() {
+function cleanup_quantum() {
:
}
@@ -417,6 +427,7 @@
cp $QUANTUM_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE
iniset /$Q_PLUGIN_CONF_FILE DATABASE sql_connection `database_connection_url $Q_DB_NAME`
+ iniset $QUANTUM_CONF DEFAULT state_path $DATA_DIR/quantum
_quantum_setup_rootwrap
}
@@ -536,7 +547,6 @@
iniset $QUANTUM_CONF DEFAULT verbose True
iniset $QUANTUM_CONF DEFAULT debug True
- iniset $QUANTUM_CONF DEFAULT state_path $DATA_DIR/quantum
iniset $QUANTUM_CONF DEFAULT policy_file $Q_POLICY_FILE
iniset $QUANTUM_CONF DEFAULT allow_overlapping_ips $Q_ALLOW_OVERLAPPING_IP
@@ -571,7 +581,12 @@
sudo chown -R root:root $Q_CONF_ROOTWRAP_D
sudo chmod 644 $Q_CONF_ROOTWRAP_D/*
# Set up rootwrap.conf, pointing to $QUANTUM_CONF_DIR/rootwrap.d
- sudo cp -p $QUANTUM_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE
+ # location moved in newer versions, prefer new location
+ if test -r $QUANTUM_DIR/etc/quantum/rootwrap.conf; then
+ sudo cp -p $QUANTUM_DIR/etc/quantum/rootwrap.conf $Q_RR_CONF_FILE
+ else
+ sudo cp -p $QUANTUM_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE
+ fi
sudo sed -e "s:^filters_path=.*$:filters_path=$Q_CONF_ROOTWRAP_D:" -i $Q_RR_CONF_FILE
sudo chown root:root $Q_RR_CONF_FILE
sudo chmod 0644 $Q_RR_CONF_FILE
@@ -643,9 +658,9 @@
function setup_quantum_debug() {
if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then
public_net_id=`_get_net_id $PUBLIC_NETWORK_NAME`
- quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create $public_net_id
+ quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create --device-owner compute $public_net_id
private_net_id=`_get_net_id $PRIVATE_NETWORK_NAME`
- quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create $private_net_id
+ quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create --device-owner compute $private_net_id
fi
}
diff --git a/lib/quantum_plugins/README.md b/lib/quantum_plugins/README.md
index 5411de0..05bfb85 100644
--- a/lib/quantum_plugins/README.md
+++ b/lib/quantum_plugins/README.md
@@ -32,3 +32,5 @@
* ``quantum_plugin_configure_plugin_agent``
* ``quantum_plugin_configure_service``
* ``quantum_plugin_setup_interface_driver``
+* ``has_quantum_plugin_security_group``:
+ return 0 if the plugin supports quantum security groups, otherwise return 1
diff --git a/lib/quantum_plugins/bigswitch_floodlight b/lib/quantum_plugins/bigswitch_floodlight
index 7d3fd96..4857f49 100644
--- a/lib/quantum_plugins/bigswitch_floodlight
+++ b/lib/quantum_plugins/bigswitch_floodlight
@@ -51,5 +51,10 @@
iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver
}
+function has_quantum_plugin_security_group() {
+ # 1 means False here
+ return 1
+}
+
# Restore xtrace
$MY_XTRACE
diff --git a/lib/quantum_plugins/brocade b/lib/quantum_plugins/brocade
index ac91143..6e26ad7 100644
--- a/lib/quantum_plugins/brocade
+++ b/lib/quantum_plugins/brocade
@@ -45,5 +45,10 @@
iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver
}
+function has_quantum_plugin_security_group() {
+ # 0 means True here
+ return 0
+}
+
# Restore xtrace
$BRCD_XTRACE
diff --git a/lib/quantum_plugins/linuxbridge b/lib/quantum_plugins/linuxbridge
index 11bc585..324e255 100644
--- a/lib/quantum_plugins/linuxbridge
+++ b/lib/quantum_plugins/linuxbridge
@@ -48,6 +48,11 @@
if [[ "$LB_INTERFACE_MAPPINGS" != "" ]]; then
iniset /$Q_PLUGIN_CONF_FILE LINUX_BRIDGE physical_interface_mappings $LB_INTERFACE_MAPPINGS
fi
+ if [[ "$Q_USE_SECGROUP" == "True" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver quantum.agent.linux.iptables_firewall.IptablesFirewallDriver
+ else
+ iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver quantum.agent.firewall.NoopFirewallDriver
+ fi
AGENT_BINARY="$QUANTUM_DIR/bin/quantum-linuxbridge-agent"
}
@@ -76,5 +81,10 @@
iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver
}
+function has_quantum_plugin_security_group() {
+ # 0 means True here
+ return 0
+}
+
# Restore xtrace
$MY_XTRACE
diff --git a/lib/quantum_plugins/nicira b/lib/quantum_plugins/nicira
index 8c150b1..6eefb02 100644
--- a/lib/quantum_plugins/nicira
+++ b/lib/quantum_plugins/nicira
@@ -141,5 +141,10 @@
iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver
}
+function has_quantum_plugin_security_group() {
+ # 0 means True here
+ return 0
+}
+
# Restore xtrace
$MY_XTRACE
diff --git a/lib/quantum_plugins/openvswitch b/lib/quantum_plugins/openvswitch
index dda1239..ab16483 100644
--- a/lib/quantum_plugins/openvswitch
+++ b/lib/quantum_plugins/openvswitch
@@ -8,7 +8,7 @@
source $TOP_DIR/lib/quantum_plugins/ovs_base
function quantum_plugin_create_nova_conf() {
- NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"}
+ _quantum_ovs_base_configure_nova_vif_driver
if [ "$VIRT_DRIVER" = 'xenserver' ]; then
iniset $NOVA_CONF DEFAULT xenapi_vif_driver nova.virt.xenapi.vif.XenAPIOpenVswitchDriver
iniset $NOVA_CONF DEFAULT xenapi_ovs_integration_bridge $FLAT_NETWORK_BRIDGE
@@ -43,6 +43,7 @@
# Setup integration bridge
OVS_BRIDGE=${OVS_BRIDGE:-br-int}
_quantum_ovs_base_setup_bridge $OVS_BRIDGE
+ _quantum_ovs_base_configure_firewall_driver
# Setup agent for tunneling
if [[ "$OVS_ENABLE_TUNNELING" = "True" ]]; then
@@ -139,5 +140,9 @@
iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver
}
+function has_quantum_plugin_security_group() {
+ return 0
+}
+
# Restore xtrace
$MY_XTRACE
diff --git a/lib/quantum_plugins/ovs_base b/lib/quantum_plugins/ovs_base
index ab988d9..2ada0db 100644
--- a/lib/quantum_plugins/ovs_base
+++ b/lib/quantum_plugins/ovs_base
@@ -39,6 +39,14 @@
iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE
}
+function _quantum_ovs_base_configure_firewall_driver() {
+ if [[ "$Q_USE_SECGROUP" == "True" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver quantum.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
+ else
+ iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver quantum.agent.firewall.NoopFirewallDriver
+ fi
+}
+
function _quantum_ovs_base_configure_l3_agent() {
iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE
@@ -48,5 +56,15 @@
sudo ip addr flush dev $PUBLIC_BRIDGE
}
+function _quantum_ovs_base_configure_nova_vif_driver() {
+ # The hybrid VIF driver needs to be specified when Quantum security groups
+ # are enabled (until vif_security attributes are supported in the VIF extension)
+ if [[ "$Q_USE_SECGROUP" == "True" ]]; then
+ NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver"}
+ else
+ NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"}
+ fi
+}
+
# Restore xtrace
$MY_XTRACE
diff --git a/lib/quantum_plugins/ryu b/lib/quantum_plugins/ryu
index d1d7382..1139232 100644
--- a/lib/quantum_plugins/ryu
+++ b/lib/quantum_plugins/ryu
@@ -9,7 +9,7 @@
source $TOP_DIR/lib/quantum_thirdparty/ryu # for configuration value
function quantum_plugin_create_nova_conf() {
- NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver"}
+ _quantum_ovs_base_configure_nova_vif_driver
iniset $NOVA_CONF DEFAULT libvirt_ovs_integration_bridge "$OVS_BRIDGE"
}
@@ -52,6 +52,8 @@
fi
iniset /$Q_PLUGIN_CONF_FILE OVS integration_bridge $OVS_BRIDGE
AGENT_BINARY="$QUANTUM_DIR/quantum/plugins/ryu/agent/ryu_quantum_agent.py"
+
+ _quantum_ovs_base_configure_firewall_driver
}
function quantum_plugin_configure_service() {
@@ -64,5 +66,10 @@
iniset $conf_file DEFAULT ovs_use_veth True
}
+function has_quantum_plugin_security_group() {
+ # 0 means True here
+ return 0
+}
+
# Restore xtrace
$MY_XTRACE
diff --git a/lib/rpc_backend b/lib/rpc_backend
index 02614ea..bbd51f0 100644
--- a/lib/rpc_backend
+++ b/lib/rpc_backend
@@ -43,6 +43,38 @@
fi
}
+# clean up after rpc backend - eradicate all traces so changing backends
+# produces a clean switch
+function cleanup_rpc_backend {
+ if is_service_enabled rabbit; then
+ # Obliterate rabbitmq-server
+ uninstall_package rabbitmq-server
+ sudo killall epmd
+ if is_ubuntu; then
+ # And the Erlang runtime too
+ sudo aptitude purge -y ~nerlang
+ fi
+ elif is_service_enabled qpid; then
+ if is_fedora; then
+ uninstall_package qpid-cpp-server-daemon
+ elif is_ubuntu; then
+ uninstall_package qpidd
+ else
+ exit_distro_not_supported "qpid installation"
+ fi
+ elif is_service_enabled zeromq; then
+ if is_fedora; then
+ uninstall_package zeromq python-zmq
+ elif is_ubuntu; then
+ uninstall_package libzmq1 python-zmq
+ elif is_suse; then
+ uninstall_package libzmq1 python-pyzmq
+ else
+ exit_distro_not_supported "zeromq installation"
+ fi
+ fi
+}
+
# install rpc backend
function install_rpc_backend() {
if is_service_enabled rabbit; then
diff --git a/lib/swift b/lib/swift
index 2f772fb..d50b554 100644
--- a/lib/swift
+++ b/lib/swift
@@ -28,6 +28,7 @@
SWIFT_DIR=$DEST/swift
SWIFTCLIENT_DIR=$DEST/python-swiftclient
SWIFT_AUTH_CACHE_DIR=${SWIFT_AUTH_CACHE_DIR:-/var/cache/swift}
+SWIFT3_DIR=$DEST/swift3
# TODO: add logging to different location.
@@ -35,9 +36,16 @@
# Default is the common DevStack data directory.
SWIFT_DATA_DIR=${SWIFT_DATA_DIR:-${DATA_DIR}/swift}
-# Set ``SWIFT_CONFIG_DIR`` to the location of the configuration files.
+# Set ``SWIFT_CONF_DIR`` to the location of the configuration files.
# Default is ``/etc/swift``.
-SWIFT_CONFIG_DIR=${SWIFT_CONFIG_DIR:-/etc/swift}
+# TODO(dtroyer): remove SWIFT_CONFIG_DIR after cutting stable/grizzly
+SWIFT_CONF_DIR=${SWIFT_CONF_DIR:-${SWIFT_CONFIG_DIR:-/etc/swift}}
+
+if is_service_enabled s-proxy && is_service_enabled swift3; then
+ # If we are using swift3, we can default the s3 port to swift instead
+ # of nova-objectstore
+ S3_SERVICE_PORT=${S3_SERVICE_PORT:-8080}
+fi
# DevStack will create a loop-back disk formatted as XFS to store the
# swift data. Set ``SWIFT_LOOPBACK_DISK_SIZE`` to the disk size in
@@ -45,6 +53,10 @@
# Default is 1 gigabyte.
SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-1000000}
+# Set ``SWIFT_EXTRAS_MIDDLEWARE`` to the list of extra middlewares to load
+# into the proxy pipeline. Default is ``tempurl formpost staticweb bulk``.
+SWIFT_EXTRAS_MIDDLEWARE=${SWIFT_EXTRAS_MIDDLEWARE:-tempurl formpost staticweb bulk}
+
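For example, a `localrc` could trim the extra proxy middleware down to just tempurl (illustrative):

    SWIFT_EXTRAS_MIDDLEWARE="tempurl"
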
# The ring uses a configurable number of bits from a path’s MD5 hash as
# a partition index that designates a device. The number of bits kept
# from the hash is known as the partition power, and 2 to the partition
@@ -56,17 +68,18 @@
SWIFT_PARTITION_POWER_SIZE=${SWIFT_PARTITION_POWER_SIZE:-9}
# Set ``SWIFT_REPLICAS`` to configure how many replicas are to be
-# configured for your Swift cluster. By default the three replicas would need a
-# bit of IO and Memory on a VM you may want to lower that to 1 if you want to do
-# only some quick testing.
-SWIFT_REPLICAS=${SWIFT_REPLICAS:-3}
+# configured for your Swift cluster. By default only one replica is
+# configured, since that is far less CPU and memory intensive. If you
+# plan to test swift replication you may want to set this to 3.
+SWIFT_REPLICAS=${SWIFT_REPLICAS:-1}
SWIFT_REPLICAS_SEQ=$(seq ${SWIFT_REPLICAS})
# Set ``OBJECT_PORT_BASE``, ``CONTAINER_PORT_BASE``, ``ACCOUNT_PORT_BASE``
# Port bases used in port number calculation for the service "nodes"
# The specified port number will be used, the additional ports calculated by
# base_port + node_num * 10
-OBJECT_PORT_BASE=6010
+OBJECT_PORT_BASE=6013
CONTAINER_PORT_BASE=6011
ACCOUNT_PORT_BASE=6012
@@ -76,18 +89,19 @@
# cleanup_swift() - Remove residual data files
function cleanup_swift() {
- rm -f ${SWIFT_CONFIG_DIR}{*.builder,*.ring.gz,backups/*.builder,backups/*.ring.gz}
+ rm -f ${SWIFT_CONF_DIR}{*.builder,*.ring.gz,backups/*.builder,backups/*.ring.gz}
if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then
sudo umount ${SWIFT_DATA_DIR}/drives/sdb1
fi
if [[ -e ${SWIFT_DATA_DIR}/drives/images/swift.img ]]; then
rm ${SWIFT_DATA_DIR}/drives/images/swift.img
fi
+ rm -rf ${SWIFT_DATA_DIR}/run/
}
# configure_swift() - Set config files, create data dirs and loop image
function configure_swift() {
- local swift_auth_server
+ local swift_pipeline=" "
local node_number
local swift_node_config
local swift_log_dir
@@ -143,13 +157,13 @@
sudo chown -R $USER: ${node}
done
- sudo mkdir -p ${SWIFT_CONFIG_DIR}/{object,container,account}-server
- sudo chown -R $USER: ${SWIFT_CONFIG_DIR}
+ sudo mkdir -p ${SWIFT_CONF_DIR}/{object,container,account}-server
+ sudo chown -R $USER: ${SWIFT_CONF_DIR}
- if [[ "$SWIFT_CONFIG_DIR" != "/etc/swift" ]]; then
+ if [[ "$SWIFT_CONF_DIR" != "/etc/swift" ]]; then
# Some swift tools are hard-coded to use ``/etc/swift`` and are apparently not going to be fixed.
# Create a symlink if the config dir is moved
- sudo ln -sf ${SWIFT_CONFIG_DIR} /etc/swift
+ sudo ln -sf ${SWIFT_CONF_DIR} /etc/swift
fi
# Swift use rsync to synchronize between all the different
@@ -180,14 +194,14 @@
swift_auth_server=tempauth
fi
- SWIFT_CONFIG_PROXY_SERVER=${SWIFT_CONFIG_DIR}/proxy-server.conf
+ SWIFT_CONFIG_PROXY_SERVER=${SWIFT_CONF_DIR}/proxy-server.conf
cp ${SWIFT_DIR}/etc/proxy-server.conf-sample ${SWIFT_CONFIG_PROXY_SERVER}
iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user
iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user ${USER}
iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT swift_dir
- iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT swift_dir ${SWIFT_CONFIG_DIR}
+ iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT swift_dir ${SWIFT_CONF_DIR}
iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT workers
iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT workers 1
@@ -198,10 +212,21 @@
iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port
iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT:-8080}
- # Only enable Swift3 if we have it enabled in ENABLED_SERVICES
- is_service_enabled swift3 && swift3=swift3 || swift3=""
-
- iniset ${SWIFT_CONFIG_PROXY_SERVER} pipeline:main pipeline "catch_errors healthcheck cache ratelimit ${swift3} ${swift_auth_server} proxy-logging proxy-server"
+ # By default Swift is installed with the tempauth middleware, which
+ # ships with default usernames and passwords. If keystone is enabled,
+ # swift is configured to authenticate against it instead.
+ if is_service_enabled key;then
+ if is_service_enabled swift3;then
+ swift_pipeline=" s3token swift3 "
+ fi
+ swift_pipeline+=" authtoken keystoneauth "
+ else
+ if is_service_enabled swift3;then
+ swift_pipeline=" swift3 "
+ fi
+ swift_pipeline+=" tempauth "
+ fi
+ sed -i "/^pipeline/ { s/tempauth/${swift_pipeline} ${SWIFT_EXTRAS_MIDDLEWARE}/ ;}" ${SWIFT_CONFIG_PROXY_SERVER}
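
Assuming the sample `proxy-server.conf` pipeline places tempauth between ratelimit and proxy-logging (as the previously hard-coded pipeline above did), the substitution yields, with Keystone and swift3 enabled, roughly:

    pipeline = catch_errors healthcheck cache ratelimit s3token swift3 authtoken keystoneauth tempurl formpost staticweb bulk proxy-logging proxy-server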
iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server account_autocreate true
@@ -237,8 +262,8 @@
EOF
fi
- cp ${SWIFT_DIR}/etc/swift.conf-sample ${SWIFT_CONFIG_DIR}/swift.conf
- iniset ${SWIFT_CONFIG_DIR}/swift.conf swift-hash swift_hash_path_suffix ${SWIFT_HASH}
+ cp ${SWIFT_DIR}/etc/swift.conf-sample ${SWIFT_CONF_DIR}/swift.conf
+ iniset ${SWIFT_CONF_DIR}/swift.conf swift-hash swift_hash_path_suffix ${SWIFT_HASH}
# This function generates an object/account/proxy configuration
# emulating 4 nodes on different ports
@@ -257,7 +282,7 @@
iniset ${swift_node_config} DEFAULT bind_port ${bind_port}
iniuncomment ${swift_node_config} DEFAULT swift_dir
- iniset ${swift_node_config} DEFAULT swift_dir ${SWIFT_CONFIG_DIR}
+ iniset ${swift_node_config} DEFAULT swift_dir ${SWIFT_CONF_DIR}
iniuncomment ${swift_node_config} DEFAULT devices
iniset ${swift_node_config} DEFAULT devices ${node_path}
@@ -273,7 +298,7 @@
}
for node_number in ${SWIFT_REPLICAS_SEQ}; do
- swift_node_config=${SWIFT_CONFIG_DIR}/object-server/${node_number}.conf
+ swift_node_config=${SWIFT_CONF_DIR}/object-server/${node_number}.conf
cp ${SWIFT_DIR}/etc/object-server.conf-sample ${swift_node_config}
generate_swift_config ${swift_node_config} ${node_number} $[OBJECT_PORT_BASE + 10 * (node_number - 1)]
iniset ${swift_node_config} filter:recon recon_cache_path ${SWIFT_DATA_DIR}/cache
@@ -281,14 +306,14 @@
# modification and make sure it works for new sections.
sed -i -e "s,#[ ]*recon_cache_path .*,recon_cache_path = ${SWIFT_DATA_DIR}/cache," ${swift_node_config}
- swift_node_config=${SWIFT_CONFIG_DIR}/container-server/${node_number}.conf
+ swift_node_config=${SWIFT_CONF_DIR}/container-server/${node_number}.conf
cp ${SWIFT_DIR}/etc/container-server.conf-sample ${swift_node_config}
generate_swift_config ${swift_node_config} ${node_number} $[CONTAINER_PORT_BASE + 10 * (node_number - 1)]
iniuncomment ${swift_node_config} app:container-server allow_versions
iniset ${swift_node_config} app:container-server allow_versions "true"
sed -i -e "s,#[ ]*recon_cache_path .*,recon_cache_path = ${SWIFT_DATA_DIR}/cache," ${swift_node_config}
- swift_node_config=${SWIFT_CONFIG_DIR}/account-server/${node_number}.conf
+ swift_node_config=${SWIFT_CONF_DIR}/account-server/${node_number}.conf
cp ${SWIFT_DIR}/etc/account-server.conf-sample ${swift_node_config}
generate_swift_config ${swift_node_config} ${node_number} $[ACCOUNT_PORT_BASE + 10 * (node_number - 1)]
sed -i -e "s,#[ ]*recon_cache_path .*,recon_cache_path = ${SWIFT_DATA_DIR}/cache," ${swift_node_config}
@@ -315,7 +340,7 @@
# This is where we create three different rings for swift with
# different object servers binding on different ports.
- pushd ${SWIFT_CONFIG_DIR} >/dev/null && {
+ pushd ${SWIFT_CONF_DIR} >/dev/null && {
rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz
@@ -362,19 +387,36 @@
sudo systemctl start xinetd.service
fi
- # First spawn all the swift services then kill the
- # proxy service so we can run it in foreground in screen.
- # ``swift-init ... {stop|restart}`` exits with '1' if no servers are running,
- # ignore it just in case
+ # By default, with only one replica, we launch the proxy, container,
+ # account and object servers in the foreground in screen and the other
+ # services in the background. If SWIFT_REPLICAS is set to something
+ # greater than one, we first spawn all the swift services and then stop
+ # the proxy service so we can run it in the foreground in screen.
+ # ``swift-init ... {stop|restart}`` exits with '1' if no servers are
+ # running; ignore it just in case.
swift-init --run-dir=${SWIFT_DATA_DIR}/run all restart || true
- swift-init --run-dir=${SWIFT_DATA_DIR}/run proxy stop || true
- screen_it swift "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONFIG_DIR}/proxy-server.conf -v"
+ if [[ ${SWIFT_REPLICAS} == 1 ]]; then
+ todo="object container account"
+ fi
+ for type in proxy ${todo}; do
+ swift-init --run-dir=${SWIFT_DATA_DIR}/run ${type} stop || true
+ done
+ screen_it s-proxy "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONF_DIR}/proxy-server.conf -v"
+ if [[ ${SWIFT_REPLICAS} == 1 ]]; then
+ for type in object container account;do
+ screen_it s-${type} "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-${type}-server ${SWIFT_CONF_DIR}/${type}-server/1.conf -v"
+ done
+ fi
}
# stop_swift() - Stop running processes (non-screen)
function stop_swift() {
# screen normally killed by unstack.sh
- swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true
+ if type -p swift-init >/dev/null; then
+ swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true
+ fi
+ # Dump the proxy server
+ sudo pkill -f swift-proxy-server
}
# Restore xtrace
diff --git a/stack.sh b/stack.sh
index c39d855..cfce6be 100755
--- a/stack.sh
+++ b/stack.sh
@@ -223,27 +223,9 @@
FIXED_NETWORK_SIZE=${FIXED_NETWORK_SIZE:-256}
NETWORK_GATEWAY=${NETWORK_GATEWAY:-10.0.0.1}
-# Find the interface used for the default route
-HOST_IP_IFACE=${HOST_IP_IFACE:-$(ip route | sed -n '/^default/{ s/.*dev \(\w\+\)\s\+.*/\1/; p; }' | head -1)}
-# Search for an IP unless an explicit is set by ``HOST_IP`` environment variable
-if [ -z "$HOST_IP" -o "$HOST_IP" == "dhcp" ]; then
- HOST_IP=""
- HOST_IPS=`LC_ALL=C ip -f inet addr show ${HOST_IP_IFACE} | awk '/inet/ {split($2,parts,"/"); print parts[1]}'`
- for IP in $HOST_IPS; do
- # Attempt to filter out IP addresses that are part of the fixed and
- # floating range. Note that this method only works if the ``netaddr``
- # python library is installed. If it is not installed, an error
- # will be printed and the first IP from the interface will be used.
- # If that is not correct set ``HOST_IP`` in ``localrc`` to the correct
- # address.
- if ! (address_in_net $IP $FIXED_RANGE || address_in_net $IP $FLOATING_RANGE); then
- HOST_IP=$IP
- break;
- fi
- done
- if [ "$HOST_IP" == "" ]; then
- die $LINENO "Could not determine host ip address. Either localrc specified dhcp on ${HOST_IP_IFACE} or defaulted"
- fi
+HOST_IP=$(get_default_host_ip $FIXED_RANGE $FLOATING_RANGE "$HOST_IP_IFACE" "$HOST_IP")
+if [ "$HOST_IP" == "" ]; then
+ die $LINENO "Could not determine host ip address. Either localrc specified dhcp on ${HOST_IP_IFACE} or defaulted"
fi
# Allow the use of an alternate hostname (such as localhost/127.0.0.1) for service endpoints.
@@ -287,19 +269,12 @@
# Set the destination directories for OpenStack projects
HORIZON_DIR=$DEST/horizon
OPENSTACKCLIENT_DIR=$DEST/python-openstackclient
-NOVNC_DIR=$DEST/noVNC
-SPICE_DIR=$DEST/spice-html5
-SWIFT3_DIR=$DEST/swift3
-# Should cinder perform secure deletion of volumes?
-# Defaults to true, can be set to False to avoid this bug when testing:
-# https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1023755
-CINDER_SECURE_DELETE=`trueorfalse True $CINDER_SECURE_DELETE`
-# Name of the LVM volume group to use/create for iscsi volumes
-VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes}
-VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-}
-INSTANCE_NAME_PREFIX=${INSTANCE_NAME_PREFIX:-instance-}
+# Interactive Configuration
+# -------------------------
+
+# Do all interactive config up front before the logging spew begins
# Generic helper to configure passwords
function read_password {
@@ -344,66 +319,7 @@
}
-# Nova Network Configuration
-# --------------------------
-
-# FIXME: more documentation about why these are important options. Also
-# we should make sure we use the same variable names as the option names.
-
-if [ "$VIRT_DRIVER" = 'xenserver' ]; then
- PUBLIC_INTERFACE_DEFAULT=eth3
- # Allow ``build_domU.sh`` to specify the flat network bridge via kernel args
- FLAT_NETWORK_BRIDGE_DEFAULT=$(grep -o 'flat_network_bridge=[[:alnum:]]*' /proc/cmdline | cut -d= -f 2 | sort -u)
- GUEST_INTERFACE_DEFAULT=eth1
-elif [ "$VIRT_DRIVER" = 'baremetal' ]; then
- PUBLIC_INTERFACE_DEFAULT=eth0
- FLAT_NETWORK_BRIDGE_DEFAULT=br100
- FLAT_INTERFACE=${FLAT_INTERFACE:-eth0}
- FORCE_DHCP_RELEASE=${FORCE_DHCP_RELEASE:-False}
- NET_MAN=${NET_MAN:-FlatManager}
- STUB_NETWORK=${STUB_NETWORK:-False}
-else
- PUBLIC_INTERFACE_DEFAULT=br100
- FLAT_NETWORK_BRIDGE_DEFAULT=br100
- GUEST_INTERFACE_DEFAULT=eth0
-fi
-
-PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-$PUBLIC_INTERFACE_DEFAULT}
-NET_MAN=${NET_MAN:-FlatDHCPManager}
-EC2_DMZ_HOST=${EC2_DMZ_HOST:-$SERVICE_HOST}
-FLAT_NETWORK_BRIDGE=${FLAT_NETWORK_BRIDGE:-$FLAT_NETWORK_BRIDGE_DEFAULT}
-VLAN_INTERFACE=${VLAN_INTERFACE:-$GUEST_INTERFACE_DEFAULT}
-FORCE_DHCP_RELEASE=${FORCE_DHCP_RELEASE:-True}
-
-# Test floating pool and range are used for testing. They are defined
-# here until the admin APIs can replace nova-manage
-TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test}
-TEST_FLOATING_RANGE=${TEST_FLOATING_RANGE:-192.168.253.0/29}
-
-# ``MULTI_HOST`` is a mode where each compute node runs its own network node. This
-# allows network operations and routing for a VM to occur on the server that is
-# running the VM - removing a SPOF and bandwidth bottleneck.
-MULTI_HOST=`trueorfalse False $MULTI_HOST`
-
-# If you are using the FlatDHCP network mode on multiple hosts, set the
-# ``FLAT_INTERFACE`` variable but make sure that the interface doesn't already
-# have an IP or you risk breaking things.
-#
-# **DHCP Warning**: If your flat interface device uses DHCP, there will be a
-# hiccup while the network is moved from the flat interface to the flat network
-# bridge. This will happen when you launch your first instance. Upon launch
-# you will lose all connectivity to the node, and the VM launch will probably
-# fail.
-#
-# If you are running on a single node and don't need to access the VMs from
-# devices other than that node, you can set ``FLAT_INTERFACE=``
-# This will stop nova from bridging any interfaces into ``FLAT_NETWORK_BRIDGE``.
-FLAT_INTERFACE=${FLAT_INTERFACE-$GUEST_INTERFACE_DEFAULT}
-
-## FIXME(ja): should/can we check that FLAT_INTERFACE is sane?
-
# Database Configuration
-# ----------------------
# To select between database backends, add the following to ``localrc``:
#
@@ -416,8 +332,7 @@
initialize_database_backends && echo "Using $DATABASE_TYPE database backend" || echo "No database enabled"
-# RabbitMQ or Qpid
-# --------------------------
+# Queue Configuration
# Rabbit connection info
if is_service_enabled rabbit; then
@@ -425,53 +340,45 @@
read_password RABBIT_PASSWORD "ENTER A PASSWORD TO USE FOR RABBIT."
fi
-if is_service_enabled swift; then
- # If we are using swift3, we can default the s3 port to swift instead
- # of nova-objectstore
- if is_service_enabled swift3;then
- S3_SERVICE_PORT=${S3_SERVICE_PORT:-8080}
+
+# Keystone
+
+if is_service_enabled key; then
+ # The ``SERVICE_TOKEN`` is used to bootstrap the Keystone database. It is
+ # just a string and is not a 'real' Keystone token.
+ read_password SERVICE_TOKEN "ENTER A SERVICE_TOKEN TO USE FOR THE SERVICE ADMIN TOKEN."
+ # Services authenticate to Identity with servicename/``SERVICE_PASSWORD``
+ read_password SERVICE_PASSWORD "ENTER A SERVICE_PASSWORD TO USE FOR THE SERVICE AUTHENTICATION."
+ # Horizon currently truncates usernames and passwords at 20 characters
+ read_password ADMIN_PASSWORD "ENTER A PASSWORD TO USE FOR HORIZON AND KEYSTONE (20 CHARS OR LESS)."
+
+ # Keystone can now optionally install OpenLDAP by enabling the ``ldap``
+ # service in ``localrc`` (e.g. ``enable_service ldap``).
+ # To clean out the Keystone contents in OpenLDAP set ``KEYSTONE_CLEAR_LDAP``
+ # to ``yes`` (e.g. ``KEYSTONE_CLEAR_LDAP=yes``) in ``localrc``. To enable the
+ # Keystone Identity Driver (``keystone.identity.backends.ldap.Identity``)
+ # set ``KEYSTONE_IDENTITY_BACKEND`` to ``ldap`` (e.g.
+ # ``KEYSTONE_IDENTITY_BACKEND=ldap``) in ``localrc``.
+
+ # only request ldap password if the service is enabled
+ if is_service_enabled ldap; then
+ read_password LDAP_PASSWORD "ENTER A PASSWORD TO USE FOR LDAP"
fi
+fi
+
+
+# Swift
+
+if is_service_enabled s-proxy; then
# We only ask for Swift Hash if we have enabled swift service.
# ``SWIFT_HASH`` is a random unique string for a swift cluster that
# can never change.
read_password SWIFT_HASH "ENTER A RANDOM SWIFT HASH."
fi
-# Set default port for nova-objectstore
-S3_SERVICE_PORT=${S3_SERVICE_PORT:-3333}
-
-# Keystone
-# --------
-
-# The ``SERVICE_TOKEN`` is used to bootstrap the Keystone database. It is
-# just a string and is not a 'real' Keystone token.
-read_password SERVICE_TOKEN "ENTER A SERVICE_TOKEN TO USE FOR THE SERVICE ADMIN TOKEN."
-# Services authenticate to Identity with servicename/``SERVICE_PASSWORD``
-read_password SERVICE_PASSWORD "ENTER A SERVICE_PASSWORD TO USE FOR THE SERVICE AUTHENTICATION."
-# Horizon currently truncates usernames and passwords at 20 characters
-read_password ADMIN_PASSWORD "ENTER A PASSWORD TO USE FOR HORIZON AND KEYSTONE (20 CHARS OR LESS)."
-# Keystone can now optionally install OpenLDAP by adding ldap to the list
-# of enabled services in the localrc file (e.g. ENABLED_SERVICES=key,ldap).
-# If OpenLDAP has already been installed but you need to clear out
-# the Keystone contents of LDAP set KEYSTONE_CLEAR_LDAP to yes
-# (e.g. KEYSTONE_CLEAR_LDAP=yes ) in the localrc file. To enable the
-# Keystone Identity Driver (keystone.identity.backends.ldap.Identity)
-# set KEYSTONE_IDENTITY_BACKEND to ldap (e.g. KEYSTONE_IDENTITY_BACKEND=ldap)
-# in the localrc file.
-
-
-# only request ldap password if the service is enabled
-if is_service_enabled ldap; then
- read_password LDAP_PASSWORD "ENTER A PASSWORD TO USE FOR LDAP"
-fi
-
-# Set the tenant for service accounts in Keystone
-SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service}
-
-
-# Log files
-# ---------
+# Configure logging
+# -----------------
# Draw a spinner so the user knows something is happening
function spinner() {
@@ -662,12 +569,12 @@
git_clone $OPENSTACKCLIENT_REPO $OPENSTACKCLIENT_DIR $OPENSTACKCLIENT_BRANCH
# glance, swift middleware and nova api need keystone middleware
-if is_service_enabled key g-api n-api swift; then
+if is_service_enabled key g-api n-api s-proxy; then
# unified auth system (manages accounts/tokens)
install_keystone
fi
-if is_service_enabled swift; then
+if is_service_enabled s-proxy; then
install_swiftclient
install_swift
if is_service_enabled swift3; then
@@ -719,21 +626,23 @@
echo_summary "Configuring OpenStack projects"
-# Set up our checkouts so they are installed into python path
-# allowing ``import nova`` or ``import glance.client``
+# Set up our checkouts so they are installed in the Python path
configure_keystoneclient
configure_novaclient
setup_develop $OPENSTACKCLIENT_DIR
-if is_service_enabled key g-api n-api swift; then
+
+if is_service_enabled key g-api n-api s-proxy; then
configure_keystone
fi
-if is_service_enabled swift; then
+
+if is_service_enabled s-proxy; then
configure_swift
configure_swiftclient
if is_service_enabled swift3; then
setup_develop $SWIFT3_DIR
fi
fi
+
if is_service_enabled g-api n-api; then
configure_glance
fi
@@ -743,19 +652,25 @@
configure_glanceclient
if is_service_enabled nova; then
+ # First clean up old instances
+ cleanup_nova
configure_nova
fi
+
if is_service_enabled horizon; then
configure_horizon
fi
+
if is_service_enabled quantum; then
setup_quantumclient
setup_quantum
fi
+
if is_service_enabled heat; then
configure_heat
configure_heatclient
fi
+
if is_service_enabled cinder; then
configure_cinder
fi
@@ -777,6 +692,7 @@
# don't be naive and add to existing line!
fi
+
# Syslog
# ------
@@ -816,8 +732,17 @@
# Configure screen
# ----------------
-if [ -z "$SCREEN_HARDSTATUS" ]; then
- SCREEN_HARDSTATUS='%{= .} %-Lw%{= .}%> %n%f %t*%{= .}%+Lw%< %-=%{g}(%{d}%H/%l%{g})'
+USE_SCREEN=$(trueorfalse True $USE_SCREEN)
+if [[ "$USE_SCREEN" == "True" ]]; then
+ # Create a new named screen to run processes in
+ screen -d -m -S $SCREEN_NAME -t shell -s /bin/bash
+ sleep 1
+
+ # Set a reasonable status bar
+ if [ -z "$SCREEN_HARDSTATUS" ]; then
+ SCREEN_HARDSTATUS='%{= .} %-Lw%{= .}%> %n%f %t*%{= .}%+Lw%< %-=%{g}(%{d}%H/%l%{g})'
+ fi
+ screen -r $SCREEN_NAME -X hardstatus alwayslastline "$SCREEN_HARDSTATUS"
fi
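
# For automated runs the screen session can be skipped entirely by putting
# ``USE_SCREEN=False`` in ``localrc``; when screen is used, the status line can
# be customized by setting ``SCREEN_HARDSTATUS`` there instead. For example
# (illustrative value):
#   USE_SCREEN=False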
# Clear screen rc file
@@ -826,12 +751,6 @@
echo -n > $SCREENRC
fi
-# Create a new named screen to run processes in
-screen -d -m -S $SCREEN_NAME -t shell -s /bin/bash
-sleep 1
-
-# Set a reasonable status bar
-screen -r $SCREEN_NAME -X hardstatus alwayslastline "$SCREEN_HARDSTATUS"
# Initialize the directory for service status check
init_service_check
@@ -911,7 +830,7 @@
init_glance
# Store the images in swift if enabled.
- if is_service_enabled swift; then
+ if is_service_enabled s-proxy; then
iniset $GLANCE_API_CONF DEFAULT default_store swift
iniset $GLANCE_API_CONF DEFAULT swift_store_auth_address $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/
iniset $GLANCE_API_CONF DEFAULT swift_store_user $SERVICE_TENANT_NAME:glance
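        # The resulting ``glance-api.conf`` entries then look roughly like
        # (host, port and tenant values here are assumptions based on the
        # usual defaults):
        #   default_store = swift
        #   swift_store_auth_address = http://127.0.0.1:5000/v2.0/
        #   swift_store_user = service:glance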
@@ -970,7 +889,7 @@
# Storage Service
# ---------------
-if is_service_enabled swift; then
+if is_service_enabled s-proxy; then
echo_summary "Configuring Swift"
init_swift
fi
@@ -996,48 +915,6 @@
elif is_service_enabled n-net; then
create_nova_conf_nova_network
fi
- # All nova-compute workers need to know the vnc configuration options
- # These settings don't hurt anything if n-xvnc and n-novnc are disabled
- if is_service_enabled n-cpu; then
- NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_auto.html"}
- iniset $NOVA_CONF DEFAULT novncproxy_base_url "$NOVNCPROXY_URL"
- XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:6081/console"}
- iniset $NOVA_CONF DEFAULT xvpvncproxy_base_url "$XVPVNCPROXY_URL"
- SPICEHTML5PROXY_URL=${SPICEHTML5PROXY_URL:-"http://$SERVICE_HOST:6082/spice_auto.html"}
- iniset $NOVA_CONF spice html5proxy_base_url "$SPICEHTML5PROXY_URL"
- fi
- if [ "$VIRT_DRIVER" = 'xenserver' ]; then
- VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=169.254.0.1}
- else
- VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=127.0.0.1}
- fi
-
- if is_service_enabled n-novnc || is_service_enabled n-xvnc ; then
- # Address on which instance vncservers will listen on compute hosts.
- # For multi-host, this should be the management ip of the compute host.
- VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1}
- iniset $NOVA_CONF DEFAULT vnc_enabled true
- iniset $NOVA_CONF DEFAULT vncserver_listen "$VNCSERVER_LISTEN"
- iniset $NOVA_CONF DEFAULT vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS"
- else
- iniset $NOVA_CONF DEFAULT vnc_enabled false
- fi
-
- if is_service_enabled n-spice; then
- # Address on which instance spiceservers will listen on compute hosts.
- # For multi-host, this should be the management ip of the compute host.
- SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS=127.0.0.1}
- SPICESERVER_LISTEN=${SPICESERVER_LISTEN=127.0.0.1}
- iniset $NOVA_CONF spice enabled true
- iniset $NOVA_CONF spice server_listen "$SPICESERVER_LISTEN"
- iniset $NOVA_CONF spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS"
- else
- iniset $NOVA_CONF spice enabled false
- fi
-
- iniset $NOVA_CONF DEFAULT ec2_dmz_host "$EC2_DMZ_HOST"
- iniset_rpc_backend nova $NOVA_CONF DEFAULT
- iniset $NOVA_CONF DEFAULT glance_api_servers "$GLANCE_HOSTPORT"
# XenServer
@@ -1110,13 +987,14 @@
fi
fi
+
# Launch Services
# ===============
# Only run the services specified in ``ENABLED_SERVICES``
# Launch Swift Services
-if is_service_enabled swift; then
+if is_service_enabled s-proxy; then
echo_summary "Starting Swift"
start_swift
fi
@@ -1199,6 +1077,7 @@
start_heat
fi
+
# Create account rc files
# =======================
@@ -1309,6 +1188,7 @@
# Check the status of running services
service_check
+
# Fin
# ===
diff --git a/stackrc b/stackrc
index d418a0e..34ccfa2 100644
--- a/stackrc
+++ b/stackrc
@@ -30,8 +30,8 @@
# stuffing text into the screen windows so that a developer can use
# ctrl-c, up-arrow, enter to restart the service. Starting services
# this way is slightly unreliable, and a bit slower, so this can
-# be disabled for automated testing by setting this value to false.
-SCREEN_DEV=True
+# be disabled for automated testing by setting this value to False.
+USE_SCREEN=True
# Repositories
# ------------
@@ -196,5 +196,17 @@
# 5Gb default volume backing file size
VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-5130M}
+# Name of the LVM volume group to use/create for iSCSI volumes
+VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes}
+VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-}
+INSTANCE_NAME_PREFIX=${INSTANCE_NAME_PREFIX:-instance-}
+
+# Set default port for nova-objectstore
+S3_SERVICE_PORT=${S3_SERVICE_PORT:-3333}
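+#
+# These defaults only apply when the variable is not already set, so they can
+# be overridden from ``localrc`` or the environment, e.g. (illustrative values):
+#   VOLUME_GROUP=stack-volumes-test
+#   S3_SERVICE_PORT=8080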
+
+# Common network names
PRIVATE_NETWORK_NAME=${PRIVATE_NETWORK_NAME:-"private"}
PUBLIC_NETWORK_NAME=${PUBLIC_NETWORK_NAME:-"nova"}
+
+# Compatibility shim: honor the legacy ``SCREEN_DEV`` name until it is eradicated from CI
+USE_SCREEN=${SCREEN_DEV:-$USE_SCREEN}
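+#
+# With the shim above a legacy setting still works, e.g. running
+# ``SCREEN_DEV=False ./stack.sh`` (illustrative invocation) ends up with
+# ``USE_SCREEN=False``.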
diff --git a/tools/build_bm_multi.sh b/tools/build_bm_multi.sh
index f1242ee..52b9b4e 100755
--- a/tools/build_bm_multi.sh
+++ b/tools/build_bm_multi.sh
@@ -6,7 +6,7 @@
SHELL_AFTER_RUN=no
# Variables common amongst all hosts in the cluster
-COMMON_VARS="MYSQL_HOST=$HEAD_HOST RABBIT_HOST=$HEAD_HOST GLANCE_HOSTPORT=$HEAD_HOST:9292 NET_MAN=FlatDHCPManager FLAT_INTERFACE=eth0 FLOATING_RANGE=$FLOATING_RANGE MULTI_HOST=1 SHELL_AFTER_RUN=$SHELL_AFTER_RUN"
+COMMON_VARS="MYSQL_HOST=$HEAD_HOST RABBIT_HOST=$HEAD_HOST GLANCE_HOSTPORT=$HEAD_HOST:9292 NETWORK_MANAGER=FlatDHCPManager FLAT_INTERFACE=eth0 FLOATING_RANGE=$FLOATING_RANGE MULTI_HOST=1 SHELL_AFTER_RUN=$SHELL_AFTER_RUN"
# Helper to launch containers
function run_bm {
diff --git a/unstack.sh b/unstack.sh
index a086d5c..3ac2985 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -63,7 +63,7 @@
fi
# Swift runs daemons
-if is_service_enabled swift; then
+if is_service_enabled s-proxy; then
stop_swift
cleanup_swift
fi