Merge "Install gettext from apt on Trusty (Ubuntu 14.04)"
diff --git a/exercises/trove.sh b/exercises/trove.sh
index d48d5fe..053f872 100755
--- a/exercises/trove.sh
+++ b/exercises/trove.sh
@@ -35,8 +35,12 @@
is_service_enabled trove || exit 55
-# can we get a list versions
-curl http://$SERVICE_HOST:8779/ 2>/dev/null | grep -q 'versions' || die $LINENO "Trove API not functioning!"
+# Try to get a datastore id
+DSTORE_ID=$(trove datastore-list | tail -n +4 | head -3 | get_field 1)
+die_if_not_set $LINENO DSTORE_ID "Trove API not functioning!"
+
+DV_ID=$(trove datastore-version-list $DSTORE_ID | tail -n +4 | get_field 1)
+die_if_not_set $LINENO DV_ID "Trove API not functioning!"
set +o xtrace
echo "*********************************************************************"
diff --git a/extras.d/60-ceph.sh b/extras.d/60-ceph.sh
new file mode 100644
index 0000000..5fb34ea
--- /dev/null
+++ b/extras.d/60-ceph.sh
@@ -0,0 +1,44 @@
+# ceph.sh - DevStack extras script to install Ceph
+
+if is_service_enabled ceph; then
+ if [[ "$1" == "source" ]]; then
+ # Initial source
+ source $TOP_DIR/lib/ceph
+ elif [[ "$1" == "stack" && "$2" == "pre-install" ]]; then
+ echo_summary "Installing Ceph"
+ install_ceph
+ echo_summary "Configuring Ceph"
+ configure_ceph
+ # NOTE (leseb): Do everything here because we need to have Ceph started before the main
+        # OpenStack components. The Ceph OSDs must start here, otherwise we can't upload any images.
+ echo_summary "Initializing Ceph"
+ init_ceph
+ start_ceph
+ elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
+ if is_service_enabled glance; then
+ echo_summary "Configuring Glance for Ceph"
+ configure_ceph_glance
+ fi
+ if is_service_enabled nova; then
+ echo_summary "Configuring Nova for Ceph"
+ configure_ceph_nova
+ fi
+ if is_service_enabled cinder; then
+ echo_summary "Configuring Cinder for Ceph"
+ configure_ceph_cinder
+            # NOTE (leseb): the part below is required by Cinder to attach volumes,
+            # so we run the following within the if statement.
+ echo_summary "Configuring libvirt secret"
+ import_libvirt_secret_ceph
+ fi
+ fi
+
+ if [[ "$1" == "unstack" ]]; then
+ stop_ceph
+ cleanup_ceph
+ fi
+
+ if [[ "$1" == "clean" ]]; then
+ cleanup_ceph
+ fi
+fi
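
The hook above only runs when the ``ceph`` service is enabled; as an illustrative (assumed, not part of this patch) ``local.conf`` fragment to exercise it:

    [[local|localrc]]
    enable_service ceph
    CEPH_LOOPBACK_DISK_SIZE=4G   # optional; defaults to 2G per lib/ceph below
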
diff --git a/files/apts/ceph b/files/apts/ceph
new file mode 100644
index 0000000..69863ab
--- /dev/null
+++ b/files/apts/ceph
@@ -0,0 +1,2 @@
+ceph # NOPRIME
+xfsprogs
diff --git a/files/rpms-suse/ceph b/files/rpms-suse/ceph
new file mode 100644
index 0000000..8d46500
--- /dev/null
+++ b/files/rpms-suse/ceph
@@ -0,0 +1,3 @@
+ceph # NOPRIME
+xfsprogs
+lsb
diff --git a/files/rpms/ceph b/files/rpms/ceph
new file mode 100644
index 0000000..5483735
--- /dev/null
+++ b/files/rpms/ceph
@@ -0,0 +1,3 @@
+ceph # NOPRIME
+xfsprogs
+redhat-lsb-core
diff --git a/files/rpms/neutron b/files/rpms/neutron
index 9fafecb..15ed973 100644
--- a/files/rpms/neutron
+++ b/files/rpms/neutron
@@ -1,4 +1,5 @@
MySQL-python
+dnsmasq # for q-dhcp
dnsmasq-utils # for dhcp_release
ebtables
iptables
diff --git a/files/rpms/nova b/files/rpms/nova
index fa472a8..6097991 100644
--- a/files/rpms/nova
+++ b/files/rpms/nova
@@ -1,5 +1,6 @@
MySQL-python
curl
+dnsmasq # for nova-network
dnsmasq-utils # for dhcp_release
conntrack-tools
ebtables
diff --git a/functions b/functions
index ca8ef80..cd9e078 100644
--- a/functions
+++ b/functions
@@ -546,6 +546,40 @@
}
fi
+
+# create_disk - Create backing disk
+function create_disk {
+ local node_number
+ local disk_image=${1}
+ local storage_data_dir=${2}
+ local loopback_disk_size=${3}
+
+ # Create a loopback disk and format it to XFS.
+ if [[ -e ${disk_image} ]]; then
+ if egrep -q ${storage_data_dir} /proc/mounts; then
+ sudo umount ${storage_data_dir}/drives/sdb1
+ sudo rm -f ${disk_image}
+ fi
+ fi
+
+ sudo mkdir -p ${storage_data_dir}/drives/images
+
+ sudo truncate -s ${loopback_disk_size} ${disk_image}
+
+ # Make a fresh XFS filesystem. Use bigger inodes so xattr can fit in
+ # a single inode. Keeping the default inode size (256) will result in multiple
+ # inodes being used to store xattr. Retrieving the xattr will be slower
+ # since we have to read multiple inodes. This statement is true for both
+ # Swift and Ceph.
+ sudo mkfs.xfs -f -i size=1024 ${disk_image}
+
+ # Mount the disk with mount options to make it as efficient as possible
+ if ! egrep -q ${storage_data_dir} /proc/mounts; then
+ sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8 \
+ ${disk_image} ${storage_data_dir}
+ fi
+}
+
# Restore xtrace
$XTRACE
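
For reference, ``lib/ceph`` (added below) calls this new helper with its three positional arguments -- image path, mount point, size:

    # sketch of a typical call (mirrors the invocation in configure_ceph)
    create_disk ${CEPH_DISK_IMAGE} ${CEPH_DATA_DIR} ${CEPH_LOOPBACK_DISK_SIZE}
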
diff --git a/functions-common b/functions-common
index 4b660de..2df5e1d 100644
--- a/functions-common
+++ b/functions-common
@@ -500,6 +500,7 @@
# ``get_release_name_from_branch branch-name``
function get_release_name_from_branch {
local branch=$1
+
if [[ $branch =~ "stable/" ]]; then
echo ${branch#*/}
else
@@ -510,72 +511,73 @@
# git clone only if directory doesn't exist already. Since ``DEST`` might not
# be owned by the installation user, we create the directory and change the
# ownership to the proper user.
-# Set global RECLONE=yes to simulate a clone when dest-dir exists
-# Set global ERROR_ON_CLONE=True to abort execution with an error if the git repo
+# Set global ``RECLONE=yes`` to simulate a clone when dest-dir exists
+# Set global ``ERROR_ON_CLONE=True`` to abort execution with an error if the git repo
# does not exist (default is False, meaning the repo will be cloned).
-# Uses global ``OFFLINE``
+# Uses globals ``ERROR_ON_CLONE``, ``OFFLINE``, ``RECLONE``
# git_clone remote dest-dir branch
function git_clone {
- GIT_REMOTE=$1
- GIT_DEST=$2
- GIT_REF=$3
+ local git_remote=$1
+ local git_dest=$2
+ local git_ref=$3
+ local orig_dir=$(pwd)
+
RECLONE=$(trueorfalse False $RECLONE)
- local orig_dir=`pwd`
if [[ "$OFFLINE" = "True" ]]; then
echo "Running in offline mode, clones already exist"
# print out the results so we know what change was used in the logs
- cd $GIT_DEST
+ cd $git_dest
git show --oneline | head -1
cd $orig_dir
return
fi
- if echo $GIT_REF | egrep -q "^refs"; then
+ if echo $git_ref | egrep -q "^refs"; then
# If our branch name is a gerrit style refs/changes/...
- if [[ ! -d $GIT_DEST ]]; then
+ if [[ ! -d $git_dest ]]; then
[[ "$ERROR_ON_CLONE" = "True" ]] && \
die $LINENO "Cloning not allowed in this configuration"
- git_timed clone $GIT_REMOTE $GIT_DEST
+ git_timed clone $git_remote $git_dest
fi
- cd $GIT_DEST
- git_timed fetch $GIT_REMOTE $GIT_REF && git checkout FETCH_HEAD
+ cd $git_dest
+ git_timed fetch $git_remote $git_ref && git checkout FETCH_HEAD
else
# do a full clone only if the directory doesn't exist
- if [[ ! -d $GIT_DEST ]]; then
+ if [[ ! -d $git_dest ]]; then
[[ "$ERROR_ON_CLONE" = "True" ]] && \
die $LINENO "Cloning not allowed in this configuration"
- git_timed clone $GIT_REMOTE $GIT_DEST
- cd $GIT_DEST
+ git_timed clone $git_remote $git_dest
+ cd $git_dest
# This checkout syntax works for both branches and tags
- git checkout $GIT_REF
+ git checkout $git_ref
elif [[ "$RECLONE" = "True" ]]; then
# if it does exist then simulate what clone does if asked to RECLONE
- cd $GIT_DEST
+ cd $git_dest
# set the url to pull from and fetch
- git remote set-url origin $GIT_REMOTE
+ git remote set-url origin $git_remote
git_timed fetch origin
# remove the existing ignored files (like pyc) as they cause breakage
# (due to the py files having older timestamps than our pyc, so python
# thinks the pyc files are correct using them)
- find $GIT_DEST -name '*.pyc' -delete
+ find $git_dest -name '*.pyc' -delete
- # handle GIT_REF accordingly to type (tag, branch)
- if [[ -n "`git show-ref refs/tags/$GIT_REF`" ]]; then
- git_update_tag $GIT_REF
- elif [[ -n "`git show-ref refs/heads/$GIT_REF`" ]]; then
- git_update_branch $GIT_REF
- elif [[ -n "`git show-ref refs/remotes/origin/$GIT_REF`" ]]; then
- git_update_remote_branch $GIT_REF
+        # handle git_ref according to its type (tag or branch)
+ if [[ -n "`git show-ref refs/tags/$git_ref`" ]]; then
+ git_update_tag $git_ref
+ elif [[ -n "`git show-ref refs/heads/$git_ref`" ]]; then
+ git_update_branch $git_ref
+ elif [[ -n "`git show-ref refs/remotes/origin/$git_ref`" ]]; then
+ git_update_remote_branch $git_ref
else
- die $LINENO "$GIT_REF is neither branch nor tag"
+ die $LINENO "$git_ref is neither branch nor tag"
fi
fi
fi
# print out the results so we know what change was used in the logs
- cd $GIT_DEST
+ cd $git_dest
git show --oneline | head -1
cd $orig_dir
}
@@ -614,35 +616,32 @@
# git update using reference as a branch.
# git_update_branch ref
function git_update_branch {
+ local git_branch=$1
- GIT_BRANCH=$1
-
- git checkout -f origin/$GIT_BRANCH
+ git checkout -f origin/$git_branch
# a local branch might not exist
- git branch -D $GIT_BRANCH || true
- git checkout -b $GIT_BRANCH
+ git branch -D $git_branch || true
+ git checkout -b $git_branch
}
# git update using reference as a branch.
# git_update_remote_branch ref
function git_update_remote_branch {
+ local git_branch=$1
- GIT_BRANCH=$1
-
- git checkout -b $GIT_BRANCH -t origin/$GIT_BRANCH
+ git checkout -b $git_branch -t origin/$git_branch
}
# git update using reference as a tag. Be careful editing source at that repo
# as working copy will be in a detached mode
# git_update_tag ref
function git_update_tag {
+ local git_tag=$1
- GIT_TAG=$1
-
- git tag -d $GIT_TAG
+ git tag -d $git_tag
# fetching given tag only
- git_timed fetch origin tag $GIT_TAG
- git checkout -f $GIT_TAG
+ git_timed fetch origin tag $git_tag
+ git checkout -f $git_tag
}
@@ -993,7 +992,7 @@
# Distro-agnostic package installer
# install_package package [package ...]
function update_package_repo {
- if [[ "NO_UPDATE_REPOS" = "True" ]]; then
+ if [[ "$NO_UPDATE_REPOS" = "True" ]]; then
return 0
fi
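
The ``git_clone`` refactor keeps the documented three-argument form (``git_clone remote dest-dir branch``); an illustrative call using standard DevStack variables:

    # refresh an existing checkout instead of skipping it
    RECLONE=yes git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH
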
diff --git a/lib/ceph b/lib/ceph
new file mode 100644
index 0000000..32a4760
--- /dev/null
+++ b/lib/ceph
@@ -0,0 +1,286 @@
+# lib/ceph
+# Functions to control the configuration and operation of the **Ceph** storage service
+
+# Dependencies:
+#
+# - ``functions`` file
+# - ``CEPH_DATA_DIR`` or ``DATA_DIR`` must be defined
+
+# ``stack.sh`` calls the entry points in this order (via ``extras.d/60-ceph.sh``):
+#
+# - install_ceph
+# - configure_ceph
+# - init_ceph
+# - start_ceph
+# - stop_ceph
+# - cleanup_ceph
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Defaults
+# --------
+
+# Set ``CEPH_DATA_DIR`` to the location of Ceph drives and objects.
+# Default is ``/var/lib/ceph``.
+CEPH_DATA_DIR=${CEPH_DATA_DIR:-/var/lib/ceph}
+CEPH_DISK_IMAGE=${CEPH_DATA_DIR}/drives/images/ceph.img
+
+# Set ``CEPH_CONF_DIR`` to the location of the configuration files.
+# Default is ``/etc/ceph``.
+CEPH_CONF_DIR=${CEPH_CONF_DIR:-/etc/ceph}
+
+# DevStack will create a loop-back disk formatted as XFS to store the
+# Ceph data. Set ``CEPH_LOOPBACK_DISK_SIZE`` to the desired disk size
+# (a ``truncate``-style size such as ``2G``).
+# Default is 2 gigabytes.
+CEPH_LOOPBACK_DISK_SIZE_DEFAULT=2G
+CEPH_LOOPBACK_DISK_SIZE=${CEPH_LOOPBACK_DISK_SIZE:-$CEPH_LOOPBACK_DISK_SIZE_DEFAULT}
+
+# Common
+CEPH_FSID=$(uuidgen)
+CEPH_CONF_FILE=${CEPH_CONF_DIR}/ceph.conf
+
+# Glance
+GLANCE_CEPH_USER=${GLANCE_CEPH_USER:-glance}
+GLANCE_CEPH_POOL=${GLANCE_CEPH_POOL:-images}
+GLANCE_CEPH_POOL_PG=${GLANCE_CEPH_POOL_PG:-8}
+GLANCE_CEPH_POOL_PGP=${GLANCE_CEPH_POOL_PGP:-8}
+
+# Nova
+NOVA_CEPH_POOL=${NOVA_CEPH_POOL:-vms}
+NOVA_CEPH_POOL_PG=${NOVA_CEPH_POOL_PG:-8}
+NOVA_CEPH_POOL_PGP=${NOVA_CEPH_POOL_PGP:-8}
+
+# Cinder
+CINDER_CEPH_POOL=${CINDER_CEPH_POOL:-volumes}
+CINDER_CEPH_POOL_PG=${CINDER_CEPH_POOL_PG:-8}
+CINDER_CEPH_POOL_PGP=${CINDER_CEPH_POOL_PGP:-8}
+CINDER_CEPH_USER=${CINDER_CEPH_USER:-cinder}
+CINDER_CEPH_UUID=${CINDER_CEPH_UUID:-$(uuidgen)}
+
+# Set ``CEPH_REPLICAS`` to configure how many replicas are to be
+# configured for your Ceph cluster. By default we configure
+# only one replica since this is far less CPU and memory intensive. If
+# you plan to test Ceph replication, feel free to increase this value.
+CEPH_REPLICAS=${CEPH_REPLICAS:-1}
+CEPH_REPLICAS_SEQ=$(seq ${CEPH_REPLICAS})
+
+# Functions
+# ------------
+
+# import_libvirt_secret_ceph() - Imports Cinder user key into libvirt
+# so it can connect to the Ceph cluster while attaching a Cinder block device
+function import_libvirt_secret_ceph {
+ cat > secret.xml <<EOF
+<secret ephemeral='no' private='no'>
+ <uuid>${CINDER_CEPH_UUID}</uuid>
+ <usage type='ceph'>
+ <name>client.${CINDER_CEPH_USER} secret</name>
+ </usage>
+</secret>
+EOF
+ sudo virsh secret-define --file secret.xml
+ sudo virsh secret-set-value --secret ${CINDER_CEPH_UUID} --base64 $(sudo ceph -c ${CEPH_CONF_FILE} auth get-key client.${CINDER_CEPH_USER})
+ sudo rm -f secret.xml
+}
+
+# cleanup_ceph() - Remove residual data files, anything left over from previous
+# runs that a clean run would need to clean up
+function cleanup_ceph {
+ sudo pkill -f ceph-mon
+ sudo pkill -f ceph-osd
+ sudo rm -rf ${CEPH_DATA_DIR}/*/*
+ sudo rm -rf ${CEPH_CONF_DIR}/*
+ if egrep -q ${CEPH_DATA_DIR} /proc/mounts; then
+ sudo umount ${CEPH_DATA_DIR}
+ fi
+ if [[ -e ${CEPH_DISK_IMAGE} ]]; then
+ sudo rm -f ${CEPH_DISK_IMAGE}
+ fi
+ uninstall_package ceph ceph-common python-ceph libcephfs1 > /dev/null 2>&1
+ VIRSH_UUID=$(sudo virsh secret-list | awk '/^ ?[0-9a-z]/ { print $1 }')
+ sudo virsh secret-undefine ${VIRSH_UUID} >/dev/null 2>&1
+}
+
+# configure_ceph() - Set config files, create data dirs, etc
+function configure_ceph {
+ local count=0
+
+ # create a backing file disk
+ create_disk ${CEPH_DISK_IMAGE} ${CEPH_DATA_DIR} ${CEPH_LOOPBACK_DISK_SIZE}
+
+ # populate ceph directory
+ sudo mkdir -p ${CEPH_DATA_DIR}/{bootstrap-mds,bootstrap-osd,mds,mon,osd,tmp}
+
+ # create ceph monitor initial key and directory
+ sudo ceph-authtool /var/lib/ceph/tmp/keyring.mon.$(hostname) --create-keyring --name=mon. --add-key=$(ceph-authtool --gen-print-key) --cap mon 'allow *'
+ sudo mkdir /var/lib/ceph/mon/ceph-$(hostname)
+
+ # create a default ceph configuration file
+ sudo tee -a ${CEPH_CONF_FILE} > /dev/null <<EOF
+[global]
+fsid = ${CEPH_FSID}
+mon_initial_members = $(hostname)
+mon_host = ${SERVICE_HOST}
+auth_cluster_required = cephx
+auth_service_required = cephx
+auth_client_required = cephx
+filestore_xattr_use_omap = true
+osd crush chooseleaf type = 0
+osd journal size = 100
+EOF
+
+ # bootstrap the ceph monitor
+ sudo ceph-mon -c ${CEPH_CONF_FILE} --mkfs -i $(hostname) --keyring /var/lib/ceph/tmp/keyring.mon.$(hostname)
+ if is_ubuntu; then
+ sudo touch /var/lib/ceph/mon/ceph-$(hostname)/upstart
+ sudo initctl emit ceph-mon id=$(hostname)
+ else
+ sudo touch /var/lib/ceph/mon/ceph-$(hostname)/sysvinit
+ sudo service ceph start mon.$(hostname)
+ fi
+
+ # wait for the admin key to come up otherwise we will not be able to do the actions below
+ until [ -f ${CEPH_CONF_DIR}/ceph.client.admin.keyring ]; do
+ echo_summary "Waiting for the Ceph admin key to be ready..."
+
+ count=$(($count + 1))
+ if [ $count -eq 3 ]; then
+ die $LINENO "Maximum of 3 retries reached"
+ fi
+ sleep 5
+ done
+
+ # change pool replica size according to the CEPH_REPLICAS set by the user
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool set data size ${CEPH_REPLICAS}
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool set rbd size ${CEPH_REPLICAS}
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool set metadata size ${CEPH_REPLICAS}
+
+    # create a simple CRUSH rule that distributes across OSDs instead of hosts,
+    # then apply this rule to the default pools
+ if [[ $CEPH_REPLICAS -ne 1 ]]; then
+ sudo ceph -c ${CEPH_CONF_FILE} osd crush rule create-simple devstack default osd
+ RULE_ID=$(sudo ceph -c ${CEPH_CONF_FILE} osd crush rule dump devstack | awk '/rule_id/ {print $3}' | cut -d ',' -f1)
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool set rbd crush_ruleset ${RULE_ID}
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool set data crush_ruleset ${RULE_ID}
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool set metadata crush_ruleset ${RULE_ID}
+ fi
+
+ # create the OSD(s)
+ for rep in ${CEPH_REPLICAS_SEQ}; do
+ OSD_ID=$(sudo ceph -c ${CEPH_CONF_FILE} osd create)
+ sudo mkdir -p ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}
+ sudo ceph-osd -c ${CEPH_CONF_FILE} -i ${OSD_ID} --mkfs
+ sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create osd.${OSD_ID} mon 'allow profile osd ' osd 'allow *' | sudo tee ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/keyring
+
+        # Ceph's init script parses ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/ looking for a file named
+        # 'upstart' or 'sysvinit'; these touch files let us control the OSD daemons
+        # from the init script.
+ if is_ubuntu; then
+ sudo touch ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/upstart
+ else
+ sudo touch ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/sysvinit
+ fi
+ done
+}
+
+# configure_ceph_glance() - Glance config needs to come after Glance is set up
+function configure_ceph_glance {
+ # configure Glance service options, ceph pool, ceph user and ceph key
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${GLANCE_CEPH_POOL} ${GLANCE_CEPH_POOL_PG} ${GLANCE_CEPH_POOL_PGP}
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GLANCE_CEPH_POOL} size ${CEPH_REPLICAS}
+ if [[ $CEPH_REPLICAS -ne 1 ]]; then
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GLANCE_CEPH_POOL} crush_ruleset ${RULE_ID}
+ fi
+ sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${GLANCE_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${GLANCE_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring
+    sudo chown ${STACK_USER}:$(id -g -n $(whoami)) ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring
+ iniset $GLANCE_API_CONF DEFAULT default_store rbd
+ iniset $GLANCE_API_CONF DEFAULT rbd_store_ceph_conf $CEPH_CONF_FILE
+ iniset $GLANCE_API_CONF DEFAULT rbd_store_user $GLANCE_CEPH_USER
+ iniset $GLANCE_API_CONF DEFAULT rbd_store_pool $GLANCE_CEPH_POOL
+ iniset $GLANCE_API_CONF DEFAULT show_image_direct_url True
+}
+
+# configure_ceph_nova() - Nova config needs to come after Nova is set up
+function configure_ceph_nova {
+ # configure Nova service options, ceph pool, ceph user and ceph key
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${NOVA_CEPH_POOL} ${NOVA_CEPH_POOL_PG} ${NOVA_CEPH_POOL_PGP}
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} size ${CEPH_REPLICAS}
+ if [[ $CEPH_REPLICAS -ne 1 ]]; then
+        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} crush_ruleset ${RULE_ID}
+ fi
+ iniset $NOVA_CONF libvirt rbd_user ${CINDER_CEPH_USER}
+ iniset $NOVA_CONF libvirt rbd_secret_uuid ${CINDER_CEPH_UUID}
+ iniset $NOVA_CONF libvirt inject_key false
+ iniset $NOVA_CONF libvirt inject_partition -2
+ iniset $NOVA_CONF libvirt disk_cachemodes "network=writeback"
+ iniset $NOVA_CONF libvirt images_type rbd
+ iniset $NOVA_CONF libvirt images_rbd_pool ${NOVA_CEPH_POOL}
+ iniset $NOVA_CONF libvirt images_rbd_ceph_conf ${CEPH_CONF_FILE}
+}
+
+# configure_ceph_cinder() - Cinder config needs to come after Cinder is set up
+function configure_ceph_cinder {
+ # Configure Cinder service options, ceph pool, ceph user and ceph key
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_CEPH_POOL} ${CINDER_CEPH_POOL_PG} ${CINDER_CEPH_POOL_PGP}
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} size ${CEPH_REPLICAS}
+ if [[ $CEPH_REPLICAS -ne 1 ]]; then
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} crush_ruleset ${RULE_ID}
+
+ fi
+ sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_CEPH_POOL}, allow rwx pool=${NOVA_CEPH_POOL},allow rx pool=${GLANCE_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
+    sudo chown ${STACK_USER}:$(id -g -n $(whoami)) ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
+}
+
+# init_ceph() - Initialize databases, etc.
+function init_ceph {
+ # clean up from previous (possibly aborted) runs
+ # make sure to kill all ceph processes first
+ sudo pkill -f ceph-mon || true
+ sudo pkill -f ceph-osd || true
+}
+
+# install_ceph() - Collect source and prepare
+function install_ceph {
+ # NOTE(dtroyer): At some point it'll be easier to test for unsupported distros,
+ # leveraging the list in stack.sh
+ if [[ ${os_CODENAME} =~ trusty ]] || [[ ${os_CODENAME} =~ Schrödinger’sCat ]] || [[ ${os_CODENAME} =~ Heisenbug ]]; then
+ NO_UPDATE_REPOS=False
+ install_package ceph
+ else
+ exit_distro_not_supported "Ceph since your distro doesn't provide (at least) the Firefly release. Please use Ubuntu Trusty or Fedora 19/20"
+ fi
+}
+
+# start_ceph() - Start running processes, including screen
+function start_ceph {
+ if is_ubuntu; then
+ sudo initctl emit ceph-mon id=$(hostname)
+ for id in $(sudo ceph -c ${CEPH_CONF_FILE} osd ls); do
+ sudo start ceph-osd id=${id}
+ done
+ else
+ sudo service ceph start
+ fi
+}
+
+# stop_ceph() - Stop running processes (non-screen)
+function stop_ceph {
+ if is_ubuntu; then
+ sudo service ceph-mon-all stop > /dev/null 2>&1
+ sudo service ceph-osd-all stop > /dev/null 2>&1
+ else
+ sudo service ceph stop > /dev/null 2>&1
+ fi
+}
+
+
+# Restore xtrace
+$XTRACE
+
+## Local variables:
+## mode: shell-script
+## End:
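
Once ``start_ceph`` has run, a quick manual sanity check (assumed usage, not part of the patch) is to query the monitor for cluster status:

    sudo ceph -c /etc/ceph/ceph.conf -s    # expect HEALTH_OK with the single-replica defaults
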
diff --git a/lib/cinder b/lib/cinder
index a51e4a0..38ce4d6 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -238,21 +238,21 @@
if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then
enabled_backends=""
- default_type=""
+ default_name=""
for be in ${CINDER_ENABLED_BACKENDS//,/ }; do
BE_TYPE=${be%%:*}
BE_NAME=${be##*:}
if type configure_cinder_backend_${BE_TYPE} >/dev/null 2>&1; then
configure_cinder_backend_${BE_TYPE} ${BE_NAME}
fi
- if [[ -z "$default_type" ]]; then
- default_type=$BE_TYPE
+ if [[ -z "$default_name" ]]; then
+ default_name=$BE_NAME
fi
enabled_backends+=$BE_NAME,
done
iniset $CINDER_CONF DEFAULT enabled_backends ${enabled_backends%,*}
- if [[ -n "$default_type" ]]; then
- iniset $CINDER_CONF DEFAULT default_volume_type ${default_type}
+ if [[ -n "$default_name" ]]; then
+ iniset $CINDER_CONF DEFAULT default_volume_type ${default_name}
fi
fi
diff --git a/lib/cinder_backends/ceph b/lib/cinder_backends/ceph
new file mode 100644
index 0000000..e9d2a02
--- /dev/null
+++ b/lib/cinder_backends/ceph
@@ -0,0 +1,79 @@
+# lib/cinder_backends/ceph
+# Configure the ceph backend
+
+# Enable with:
+#
+# CINDER_ENABLED_BACKENDS+=,ceph:ceph
+#
+# Optional parameters:
+# CINDER_BAK_CEPH_POOL=<pool-name>
+# CINDER_BAK_CEPH_USER=<user>
+# CINDER_BAK_CEPH_POOL_PG=<pg-num>
+# CINDER_BAK_CEPH_POOL_PGP=<pgp-num>
+
+# Dependencies:
+#
+# - ``functions`` file
+# - ``cinder`` configurations
+
+# configure_cinder_backend_ceph - called from configure_cinder()
+
+
+# Save trace setting
+MY_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Defaults
+# --------
+
+CINDER_BAK_CEPH_POOL=${CINDER_BAK_CEPH_POOL:-backups}
+CINDER_BAK_CEPH_POOL_PG=${CINDER_BAK_CEPH_POOL_PG:-8}
+CINDER_BAK_CEPH_POOL_PGP=${CINDER_BAK_CEPH_POOL_PGP:-8}
+CINDER_BAK_CEPH_USER=${CINDER_BAK_CEPH_USER:-cinder-bak}
+
+
+# Entry Points
+# ------------
+
+# configure_cinder_backend_ceph - Set config files, create data dirs, etc
+# configure_cinder_backend_ceph $name
+function configure_cinder_backend_ceph {
+ local be_name=$1
+
+ iniset $CINDER_CONF $be_name volume_backend_name $be_name
+ iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.rbd.RBDDriver"
+    iniset $CINDER_CONF $be_name rbd_ceph_conf "$CEPH_CONF_FILE"
+ iniset $CINDER_CONF $be_name rbd_pool "$CINDER_CEPH_POOL"
+ iniset $CINDER_CONF $be_name rbd_user "$CINDER_CEPH_USER"
+ iniset $CINDER_CONF $be_name rbd_uuid "$CINDER_CEPH_UUID"
+ iniset $CINDER_CONF $be_name rbd_flatten_volume_from_snapshot False
+ iniset $CINDER_CONF $be_name rbd_max_clone_depth 5
+ iniset $CINDER_CONF DEFAULT glance_api_version 2
+
+ if is_service_enabled c-bak; then
+ # Configure Cinder backup service options, ceph pool, ceph user and ceph key
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_BAK_CEPH_POOL} ${CINDER_BAK_CEPH_POOL_PG} ${CINDER_BAK_CEPH_POOL_PGP}
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} size ${CEPH_REPLICAS}
+ if [[ $CEPH_REPLICAS -ne 1 ]]; then
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID}
+ fi
+ sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
+ sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
+
+ iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.ceph"
+        iniset $CINDER_CONF DEFAULT backup_ceph_conf "$CEPH_CONF_FILE"
+ iniset $CINDER_CONF DEFAULT backup_ceph_pool "$CINDER_BAK_CEPH_POOL"
+ iniset $CINDER_CONF DEFAULT backup_ceph_user "$CINDER_BAK_CEPH_USER"
+ iniset $CINDER_CONF DEFAULT backup_ceph_stripe_unit 0
+ iniset $CINDER_CONF DEFAULT backup_ceph_stripe_count 0
+ iniset $CINDER_CONF DEFAULT restore_discard_excess_bytes True
+ fi
+}
+
+# Restore xtrace
+$MY_XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
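
As the file header notes, the backend is wired in through ``CINDER_ENABLED_BACKENDS``; an assumed ``localrc`` sketch combining it with the default LVM backend (note that ``lib/cinder`` above now uses the first backend name as ``default_volume_type``):

    CINDER_ENABLED_BACKENDS=lvm:lvmdriver-1,ceph:ceph
    CINDER_BAK_CEPH_POOL=backups     # optional overrides; defaults shown
    CINDER_BAK_CEPH_USER=cinder-bak
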
diff --git a/lib/infra b/lib/infra
index e2f7dad..e18c66e 100644
--- a/lib/infra
+++ b/lib/infra
@@ -10,7 +10,6 @@
# ``stack.sh`` calls the entry points in this order:
#
-# - unfubar_setuptools
# - install_infra
# Save trace setting
@@ -26,19 +25,6 @@
# Entry Points
# ------------
-# unfubar_setuptools() - Unbreak the giant mess that is the current state of setuptools
-function unfubar_setuptools {
- # this is a giant game of who's on first, but it does consistently work
- # there is hope that upstream python packaging fixes this in the future
- echo_summary "Unbreaking setuptools"
- pip_install -U setuptools
- pip_install -U pip
- uninstall_package python-setuptools
- pip_install -U setuptools
- pip_install -U pip
-}
-
-
# install_infra() - Collect source and prepare
function install_infra {
# bring down global requirements
diff --git a/lib/ironic b/lib/ironic
index 08ac278..b56abcb 100644
--- a/lib/ironic
+++ b/lib/ironic
@@ -110,6 +110,7 @@
function install_ironicclient {
git_clone $IRONICCLIENT_REPO $IRONICCLIENT_DIR $IRONICCLIENT_BRANCH
setup_develop $IRONICCLIENT_DIR
+ sudo install -D -m 0644 -o $STACK_USER {$IRONICCLIENT_DIR/tools/,/etc/bash_completion.d/}ironic.bash_completion
}
# cleanup_ironic() - Remove residual data files, anything left over from previous
diff --git a/lib/keystone b/lib/keystone
index 3703008..547646a 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -496,6 +496,9 @@
_cleanup_keystone_apache_wsgi
}
+function is_keystone_enabled {
+    is_service_enabled key
+}
# Restore xtrace
$XTRACE
diff --git a/lib/neutron b/lib/neutron
index 5ceeb62..2763f26 100644
--- a/lib/neutron
+++ b/lib/neutron
@@ -85,6 +85,8 @@
NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf
export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"}
+# Default name for Neutron database
+Q_DB_NAME=${Q_DB_NAME:-neutron}
# Default Neutron Plugin
Q_PLUGIN=${Q_PLUGIN:-ml2}
# Default Neutron Port
@@ -143,6 +145,17 @@
Q_RR_COMMAND="sudo $NEUTRON_ROOTWRAP $Q_RR_CONF_FILE"
fi
+
+# Distributed Virtual Router (DVR) configuration
+# Can be:
+# legacy - No DVR functionality
+# dvr_snat - Controller or single node DVR
+# dvr - Compute node in multi-node DVR
+Q_DVR_MODE=${Q_DVR_MODE:-legacy}
+if [[ "$Q_DVR_MODE" != "legacy" ]]; then
+ Q_ML2_PLUGIN_MECHANISM_DRIVERS=openvswitch,linuxbridge,l2population
+fi
+
# Provider Network Configurations
# --------------------------------
@@ -303,6 +316,10 @@
if is_service_enabled q-meta; then
_configure_neutron_metadata_agent
fi
+
+ if [[ "$Q_DVR_MODE" != "legacy" ]]; then
+ _configure_dvr
+ fi
if is_service_enabled ceilometer; then
_configure_neutron_ceilometer_notifications
fi
@@ -371,9 +388,9 @@
"network" "Neutron Service")
get_or_create_endpoint $NEUTRON_SERVICE \
"$REGION_NAME" \
- "http://$SERVICE_HOST:9696/" \
- "http://$SERVICE_HOST:9696/" \
- "http://$SERVICE_HOST:9696/"
+ "http://$SERVICE_HOST:$Q_PORT/" \
+ "http://$SERVICE_HOST:$Q_PORT/" \
+ "http://$SERVICE_HOST:$Q_PORT/"
fi
fi
}
@@ -574,7 +591,7 @@
fi
# delete all namespaces created by neutron
- for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|qlbaas)-[0-9a-f-]*'); do
+ for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|qlbaas|fip|snat)-[0-9a-f-]*'); do
sudo ip netns delete ${ns}
done
}
@@ -608,7 +625,7 @@
Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME
cp $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE
- iniset /$Q_PLUGIN_CONF_FILE database connection `database_connection_url $Q_DB_NAME`
+ iniset $NEUTRON_CONF database connection `database_connection_url $Q_DB_NAME`
iniset $NEUTRON_CONF DEFAULT state_path $DATA_DIR/neutron
# If addition config files are set, make sure their path name is set as well
@@ -670,14 +687,6 @@
iniset $Q_DHCP_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE
iniset $Q_DHCP_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND"
- # Define extra "DEFAULT" configuration options when q-dhcp is configured by
- # defining the array ``Q_DHCP_EXTRA_DEFAULT_OPTS``.
- # For Example: ``Q_DHCP_EXTRA_DEFAULT_OPTS=(foo=true bar=2)``
- for I in "${Q_DHCP_EXTRA_DEFAULT_OPTS[@]}"; do
- # Replace the first '=' with ' ' for iniset syntax
- iniset $Q_DHCP_CONF_FILE DEFAULT ${I/=/ }
- done
-
_neutron_setup_interface_driver $Q_DHCP_CONF_FILE
neutron_plugin_configure_dhcp_agent
@@ -756,6 +765,12 @@
neutron_vpn_configure_common
}
+function _configure_dvr {
+ iniset $NEUTRON_CONF DEFAULT router_distributed True
+ iniset $Q_L3_CONF_FILE DEFAULT agent_mode $Q_DVR_MODE
+}
+
+
# _configure_neutron_plugin_agent() - Set config files for neutron plugin agent
# It is called when q-agt is enabled.
function _configure_neutron_plugin_agent {
@@ -793,14 +808,6 @@
iniset $NEUTRON_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY
_neutron_setup_keystone $NEUTRON_CONF keystone_authtoken
- # Define extra "DEFAULT" configuration options when q-svc is configured by
- # defining the array ``Q_SRV_EXTRA_DEFAULT_OPTS``.
- # For Example: ``Q_SRV_EXTRA_DEFAULT_OPTS=(foo=true bar=2)``
- for I in "${Q_SRV_EXTRA_DEFAULT_OPTS[@]}"; do
- # Replace the first '=' with ' ' for iniset syntax
- iniset $NEUTRON_CONF DEFAULT ${I/=/ }
- done
-
# Configuration for neutron notifations to nova.
iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_status_changes $Q_NOTIFY_NOVA_PORT_STATUS_CHANGES
iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes $Q_NOTIFY_NOVA_PORT_DATA_CHANGES
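
The new ``Q_DVR_MODE`` knob is consumed both here and by the ML2 plugin file further down; an assumed single-node ``localrc`` fragment:

    Q_DVR_MODE=dvr_snat    # controller / all-in-one node
    # compute-only nodes in a multi-node DVR setup would instead set Q_DVR_MODE=dvr
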
diff --git a/lib/neutron_plugins/README.md b/lib/neutron_plugins/README.md
index be8fd96..7192a05 100644
--- a/lib/neutron_plugins/README.md
+++ b/lib/neutron_plugins/README.md
@@ -25,7 +25,7 @@
install_package bridge-utils
* ``neutron_plugin_configure_common`` :
set plugin-specific variables, ``Q_PLUGIN_CONF_PATH``, ``Q_PLUGIN_CONF_FILENAME``,
- ``Q_DB_NAME``, ``Q_PLUGIN_CLASS``
+ ``Q_PLUGIN_CLASS``
* ``neutron_plugin_configure_debug_command``
* ``neutron_plugin_configure_dhcp_agent``
* ``neutron_plugin_configure_l3_agent``
diff --git a/lib/neutron_plugins/bigswitch_floodlight b/lib/neutron_plugins/bigswitch_floodlight
index efdd9ef..9e84f2e 100644
--- a/lib/neutron_plugins/bigswitch_floodlight
+++ b/lib/neutron_plugins/bigswitch_floodlight
@@ -19,7 +19,6 @@
function neutron_plugin_configure_common {
Q_PLUGIN_CONF_PATH=etc/neutron/plugins/bigswitch
Q_PLUGIN_CONF_FILENAME=restproxy.ini
- Q_DB_NAME="restproxy_neutron"
Q_PLUGIN_CLASS="neutron.plugins.bigswitch.plugin.NeutronRestProxyV2"
BS_FL_CONTROLLERS_PORT=${BS_FL_CONTROLLERS_PORT:-localhost:80}
BS_FL_CONTROLLER_TIMEOUT=${BS_FL_CONTROLLER_TIMEOUT:-10}
diff --git a/lib/neutron_plugins/brocade b/lib/neutron_plugins/brocade
index e4cc754..511fb71 100644
--- a/lib/neutron_plugins/brocade
+++ b/lib/neutron_plugins/brocade
@@ -20,7 +20,6 @@
function neutron_plugin_configure_common {
Q_PLUGIN_CONF_PATH=etc/neutron/plugins/brocade
Q_PLUGIN_CONF_FILENAME=brocade.ini
- Q_DB_NAME="brcd_neutron"
Q_PLUGIN_CLASS="neutron.plugins.brocade.NeutronPlugin.BrocadePluginV2"
}
diff --git a/lib/neutron_plugins/cisco b/lib/neutron_plugins/cisco
index dccf400..da90ee3 100644
--- a/lib/neutron_plugins/cisco
+++ b/lib/neutron_plugins/cisco
@@ -197,7 +197,6 @@
Q_PLUGIN_CONF_FILENAME=cisco_plugins.ini
fi
Q_PLUGIN_CLASS="neutron.plugins.cisco.network_plugin.PluginV2"
- Q_DB_NAME=cisco_neutron
}
function neutron_plugin_configure_debug_command {
diff --git a/lib/neutron_plugins/embrane b/lib/neutron_plugins/embrane
index cce108a..7dafdc0 100644
--- a/lib/neutron_plugins/embrane
+++ b/lib/neutron_plugins/embrane
@@ -18,7 +18,6 @@
function neutron_plugin_configure_common {
Q_PLUGIN_CONF_PATH=etc/neutron/plugins/embrane
Q_PLUGIN_CONF_FILENAME=heleos_conf.ini
- Q_DB_NAME="ovs_neutron"
Q_PLUGIN_CLASS="neutron.plugins.embrane.plugins.embrane_ovs_plugin.EmbraneOvsPlugin"
}
diff --git a/lib/neutron_plugins/ibm b/lib/neutron_plugins/ibm
index 3aef9d0..39b0040 100644
--- a/lib/neutron_plugins/ibm
+++ b/lib/neutron_plugins/ibm
@@ -60,7 +60,6 @@
function neutron_plugin_configure_common {
Q_PLUGIN_CONF_PATH=etc/neutron/plugins/ibm
Q_PLUGIN_CONF_FILENAME=sdnve_neutron_plugin.ini
- Q_DB_NAME="sdnve_neutron"
Q_PLUGIN_CLASS="neutron.plugins.ibm.sdnve_neutron_plugin.SdnvePluginV2"
}
diff --git a/lib/neutron_plugins/linuxbridge b/lib/neutron_plugins/linuxbridge
index 113a7df..5f989ae 100644
--- a/lib/neutron_plugins/linuxbridge
+++ b/lib/neutron_plugins/linuxbridge
@@ -10,7 +10,6 @@
function neutron_plugin_configure_common {
Q_PLUGIN_CONF_PATH=etc/neutron/plugins/linuxbridge
Q_PLUGIN_CONF_FILENAME=linuxbridge_conf.ini
- Q_DB_NAME="neutron_linux_bridge"
Q_PLUGIN_CLASS="neutron.plugins.linuxbridge.lb_neutron_plugin.LinuxBridgePluginV2"
}
diff --git a/lib/neutron_plugins/midonet b/lib/neutron_plugins/midonet
index c5373d6..6ccd502 100644
--- a/lib/neutron_plugins/midonet
+++ b/lib/neutron_plugins/midonet
@@ -26,7 +26,6 @@
function neutron_plugin_configure_common {
Q_PLUGIN_CONF_PATH=etc/neutron/plugins/midonet
Q_PLUGIN_CONF_FILENAME=midonet.ini
- Q_DB_NAME="neutron_midonet"
Q_PLUGIN_CLASS="neutron.plugins.midonet.plugin.MidonetPluginV2"
}
diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2
index 8e131bb..f7f7838 100644
--- a/lib/neutron_plugins/ml2
+++ b/lib/neutron_plugins/ml2
@@ -50,7 +50,6 @@
function neutron_plugin_configure_common {
Q_PLUGIN_CONF_PATH=etc/neutron/plugins/ml2
Q_PLUGIN_CONF_FILENAME=ml2_conf.ini
- Q_DB_NAME="neutron_ml2"
Q_PLUGIN_CLASS="neutron.plugins.ml2.plugin.Ml2Plugin"
# The ML2 plugin delegates L3 routing/NAT functionality to
# the L3 service plugin which must therefore be specified.
@@ -112,6 +111,12 @@
populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_vxlan $Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS
populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_vlan $Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS
+
+ if [[ "$Q_DVR_MODE" != "legacy" ]]; then
+ populate_ml2_config /$Q_PLUGIN_CONF_FILE agent l2_population=True
+ populate_ml2_config /$Q_PLUGIN_CONF_FILE agent tunnel_types=vxlan
+ populate_ml2_config /$Q_PLUGIN_CONF_FILE agent enable_distributed_routing=True
+ fi
}
function has_neutron_plugin_security_group {
diff --git a/lib/neutron_plugins/nec b/lib/neutron_plugins/nec
index d76f7d4..f8d98c3 100644
--- a/lib/neutron_plugins/nec
+++ b/lib/neutron_plugins/nec
@@ -39,7 +39,6 @@
function neutron_plugin_configure_common {
Q_PLUGIN_CONF_PATH=etc/neutron/plugins/nec
Q_PLUGIN_CONF_FILENAME=nec.ini
- Q_DB_NAME="neutron_nec"
Q_PLUGIN_CLASS="neutron.plugins.nec.nec_plugin.NECPluginV2"
}
diff --git a/lib/neutron_plugins/nuage b/lib/neutron_plugins/nuage
index 86f09d2..52d85a2 100644
--- a/lib/neutron_plugins/nuage
+++ b/lib/neutron_plugins/nuage
@@ -20,7 +20,6 @@
function neutron_plugin_configure_common {
Q_PLUGIN_CONF_PATH=etc/neutron/plugins/nuage
Q_PLUGIN_CONF_FILENAME=nuage_plugin.ini
- Q_DB_NAME="nuage_neutron"
Q_PLUGIN_CLASS="neutron.plugins.nuage.plugin.NuagePlugin"
Q_PLUGIN_EXTENSIONS_PATH=neutron/plugins/nuage/extensions
#Nuage specific Neutron defaults. Actual value must be set and sourced
diff --git a/lib/neutron_plugins/oneconvergence b/lib/neutron_plugins/oneconvergence
index 06f1eee..e5f0d71 100644
--- a/lib/neutron_plugins/oneconvergence
+++ b/lib/neutron_plugins/oneconvergence
@@ -19,7 +19,6 @@
Q_PLUGIN_CONF_PATH=etc/neutron/plugins/oneconvergence
Q_PLUGIN_CONF_FILENAME=nvsdplugin.ini
Q_PLUGIN_CLASS="neutron.plugins.oneconvergence.plugin.OneConvergencePluginV2"
- Q_DB_NAME='oc_nvsd_neutron'
}
# Configure plugin specific information
diff --git a/lib/neutron_plugins/openvswitch b/lib/neutron_plugins/openvswitch
index fc81092..c468132 100644
--- a/lib/neutron_plugins/openvswitch
+++ b/lib/neutron_plugins/openvswitch
@@ -10,7 +10,6 @@
function neutron_plugin_configure_common {
Q_PLUGIN_CONF_PATH=etc/neutron/plugins/openvswitch
Q_PLUGIN_CONF_FILENAME=ovs_neutron_plugin.ini
- Q_DB_NAME="ovs_neutron"
Q_PLUGIN_CLASS="neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2"
}
diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base
index 26c5489..616a236 100644
--- a/lib/neutron_plugins/ovs_base
+++ b/lib/neutron_plugins/ovs_base
@@ -7,6 +7,7 @@
OVS_BRIDGE=${OVS_BRIDGE:-br-int}
PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex}
+OVS_DATAPATH_TYPE=${OVS_DATAPATH_TYPE:-""}
function is_neutron_ovs_base_plugin {
# Yes, we use OVS.
@@ -17,6 +18,9 @@
local bridge=$1
neutron-ovs-cleanup
sudo ovs-vsctl --no-wait -- --may-exist add-br $bridge
+ if [[ $OVS_DATAPATH_TYPE != "" ]]; then
+ sudo ovs-vsctl set Bridge $bridge datapath_type=${OVS_DATAPATH_TYPE}
+ fi
sudo ovs-vsctl --no-wait br-set-external-id $bridge bridge-id $bridge
}
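
``OVS_DATAPATH_TYPE`` is only applied when non-empty; an assumed example selecting a userspace datapath:

    OVS_DATAPATH_TYPE=netdev   # leave unset to keep the default kernel datapath
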
diff --git a/lib/neutron_plugins/plumgrid b/lib/neutron_plugins/plumgrid
index 178bca7..37b9e4c 100644
--- a/lib/neutron_plugins/plumgrid
+++ b/lib/neutron_plugins/plumgrid
@@ -17,7 +17,6 @@
function neutron_plugin_configure_common {
Q_PLUGIN_CONF_PATH=etc/neutron/plugins/plumgrid
Q_PLUGIN_CONF_FILENAME=plumgrid.ini
- Q_DB_NAME="plumgrid_neutron"
Q_PLUGIN_CLASS="neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2"
PLUMGRID_DIRECTOR_IP=${PLUMGRID_DIRECTOR_IP:-localhost}
PLUMGRID_DIRECTOR_PORT=${PLUMGRID_DIRECTOR_PORT:-7766}
diff --git a/lib/neutron_plugins/ryu b/lib/neutron_plugins/ryu
index ceb89fa..f45a797 100644
--- a/lib/neutron_plugins/ryu
+++ b/lib/neutron_plugins/ryu
@@ -25,7 +25,6 @@
function neutron_plugin_configure_common {
Q_PLUGIN_CONF_PATH=etc/neutron/plugins/ryu
Q_PLUGIN_CONF_FILENAME=ryu.ini
- Q_DB_NAME="ovs_neutron"
Q_PLUGIN_CLASS="neutron.plugins.ryu.ryu_neutron_plugin.RyuNeutronPluginV2"
}
diff --git a/lib/neutron_plugins/vmware_nsx b/lib/neutron_plugins/vmware_nsx
index c7672db..5802ebf 100644
--- a/lib/neutron_plugins/vmware_nsx
+++ b/lib/neutron_plugins/vmware_nsx
@@ -40,7 +40,6 @@
function neutron_plugin_configure_common {
Q_PLUGIN_CONF_PATH=etc/neutron/plugins/vmware
Q_PLUGIN_CONF_FILENAME=nsx.ini
- Q_DB_NAME="neutron_nsx"
Q_PLUGIN_CLASS="neutron.plugins.vmware.plugin.NsxPlugin"
}
diff --git a/lib/nova b/lib/nova
index 5d879db..6b1afd9 100644
--- a/lib/nova
+++ b/lib/nova
@@ -173,14 +173,15 @@
clean_iptables
# Destroy old instances
- instances=`sudo virsh list --all | grep $INSTANCE_NAME_PREFIX | sed "s/.*\($INSTANCE_NAME_PREFIX[0-9a-fA-F]*\).*/\1/g"`
+ local instances=`sudo virsh list --all | grep $INSTANCE_NAME_PREFIX | sed "s/.*\($INSTANCE_NAME_PREFIX[0-9a-fA-F]*\).*/\1/g"`
if [ ! "$instances" = "" ]; then
echo $instances | xargs -n1 sudo virsh destroy || true
echo $instances | xargs -n1 sudo virsh undefine --managed-save || true
fi
# Logout and delete iscsi sessions
- tgts=$(sudo iscsiadm --mode node | grep $VOLUME_NAME_PREFIX | cut -d ' ' -f2)
+ local tgts=$(sudo iscsiadm --mode node | grep $VOLUME_NAME_PREFIX | cut -d ' ' -f2)
+ local target
for target in $tgts; do
sudo iscsiadm --mode node -T $target --logout || true
done
@@ -218,14 +219,14 @@
sudo chown root:root $NOVA_CONF_DIR/rootwrap.conf
sudo chmod 0644 $NOVA_CONF_DIR/rootwrap.conf
# Specify rootwrap.conf as first parameter to nova-rootwrap
- ROOTWRAP_SUDOER_CMD="$NOVA_ROOTWRAP $NOVA_CONF_DIR/rootwrap.conf *"
+ local rootwrap_sudoer_cmd="$NOVA_ROOTWRAP $NOVA_CONF_DIR/rootwrap.conf *"
# Set up the rootwrap sudoers for nova
- TEMPFILE=`mktemp`
- echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE
- chmod 0440 $TEMPFILE
- sudo chown root:root $TEMPFILE
- sudo mv $TEMPFILE /etc/sudoers.d/nova-rootwrap
+ local tempfile=`mktemp`
+ echo "$STACK_USER ALL=(root) NOPASSWD: $rootwrap_sudoer_cmd" >$tempfile
+ chmod 0440 $tempfile
+ sudo chown root:root $tempfile
+ sudo mv $tempfile /etc/sudoers.d/nova-rootwrap
}
# configure_nova() - Set config files, create data dirs, etc
@@ -274,7 +275,7 @@
if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then
if is_ubuntu; then
if [[ ! "$DISTRO" > natty ]]; then
- cgline="none /cgroup cgroup cpuacct,memory,devices,cpu,freezer,blkio 0 0"
+ local cgline="none /cgroup cgroup cpuacct,memory,devices,cpu,freezer,blkio 0 0"
sudo mkdir -p /cgroup
if ! grep -q cgroup /etc/fstab; then
echo "$cgline" | sudo tee -a /etc/fstab
@@ -328,29 +329,29 @@
# Migrated from keystone_data.sh
create_nova_accounts() {
- SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
- ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
+ local service_tenant=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+ local admin_role=$(openstack role list | awk "/ admin / { print \$2 }")
# Nova
if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
- NOVA_USER=$(get_or_create_user "nova" \
- "$SERVICE_PASSWORD" $SERVICE_TENANT)
- get_or_add_user_role $ADMIN_ROLE $NOVA_USER $SERVICE_TENANT
+ local nova_user=$(get_or_create_user "nova" \
+ "$SERVICE_PASSWORD" $service_tenant)
+ get_or_add_user_role $admin_role $nova_user $service_tenant
if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
- NOVA_SERVICE=$(get_or_create_service "nova" \
+ local nova_service=$(get_or_create_service "nova" \
"compute" "Nova Compute Service")
- get_or_create_endpoint $NOVA_SERVICE \
+ get_or_create_endpoint $nova_service \
"$REGION_NAME" \
"$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" \
"$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" \
"$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s"
- NOVA_V3_SERVICE=$(get_or_create_service "novav3" \
+ local nova_v3_service=$(get_or_create_service "novav3" \
"computev3" "Nova Compute Service V3")
- get_or_create_endpoint $NOVA_V3_SERVICE \
+ get_or_create_endpoint $nova_v3_service \
"$REGION_NAME" \
"$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v3" \
"$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v3" \
@@ -369,9 +370,9 @@
# EC2
if [[ "$KEYSTONE_CATALOG_BACKEND" = "sql" ]]; then
- EC2_SERVICE=$(get_or_create_service "ec2" \
+ local ec2_service=$(get_or_create_service "ec2" \
"ec2" "EC2 Compatibility Layer")
- get_or_create_endpoint $EC2_SERVICE \
+ get_or_create_endpoint $ec2_service \
"$REGION_NAME" \
"http://$SERVICE_HOST:8773/services/Cloud" \
"http://$SERVICE_HOST:8773/services/Admin" \
@@ -383,8 +384,8 @@
if is_service_enabled n-obj swift3; then
if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
- S3_SERVICE=$(get_or_create_service "s3" "s3" "S3")
- get_or_create_endpoint $S3_SERVICE \
+ local s3_service=$(get_or_create_service "s3" "s3" "S3")
+ get_or_create_endpoint $s3_service \
"$REGION_NAME" \
"http://$SERVICE_HOST:$S3_SERVICE_PORT" \
"http://$SERVICE_HOST:$S3_SERVICE_PORT" \
@@ -480,18 +481,6 @@
iniset $NOVA_CONF DEFAULT notification_driver "messaging"
fi
- # Provide some transition from ``EXTRA_FLAGS`` to ``EXTRA_OPTS``
- if [[ -z "$EXTRA_OPTS" && -n "$EXTRA_FLAGS" ]]; then
- EXTRA_OPTS=$EXTRA_FLAGS
- fi
-
- # Define extra nova conf flags by defining the array ``EXTRA_OPTS``.
- # For Example: ``EXTRA_OPTS=(foo=true bar=2)``
- for I in "${EXTRA_OPTS[@]}"; do
- # Replace the first '=' with ' ' for iniset syntax
- iniset $NOVA_CONF DEFAULT ${I/=/ }
- done
-
# All nova-compute workers need to know the vnc configuration options
# These settings don't hurt anything if n-xvnc and n-novnc are disabled
if is_service_enabled n-cpu; then
@@ -687,6 +676,7 @@
# Use 'sg' to execute nova-compute as a member of the **$LIBVIRT_GROUP** group.
screen_it n-cpu "cd $NOVA_DIR && sg $LIBVIRT_GROUP '$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf'"
elif [[ "$VIRT_DRIVER" = 'fake' ]]; then
+ local i
for i in `seq 1 $NUMBER_FAKE_NOVA_COMPUTE`; do
screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf --config-file <(echo -e '[DEFAULT]\nhost=${HOSTNAME}${i}')"
done
diff --git a/lib/nova_plugins/hypervisor-baremetal b/lib/nova_plugins/hypervisor-baremetal
index 1d4d414..22d16a6 100644
--- a/lib/nova_plugins/hypervisor-baremetal
+++ b/lib/nova_plugins/hypervisor-baremetal
@@ -58,12 +58,6 @@
sudo cp "$FILES/dnsmasq-for-baremetal-from-nova-network.conf" "$BM_DNSMASQ_CONF"
iniset $NOVA_CONF DEFAULT dnsmasq_config_file "$BM_DNSMASQ_CONF"
fi
-
- # Define extra baremetal nova conf flags by defining the array ``EXTRA_BAREMETAL_OPTS``.
- for I in "${EXTRA_BAREMETAL_OPTS[@]}"; do
- # Attempt to convert flags to options
- iniset $NOVA_CONF baremetal ${I/=/ }
- done
}
# install_nova_hypervisor() - Install external components
diff --git a/lib/tempest b/lib/tempest
index 59c5bbc..5ad2572 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -307,9 +307,9 @@
iniset $TEMPEST_CONFIG boto ec2_url "http://$SERVICE_HOST:8773/services/Cloud"
iniset $TEMPEST_CONFIG boto s3_url "http://$SERVICE_HOST:${S3_SERVICE_PORT:-3333}"
iniset $TEMPEST_CONFIG boto s3_materials_path "$BOTO_MATERIALS_PATH"
- iniset $TEMPEST_CONFIG boto ari_manifest cirros-${CIRROS_VERSION}-x86_64-initrd.manifest.xml
- iniset $TEMPEST_CONFIG boto ami_manifest cirros-${CIRROS_VERSION}-x86_64-blank.img.manifest.xml
- iniset $TEMPEST_CONFIG boto aki_manifest cirros-${CIRROS_VERSION}-x86_64-vmlinuz.manifest.xml
+ iniset $TEMPEST_CONFIG boto ari_manifest cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-initrd.manifest.xml
+ iniset $TEMPEST_CONFIG boto ami_manifest cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-blank.img.manifest.xml
+ iniset $TEMPEST_CONFIG boto aki_manifest cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-vmlinuz.manifest.xml
iniset $TEMPEST_CONFIG boto instance_type "$boto_instance_type"
iniset $TEMPEST_CONFIG boto http_socket_timeout 30
iniset $TEMPEST_CONFIG boto ssh_user ${DEFAULT_INSTANCE_USER:-cirros}
@@ -329,10 +329,10 @@
fi
# Scenario
- iniset $TEMPEST_CONFIG scenario img_dir "$FILES/images/cirros-${CIRROS_VERSION}-x86_64-uec"
- iniset $TEMPEST_CONFIG scenario ami_img_file "cirros-${CIRROS_VERSION}-x86_64-blank.img"
- iniset $TEMPEST_CONFIG scenario ari_img_file "cirros-${CIRROS_VERSION}-x86_64-initrd"
- iniset $TEMPEST_CONFIG scenario aki_img_file "cirros-${CIRROS_VERSION}-x86_64-vmlinuz"
+ iniset $TEMPEST_CONFIG scenario img_dir "$FILES/images/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec"
+ iniset $TEMPEST_CONFIG scenario ami_img_file "cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-blank.img"
+ iniset $TEMPEST_CONFIG scenario ari_img_file "cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-initrd"
+ iniset $TEMPEST_CONFIG scenario aki_img_file "cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-vmlinuz"
# Large Ops Number
iniset $TEMPEST_CONFIG scenario large_ops_number ${TEMPEST_LARGE_OPS_NUMBER:-0}
@@ -411,8 +411,8 @@
# init_tempest() - Initialize ec2 images
function init_tempest {
- local base_image_name=cirros-${CIRROS_VERSION}-x86_64
- # /opt/stack/devstack/files/images/cirros-${CIRROS_VERSION}-x86_64-uec
+ local base_image_name=cirros-${CIRROS_VERSION}-${CIRROS_ARCH}
+ # /opt/stack/devstack/files/images/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec
local image_dir="$FILES/images/${base_image_name}-uec"
local kernel="$image_dir/${base_image_name}-vmlinuz"
local ramdisk="$image_dir/${base_image_name}-initrd"
@@ -424,9 +424,9 @@
( #new namespace
# tenant:demo ; user: demo
source $TOP_DIR/accrc/demo/demo
- euca-bundle-image -r x86_64 -i "$kernel" --kernel true -d "$BOTO_MATERIALS_PATH"
- euca-bundle-image -r x86_64 -i "$ramdisk" --ramdisk true -d "$BOTO_MATERIALS_PATH"
- euca-bundle-image -r x86_64 -i "$disk_image" -d "$BOTO_MATERIALS_PATH"
+ euca-bundle-image -r ${CIRROS_ARCH} -i "$kernel" --kernel true -d "$BOTO_MATERIALS_PATH"
+ euca-bundle-image -r ${CIRROS_ARCH} -i "$ramdisk" --ramdisk true -d "$BOTO_MATERIALS_PATH"
+ euca-bundle-image -r ${CIRROS_ARCH} -i "$disk_image" -d "$BOTO_MATERIALS_PATH"
) 2>&1 </dev/null | cat
else
echo "Boto materials are not prepared"
diff --git a/lib/tls b/lib/tls
index a84bb76..e58e513 100644
--- a/lib/tls
+++ b/lib/tls
@@ -323,7 +323,8 @@
#
# Uses global ``SSL_ENABLED_SERVICES``
function is_ssl_enabled_service {
- services=$@
+ local services=$@
+ local service=""
for service in ${services}; do
[[ ,${SSL_ENABLED_SERVICES}, =~ ,${service}, ]] && return 0
done
diff --git a/lib/trove b/lib/trove
index 2a54336..f6a933e 100644
--- a/lib/trove
+++ b/lib/trove
@@ -180,6 +180,7 @@
iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT nova_proxy_admin_pass $RADMIN_USER_PASS
iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT
iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT control_exchange trove
+ iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT ignore_users os_admin
iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT log_dir /tmp/
iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT log_file trove-guestagent.log
setup_trove_logging $TROVE_CONF_DIR/trove-guestagent.conf
diff --git a/samples/local.conf b/samples/local.conf
index c8126c2..20c5892 100644
--- a/samples/local.conf
+++ b/samples/local.conf
@@ -24,8 +24,10 @@
# While ``stack.sh`` is happy to run without ``localrc``, devlife is better when
# there are a few minimal variables set:
-# If the ``*_PASSWORD`` variables are not set here you will be prompted to enter
-# values for them by ``stack.sh`` and they will be added to ``local.conf``.
+# If the ``SERVICE_TOKEN`` and ``*_PASSWORD`` variables are not set
+# here you will be prompted to enter values for them by ``stack.sh``
+# and they will be added to ``local.conf``.
+SERVICE_TOKEN=azertytoken
ADMIN_PASSWORD=nomoresecrete
MYSQL_PASSWORD=stackdb
RABBIT_PASSWORD=stackqueue
diff --git a/stack.sh b/stack.sh
index cdfa3da..77b577a 100755
--- a/stack.sh
+++ b/stack.sh
@@ -152,7 +152,7 @@
# Look for obsolete stuff
if [[ ,${ENABLED_SERVICES}, =~ ,"swift", ]]; then
echo "FATAL: 'swift' is not supported as a service name"
- echo "FATAL: Use the actual swift service names to enable tham as required:"
+ echo "FATAL: Use the actual swift service names to enable them as required:"
echo "FATAL: s-proxy s-object s-container s-account"
exit 1
fi
@@ -219,15 +219,6 @@
# Some distros need to add repos beyond the defaults provided by the vendor
# to pick up required packages.
-# The Debian Wheezy official repositories do not contain all required packages,
-# add gplhost repository.
-if [[ "$os_VENDOR" =~ (Debian) ]]; then
- echo 'deb http://archive.gplhost.com/debian grizzly main' | sudo tee /etc/apt/sources.list.d/gplhost_wheezy-backports.list
- echo 'deb http://archive.gplhost.com/debian grizzly-backports main' | sudo tee -a /etc/apt/sources.list.d/gplhost_wheezy-backports.list
- apt_get update
- apt_get install --force-yes gplhost-archive-keyring
-fi
-
if [[ is_fedora && $DISTRO =~ (rhel) ]]; then
# Installing Open vSwitch on RHEL requires enabling the RDO repo.
RHEL6_RDO_REPO_RPM=${RHEL6_RDO_REPO_RPM:-"http://rdo.fedorapeople.org/openstack-icehouse/rdo-release-icehouse.rpm"}
@@ -317,9 +308,6 @@
# Allow the use of an alternate hostname (such as localhost/127.0.0.1) for service endpoints.
SERVICE_HOST=${SERVICE_HOST:-$HOST_IP}
-# Allow the use of an alternate protocol (such as https) for service endpoints
-SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http}
-
# Configure services to use syslog instead of writing to individual log files
SYSLOG=`trueorfalse False $SYSLOG`
SYSLOG_HOST=${SYSLOG_HOST:-$HOST_IP}
@@ -668,10 +656,10 @@
# Configure an appropriate python environment
if [[ "$OFFLINE" != "True" ]]; then
- $TOP_DIR/tools/install_pip.sh
+ PYPI_ALTERNATIVE_URL=$PYPI_ALTERNATIVE_URL $TOP_DIR/tools/install_pip.sh
fi
-# Do the ugly hacks for borken packages and distros
+# Do the ugly hacks for broken packages and distros
$TOP_DIR/tools/fixup_stuff.sh
@@ -1404,41 +1392,6 @@
echo_summary "WARNING: $DEPRECATED_TEXT"
fi
-# TODO(dtroyer): Remove EXTRA_OPTS after stable/icehouse branch is cut
-# Specific warning for deprecated configs
-if [[ -n "$EXTRA_OPTS" ]]; then
- echo ""
- echo_summary "WARNING: EXTRA_OPTS is used"
- echo "You are using EXTRA_OPTS to pass configuration into nova.conf."
- echo "Please convert that configuration in localrc to a nova.conf section in local.conf:"
- echo "EXTRA_OPTS will be removed early in the Juno development cycle"
- echo "
-[[post-config|\$NOVA_CONF]]
-[DEFAULT]
-"
- for I in "${EXTRA_OPTS[@]}"; do
- # Replace the first '=' with ' ' for iniset syntax
- echo ${I}
- done
-fi
-
-# TODO(dtroyer): Remove EXTRA_BAREMETAL_OPTS after stable/icehouse branch is cut
-if [[ -n "$EXTRA_BAREMETAL_OPTS" ]]; then
- echo ""
- echo_summary "WARNING: EXTRA_BAREMETAL_OPTS is used"
- echo "You are using EXTRA_BAREMETAL_OPTS to pass configuration into nova.conf."
- echo "Please convert that configuration in localrc to a nova.conf section in local.conf:"
- echo "EXTRA_BAREMETAL_OPTS will be removed early in the Juno development cycle"
- echo "
-[[post-config|\$NOVA_CONF]]
-[baremetal]
-"
- for I in "${EXTRA_BAREMETAL_OPTS[@]}"; do
- # Replace the first '=' with ' ' for iniset syntax
- echo ${I}
- done
-fi
-
# TODO(dtroyer): Remove Q_AGENT_EXTRA_AGENT_OPTS after stable/juno branch is cut
if [[ -n "$Q_AGENT_EXTRA_AGENT_OPTS" ]]; then
echo ""
@@ -1473,40 +1426,6 @@
done
fi
-# TODO(dtroyer): Remove Q_DHCP_EXTRA_DEFAULT_OPTS after stable/icehouse branch is cut
-if [[ -n "$Q_DHCP_EXTRA_DEFAULT_OPTS" ]]; then
- echo ""
- echo_summary "WARNING: Q_DHCP_EXTRA_DEFAULT_OPTS is used"
- echo "You are using Q_DHCP_EXTRA_DEFAULT_OPTS to pass configuration into $Q_DHCP_CONF_FILE."
- echo "Please convert that configuration in localrc to a $Q_DHCP_CONF_FILE section in local.conf:"
- echo "Q_DHCP_EXTRA_DEFAULT_OPTS will be removed early in the Juno development cycle"
- echo "
-[[post-config|/\$Q_DHCP_CONF_FILE]]
-[DEFAULT]
-"
- for I in "${Q_DHCP_EXTRA_DEFAULT_OPTS[@]}"; do
- # Replace the first '=' with ' ' for iniset syntax
- echo ${I}
- done
-fi
-
-# TODO(dtroyer): Remove Q_SRV_EXTRA_DEFAULT_OPTS after stable/icehouse branch is cut
-if [[ -n "$Q_SRV_EXTRA_DEFAULT_OPTS" ]]; then
- echo ""
- echo_summary "WARNING: Q_SRV_EXTRA_DEFAULT_OPTS is used"
- echo "You are using Q_SRV_EXTRA_DEFAULT_OPTS to pass configuration into $NEUTRON_CONF."
- echo "Please convert that configuration in localrc to a $NEUTRON_CONF section in local.conf:"
- echo "Q_SRV_EXTRA_DEFAULT_OPTS will be removed early in the Juno development cycle"
- echo "
-[[post-config|\$NEUTRON_CONF]]
-[DEFAULT]
-"
- for I in "${Q_SRV_EXTRA_DEFAULT_OPTS[@]}"; do
- # Replace the first '=' with ' ' for iniset syntax
- echo ${I}
- done
-fi
-
# TODO(dtroyer): Remove CINDER_MULTI_LVM_BACKEND after stable/juno branch is cut
if [[ "$CINDER_MULTI_LVM_BACKEND" = "True" ]]; then
echo ""
diff --git a/stackrc b/stackrc
index a05fc18..7f13232 100644
--- a/stackrc
+++ b/stackrc
@@ -332,14 +332,15 @@
# glance as a disk image. If it ends in .gz, it is uncompressed first.
# example:
# http://cloud-images.ubuntu.com/releases/precise/release/ubuntu-12.04-server-cloudimg-armel-disk1.img
-# http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-rootfs.img.gz
+# http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz
# * OpenVZ image:
# OpenVZ uses its own format of image, and does not support UEC style images
#IMAGE_URLS="http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz" # old ttylinux-uec image
-#IMAGE_URLS="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-disk.img" # cirros full disk image
+#IMAGE_URLS="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image
CIRROS_VERSION=${CIRROS_VERSION:-"0.3.2"}
+CIRROS_ARCH=${CIRROS_ARCH:-"x86_64"}
# Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of
# which may be set in ``localrc``. Also allow ``DEFAULT_IMAGE_NAME`` and
@@ -351,11 +352,11 @@
libvirt)
case "$LIBVIRT_TYPE" in
lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc
- DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-x86_64-rootfs}
- IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-rootfs.img.gz"};;
+ DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs}
+ IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz"};;
*) # otherwise, use the uec style image (with kernel, ramdisk, disk)
- DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-x86_64-uec}
- IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-uec.tar.gz"};;
+ DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec}
+ IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec.tar.gz"};;
esac
;;
vsphere)
@@ -366,8 +367,8 @@
IMAGE_URLS=${IMAGE_URLS:-"https://github.com/downloads/citrix-openstack/warehouse/cirros-0.3.0-x86_64-disk.vhd.tgz"}
IMAGE_URLS+=",http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-uec.tar.gz";;
*) # Default to Cirros with kernel, ramdisk and disk image
- DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-x86_64-uec}
- IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-uec.tar.gz"};;
+ DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec}
+ IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec.tar.gz"};;
esac
# Use 64bit fedora image if heat is enabled
@@ -431,6 +432,9 @@
# Undo requirements changes by global requirements
UNDO_REQUIREMENTS=${UNDO_REQUIREMENTS:-True}
+# Allow the use of an alternate protocol (such as https) for service endpoints
+SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http}
+
# Local variables:
# mode: shell-script
# End:
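
With ``CIRROS_ARCH`` factored out, a non-x86 test image can be selected from ``localrc`` (assumed values; the image must exist on the CirrOS mirror):

    CIRROS_VERSION=0.3.2
    CIRROS_ARCH=arm     # default remains x86_64
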
diff --git a/tools/install_pip.sh b/tools/install_pip.sh
index 150faaa..55ef93e 100755
--- a/tools/install_pip.sh
+++ b/tools/install_pip.sh
@@ -50,6 +50,25 @@
}
+function configure_pypi_alternative_url {
+ PIP_ROOT_FOLDER="$HOME/.pip"
+ PIP_CONFIG_FILE="$PIP_ROOT_FOLDER/pip.conf"
+ if [[ ! -d $PIP_ROOT_FOLDER ]]; then
+ echo "Creating $PIP_ROOT_FOLDER"
+ mkdir $PIP_ROOT_FOLDER
+ fi
+ if [[ ! -f $PIP_CONFIG_FILE ]]; then
+ echo "Creating $PIP_CONFIG_FILE"
+ touch $PIP_CONFIG_FILE
+ fi
+ if ! ini_has_option "$PIP_CONFIG_FILE" "global" "index-url"; then
+        # the index-url option is not set yet, so add it
+        iniset "$PIP_CONFIG_FILE" "global" "index-url" "$PYPI_ALTERNATIVE_URL"
+ fi
+
+}
+
+
# Show starting versions
get_versions
@@ -60,6 +79,10 @@
install_get_pip
+if [[ -n $PYPI_ALTERNATIVE_URL ]]; then
+ configure_pypi_alternative_url
+fi
+
pip_install -U setuptools
get_versions
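
The new knob is forwarded from ``stack.sh``, so pointing DevStack at an alternate PyPI index is a one-liner (the mirror URL is a placeholder):

    PYPI_ALTERNATIVE_URL=http://pypi-mirror.example.com/simple ./stack.sh
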
diff --git a/tools/xen/scripts/on_exit.sh b/tools/xen/scripts/on_exit.sh
index 2441e3d..2846dc4 100755
--- a/tools/xen/scripts/on_exit.sh
+++ b/tools/xen/scripts/on_exit.sh
@@ -3,7 +3,9 @@
set -e
set -o xtrace
-declare -a on_exit_hooks
+if [ -z "${on_exit_hooks:-}" ]; then
+ on_exit_hooks=()
+fi
on_exit()
{