Merge "XenServer: the cron job shouldn't print debug text into stderr"
diff --git a/.gitignore b/.gitignore
index 2778a65..8870bb3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -24,3 +24,4 @@
shocco
src
stack-screenrc
+userrc_early
diff --git a/Makefile b/Makefile
index 082aff2..a6bb230 100644
--- a/Makefile
+++ b/Makefile
@@ -26,7 +26,7 @@
./unstack.sh
wheels:
- WHEELHOUSE=$(WHEELHOUSE) tools/build-wheels.sh
+ WHEELHOUSE=$(WHEELHOUSE) tools/build_wheels.sh
docs:
tox -edocs
diff --git a/clean.sh b/clean.sh
index b22a29c..ae28aa9 100755
--- a/clean.sh
+++ b/clean.sh
@@ -134,7 +134,9 @@
# Clean up files
-FILES_TO_CLEAN=".localrc.auto docs/files docs/html shocco/ stack-screenrc test*.conf* test.ini*"
+FILES_TO_CLEAN=".localrc.auto .localrc.password "
+FILES_TO_CLEAN+="docs/files docs/html shocco/ "
+FILES_TO_CLEAN+="stack-screenrc test*.conf* test.ini* "
FILES_TO_CLEAN+=".stackenv .prereqs"
for file in $FILES_TO_CLEAN; do
diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst
index 1530a84..5660bc5 100644
--- a/doc/source/guides/multinode-lab.rst
+++ b/doc/source/guides/multinode-lab.rst
@@ -128,7 +128,7 @@
MULTI_HOST=1
LOGFILE=/opt/stack/logs/stack.sh.log
ADMIN_PASSWORD=labstack
- MYSQL_PASSWORD=supersecret
+ DATABASE_PASSWORD=supersecret
RABBIT_PASSWORD=supersecrete
SERVICE_PASSWORD=supersecrete
SERVICE_TOKEN=xyzpdqlazydog
@@ -169,7 +169,7 @@
MULTI_HOST=1
LOGFILE=/opt/stack/logs/stack.sh.log
ADMIN_PASSWORD=labstack
- MYSQL_PASSWORD=supersecret
+ DATABASE_PASSWORD=supersecret
RABBIT_PASSWORD=supersecrete
SERVICE_PASSWORD=supersecrete
SERVICE_TOKEN=xyzpdqlazydog
diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst
index 4248445..5891f68 100644
--- a/doc/source/guides/neutron.rst
+++ b/doc/source/guides/neutron.rst
@@ -52,7 +52,7 @@
RABBIT_HOST=172.18.161.6
GLANCE_HOSTPORT=172.18.161.6:9292
ADMIN_PASSWORD=secrete
- MYSQL_PASSWORD=secrete
+ DATABASE_PASSWORD=secrete
RABBIT_PASSWORD=secrete
SERVICE_PASSWORD=secrete
SERVICE_TOKEN=secrete
@@ -72,20 +72,57 @@
+Neutron Networking with Open vSwitch and Provider Networks
+==========================================================
+
+In some instances, it is desirable to use neutron's provider
+networking extension, so that networks that are configured on an
+external router can be utilized by neutron, and instances created via
+Nova can attach to the network managed by the external router.
+
+For example, in some lab environments, a hardware router has been
+pre-configured by another party, and an OpenStack developer has been
+given a VLAN tag and IP address range, so that instances created via
+DevStack will use the external router for L3 connectivity, as opposed
+to the neutron L3 service.
+
+Physical Network Setup
+----------------------
+
+.. nwdiag::
+
+ nwdiag {
+ inet [ shape = cloud ];
+ router;
+ inet -- router;
+
+ network provider_net {
+ address = "203.0.113.0/24"
+ router [ address = "203.0.113.1" ];
+ controller;
+ compute1;
+ compute2;
+ }
+
+ network control_plane {
+ router [ address = "10.0.0.1" ]
+ address = "10.0.0.0/24"
+ controller [ address = "10.0.0.2" ]
+ compute1 [ address = "10.0.0.3" ]
+ compute2 [ address = "10.0.0.4" ]
+ }
+ }
-Using Neutron with Multiple Interfaces
-======================================
-
-The first interface, eth0 is used for the OpenStack management (API,
-message bus, etc) as well as for ssh for an administrator to access
-the machine.
+On a compute node, the first interface, eth0 is used for the OpenStack
+management (API, message bus, etc) as well as for ssh for an
+administrator to access the machine.
::
stack@compute:~$ ifconfig eth0
eth0 Link encap:Ethernet HWaddr bc:16:65:20:af:fc
- inet addr:192.168.1.18
+ inet addr:10.0.0.3
eth1 is manually configured at boot to not have an IP address.
Consult your operating system documentation for the appropriate
@@ -101,9 +138,6 @@
The second physical interface, eth1 is added to a bridge (in this case
named br-ex), which is used to forward network traffic from guest VMs.
-Network traffic from eth1 on the compute nodes is then NAT'd by the
-controller node that runs Neutron's `neutron-l3-agent` and provides L3
-connectivity.
::
@@ -123,67 +157,6 @@
Interface "eth1"
-
-
-
-Neutron Networking with Open vSwitch
-====================================
-
-Configuring neutron, OpenStack Networking in DevStack is very similar to
-configuring `nova-network` - many of the same configuration variables
-(like `FIXED_RANGE` and `FLOATING_RANGE`) used by `nova-network` are
-used by neutron, which is intentional.
-
-The only difference is the disabling of `nova-network` in your
-local.conf, and the enabling of the neutron components.
-
-
-Configuration
--------------
-
-::
-
- FIXED_RANGE=10.0.0.0/24
- FLOATING_RANGE=192.168.27.0/24
- PUBLIC_NETWORK_GATEWAY=192.168.27.2
-
- disable_service n-net
- enable_service q-svc
- enable_service q-agt
- enable_service q-dhcp
- enable_service q-meta
- enable_service q-l3
-
- Q_USE_SECGROUP=True
- ENABLE_TENANT_VLANS=True
- TENANT_VLAN_RANGE=1000:1999
- PHYSICAL_NETWORK=default
- OVS_PHYSICAL_BRIDGE=br-ex
-
-In this configuration we are defining FLOATING_RANGE to be a
-subnet that exists in the private RFC1918 address space - however in
-in a real setup FLOATING_RANGE would be a public IP address range.
-
-Note that extension drivers for the ML2 plugin is set by
-`Q_ML2_PLUGIN_EXT_DRIVERS`, and it includes 'port_security' by default. If you
-want to remove all the extension drivers (even 'port_security'), set
-`Q_ML2_PLUGIN_EXT_DRIVERS` to blank.
-
-Neutron Networking with Open vSwitch and Provider Networks
-==========================================================
-
-In some instances, it is desirable to use neutron's provider
-networking extension, so that networks that are configured on an
-external router can be utilized by neutron, and instances created via
-Nova can attach to the network managed by the external router.
-
-For example, in some lab environments, a hardware router has been
-pre-configured by another party, and an OpenStack developer has been
-given a VLAN tag and IP address range, so that instances created via
-DevStack will use the external router for L3 connectivity, as opposed
-to the neutron L3 service.
-
-
Service Configuration
---------------------
@@ -208,8 +181,19 @@
::
+ HOST_IP=10.0.0.2
+ SERVICE_HOST=10.0.0.2
+ MYSQL_HOST=10.0.0.2
+ RABBIT_HOST=10.0.0.2
+ GLANCE_HOSTPORT=10.0.0.2:9292
PUBLIC_INTERFACE=eth1
+ ADMIN_PASSWORD=secrete
+ MYSQL_PASSWORD=secrete
+ RABBIT_PASSWORD=secrete
+ SERVICE_PASSWORD=secrete
+ SERVICE_TOKEN=secrete
+
## Neutron options
Q_USE_SECGROUP=True
ENABLE_TENANT_VLANS=True
@@ -241,24 +227,35 @@
allocated to you, so that you could access your instances from the
public internet.
-The following is a snippet of the DevStack configuration on the
-compute node.
+The following is the DevStack configuration on
+compute node 1.
::
+ HOST_IP=10.0.0.3
+ SERVICE_HOST=10.0.0.2
+ MYSQL_HOST=10.0.0.2
+ RABBIT_HOST=10.0.0.2
+ GLANCE_HOSTPORT=10.0.0.2:9292
+ ADMIN_PASSWORD=secrete
+ MYSQL_PASSWORD=secrete
+ RABBIT_PASSWORD=secrete
+ SERVICE_PASSWORD=secrete
+ SERVICE_TOKEN=secrete
+
# Services that a compute node runs
ENABLED_SERVICES=n-cpu,rabbit,q-agt
## Neutron options
- Q_USE_SECGROUP=True
- ENABLE_TENANT_VLANS=True
- TENANT_VLAN_RANGE=3001:4000
PHYSICAL_NETWORK=default
OVS_PHYSICAL_BRIDGE=br-ex
PUBLIC_INTERFACE=eth1
Q_USE_PROVIDER_NETWORKING=True
Q_L3_ENABLED=False
+Compute node 2's configuration will be exactly the same, except
+`HOST_IP` will be `10.0.0.4`.
+
When DevStack is configured to use provider networking (via
`Q_USE_PROVIDER_NETWORKING` is True and `Q_L3_ENABLED` is False) -
DevStack will automatically add the network interface defined in
@@ -304,5 +303,11 @@
sudo service iptables save
sudo ufw disable
+Configuring Extension Drivers for the ML2 Plugin
+------------------------------------------------
+Extension drivers for the ML2 plugin are set with the variable
+`Q_ML2_PLUGIN_EXT_DRIVERS`, which includes the 'port_security' extension
+by default. If you want to remove all the extension drivers (even
+'port_security'), set `Q_ML2_PLUGIN_EXT_DRIVERS` to blank.
diff --git a/doc/source/guides/single-machine.rst b/doc/source/guides/single-machine.rst
index 236ece9..a01c368 100644
--- a/doc/source/guides/single-machine.rst
+++ b/doc/source/guides/single-machine.rst
@@ -105,7 +105,7 @@
FIXED_NETWORK_SIZE=256
FLAT_INTERFACE=eth0
ADMIN_PASSWORD=supersecret
- MYSQL_PASSWORD=iheartdatabases
+ DATABASE_PASSWORD=iheartdatabases
RABBIT_PASSWORD=flopsymopsy
SERVICE_PASSWORD=iheartksl
SERVICE_TOKEN=xyzpdqlazydog
diff --git a/doc/source/guides/single-vm.rst b/doc/source/guides/single-vm.rst
index 515cd50..53c3fa9 100644
--- a/doc/source/guides/single-vm.rst
+++ b/doc/source/guides/single-vm.rst
@@ -64,7 +64,7 @@
cd devstack
echo '[[local|localrc]]' > local.conf
echo ADMIN_PASSWORD=password >> local.conf
- echo MYSQL_PASSWORD=password >> local.conf
+ echo DATABASE_PASSWORD=password >> local.conf
echo RABBIT_PASSWORD=password >> local.conf
echo SERVICE_PASSWORD=password >> local.conf
echo SERVICE_TOKEN=tokentoken >> local.conf
diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst
index 85fd7cc..49b3a7f 100644
--- a/doc/source/plugin-registry.rst
+++ b/doc/source/plugin-registry.rst
@@ -22,6 +22,8 @@
+------------------+---------------------------------------------+--------------------+
|aodh |git://git.openstack.org/openstack/aodh | alarming |
+------------------+---------------------------------------------+--------------------+
+|barbican |git://git.openstack.org/openstack/barbican | key management |
++------------------+---------------------------------------------+--------------------+
|ceilometer |git://git.openstack.org/openstack/ceilometer | metering |
+------------------+---------------------------------------------+--------------------+
|gnocchi |git://git.openstack.org/openstack/gnocchi | metric |
@@ -66,7 +68,7 @@
| Plugin Name | URL | Comments |
| | | |
+-------------+------------------------------------------------------------+------------+
-|glusterfs |git://git.openstack.org/stackforge/devstack-plugin-glusterfs| |
+|glusterfs |git://git.openstack.org/openstack/devstack-plugin-glusterfs | |
+-------------+------------------------------------------------------------+------------+
| | | |
+-------------+------------------------------------------------------------+------------+
@@ -78,7 +80,7 @@
| Plugin Name | URL | Comments |
| | | |
+----------------+--------------------------------------------------+------------+
-|ec2-api |git://git.openstack.org/stackforge/ec2api |[as1]_ |
+|ec2-api |git://git.openstack.org/openstack/ec2-api |[as1]_ |
+----------------+--------------------------------------------------+------------+
|ironic-inspector|git://git.openstack.org/openstack/ironic-inspector| |
+----------------+--------------------------------------------------+------------+
diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst
index fda601b..8bd3797 100644
--- a/doc/source/plugins.rst
+++ b/doc/source/plugins.rst
@@ -56,7 +56,7 @@
An example would be as follows::
- enable_plugin ec2api git://git.openstack.org/stackforge/ec2api
+ enable_plugin ec2-api git://git.openstack.org/openstack/ec2-api
plugin.sh contract
==================
@@ -202,13 +202,12 @@
For everyday use, DevStack plugins can exist in any git tree that's
accessible on the internet. However, when using DevStack plugins in
the OpenStack gate, they must live in projects in OpenStack's
-gerrit. Both ``openstack`` namespace and ``stackforge`` namespace are
-fine. This allows testing of the plugin as well as provides network
+gerrit. This allows testing of the plugin as well as provides network
isolation against upstream git repository failures (which we see often
enough to be an issue).
Ideally a plugin will be included within the ``devstack`` directory of
-the project they are being tested. For example, the stackforge/ec2-api
+the project they are being tested. For example, the openstack/ec2-api
project has its plugin support in its own tree.
However, some times a DevStack plugin might be used solely to
@@ -218,7 +217,7 @@
integration of SDN controllers (e.g. ovn, OpenDayLight), or
integration of alternate RPC systems (e.g. zmq, qpid). In these cases
the best practice is to build a dedicated
-``stackforge/devstack-plugin-FOO`` project.
+``openstack/devstack-plugin-FOO`` project.
To enable a plugin to be used in a gate job, the following lines will
be needed in your ``jenkins/jobs/<project>.yaml`` definition in
@@ -228,12 +227,12 @@
# Because we are testing a non standard project, add the
# our project repository. This makes zuul do the right
# reference magic for testing changes.
- export PROJECTS="stackforge/ec2-api $PROJECTS"
+ export PROJECTS="openstack/ec2-api $PROJECTS"
# note the actual url here is somewhat irrelevant because it
# caches in nodepool, however make it a valid url for
# documentation purposes.
- export DEVSTACK_LOCAL_CONFIG="enable_plugin ec2-api git://git.openstack.org/stackforge/ec2-api"
+ export DEVSTACK_LOCAL_CONFIG="enable_plugin ec2-api git://git.openstack.org/openstack/ec2-api"
See Also
========
diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh
index a0de4cc..a8fbd86 100755
--- a/exercises/neutron-adv-test.sh
+++ b/exercises/neutron-adv-test.sh
@@ -122,41 +122,47 @@
}
function get_image_id {
- local IMAGE_ID=$(openstack image list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
+ local IMAGE_ID
+ IMAGE_ID=$(openstack image list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
die_if_not_set $LINENO IMAGE_ID "Failure retrieving IMAGE_ID"
echo "$IMAGE_ID"
}
function get_tenant_id {
local TENANT_NAME=$1
- local TENANT_ID=`openstack project list | grep " $TENANT_NAME " | head -n 1 | get_field 1`
+ local TENANT_ID
+ TENANT_ID=`openstack project list | grep " $TENANT_NAME " | head -n 1 | get_field 1`
die_if_not_set $LINENO TENANT_ID "Failure retrieving TENANT_ID for $TENANT_NAME"
echo "$TENANT_ID"
}
function get_user_id {
local USER_NAME=$1
- local USER_ID=`openstack user list | grep $USER_NAME | awk '{print $2}'`
+ local USER_ID
+ USER_ID=`openstack user list | grep $USER_NAME | awk '{print $2}'`
die_if_not_set $LINENO USER_ID "Failure retrieving USER_ID for $USER_NAME"
echo "$USER_ID"
}
function get_role_id {
local ROLE_NAME=$1
- local ROLE_ID=`openstack role list | grep $ROLE_NAME | awk '{print $2}'`
+ local ROLE_ID
+ ROLE_ID=`openstack role list | grep $ROLE_NAME | awk '{print $2}'`
die_if_not_set $LINENO ROLE_ID "Failure retrieving ROLE_ID for $ROLE_NAME"
echo "$ROLE_ID"
}
function get_network_id {
local NETWORK_NAME="$1"
- local NETWORK_ID=`neutron net-list -F id -- --name=$NETWORK_NAME | awk "NR==4" | awk '{print $2}'`
+ local NETWORK_ID
+ NETWORK_ID=`neutron net-list -F id -- --name=$NETWORK_NAME | awk "NR==4" | awk '{print $2}'`
echo $NETWORK_ID
}
function get_flavor_id {
local INSTANCE_TYPE=$1
- local FLAVOR_ID=`nova flavor-list | grep $INSTANCE_TYPE | awk '{print $2}'`
+ local FLAVOR_ID
+ FLAVOR_ID=`nova flavor-list | grep $INSTANCE_TYPE | awk '{print $2}'`
die_if_not_set $LINENO FLAVOR_ID "Failure retrieving FLAVOR_ID for $INSTANCE_TYPE"
echo "$FLAVOR_ID"
}
@@ -185,13 +191,15 @@
function remove_tenant {
local TENANT=$1
- local TENANT_ID=$(get_tenant_id $TENANT)
+ local TENANT_ID
+ TENANT_ID=$(get_tenant_id $TENANT)
openstack project delete $TENANT_ID
}
function remove_user {
local USER=$1
- local USER_ID=$(get_user_id $USER)
+ local USER_ID
+ USER_ID=$(get_user_id $USER)
openstack user delete $USER_ID
}
@@ -221,9 +229,11 @@
local NET_NAME="${TENANT}-net$NUM"
local ROUTER_NAME="${TENANT}-router${NUM}"
source $TOP_DIR/openrc admin admin
- local TENANT_ID=$(get_tenant_id $TENANT)
+ local TENANT_ID
+ TENANT_ID=$(get_tenant_id $TENANT)
source $TOP_DIR/openrc $TENANT $TENANT
- local NET_ID=$(neutron net-create --tenant-id $TENANT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' )
+ local NET_ID
+ NET_ID=$(neutron net-create --tenant-id $TENANT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' )
die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $TENANT_ID $NET_NAME $EXTRA"
neutron subnet-create --ip-version 4 --tenant-id $TENANT_ID --gateway $GATEWAY $NET_ID $CIDR
neutron_debug_admin probe-create --device-owner compute $NET_ID
@@ -251,7 +261,8 @@
done
#TODO (nati) Add multi-nic test
#TODO (nati) Add public-net test
- local VM_UUID=`nova boot --flavor $(get_flavor_id m1.tiny) \
+ local VM_UUID
+ VM_UUID=`nova boot --flavor $(get_flavor_id m1.tiny) \
--image $(get_image_id) \
$NIC \
$TENANT-server$NUM | grep ' id ' | cut -d"|" -f3 | sed 's/ //g'`
@@ -309,7 +320,8 @@
local NUM=$2
local NET_NAME="${TENANT}-net$NUM"
source $TOP_DIR/openrc admin admin
- local TENANT_ID=$(get_tenant_id $TENANT)
+ local TENANT_ID
+ TENANT_ID=$(get_tenant_id $TENANT)
#TODO(nati) comment out until l3-agent merged
#for res in port subnet net router;do
for net_id in `neutron net-list -c id -c name | grep $NET_NAME | awk '{print $2}'`;do
diff --git a/functions b/functions
index ff95c89..ca5955e 100644
--- a/functions
+++ b/functions
@@ -264,7 +264,8 @@
;;
*.img)
image_name=$(basename "$image" ".img")
- local format=$(qemu-img info ${image} | awk '/^file format/ { print $3; exit }')
+ local format
+ format=$(qemu-img info ${image} | awk '/^file format/ { print $3; exit }')
if [[ ",qcow2,raw,vdi,vmdk,vpc," =~ ",$format," ]]; then
disk_format=$format
else
@@ -341,7 +342,7 @@
# No backends registered means this is likely called from ``localrc``
# This is now deprecated usage
DATABASE_TYPE=$1
- DEPRECATED_TEXT="$DEPRECATED_TEXT\nThe database backend needs to be properly set in ENABLED_SERVICES; use_database is deprecated localrc\n"
+ deprecated "The database backend needs to be properly set in ENABLED_SERVICES; use_database is deprecated localrc"
else
# This should no longer get called...here for posterity
use_exclusive_service DATABASE_BACKENDS DATABASE_TYPE $1
@@ -405,7 +406,8 @@
local vm_id=$1
local network_name=$2
local nova_result="$(nova show $vm_id)"
- local ip=$(echo "$nova_result" | grep "$network_name" | get_field 2)
+ local ip
+ ip=$(echo "$nova_result" | grep "$network_name" | get_field 2)
if [[ $ip = "" ]];then
echo "$nova_result"
die $LINENO "[Fail] Coudn't get ipaddress of VM"
@@ -455,7 +457,8 @@
# homedir permissions on RHEL and common practice of making DEST in
# the stack user's homedir.
- local real_path=$(readlink -f $1)
+ local real_path
+ real_path=$(readlink -f $1)
local rebuilt_path=""
for i in $(echo ${real_path} | tr "/" " "); do
rebuilt_path=$rebuilt_path"/"$i
diff --git a/functions-common b/functions-common
index ab5a1a4..42555a9 100644
--- a/functions-common
+++ b/functions-common
@@ -76,45 +76,60 @@
# The location is a variable to allow for easier refactoring later to make it
# overridable. There is currently no usecase where doing so makes sense, so
# it's not currently configurable.
- CLOUDS_YAML=~/.config/openstack/clouds.yaml
+ for clouds_path in /etc/openstack ~/.config/openstack ; do
+ CLOUDS_YAML=$clouds_path/clouds.yaml
- mkdir -p $(dirname $CLOUDS_YAML)
+ sudo mkdir -p $(dirname $CLOUDS_YAML)
+ sudo chown -R $STACK_USER $(dirname $CLOUDS_YAML)
- CA_CERT_ARG=''
- if [ -f "$SSL_BUNDLE_FILE" ]; then
- CA_CERT_ARG="--os-cacert $SSL_BUNDLE_FILE"
- fi
- $TOP_DIR/tools/update_clouds_yaml.py \
- --file $CLOUDS_YAML \
- --os-cloud devstack \
- --os-region-name $REGION_NAME \
- --os-identity-api-version 3 \
- $CA_CERT_ARG \
- --os-auth-url $KEYSTONE_AUTH_URI \
- --os-username demo \
- --os-password $ADMIN_PASSWORD \
- --os-project-name demo
- $TOP_DIR/tools/update_clouds_yaml.py \
- --file $CLOUDS_YAML \
- --os-cloud devstack-admin \
- --os-region-name $REGION_NAME \
- --os-identity-api-version 3 \
- $CA_CERT_ARG \
- --os-auth-url $KEYSTONE_AUTH_URI \
- --os-username admin \
- --os-password $ADMIN_PASSWORD \
- --os-project-name admin
+ CA_CERT_ARG=''
+ if [ -f "$SSL_BUNDLE_FILE" ]; then
+ CA_CERT_ARG="--os-cacert $SSL_BUNDLE_FILE"
+ fi
+ $TOP_DIR/tools/update_clouds_yaml.py \
+ --file $CLOUDS_YAML \
+ --os-cloud devstack \
+ --os-region-name $REGION_NAME \
+ --os-identity-api-version 3 \
+ $CA_CERT_ARG \
+ --os-auth-url $KEYSTONE_AUTH_URI \
+ --os-username demo \
+ --os-password $ADMIN_PASSWORD \
+ --os-project-name demo
+ $TOP_DIR/tools/update_clouds_yaml.py \
+ --file $CLOUDS_YAML \
+ --os-cloud devstack-admin \
+ --os-region-name $REGION_NAME \
+ --os-identity-api-version 3 \
+ $CA_CERT_ARG \
+ --os-auth-url $KEYSTONE_AUTH_URI \
+ --os-username admin \
+ --os-password $ADMIN_PASSWORD \
+ --os-project-name admin
+ done
}
-# Normalize config values to True or False
-# Accepts as False: 0 no No NO false False FALSE
-# Accepts as True: 1 yes Yes YES true True TRUE
-# VAR=$(trueorfalse default-value test-value)
+# trueorfalse <True|False> <VAR>
+#
+# Normalize config-value provided in variable VAR to either "True" or
+# "False". If VAR is unset (i.e. $VAR evaluates as empty), the value
+# of the second argument will be used as the default value.
+#
+# Accepts as False: 0 no No NO false False FALSE
+# Accepts as True: 1 yes Yes YES true True TRUE
+#
+# usage:
+# VAL=$(trueorfalse False VAL)
function trueorfalse {
- local xtrace=$(set +o | grep xtrace)
+ local xtrace
+ xtrace=$(set +o | grep xtrace)
set +o xtrace
local default=$1
+
+ if [ -z "$2" ]; then
+ die $LINENO "variable to normalize required"
+ fi
local testval=${!2:-}
case "$testval" in
@@ -139,7 +154,8 @@
# backtrace level
function backtrace {
local level=$1
- local deep=$((${#BASH_SOURCE[@]} - 1))
+ local deep
+ deep=$((${#BASH_SOURCE[@]} - 1))
echo "[Call Trace]"
while [ $level -le $deep ]; do
echo "${BASH_SOURCE[$deep]}:${BASH_LINENO[$deep-1]}:${FUNCNAME[$deep-1]}"
@@ -169,7 +185,8 @@
# die_if_not_set $LINENO env-var "message"
function die_if_not_set {
local exitcode=$?
- local xtrace=$(set +o | grep xtrace)
+ local xtrace
+ xtrace=$(set +o | grep xtrace)
set +o xtrace
local line=$1; shift
local evar=$1; shift
@@ -179,11 +196,18 @@
$xtrace
}
+function deprecated {
+ local text=$1
+ DEPRECATED_TEXT+="\n$text"
+ echo "WARNING: $text"
+}
+
# Prints line number and "message" in error format
# err $LINENO "message"
function err {
local exitcode=$?
- local xtrace=$(set +o | grep xtrace)
+ local xtrace
+ xtrace=$(set +o | grep xtrace)
set +o xtrace
local msg="[ERROR] ${BASH_SOURCE[2]}:$1 $2"
echo $msg 1>&2;
@@ -200,7 +224,8 @@
# err_if_not_set $LINENO env-var "message"
function err_if_not_set {
local exitcode=$?
- local xtrace=$(set +o | grep xtrace)
+ local xtrace
+ xtrace=$(set +o | grep xtrace)
set +o xtrace
local line=$1; shift
local evar=$1; shift
@@ -236,7 +261,8 @@
# warn $LINENO "message"
function warn {
local exitcode=$?
- local xtrace=$(set +o | grep xtrace)
+ local xtrace
+ xtrace=$(set +o | grep xtrace)
set +o xtrace
local msg="[WARNING] ${BASH_SOURCE[2]}:$1 $2"
echo $msg
@@ -472,7 +498,8 @@
local git_remote=$1
local git_dest=$2
local git_ref=$3
- local orig_dir=$(pwd)
+ local orig_dir
+ orig_dir=$(pwd)
local git_clone_flags=""
RECLONE=$(trueorfalse False RECLONE)
@@ -636,7 +663,8 @@
host_ip=""
# Find the interface used for the default route
host_ip_iface=${host_ip_iface:-$(ip -f $af route | awk '/default/ {print $5}' | head -1)}
- local host_ips=$(LC_ALL=C ip -f $af addr show ${host_ip_iface} | sed /temporary/d |awk /$af'/ {split($2,parts,"/"); print parts[1]}')
+ local host_ips
+ host_ips=$(LC_ALL=C ip -f $af addr show ${host_ip_iface} | sed /temporary/d |awk /$af'/ {split($2,parts,"/"); print parts[1]}')
local ip
for ip in $host_ips; do
# Attempt to filter out IP addresses that are part of the fixed and
@@ -685,7 +713,8 @@
# copy over a default policy.json and policy.d for projects
function install_default_policy {
local project=$1
- local project_uc=$(echo $1|tr a-z A-Z)
+ local project_uc
+ project_uc=$(echo $1|tr a-z A-Z)
local conf_dir="${project_uc}_CONF_DIR"
# eval conf dir to get the variable
conf_dir="${!conf_dir}"
@@ -718,7 +747,8 @@
# Add a terminating comma to policy lines without one
# Remove the closing '}' and all lines following to the end-of-file
- local tmpfile=$(mktemp)
+ local tmpfile
+ tmpfile=$(mktemp)
uniq ${policy_file} | sed -e '
s/]$/],/
/^[}]/,$d
@@ -911,7 +941,8 @@
# scenarios currently that use the returned id. Ideally this behaviour
# should be pushed out to the service setups and let them create the
# endpoints they need.
- local public_id=$(_get_or_create_endpoint_with_interface $1 public $3 $2)
+ local public_id
+ public_id=$(_get_or_create_endpoint_with_interface $1 public $3 $2)
_get_or_create_endpoint_with_interface $1 admin $4 $2
_get_or_create_endpoint_with_interface $1 internal $5 $2
@@ -957,19 +988,26 @@
# Uses globals ``OFFLINE``, ``*_proxy``
# apt_get operation package [package ...]
function apt_get {
- local xtrace=$(set +o | grep xtrace)
+ local xtrace
+ xtrace=$(set +o | grep xtrace)
set +o xtrace
[[ "$OFFLINE" = "True" || -z "$@" ]] && return
local sudo="sudo"
[[ "$(id -u)" = "0" ]] && sudo="env"
+ # time all the apt operations
+ time_start "apt-get"
+
$xtrace
$sudo DEBIAN_FRONTEND=noninteractive \
http_proxy=${http_proxy:-} https_proxy=${https_proxy:-} \
no_proxy=${no_proxy:-} \
apt-get --option "Dpkg::Options::=--force-confold" --assume-yes "$@"
+
+ # stop the clock
+ time_stop "apt-get"
}
function _parse_package_files {
@@ -1026,10 +1064,12 @@
# - ``# dist:DISTRO`` or ``dist:DISTRO1,DISTRO2`` limits the selection
# of the package to the distros listed. The distro names are case insensitive.
function get_packages {
- local xtrace=$(set +o | grep xtrace)
+ local xtrace
+ xtrace=$(set +o | grep xtrace)
set +o xtrace
local services=$@
- local package_dir=$(_get_package_dir)
+ local package_dir
+ package_dir=$(_get_package_dir)
local file_to_parse=""
local service=""
@@ -1094,7 +1134,8 @@
# The same metadata used in the main DevStack prerequisite files may be used
# in these prerequisite files, see get_packages() for more info.
function get_plugin_packages {
- local xtrace=$(set +o | grep xtrace)
+ local xtrace
+ xtrace=$(set +o | grep xtrace)
set +o xtrace
local files_to_parse=""
local package_dir=""
@@ -1119,7 +1160,8 @@
fi
if is_ubuntu; then
- local xtrace=$(set +o | grep xtrace)
+ local xtrace
+ xtrace=$(set +o | grep xtrace)
set +o xtrace
if [[ "$REPOS_UPDATED" != "True" || "$RETRY_UPDATE" = "True" ]]; then
# if there are transient errors pulling the updates, that's fine.
@@ -1697,13 +1739,25 @@
local mode=$1
local phase=$2
if [[ -d $TOP_DIR/extras.d ]]; then
- for i in $TOP_DIR/extras.d/*.sh; do
- [[ -r $i ]] && source $i $mode $phase
+ local extra_plugin_file_name
+ for extra_plugin_file_name in $TOP_DIR/extras.d/*.sh; do
+ [[ -r $extra_plugin_file_name ]] && source $extra_plugin_file_name $mode $phase
+ # NOTE(sdague): generate a big warning about using
+ # extras.d in an unsupported way which will let us track
+ # unsupported usage in the gate.
+ local exceptions="50-ironic.sh 60-ceph.sh 80-tempest.sh"
+ local extra=$(basename $extra_plugin_file_name)
+ if [[ ! ( $exceptions =~ "$extra" ) ]]; then
+ deprecated "extras.d support is being removed in Mitaka-1"
+ deprecated "jobs for project $extra will break after that point"
+ deprecated "please move project to a supported devstack plugin model"
+ fi
done
fi
# the source phase corresponds to settings loading in plugins
if [[ "$mode" == "source" ]]; then
load_plugin_settings
+ verify_disabled_services
elif [[ "$mode" == "override_defaults" ]]; then
plugin_override_defaults
else
@@ -1759,25 +1813,26 @@
ENABLED_SERVICES=$(remove_disabled_services "$remaining" "$to_remove")
}
-# disable_service() removes the services passed as argument to the
-# ``ENABLED_SERVICES`` list, if they are present.
+# disable_service() prepares the services passed as argument to be
+# removed from the ``ENABLED_SERVICES`` list, if they are present.
#
# For example:
# disable_service rabbit
#
-# This function does not know about the special cases
-# for nova, glance, and neutron built into is_service_enabled().
-# Uses global ``ENABLED_SERVICES``
+# Uses global ``DISABLED_SERVICES``
# disable_service service [service ...]
function disable_service {
- local tmpsvcs=",${ENABLED_SERVICES},"
+ local disabled_svcs="${DISABLED_SERVICES}"
+ local enabled_svcs=",${ENABLED_SERVICES},"
local service
for service in $@; do
+ disabled_svcs+=",$service"
if is_service_enabled $service; then
- tmpsvcs=${tmpsvcs//,$service,/,}
+ enabled_svcs=${enabled_svcs//,$service,/,}
fi
done
- ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs")
+ DISABLED_SERVICES=$(_cleanup_service_list "$disabled_svcs")
+ ENABLED_SERVICES=$(_cleanup_service_list "$enabled_svcs")
}
# enable_service() adds the services passed as argument to the
@@ -1794,6 +1849,10 @@
local tmpsvcs="${ENABLED_SERVICES}"
local service
for service in $@; do
+ if [[ ,${DISABLED_SERVICES}, =~ ,${service}, ]]; then
+ warn $LINENO "Attempt to enable_service ${service} when it has been disabled"
+ continue
+ fi
if ! is_service_enabled $service; then
tmpsvcs+=",$service"
fi
@@ -1825,7 +1884,8 @@
# Uses global ``ENABLED_SERVICES``
# is_service_enabled service [service ...]
function is_service_enabled {
- local xtrace=$(set +o | grep xtrace)
+ local xtrace
+ xtrace=$(set +o | grep xtrace)
set +o xtrace
local enabled=1
local services=$@
@@ -1897,6 +1957,18 @@
return 0
}
+# Make sure that nothing has manipulated ENABLED_SERVICES in a way
+# that conflicts with prior calls to disable_service.
+# Uses global ``ENABLED_SERVICES``
+function verify_disabled_services {
+ local service
+ for service in ${ENABLED_SERVICES//,/ }; do
+ if [[ ,${DISABLED_SERVICES}, =~ ,${service}, ]]; then
+ die $LINENO "ENABLED_SERVICES directly modified to overcome 'disable_service ${service}'"
+ fi
+ done
+}
+
# System Functions
# ================
@@ -1904,7 +1976,8 @@
# Only run the command if the target file (the last arg) is not on an
# NFS filesystem.
function _safe_permission_operation {
- local xtrace=$(set +o | grep xtrace)
+ local xtrace
+ xtrace=$(set +o | grep xtrace)
set +o xtrace
local args=( $@ )
local last
@@ -1940,8 +2013,10 @@
local ip=$1
local range=$2
local masklen=${range#*/}
- local network=$(maskip ${range%/*} $(cidr2netmask $masklen))
- local subnet=$(maskip $ip $(cidr2netmask $masklen))
+ local network
+ network=$(maskip ${range%/*} $(cidr2netmask $masklen))
+ local subnet
+ subnet=$(maskip $ip $(cidr2netmask $masklen))
[[ $network == $subnet ]]
}
@@ -1993,7 +2068,8 @@
# Returns true if the directory is on a filesystem mounted via NFS.
function is_nfs_directory {
- local mount_type=`stat -f -L -c %T $1`
+ local mount_type
+ mount_type=`stat -f -L -c %T $1`
test "$mount_type" == "nfs"
}
@@ -2004,13 +2080,15 @@
local ip=$1
local mask=$2
local l="${ip%.*}"; local r="${ip#*.}"; local n="${mask%.*}"; local m="${mask#*.}"
- local subnet=$((${ip%%.*}&${mask%%.*})).$((${r%%.*}&${m%%.*})).$((${l##*.}&${n##*.})).$((${ip##*.}&${mask##*.}))
+ local subnet
+ subnet=$((${ip%%.*}&${mask%%.*})).$((${r%%.*}&${m%%.*})).$((${l##*.}&${n##*.})).$((${ip##*.}&${mask##*.}))
echo $subnet
}
# Return the current python as "python<major>.<minor>"
function python_version {
- local python_version=$(python -c 'import sys; print("%s.%s" % sys.version_info[0:2])')
+ local python_version
+ python_version=$(python -c 'import sys; print("%s.%s" % sys.version_info[0:2])')
echo "python${python_version}"
}
@@ -2069,6 +2147,70 @@
fi
}
+# Timing infrastructure - figure out where large blocks of time are
+# used in DevStack
+#
+# The timing infrastructure for DevStack is about collecting buckets
+# of time that are spend in some subtask. For instance, that might be
+# 'apt', 'pip', 'osc', even database migrations. We do this by a pair
+# of functions: time_start / time_stop.
+#
+# These take a single parameter: $name - which specifies the name of
+# the bucket to be accounted against. time_totals function spits out
+# the results.
+#
+# Resolution is only in whole seconds, so should be used for long
+# running activities.
+
+declare -A TOTAL_TIME
+declare -A START_TIME
+
+# time_start $name
+#
+# starts the clock for a timer by name. Errors if that clock is
+# already started.
+function time_start {
+ local name=$1
+ local start_time=${START_TIME[$name]}
+ if [[ -n "$start_time" ]]; then
+ die $LINENO "Trying to start the clock on $name, but it's already been started"
+ fi
+ START_TIME[$name]=$(date +%s)
+}
+
+# time_stop $name
+#
+# stops the clock for a timer by name, and accumulates that time in the
+# global counter for that name. Errors if that clock had not
+# previously been started.
+function time_stop {
+ local name=$1
+ local start_time=${START_TIME[$name]}
+ if [[ -z "$start_time" ]]; then
+ die $LINENO "Trying to stop the clock on $name, but it was never started"
+ fi
+ local end_time=$(date +%s)
+ local elapsed_time=$(($end_time - $start_time))
+ local total=${TOTAL_TIME[$name]:-0}
+ # reset the clock so we can start it in the future
+ START_TIME[$name]=""
+ TOTAL_TIME[$name]=$(($total + $elapsed_time))
+}
+
+# time_totals
+#
+# prints out total time
+function time_totals {
+ echo
+    echo "========================="
+    echo "DevStack Components Timed"
+    echo "========================="
+ echo
+ for t in ${!TOTAL_TIME[*]}; do
+ local v=${TOTAL_TIME[$t]}
+ echo "$t - $v secs"
+ done
+}
# Restore xtrace
$XTRACE
diff --git a/inc/ini-config b/inc/ini-config
index 58386e2..42a66c6 100644
--- a/inc/ini-config
+++ b/inc/ini-config
@@ -17,7 +17,8 @@
# Append a new option in an ini file without replacing the old value
# iniadd [-sudo] config-file section option value1 value2 value3 ...
function iniadd {
- local xtrace=$(set +o | grep xtrace)
+ local xtrace
+ xtrace=$(set +o | grep xtrace)
set +o xtrace
local sudo=""
if [ $1 == "-sudo" ]; then
@@ -37,7 +38,8 @@
# Comment an option in an INI file
# inicomment [-sudo] config-file section option
function inicomment {
- local xtrace=$(set +o | grep xtrace)
+ local xtrace
+ xtrace=$(set +o | grep xtrace)
set +o xtrace
local sudo=""
if [ $1 == "-sudo" ]; then
@@ -55,7 +57,8 @@
# Get an option from an INI file
# iniget config-file section option
function iniget {
- local xtrace=$(set +o | grep xtrace)
+ local xtrace
+ xtrace=$(set +o | grep xtrace)
set +o xtrace
local file=$1
local section=$2
@@ -70,7 +73,8 @@
# Get a multiple line option from an INI file
# iniget_multiline config-file section option
function iniget_multiline {
- local xtrace=$(set +o | grep xtrace)
+ local xtrace
+ xtrace=$(set +o | grep xtrace)
set +o xtrace
local file=$1
local section=$2
@@ -85,7 +89,8 @@
# Determinate is the given option present in the INI file
# ini_has_option config-file section option
function ini_has_option {
- local xtrace=$(set +o | grep xtrace)
+ local xtrace
+ xtrace=$(set +o | grep xtrace)
set +o xtrace
local file=$1
local section=$2
@@ -107,7 +112,8 @@
#
# iniadd_literal [-sudo] config-file section option value
function iniadd_literal {
- local xtrace=$(set +o | grep xtrace)
+ local xtrace
+ xtrace=$(set +o | grep xtrace)
set +o xtrace
local sudo=""
if [ $1 == "-sudo" ]; then
@@ -135,7 +141,8 @@
# Remove an option from an INI file
# inidelete [-sudo] config-file section option
function inidelete {
- local xtrace=$(set +o | grep xtrace)
+ local xtrace
+ xtrace=$(set +o | grep xtrace)
set +o xtrace
local sudo=""
if [ $1 == "-sudo" ]; then
@@ -161,7 +168,8 @@
# iniset [-sudo] config-file section option value
# - if the file does not exist, it is created
function iniset {
- local xtrace=$(set +o | grep xtrace)
+ local xtrace
+ xtrace=$(set +o | grep xtrace)
set +o xtrace
local sudo=""
if [ $1 == "-sudo" ]; then
@@ -188,7 +196,8 @@
$option = $value
" "$file"
else
- local sep=$(echo -ne "\x01")
+ local sep
+ sep=$(echo -ne "\x01")
# Replace it
$sudo sed -i -e '/^\['${section}'\]/,/^\[.*\]/ s'${sep}'^\('${option}'[ \t]*=[ \t]*\).*$'${sep}'\1'"${value}"${sep} "$file"
fi
@@ -198,7 +207,8 @@
# Set a multiple line option in an INI file
# iniset_multiline [-sudo] config-file section option value1 value2 valu3 ...
function iniset_multiline {
- local xtrace=$(set +o | grep xtrace)
+ local xtrace
+ xtrace=$(set +o | grep xtrace)
set +o xtrace
local sudo=""
if [ $1 == "-sudo" ]; then
@@ -236,7 +246,8 @@
# Uncomment an option in an INI file
# iniuncomment config-file section option
function iniuncomment {
- local xtrace=$(set +o | grep xtrace)
+ local xtrace
+ xtrace=$(set +o | grep xtrace)
set +o xtrace
local sudo=""
if [ $1 == "-sudo" ]; then
diff --git a/inc/meta-config b/inc/meta-config
index e5f902d..d74db59 100644
--- a/inc/meta-config
+++ b/inc/meta-config
@@ -89,7 +89,8 @@
# note, configfile might be a variable (note the iniset, etc
# created in the mega-awk below is "eval"ed too, so we just leave
# it alone.
- local real_configfile=$(eval echo $configfile)
+ local real_configfile
+ real_configfile=$(eval echo $configfile)
if [ ! -f $real_configfile ]; then
touch $real_configfile
fi
diff --git a/inc/python b/inc/python
index fd0d616..91ceb44 100644
--- a/inc/python
+++ b/inc/python
@@ -38,7 +38,8 @@
# Get the path to the direcotry where python executables are installed.
# get_python_exec_prefix
function get_python_exec_prefix {
- local xtrace=$(set +o | grep xtrace)
+ local xtrace
+ xtrace=$(set +o | grep xtrace)
set +o xtrace
if [[ -z "$os_PACKAGE" ]]; then
GetOSVersion
@@ -60,7 +61,8 @@
# pip_install_gr packagename
function pip_install_gr {
local name=$1
- local clean_name=$(get_from_global_requirements $name)
+ local clean_name
+ clean_name=$(get_from_global_requirements $name)
pip_install $clean_name
}
@@ -69,7 +71,8 @@
# ``PIP_UPGRADE``, ``TRACK_DEPENDS``, ``*_proxy``,
# pip_install package [package ...]
function pip_install {
- local xtrace=$(set +o | grep xtrace)
+ local xtrace
+ xtrace=$(set +o | grep xtrace)
set +o xtrace
local upgrade=""
local offline=${OFFLINE:-False}
@@ -78,6 +81,8 @@
return
fi
+ time_start "pip_install"
+
PIP_UPGRADE=$(trueorfalse False PIP_UPGRADE)
if [[ "$PIP_UPGRADE" = "True" ]] ; then
upgrade="--upgrade"
@@ -98,7 +103,8 @@
local cmd_pip=$PIP_VIRTUAL_ENV/bin/pip
local sudo_pip="env"
else
- local cmd_pip=$(get_pip_command)
+ local cmd_pip
+ cmd_pip=$(get_pip_command)
local sudo_pip="sudo -H"
fi
fi
@@ -107,7 +113,8 @@
# Always apply constraints
cmd_pip="$cmd_pip -c $REQUIREMENTS_DIR/upper-constraints.txt"
- local pip_version=$(python -c "import pip; \
+ local pip_version
+ pip_version=$(python -c "import pip; \
print(pip.__version__.strip('.')[0])")
if (( pip_version<6 )); then
die $LINENO "Currently installed pip version ${pip_version} does not" \
@@ -135,13 +142,16 @@
$cmd_pip $upgrade \
-r $test_req
fi
+
+ time_stop "pip_install"
}
# get version of a package from global requirements file
# get_from_global_requirements <package>
function get_from_global_requirements {
local package=$1
- local required_pkg=$(grep -i -h ^${package} $REQUIREMENTS_DIR/global-requirements.txt | cut -d\# -f1)
+ local required_pkg
+ required_pkg=$(grep -i -h ^${package} $REQUIREMENTS_DIR/global-requirements.txt | cut -d\# -f1)
if [[ $required_pkg == "" ]]; then
die $LINENO "Can't find package $package in requirements"
fi
@@ -220,7 +230,8 @@
# practical ways.
function is_in_projects_txt {
local project_dir=$1
- local project_name=$(basename $project_dir)
+ local project_name
+ project_name=$(basename $project_dir)
grep -q "/$project_name\$" $REQUIREMENTS_DIR/projects.txt
}
@@ -239,7 +250,8 @@
if [ -n "$REQUIREMENTS_DIR" ]; then
# Constrain this package to this project directory from here on out.
- local name=$(awk '/^name.*=/ {print $3}' $project_dir/setup.cfg)
+ local name
+ name=$(awk '/^name.*=/ {print $3}' $project_dir/setup.cfg)
$REQUIREMENTS_DIR/.venv/bin/edit-constraints \
$REQUIREMENTS_DIR/upper-constraints.txt -- $name \
"$flags file://$project_dir#egg=$name"
diff --git a/inc/rootwrap b/inc/rootwrap
index f91e557..63ab59a 100644
--- a/inc/rootwrap
+++ b/inc/rootwrap
@@ -41,7 +41,8 @@
# configure_rootwrap project
function configure_rootwrap {
local project=$1
- local project_uc=$(echo $1|tr a-z A-Z)
+ local project_uc
+ project_uc=$(echo $1|tr a-z A-Z)
local bin_dir="${project_uc}_BIN_DIR"
bin_dir="${!bin_dir}"
local project_dir="${project_uc}_DIR"
@@ -60,7 +61,8 @@
sudo sed -e "s:^filters_path=.*$:filters_path=/etc/${project}/rootwrap.d:" -i /etc/${project}/rootwrap.conf
# Set up the rootwrap sudoers
- local tempfile=$(mktemp)
+ local tempfile
+ tempfile=$(mktemp)
# Specify rootwrap.conf as first parameter to rootwrap
rootwrap_sudo_cmd="${rootwrap_bin} /etc/${project}/rootwrap.conf *"
echo "$STACK_USER ALL=(root) NOPASSWD: $rootwrap_sudo_cmd" >$tempfile
diff --git a/lib/apache b/lib/apache
index a8e9bc5..17526c7 100644
--- a/lib/apache
+++ b/lib/apache
@@ -72,11 +72,14 @@
# various differences between Apache 2.2 and 2.4 that warrant special handling.
function get_apache_version {
if is_ubuntu; then
- local version_str=$(sudo /usr/sbin/apache2ctl -v | awk '/Server version/ {print $3}' | cut -f2 -d/)
+ local version_str
+ version_str=$(sudo /usr/sbin/apache2ctl -v | awk '/Server version/ {print $3}' | cut -f2 -d/)
elif is_fedora; then
- local version_str=$(rpm -qa --queryformat '%{VERSION}' httpd)
+ local version_str
+ version_str=$(rpm -qa --queryformat '%{VERSION}' httpd)
elif is_suse; then
- local version_str=$(rpm -qa --queryformat '%{VERSION}' apache2)
+ local version_str
+ version_str=$(rpm -qa --queryformat '%{VERSION}' apache2)
else
exit_distro_not_supported "cannot determine apache version"
fi
@@ -115,7 +118,8 @@
function apache_site_config_for {
local site=$@
if is_ubuntu; then
- local apache_version=$(get_apache_version)
+ local apache_version
+ apache_version=$(get_apache_version)
if [[ "$apache_version" == "2.2" ]]; then
# Ubuntu 12.04 - Apache 2.2
echo $APACHE_CONF_DIR/${site}
diff --git a/lib/ceph b/lib/ceph
index 8e34aa4..29d2aca 100644
--- a/lib/ceph
+++ b/lib/ceph
@@ -83,7 +83,8 @@
# ------------
function get_ceph_version {
- local ceph_version_str=$(sudo ceph daemon mon.$(hostname) version | cut -d '"' -f 4 | cut -f 1,2 -d '.')
+ local ceph_version_str
+ ceph_version_str=$(sudo ceph daemon mon.$(hostname) version | cut -d '"' -f 4 | cut -f 1,2 -d '.')
echo $ceph_version_str
}
@@ -106,7 +107,8 @@
# undefine_virsh_secret() - Undefine Cinder key secret from libvirt
function undefine_virsh_secret {
if is_service_enabled cinder || is_service_enabled nova; then
- local virsh_uuid=$(sudo virsh secret-list | awk '/^ ?[0-9a-z]/ { print $1 }')
+ local virsh_uuid
+ virsh_uuid=$(sudo virsh secret-list | awk '/^ ?[0-9a-z]/ { print $1 }')
sudo virsh secret-undefine ${virsh_uuid} >/dev/null 2>&1
fi
}
@@ -219,7 +221,8 @@
done
# pools data and metadata were removed in the Giant release so depending on the version we apply different commands
- local ceph_version=$(get_ceph_version)
+ local ceph_version
+ ceph_version=$(get_ceph_version)
# change pool replica size according to the CEPH_REPLICAS set by the user
if [[ ${ceph_version%%.*} -eq 0 ]] && [[ ${ceph_version##*.} -lt 87 ]]; then
sudo ceph -c ${CEPH_CONF_FILE} osd pool set rbd size ${CEPH_REPLICAS}
diff --git a/lib/cinder b/lib/cinder
index 1014411..2c9c94a 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -93,7 +93,7 @@
if [[ $CINDER_SECURE_DELETE == "False" ]]; then
CINDER_VOLUME_CLEAR_DEFAULT="none"
fi
- DEPRECATED_TEXT="$DEPRECATED_TEXT\nConfigure secure Cinder volume deletion using CINDER_VOLUME_CLEAR instead of CINDER_SECURE_DELETE.\n"
+ deprecated "Configure secure Cinder volume deletion using CINDER_VOLUME_CLEAR instead of CINDER_SECURE_DELETE."
fi
CINDER_VOLUME_CLEAR=${CINDER_VOLUME_CLEAR:-${CINDER_VOLUME_CLEAR_DEFAULT:-zero}}
CINDER_VOLUME_CLEAR=$(echo ${CINDER_VOLUME_CLEAR} | tr '[:upper:]' '[:lower:]')
@@ -150,7 +150,8 @@
# ensure the volume group is cleared up because fails might
# leave dead volumes in the group
if [ "$CINDER_ISCSI_HELPER" = "tgtadm" ]; then
- local targets=$(sudo tgtadm --op show --mode target)
+ local targets
+ targets=$(sudo tgtadm --op show --mode target)
if [ $? -ne 0 ]; then
# If tgt driver isn't running this won't work obviously
# So check the response and restart if need be
@@ -198,7 +199,8 @@
# _cinder_config_apache_wsgi() - Set WSGI config files
function _cinder_config_apache_wsgi {
- local cinder_apache_conf=$(apache_site_config_for osapi-volume)
+ local cinder_apache_conf
+ cinder_apache_conf=$(apache_site_config_for osapi-volume)
local cinder_ssl=""
local cinder_certfile=""
local cinder_keyfile=""
diff --git a/lib/glance b/lib/glance
index 7be3a84..2eb93a4 100644
--- a/lib/glance
+++ b/lib/glance
@@ -106,7 +106,8 @@
iniset $GLANCE_REGISTRY_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
iniset $GLANCE_REGISTRY_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS
inicomment $GLANCE_REGISTRY_CONF DEFAULT log_file
- local dburl=`database_connection_url glance`
+ local dburl
+ dburl=`database_connection_url glance`
iniset $GLANCE_REGISTRY_CONF database connection $dburl
iniset $GLANCE_REGISTRY_CONF DEFAULT use_syslog $SYSLOG
iniset $GLANCE_REGISTRY_CONF DEFAULT workers "$API_WORKERS"
@@ -265,7 +266,8 @@
# required for swift access
if is_service_enabled s-proxy; then
- local glance_swift_user=$(get_or_create_user "glance-swift" \
+ local glance_swift_user
+ glance_swift_user=$(get_or_create_user "glance-swift" \
"$SERVICE_PASSWORD" "default" "glance-swift@example.com")
get_or_add_user_project_role "ResellerAdmin" $glance_swift_user $SERVICE_TENANT_NAME
fi
diff --git a/lib/heat b/lib/heat
index 3e6975a..615198c 100644
--- a/lib/heat
+++ b/lib/heat
@@ -59,10 +59,10 @@
# other default options
if [[ "$HEAT_STANDALONE" = "True" ]]; then
# for standalone, use defaults which require no service user
- HEAT_STACK_DOMAIN=`trueorfalse False $HEAT_STACK_DOMAIN`
+ HEAT_STACK_DOMAIN=$(trueorfalse False HEAT_STACK_DOMAIN)
HEAT_DEFERRED_AUTH=${HEAT_DEFERRED_AUTH:-password}
else
- HEAT_STACK_DOMAIN=`trueorfalse True $HEAT_STACK_DOMAIN`
+ HEAT_STACK_DOMAIN=$(trueorfalse True HEAT_STACK_DOMAIN)
HEAT_DEFERRED_AUTH=${HEAT_DEFERRED_AUTH:-trusts}
fi
@@ -321,7 +321,8 @@
echo "</body></html>" >> $HEAT_PIP_REPO/index.html
- local heat_pip_repo_apache_conf=$(apache_site_config_for heat_pip_repo)
+ local heat_pip_repo_apache_conf
+ heat_pip_repo_apache_conf=$(apache_site_config_for heat_pip_repo)
sudo cp $FILES/apache-heat-pip-repo.template $heat_pip_repo_apache_conf
sudo sed -e "
diff --git a/lib/horizon b/lib/horizon
index b2539d1..6ecd755 100644
--- a/lib/horizon
+++ b/lib/horizon
@@ -49,7 +49,8 @@
sed -e "/^$option/d" -i $local_settings
echo -e "\n$option=$value" >> $file
elif grep -q "^$section" $file; then
- local line=$(sed -ne "/^$section/,/^}/ { /^ *'$option':/ p; }" $file)
+ local line
+ line=$(sed -ne "/^$section/,/^}/ { /^ *'$option':/ p; }" $file)
if [ -n "$line" ]; then
sed -i -e "/^$section/,/^}/ s/^\( *'$option'\) *:.*$/\1: $value,/" $file
else
@@ -68,7 +69,8 @@
# cleanup_horizon() - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up
function cleanup_horizon {
- local horizon_conf=$(apache_site_config_for horizon)
+ local horizon_conf
+ horizon_conf=$(apache_site_config_for horizon)
sudo rm -f $horizon_conf
}
@@ -112,7 +114,8 @@
# Create an empty directory that apache uses as docroot
sudo mkdir -p $HORIZON_DIR/.blackhole
- local horizon_conf=$(apache_site_config_for horizon)
+ local horizon_conf
+ horizon_conf=$(apache_site_config_for horizon)
# Configure apache to run horizon
sudo sh -c "sed -e \"
diff --git a/lib/ironic b/lib/ironic
index 40475e0..74e2f93 100644
--- a/lib/ironic
+++ b/lib/ironic
@@ -225,7 +225,8 @@
# _config_ironic_apache_wsgi() - Set WSGI config files of Ironic
function _config_ironic_apache_wsgi {
- local ironic_apache_conf=$(apache_site_config_for ironic)
+ local ironic_apache_conf
+ ironic_apache_conf=$(apache_site_config_for ironic)
sudo cp $FILES/apache-ironic.template $ironic_apache_conf
sudo sed -e "
s|%PUBLICPORT%|$IRONIC_HTTP_PORT|g;
@@ -325,11 +326,13 @@
function configure_ironic_conductor {
cp $IRONIC_DIR/etc/ironic/rootwrap.conf $IRONIC_ROOTWRAP_CONF
cp -r $IRONIC_DIR/etc/ironic/rootwrap.d $IRONIC_CONF_DIR
- local ironic_rootwrap=$(get_rootwrap_location ironic)
+ local ironic_rootwrap
+ ironic_rootwrap=$(get_rootwrap_location ironic)
local rootwrap_isudoer_cmd="$ironic_rootwrap $IRONIC_CONF_DIR/rootwrap.conf *"
# Set up the rootwrap sudoers for ironic
- local tempfile=`mktemp`
+ local tempfile
+ tempfile=`mktemp`
echo "$STACK_USER ALL=(root) NOPASSWD: $rootwrap_isudoer_cmd" >$tempfile
chmod 0440 $tempfile
sudo chown root:root $tempfile
@@ -370,7 +373,8 @@
fi
iniset $IRONIC_CONF_FILE glance swift_endpoint_url http://${HOST_IP}:${SWIFT_DEFAULT_BIND_PORT:-8080}
iniset $IRONIC_CONF_FILE glance swift_api_version v1
- local tenant_id=$(get_or_create_project $SERVICE_TENANT_NAME default)
+ local tenant_id
+ tenant_id=$(get_or_create_project $SERVICE_TENANT_NAME default)
iniset $IRONIC_CONF_FILE glance swift_account AUTH_${tenant_id}
iniset $IRONIC_CONF_FILE glance swift_container glance
iniset $IRONIC_CONF_FILE glance swift_temp_url_duration 3600
@@ -379,7 +383,8 @@
fi
if [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then
- local pxebin=`basename $IRONIC_PXE_BOOT_IMAGE`
+ local pxebin
+ pxebin=`basename $IRONIC_PXE_BOOT_IMAGE`
iniset $IRONIC_CONF_FILE pxe ipxe_enabled True
iniset $IRONIC_CONF_FILE pxe pxe_config_template '\$pybasedir/drivers/modules/ipxe_config.template'
iniset $IRONIC_CONF_FILE pxe pxe_bootfile_name $pxebin
@@ -445,7 +450,8 @@
# _ironic_bm_vm_names() - Generates list of names for baremetal VMs.
function _ironic_bm_vm_names {
local idx
- local num_vms=$(($IRONIC_VM_COUNT - 1))
+ local num_vms
+ num_vms=$(($IRONIC_VM_COUNT - 1))
for idx in $(seq 0 $num_vms); do
echo "baremetal${IRONIC_VM_NETWORK_BRIDGE}_${idx}"
done
@@ -498,22 +504,27 @@
}
function create_ovs_taps {
- local ironic_net_id=$(neutron net-list | grep private | get_field 1)
+ local ironic_net_id
+ ironic_net_id=$(neutron net-list | grep private | get_field 1)
# Work around: No netns exists on host until a Neutron port is created. We
# need to create one in Neutron to know what netns to tap into prior to the
# first node booting.
- local port_id=$(neutron port-create private | grep " id " | get_field 2)
+ local port_id
+ port_id=$(neutron port-create private | grep " id " | get_field 2)
# intentional sleep to make sure the tag has been set to port
sleep 10
if [[ "$Q_USE_NAMESPACE" = "True" ]]; then
- local tapdev=$(sudo ip netns exec qdhcp-${ironic_net_id} ip link list | grep " tap" | cut -d':' -f2 | cut -d'@' -f1 | cut -b2-)
+ local tapdev
+ tapdev=$(sudo ip netns exec qdhcp-${ironic_net_id} ip link list | grep " tap" | cut -d':' -f2 | cut -d'@' -f1 | cut -b2-)
else
- local tapdev=$(sudo ip link list | grep " tap" | cut -d':' -f2 | cut -d'@' -f1 | cut -b2-)
+ local tapdev
+ tapdev=$(sudo ip link list | grep " tap" | cut -d':' -f2 | cut -d'@' -f1 | cut -b2-)
fi
- local tag_id=$(sudo ovs-vsctl show |grep ${tapdev} -A1 -m1 | grep tag | cut -d':' -f2 | cut -b2-)
+ local tag_id
+ tag_id=$(sudo ovs-vsctl show |grep ${tapdev} -A1 -m1 | grep tag | cut -d':' -f2 | cut -b2-)
# make sure veth pair is not existing, otherwise delete its links
sudo ip link show ovs-tap1 && sudo ip link delete ovs-tap1
@@ -559,6 +570,7 @@
# timing out.
local resource=$1
local expected_count=$2
+ local i
echo_summary "Waiting 2 minutes for Nova resource tracker to pick up $resource >= $expected_count"
for i in $(seq 1 120); do
if [ $(nova hypervisor-stats | grep " $resource " | get_field 2) -ge $expected_count ]; then
@@ -570,7 +582,8 @@
}
function enroll_nodes {
- local chassis_id=$(ironic chassis-create -d "ironic test chassis" | grep " uuid " | get_field 2)
+ local chassis_id
+ chassis_id=$(ironic chassis-create -d "ironic test chassis" | grep " uuid " | get_field 2)
if ! is_ironic_hardware; then
local ironic_node_cpu=$IRONIC_VM_SPECS_CPU
@@ -602,10 +615,14 @@
if ! is_ironic_hardware; then
local mac_address=$hardware_info
elif [[ -z "${IRONIC_DEPLOY_DRIVER##*_ipmitool}" ]]; then
- local ipmi_address=$(echo $hardware_info |awk '{print $1}')
- local mac_address=$(echo $hardware_info |awk '{print $2}')
- local ironic_ipmi_username=$(echo $hardware_info |awk '{print $3}')
- local ironic_ipmi_passwd=$(echo $hardware_info |awk '{print $4}')
+ local ipmi_address
+ ipmi_address=$(echo $hardware_info |awk '{print $1}')
+ local mac_address
+ mac_address=$(echo $hardware_info |awk '{print $2}')
+ local ironic_ipmi_username
+ ironic_ipmi_username=$(echo $hardware_info |awk '{print $3}')
+ local ironic_ipmi_passwd
+ ironic_ipmi_passwd=$(echo $hardware_info |awk '{print $4}')
# Currently we require all hardware platform have same CPU/RAM/DISK info
# in future, this can be enhanced to support different type, and then
# we create the bare metal flavor with minimum value
@@ -617,9 +634,13 @@
# First node created will be used for testing in ironic w/o glance
# scenario, so we need to know its UUID.
- local standalone_node_uuid=$([ $total_nodes -eq 0 ] && echo "--uuid $IRONIC_NODE_UUID")
+ local standalone_node_uuid=""
+ if [ $total_nodes -eq 0 ]; then
+ standalone_node_uuid="--uuid $IRONIC_NODE_UUID"
+ fi
- local node_id=$(ironic node-create $standalone_node_uuid\
+ local node_id
+ node_id=$(ironic node-create $standalone_node_uuid\
--chassis_uuid $chassis_id \
--driver $IRONIC_DEPLOY_DRIVER \
--name node-$total_nodes \
@@ -640,7 +661,8 @@
# NOTE(adam_g): Attempting to use an autogenerated UUID for flavor id here uncovered
# bug (LP: #1333852) in Trove. This can be changed to use an auto flavor id when the
# bug is fixed in Juno.
- local adjusted_disk=$(($ironic_node_disk - $ironic_ephemeral_disk))
+ local adjusted_disk
+ adjusted_disk=$(($ironic_node_disk - $ironic_ephemeral_disk))
nova flavor-create --ephemeral $ironic_ephemeral_disk baremetal 551 $ironic_node_ram $adjusted_disk $ironic_node_cpu
nova flavor-key baremetal set "cpu_arch"="x86_64"
@@ -771,7 +793,8 @@
fi
fi
- local token=$(openstack token issue -c id -f value)
+ local token
+ token=$(openstack token issue -c id -f value)
die_if_not_set $LINENO token "Keystone fail to get token"
# load them into glance
@@ -809,7 +832,8 @@
function cleanup_baremetal_basic_ops {
rm -f $IRONIC_VM_MACS_CSV_FILE
if [ -f $IRONIC_KEY_FILE ]; then
- local key=$(cat $IRONIC_KEY_FILE.pub)
+ local key
+ key=$(cat $IRONIC_KEY_FILE.pub)
# remove public key from authorized_keys
grep -v "$key" $IRONIC_AUTHORIZED_KEYS_FILE > temp && mv temp $IRONIC_AUTHORIZED_KEYS_FILE
chmod 0600 $IRONIC_AUTHORIZED_KEYS_FILE
diff --git a/lib/keystone b/lib/keystone
index ec28b46..cdcc13a 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -132,7 +132,8 @@
# _config_keystone_apache_wsgi() - Set WSGI config files of Keystone
function _config_keystone_apache_wsgi {
- local keystone_apache_conf=$(apache_site_config_for keystone)
+ local keystone_apache_conf
+ keystone_apache_conf=$(apache_site_config_for keystone)
local keystone_ssl=""
local keystone_certfile=""
local keystone_keyfile=""
@@ -347,9 +348,12 @@
function create_keystone_accounts {
# admin
- local admin_tenant=$(get_or_create_project "admin" default)
- local admin_user=$(get_or_create_user "admin" "$ADMIN_PASSWORD" default)
- local admin_role=$(get_or_create_role "admin")
+ local admin_tenant
+ admin_tenant=$(get_or_create_project "admin" default)
+ local admin_user
+ admin_user=$(get_or_create_user "admin" "$ADMIN_PASSWORD" default)
+ local admin_role
+ admin_role=$(get_or_create_role "admin")
get_or_add_user_project_role $admin_role $admin_user $admin_tenant
# Create service project/role
@@ -365,18 +369,23 @@
get_or_create_role ResellerAdmin
# The Member role is used by Horizon and Swift so we need to keep it:
- local member_role=$(get_or_create_role "Member")
+ local member_role
+ member_role=$(get_or_create_role "Member")
# another_role demonstrates that an arbitrary role may be created and used
# TODO(sleepsonthefloor): show how this can be used for rbac in the future!
- local another_role=$(get_or_create_role "anotherrole")
+ local another_role
+ another_role=$(get_or_create_role "anotherrole")
# invisible tenant - admin can't see this one
- local invis_tenant=$(get_or_create_project "invisible_to_admin" default)
+ local invis_tenant
+ invis_tenant=$(get_or_create_project "invisible_to_admin" default)
# demo
- local demo_tenant=$(get_or_create_project "demo" default)
- local demo_user=$(get_or_create_user "demo" \
+ local demo_tenant
+ demo_tenant=$(get_or_create_project "demo" default)
+ local demo_user
+ demo_user=$(get_or_create_user "demo" \
"$ADMIN_PASSWORD" "default" "demo@example.com")
get_or_add_user_project_role $member_role $demo_user $demo_tenant
@@ -384,9 +393,11 @@
get_or_add_user_project_role $another_role $demo_user $demo_tenant
get_or_add_user_project_role $member_role $demo_user $invis_tenant
- local admin_group=$(get_or_create_group "admins" \
+ local admin_group
+ admin_group=$(get_or_create_group "admins" \
"default" "openstack admin group")
- local non_admin_group=$(get_or_create_group "nonadmins" \
+ local non_admin_group
+ non_admin_group=$(get_or_create_group "nonadmins" \
"default" "non-admin group")
get_or_add_group_project_role $member_role $non_admin_group $demo_tenant
@@ -415,7 +426,8 @@
function create_service_user {
local role=${2:-service}
- local user=$(get_or_create_user "$1" "$SERVICE_PASSWORD" default)
+ local user
+ user=$(get_or_create_user "$1" "$SERVICE_PASSWORD" default)
get_or_add_user_project_role "$role" "$user" "$SERVICE_TENANT_NAME"
}
diff --git a/lib/ldap b/lib/ldap
index d2dbc3b..0414fea 100644
--- a/lib/ldap
+++ b/lib/ldap
@@ -82,7 +82,8 @@
function init_ldap {
local keystone_ldif
- local tmp_ldap_dir=$(mktemp -d -t ldap.$$.XXXXXXXXXX)
+ local tmp_ldap_dir
+ tmp_ldap_dir=$(mktemp -d -t ldap.$$.XXXXXXXXXX)
# Remove data but not schemas
clear_ldap_state
@@ -113,7 +114,8 @@
echo "Installing LDAP inside function"
echo "os_VENDOR is $os_VENDOR"
- local tmp_ldap_dir=$(mktemp -d -t ldap.$$.XXXXXXXXXX)
+ local tmp_ldap_dir
+ tmp_ldap_dir=$(mktemp -d -t ldap.$$.XXXXXXXXXX)
printf "installing OpenLDAP"
if is_ubuntu; then
@@ -129,7 +131,8 @@
fi
echo "LDAP_PASSWORD is $LDAP_PASSWORD"
- local slappass=$(slappasswd -s $LDAP_PASSWORD)
+ local slappass
+ slappass=$(slappasswd -s $LDAP_PASSWORD)
printf "LDAP secret is $slappass\n"
# Create manager.ldif and add to olcdb
diff --git a/lib/lvm b/lib/lvm
index 8afd543..468a99a 100644
--- a/lib/lvm
+++ b/lib/lvm
@@ -56,7 +56,8 @@
# If the backing physical device is a loop device, it was probably setup by DevStack
if [[ -n "$backing_file" ]] && [[ -e "$backing_file" ]]; then
- local vg_dev=$(sudo losetup -j $backing_file | awk -F':' '/'$BACKING_FILE_SUFFIX'/ { print $1}')
+ local vg_dev
+ vg_dev=$(sudo losetup -j $backing_file | awk -F':' '/'$BACKING_FILE_SUFFIX'/ { print $1}')
sudo losetup -d $vg_dev
rm -f $backing_file
fi
@@ -89,7 +90,8 @@
if ! sudo vgs $vg; then
# Only create if the file doesn't already exists
[[ -f $backing_file ]] || truncate -s $size $backing_file
- local vg_dev=`sudo losetup -f --show $backing_file`
+ local vg_dev
+ vg_dev=`sudo losetup -f --show $backing_file`
# Only create volume group if it doesn't already exist
if ! sudo vgs $vg; then
diff --git a/lib/neutron-legacy b/lib/neutron-legacy
index e67bd4a..4e51425 100644
--- a/lib/neutron-legacy
+++ b/lib/neutron-legacy
@@ -468,19 +468,13 @@
function create_nova_conf_neutron {
iniset $NOVA_CONF DEFAULT network_api_class "nova.network.neutronv2.api.API"
-
- if [ "$ENABLE_IDENTITY_V2" == "False" ]; then
- iniset $NOVA_CONF neutron auth_plugin "v3password"
- iniset $NOVA_CONF neutron auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v3"
- iniset $NOVA_CONF neutron username "$Q_ADMIN_USERNAME"
- iniset $NOVA_CONF neutron password "$SERVICE_PASSWORD"
- iniset $NOVA_CONF neutron user_domain_name "default"
- else
- iniset $NOVA_CONF neutron admin_username "$Q_ADMIN_USERNAME"
- iniset $NOVA_CONF neutron admin_password "$SERVICE_PASSWORD"
- iniset $NOVA_CONF neutron admin_auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0"
- iniset $NOVA_CONF neutron admin_tenant_name "$SERVICE_TENANT_NAME"
- fi
+ iniset $NOVA_CONF neutron auth_plugin "v3password"
+ iniset $NOVA_CONF neutron auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v3"
+ iniset $NOVA_CONF neutron username "$Q_ADMIN_USERNAME"
+ iniset $NOVA_CONF neutron password "$SERVICE_PASSWORD"
+ iniset $NOVA_CONF neutron user_domain_name "Default"
+ iniset $NOVA_CONF neutron project_name "$SERVICE_TENANT_NAME"
+ iniset $NOVA_CONF neutron project_domain_name "Default"
iniset $NOVA_CONF neutron auth_strategy "$Q_AUTH_STRATEGY"
iniset $NOVA_CONF neutron region_name "$REGION_NAME"
iniset $NOVA_CONF neutron url "${Q_PROTOCOL}://$Q_HOST:$Q_PORT"
@@ -806,15 +800,16 @@
local IP_ADD=""
local IP_DEL=""
- local DEFAULT_ROUTE_GW=$(ip r | awk "/default.+$from_intf/ { print \$3; exit }")
+ local DEFAULT_ROUTE_GW
+ DEFAULT_ROUTE_GW=$(ip r | awk "/default.+$from_intf/ { print \$3; exit }")
local ADD_OVS_PORT=""
if [[ $af == "inet" ]]; then
- IP_BRD=$(ip -f $af a s dev $from_intf | grep $HOST_IP | awk '{ print $2, $3, $4; exit }')
+ IP_BRD=$(ip -f $af a s dev $from_intf | grep inet | awk '{ print $2, $3, $4; exit }')
fi
if [[ $af == "inet6" ]]; then
- IP_BRD=$(ip -f $af a s dev $from_intf | grep $HOST_IPV6 | awk '{ print $2, $3, $4; exit }')
+ IP_BRD=$(ip -f $af a s dev $from_intf | grep inet6 | awk '{ print $2, $3, $4; exit }')
fi
if [ "$DEFAULT_ROUTE_GW" != "" ]; then
@@ -838,18 +833,20 @@
# runs that a clean run would need to clean up
function cleanup_neutron {
- _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False "inet"
+ if [[ -n "$OVS_PHYSICAL_BRIDGE" ]]; then
+ _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False "inet"
- if [[ $(ip -f inet6 a s dev "$OVS_PHYSICAL_BRIDGE" | grep -c 'global') != 0 ]]; then
- _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False "inet6"
- fi
+ if [[ $(ip -f inet6 a s dev "$OVS_PHYSICAL_BRIDGE" | grep -c 'global') != 0 ]]; then
+ _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False "inet6"
+ fi
- if is_provider_network && is_ironic_hardware; then
- for IP in $(ip addr show dev $OVS_PHYSICAL_BRIDGE | grep ' inet ' | awk '{print $2}'); do
- sudo ip addr del $IP dev $OVS_PHYSICAL_BRIDGE
- sudo ip addr add $IP dev $PUBLIC_INTERFACE
- done
- sudo route del -net $FIXED_RANGE gw $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE
+ if is_provider_network && is_ironic_hardware; then
+ for IP in $(ip addr show dev $OVS_PHYSICAL_BRIDGE | grep ' inet ' | awk '{print $2}'); do
+ sudo ip addr del $IP dev $OVS_PHYSICAL_BRIDGE
+ sudo ip addr add $IP dev $PUBLIC_INTERFACE
+ done
+ sudo route del -net $FIXED_RANGE gw $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE
+ fi
fi
if is_neutron_ovs_base_plugin; then
@@ -1165,6 +1162,9 @@
# _neutron_deploy_rootwrap_filters() - deploy rootwrap filters to $Q_CONF_ROOTWRAP_D (owned by root).
function _neutron_deploy_rootwrap_filters {
+ if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then
+ return
+ fi
local srcdir=$1
sudo install -d -o root -m 755 $Q_CONF_ROOTWRAP_D
sudo install -o root -m 644 $srcdir/etc/neutron/rootwrap.d/* $Q_CONF_ROOTWRAP_D/
@@ -1244,7 +1244,8 @@
subnet_params+="--gateway $NETWORK_GATEWAY "
subnet_params+="--name $PRIVATE_SUBNET_NAME "
subnet_params+="$NET_ID $FIXED_RANGE"
- local subnet_id=$(neutron subnet-create $subnet_params | grep ' id ' | get_field 2)
+ local subnet_id
+ subnet_id=$(neutron subnet-create $subnet_params | grep ' id ' | get_field 2)
die_if_not_set $LINENO subnet_id "Failure creating private IPv4 subnet for $TENANT_ID"
echo $subnet_id
}
@@ -1259,7 +1260,8 @@
subnet_params+="--gateway $IPV6_PRIVATE_NETWORK_GATEWAY "
subnet_params+="--name $IPV6_PRIVATE_SUBNET_NAME "
subnet_params+="$NET_ID $FIXED_RANGE_V6 $ipv6_modes"
- local ipv6_subnet_id=$(neutron subnet-create $subnet_params | grep ' id ' | get_field 2)
+ local ipv6_subnet_id
+ ipv6_subnet_id=$(neutron subnet-create $subnet_params | grep ' id ' | get_field 2)
die_if_not_set $LINENO ipv6_subnet_id "Failure creating private IPv6 subnet for $TENANT_ID"
echo $ipv6_subnet_id
}
@@ -1272,7 +1274,8 @@
subnet_params+="--name $PUBLIC_SUBNET_NAME "
subnet_params+="$EXT_NET_ID $FLOATING_RANGE "
subnet_params+="-- --enable_dhcp=False"
- local id_and_ext_gw_ip=$(neutron subnet-create $subnet_params | grep -e 'gateway_ip' -e ' id ')
+ local id_and_ext_gw_ip
+ id_and_ext_gw_ip=$(neutron subnet-create $subnet_params | grep -e 'gateway_ip' -e ' id ')
die_if_not_set $LINENO id_and_ext_gw_ip "Failure creating public IPv4 subnet"
echo $id_and_ext_gw_ip
}
@@ -1284,7 +1287,8 @@
subnet_params+="--name $IPV6_PUBLIC_SUBNET_NAME "
subnet_params+="$EXT_NET_ID $IPV6_PUBLIC_RANGE "
subnet_params+="-- --enable_dhcp=False"
- local ipv6_id_and_ext_gw_ip=$(neutron subnet-create $subnet_params | grep -e 'gateway_ip' -e ' id ')
+ local ipv6_id_and_ext_gw_ip
+ ipv6_id_and_ext_gw_ip=$(neutron subnet-create $subnet_params | grep -e 'gateway_ip' -e ' id ')
die_if_not_set $LINENO ipv6_id_and_ext_gw_ip "Failure creating an IPv6 public subnet"
echo $ipv6_id_and_ext_gw_ip
}
@@ -1293,8 +1297,10 @@
function _neutron_configure_router_v4 {
neutron router-interface-add $ROUTER_ID $SUBNET_ID
# Create a public subnet on the external network
- local id_and_ext_gw_ip=$(_neutron_create_public_subnet_v4 $EXT_NET_ID)
- local ext_gw_ip=$(echo $id_and_ext_gw_ip | get_field 2)
+ local id_and_ext_gw_ip
+ id_and_ext_gw_ip=$(_neutron_create_public_subnet_v4 $EXT_NET_ID)
+ local ext_gw_ip
+ ext_gw_ip=$(echo $id_and_ext_gw_ip | get_field 2)
PUB_SUBNET_ID=$(echo $id_and_ext_gw_ip | get_field 5)
# Configure the external network as the default router gateway
neutron router-gateway-set $ROUTER_ID $EXT_NET_ID
@@ -1331,9 +1337,12 @@
function _neutron_configure_router_v6 {
neutron router-interface-add $ROUTER_ID $IPV6_SUBNET_ID
# Create a public subnet on the external network
- local ipv6_id_and_ext_gw_ip=$(_neutron_create_public_subnet_v6 $EXT_NET_ID)
- local ipv6_ext_gw_ip=$(echo $ipv6_id_and_ext_gw_ip | get_field 2)
- local ipv6_pub_subnet_id=$(echo $ipv6_id_and_ext_gw_ip | get_field 5)
+ local ipv6_id_and_ext_gw_ip
+ ipv6_id_and_ext_gw_ip=$(_neutron_create_public_subnet_v6 $EXT_NET_ID)
+ local ipv6_ext_gw_ip
+ ipv6_ext_gw_ip=$(echo $ipv6_id_and_ext_gw_ip | get_field 2)
+ local ipv6_pub_subnet_id
+ ipv6_pub_subnet_id=$(echo $ipv6_id_and_ext_gw_ip | get_field 5)
# If the external network has not already been set as the default router
# gateway when configuring an IPv4 public subnet, do so now
@@ -1351,7 +1360,8 @@
die_if_not_set $LINENO IPV6_ROUTER_GW_IP "Failure retrieving IPV6_ROUTER_GW_IP"
if is_neutron_ovs_base_plugin && [[ "$Q_USE_NAMESPACE" = "True" ]]; then
- local ext_gw_interface=$(_neutron_get_ext_gw_interface)
+ local ext_gw_interface
+ ext_gw_interface=$(_neutron_get_ext_gw_interface)
local ipv6_cidr_len=${IPV6_PUBLIC_RANGE#*/}
# Configure interface for public bridge
diff --git a/lib/neutron_plugins/embrane b/lib/neutron_plugins/embrane
index 6b4819e..2028496 100644
--- a/lib/neutron_plugins/embrane
+++ b/lib/neutron_plugins/embrane
@@ -10,7 +10,8 @@
source $TOP_DIR/lib/neutron_plugins/openvswitch
function save_function {
- local ORIG_FUNC=$(declare -f $1)
+ local ORIG_FUNC
+ ORIG_FUNC=$(declare -f $1)
local NEW_FUNC="$2${ORIG_FUNC#$1}"
eval "$NEW_FUNC"
}
diff --git a/lib/neutron_plugins/oneconvergence b/lib/neutron_plugins/oneconvergence
deleted file mode 100644
index 0c570e5..0000000
--- a/lib/neutron_plugins/oneconvergence
+++ /dev/null
@@ -1,77 +0,0 @@
-#!/bin/bash
-#
-# Neutron One Convergence plugin
-# ------------------------------
-
-# Save trace setting
-OC_XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-source $TOP_DIR/lib/neutron_plugins/ovs_base
-
-Q_L3_ENABLED=true
-Q_L3_ROUTER_PER_TENANT=true
-Q_USE_NAMESPACE=true
-
-function neutron_plugin_install_agent_packages {
- _neutron_ovs_base_install_agent_packages
-}
-# Configure common parameters
-function neutron_plugin_configure_common {
-
- Q_PLUGIN_CONF_PATH=etc/neutron/plugins/oneconvergence
- Q_PLUGIN_CONF_FILENAME=nvsdplugin.ini
- Q_PLUGIN_CLASS="neutron.plugins.oneconvergence.plugin.OneConvergencePluginV2"
-}
-
-# Configure plugin specific information
-function neutron_plugin_configure_service {
- iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_ip $NVSD_IP
- iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_port $NVSD_PORT
- iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_user $NVSD_USER
- iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_passwd $NVSD_PASSWD
-}
-
-function neutron_plugin_configure_debug_command {
- _neutron_ovs_base_configure_debug_command
-}
-
-function neutron_plugin_setup_interface_driver {
- local conf_file=$1
- iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver
-}
-
-function has_neutron_plugin_security_group {
- # 1 means False here
- return 0
-}
-
-function setup_integration_bridge {
- _neutron_ovs_base_setup_bridge $OVS_BRIDGE
-}
-
-function neutron_plugin_configure_dhcp_agent {
- setup_integration_bridge
- iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport
-}
-
-function neutron_plugin_configure_l3_agent {
- _neutron_ovs_base_configure_l3_agent
- iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport
-}
-
-function neutron_plugin_configure_plugin_agent {
-
- AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-nvsd-agent"
-
- _neutron_ovs_base_configure_firewall_driver
-}
-
-function neutron_plugin_create_nova_conf {
- if ( is_service_enabled n-cpu && ! ( is_service_enabled q-dhcp )) ; then
- setup_integration_bridge
- fi
-}
-
-# Restore xtrace
-$OC_XTRACE
diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base
index b012683..d3fd198 100644
--- a/lib/neutron_plugins/ovs_base
+++ b/lib/neutron_plugins/ovs_base
@@ -49,8 +49,10 @@
function _neutron_ovs_base_install_ubuntu_dkms {
# install Dynamic Kernel Module Support packages if needed
- local kernel_version=$(uname -r)
- local kernel_major_minor=`echo $kernel_version | cut -d. -f1-2`
+ local kernel_version
+ kernel_version=$(uname -r)
+ local kernel_major_minor
+ kernel_major_minor=`echo $kernel_version | cut -d. -f1-2`
# From kernel 3.13 on, openvswitch-datapath-dkms is not needed
if [ `vercmp_numbers "$kernel_major_minor" "3.13"` -lt "0" ]; then
install_package "dkms openvswitch-datapath-dkms linux-headers-$kernel_version"
diff --git a/lib/neutron_plugins/plumgrid b/lib/neutron_plugins/plumgrid
deleted file mode 100644
index 0d711fe..0000000
--- a/lib/neutron_plugins/plumgrid
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/bin/bash
-#
-# PLUMgrid Neutron Plugin
-# Edgar Magana emagana@plumgrid.com
-# ------------------------------------
-
-# Save trace settings
-PG_XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-function neutron_plugin_create_nova_conf {
- :
-}
-
-function neutron_plugin_setup_interface_driver {
- :
-}
-
-function neutron_plugin_configure_common {
- Q_PLUGIN_CONF_PATH=etc/neutron/plugins/plumgrid
- Q_PLUGIN_CONF_FILENAME=plumgrid.ini
- Q_PLUGIN_CLASS="neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2"
- PLUMGRID_DIRECTOR_IP=${PLUMGRID_DIRECTOR_IP:-localhost}
- PLUMGRID_DIRECTOR_PORT=${PLUMGRID_DIRECTOR_PORT:-7766}
- PLUMGRID_ADMIN=${PLUMGRID_ADMIN:-username}
- PLUMGRID_PASSWORD=${PLUMGRID_PASSWORD:-password}
- PLUMGRID_TIMEOUT=${PLUMGRID_TIMEOUT:-70}
- PLUMGRID_DRIVER=${PLUMGRID_DRIVER:-neutron.plugins.plumgrid.drivers.fake_plumlib.Plumlib}
-}
-
-function neutron_plugin_configure_service {
- iniset /$Q_PLUGIN_CONF_FILE plumgriddirector director_server $PLUMGRID_DIRECTOR_IP
- iniset /$Q_PLUGIN_CONF_FILE plumgriddirector director_server_port $PLUMGRID_DIRECTOR_PORT
- iniset /$Q_PLUGIN_CONF_FILE plumgriddirector username $PLUMGRID_ADMIN
- iniset /$Q_PLUGIN_CONF_FILE plumgriddirector password $PLUMGRID_PASSWORD
- iniset /$Q_PLUGIN_CONF_FILE plumgriddirector servertimeout $PLUMGRID_TIMEOUT
- iniset /$Q_PLUGIN_CONF_FILE plumgriddirector driver $PLUMGRID_DRIVER
-}
-
-function neutron_plugin_configure_debug_command {
- :
-}
-
-function is_neutron_ovs_base_plugin {
- # False
- return 1
-}
-
-function has_neutron_plugin_security_group {
- # return 0 means enabled
- return 0
-}
-
-function neutron_plugin_check_adv_test_requirements {
- is_service_enabled q-agt && is_service_enabled q-dhcp && return 0
-}
-# Restore xtrace
-$PG_XTRACE
diff --git a/lib/nova b/lib/nova
index 9830276..6e6075c 100644
--- a/lib/nova
+++ b/lib/nova
@@ -202,14 +202,16 @@
clean_iptables
# Destroy old instances
- local instances=`sudo virsh list --all | grep $INSTANCE_NAME_PREFIX | sed "s/.*\($INSTANCE_NAME_PREFIX[0-9a-fA-F]*\).*/\1/g"`
+ local instances
+ instances=`sudo virsh list --all | grep $INSTANCE_NAME_PREFIX | sed "s/.*\($INSTANCE_NAME_PREFIX[0-9a-fA-F]*\).*/\1/g"`
if [ ! "$instances" = "" ]; then
echo $instances | xargs -n1 sudo virsh destroy || true
echo $instances | xargs -n1 sudo virsh undefine --managed-save || true
fi
# Logout and delete iscsi sessions
- local tgts=$(sudo iscsiadm --mode node | grep $VOLUME_NAME_PREFIX | cut -d ' ' -f2)
+ local tgts
+ tgts=$(sudo iscsiadm --mode node | grep $VOLUME_NAME_PREFIX | cut -d ' ' -f2)
local target
for target in $tgts; do
sudo iscsiadm --mode node -T $target --logout || true
@@ -245,8 +247,10 @@
function _config_nova_apache_wsgi {
sudo mkdir -p $NOVA_WSGI_DIR
- local nova_apache_conf=$(apache_site_config_for nova-api)
- local nova_ec2_apache_conf=$(apache_site_config_for nova-ec2-api)
+ local nova_apache_conf
+ nova_apache_conf=$(apache_site_config_for nova-api)
+ local nova_ec2_apache_conf
+ nova_ec2_apache_conf=$(apache_site_config_for nova-ec2-api)
local nova_ssl=""
local nova_certfile=""
local nova_keyfile=""
@@ -784,7 +788,8 @@
export PATH=$NOVA_BIN_DIR:$PATH
# If the site is not enabled then we are in a grenade scenario
- local enabled_site_file=$(apache_site_config_for nova-api)
+ local enabled_site_file
+ enabled_site_file=$(apache_site_config_for nova-api)
if [ -f ${enabled_site_file} ] && [ "$NOVA_USE_MOD_WSGI" == "True" ]; then
enable_apache_site nova-api
enable_apache_site nova-ec2-api
diff --git a/lib/swift b/lib/swift
index 2c4ddfe..27832dc 100644
--- a/lib/swift
+++ b/lib/swift
@@ -205,9 +205,12 @@
# copy apache vhost file and set name and port
local node_number
for node_number in ${SWIFT_REPLICAS_SEQ}; do
- local object_port=$(( OBJECT_PORT_BASE + 10 * (node_number - 1) ))
- local container_port=$(( CONTAINER_PORT_BASE + 10 * (node_number - 1) ))
- local account_port=$(( ACCOUNT_PORT_BASE + 10 * (node_number - 1) ))
+ local object_port
+ object_port=$(( OBJECT_PORT_BASE + 10 * (node_number - 1) ))
+ local container_port
+ container_port=$(( CONTAINER_PORT_BASE + 10 * (node_number - 1) ))
+ local account_port
+ account_port=$(( ACCOUNT_PORT_BASE + 10 * (node_number - 1) ))
sudo cp ${SWIFT_DIR}/examples/apache2/object-server.template $(apache_site_config_for object-server-${node_number})
sudo sed -e "
@@ -504,7 +507,8 @@
if is_service_enabled keystone; then
iniuncomment ${testfile} func_test auth_version
- local auth_vers=$(iniget ${testfile} func_test auth_version)
+ local auth_vers
+ auth_vers=$(iniget ${testfile} func_test auth_version)
iniset ${testfile} func_test auth_host ${KEYSTONE_SERVICE_HOST}
iniset ${testfile} func_test auth_port ${KEYSTONE_AUTH_PORT}
if [[ $auth_vers == "3" ]]; then
@@ -514,7 +518,8 @@
fi
fi
- local user_group=$(id -g ${STACK_USER})
+ local user_group
+ user_group=$(id -g ${STACK_USER})
sudo install -d -o ${STACK_USER} -g ${user_group} ${SWIFT_DATA_DIR}
local swift_log_dir=${SWIFT_DATA_DIR}/logs
@@ -540,7 +545,8 @@
# First do a bit of setup by creating the directories and
# changing the permissions so we can run it as our user.
- local user_group=$(id -g ${STACK_USER})
+ local user_group
+ user_group=$(id -g ${STACK_USER})
sudo install -d -o ${STACK_USER} -g ${user_group} ${SWIFT_DATA_DIR}/{drives,cache,run,logs}
# Create a loopback disk and format it to XFS.
@@ -607,7 +613,8 @@
KEYSTONE_CATALOG_BACKEND=${KEYSTONE_CATALOG_BACKEND:-sql}
- local another_role=$(get_or_create_role "anotherrole")
+ local another_role
+ another_role=$(get_or_create_role "anotherrole")
# NOTE(jroll): Swift doesn't need the admin role here, however Ironic uses
# temp urls, which break when uploaded by a non-admin role
@@ -623,33 +630,40 @@
"$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s"
fi
- local swift_tenant_test1=$(get_or_create_project swifttenanttest1 default)
+ local swift_tenant_test1
+ swift_tenant_test1=$(get_or_create_project swifttenanttest1 default)
die_if_not_set $LINENO swift_tenant_test1 "Failure creating swift_tenant_test1"
SWIFT_USER_TEST1=$(get_or_create_user swiftusertest1 $swiftusertest1_password \
"default" "test@example.com")
die_if_not_set $LINENO SWIFT_USER_TEST1 "Failure creating SWIFT_USER_TEST1"
get_or_add_user_project_role admin $SWIFT_USER_TEST1 $swift_tenant_test1
- local swift_user_test3=$(get_or_create_user swiftusertest3 $swiftusertest3_password \
+ local swift_user_test3
+ swift_user_test3=$(get_or_create_user swiftusertest3 $swiftusertest3_password \
"default" "test3@example.com")
die_if_not_set $LINENO swift_user_test3 "Failure creating swift_user_test3"
get_or_add_user_project_role $another_role $swift_user_test3 $swift_tenant_test1
- local swift_tenant_test2=$(get_or_create_project swifttenanttest2 default)
+ local swift_tenant_test2
+ swift_tenant_test2=$(get_or_create_project swifttenanttest2 default)
die_if_not_set $LINENO swift_tenant_test2 "Failure creating swift_tenant_test2"
- local swift_user_test2=$(get_or_create_user swiftusertest2 $swiftusertest2_password \
+ local swift_user_test2
+ swift_user_test2=$(get_or_create_user swiftusertest2 $swiftusertest2_password \
"default" "test2@example.com")
die_if_not_set $LINENO swift_user_test2 "Failure creating swift_user_test2"
get_or_add_user_project_role admin $swift_user_test2 $swift_tenant_test2
- local swift_domain=$(get_or_create_domain swift_test 'Used for swift functional testing')
+ local swift_domain
+ swift_domain=$(get_or_create_domain swift_test 'Used for swift functional testing')
die_if_not_set $LINENO swift_domain "Failure creating swift_test domain"
- local swift_tenant_test4=$(get_or_create_project swifttenanttest4 $swift_domain)
+ local swift_tenant_test4
+ swift_tenant_test4=$(get_or_create_project swifttenanttest4 $swift_domain)
die_if_not_set $LINENO swift_tenant_test4 "Failure creating swift_tenant_test4"
- local swift_user_test4=$(get_or_create_user swiftusertest4 $swiftusertest4_password \
+ local swift_user_test4
+ swift_user_test4=$(get_or_create_user swiftusertest4 $swiftusertest4_password \
$swift_domain "test4@example.com")
die_if_not_set $LINENO swift_user_test4 "Failure creating swift_user_test4"
get_or_add_user_project_role admin $swift_user_test4 $swift_tenant_test4
diff --git a/lib/tempest b/lib/tempest
index 6eeab4e..10dd652 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -119,10 +119,6 @@
pip_install_gr testrepository
fi
- # Used during configuration so make sure we have the correct
- # version installed
- pip_install_gr python-openstackclient
-
local image_lines
local images
local num_images
@@ -331,7 +327,6 @@
if [[ ! -z "$TEMPEST_HTTP_IMAGE" ]]; then
iniset $TEMPEST_CONFIG image http_image $TEMPEST_HTTP_IMAGE
fi
- iniset $TEMPEST_CONFIG image-feature-enabled deactivate_image true
# Image Features
iniset $TEMPEST_CONFIG image-feature-enabled deactivate_image True
@@ -363,7 +358,8 @@
# Compute Features
# Run ``verify_tempest_config -ur`` to retrieve enabled extensions on API endpoints
# NOTE(mtreinish): This must be done after auth settings are added to the tempest config
- local tmp_cfg_file=$(mktemp)
+ local tmp_cfg_file
+ tmp_cfg_file=$(mktemp)
cd $TEMPEST_DIR
tox -revenv -- verify-tempest-config -uro $tmp_cfg_file
@@ -392,6 +388,8 @@
if is_service_enabled n-cell; then
# Cells doesn't support shelving/unshelving
iniset $TEMPEST_CONFIG compute-feature-enabled shelve False
+ # Cells doesn't support hot-plugging virtual interfaces.
+ iniset $TEMPEST_CONFIG compute-feature-enabled interface_attach False
fi
# Network
diff --git a/lib/tls b/lib/tls
index 8ff2027..f4740b8 100644
--- a/lib/tls
+++ b/lib/tls
@@ -346,7 +346,8 @@
# we need to change it.
function fix_system_ca_bundle_path {
if is_service_enabled tls-proxy || [ "$USE_SSL" == "True" ]; then
- local capath=$(python -c $'try:\n from requests import certs\n print certs.where()\nexcept ImportError: pass')
+ local capath
+ capath=$(python -c $'try:\n from requests import certs\n print certs.where()\nexcept ImportError: pass')
if [[ ! $capath == "" && ! $capath =~ ^/etc/.* && ! -L $capath ]]; then
if is_fedora; then
diff --git a/samples/local.conf b/samples/local.conf
index ce70073..cb293b6 100644
--- a/samples/local.conf
+++ b/samples/local.conf
@@ -28,7 +28,7 @@
# and they will be added to ``local.conf``.
SERVICE_TOKEN=azertytoken
ADMIN_PASSWORD=nomoresecrete
-MYSQL_PASSWORD=stackdb
+DATABASE_PASSWORD=stackdb
RABBIT_PASSWORD=stackqueue
SERVICE_PASSWORD=$ADMIN_PASSWORD
diff --git a/stack.sh b/stack.sh
index 01668c2..8024731 100755
--- a/stack.sh
+++ b/stack.sh
@@ -93,6 +93,15 @@
exit 1
fi
+# Provide a safety switch for devstack. If you do a lot of devstack,
+# on a lot of different environments, you sometimes run it on the
+# wrong box. This makes there be a way to prevent that.
+if [[ -e $HOME/.no-devstack ]]; then
+ echo "You've marked this host as a no-devstack host, to save yourself from"
+ echo "running devstack accidentally. If this is in error, please remove the"
+ echo "~/.no-devstack file"
+ exit 1
+fi
# Prepare the environment
# -----------------------
@@ -278,14 +287,7 @@
# ... and also optional to be enabled
sudo yum-config-manager --enable rhel-7-server-optional-rpms
- RHEL_RDO_REPO_RPM=${RHEL7_RDO_REPO_RPM:-"https://repos.fedorapeople.org/repos/openstack/openstack-kilo/rdo-release-kilo-1.noarch.rpm"}
- RHEL_RDO_REPO_ID=${RHEL7_RDO_REPO_ID:-"openstack-kilo"}
-
- if ! sudo yum repolist enabled $RHEL_RDO_REPO_ID | grep -q $RHEL_RDO_REPO_ID; then
- echo "RDO repo not detected; installing"
- yum_install $RHEL_RDO_REPO_RPM || \
- die $LINENO "Error installing RDO repo, cannot continue"
- fi
+ sudo yum install -y https://rdoproject.org/repos/rdo-release.rpm
if is_oraclelinux; then
sudo yum-config-manager --enable ol7_optional_latest ol7_addons ol7_MySQL56
@@ -306,9 +308,6 @@
safe_chown -R $STACK_USER $DEST
safe_chmod 0755 $DEST
-# Basic test for ``$DEST`` path permissions (fatal on error unless skipped)
-check_path_perm_sanity ${DEST}
-
# Destination path for service data
DATA_DIR=${DATA_DIR:-${DEST}/data}
sudo mkdir -p $DATA_DIR
@@ -443,6 +442,8 @@
fi
fi
+# Basic test for ``$DEST`` path permissions (fatal on error unless skipped)
+check_path_perm_sanity ${DEST}
# Configure Error Traps
# ---------------------
@@ -545,6 +546,7 @@
# Phase: source
run_phase source
+
# Interactive Configuration
# -------------------------
@@ -560,7 +562,7 @@
if [[ -f $RC_DIR/localrc ]]; then
localrc=$TOP_DIR/localrc
else
- localrc=$TOP_DIR/.localrc.auto
+ localrc=$TOP_DIR/.localrc.password
fi
# If the password is not defined yet, proceed to prompt user for a password.
@@ -570,13 +572,15 @@
touch $localrc
fi
- # Presumably if we got this far it can only be that our localrc is missing
- # the required password. Prompt user for a password and write to localrc.
+ # Presumably if we got this far it can only be that our
+ # localrc is missing the required password. Prompt user for a
+ # password and write to localrc.
+
echo ''
echo '################################################################################'
echo $msg
echo '################################################################################'
- echo "This value will be written to your localrc file so you don't have to enter it "
+ echo "This value will be written to ${localrc} file so you don't have to enter it "
echo "again. Use only alphanumeric characters."
echo "If you leave this blank, a random default value will be used."
pw=" "
@@ -1007,14 +1011,27 @@
# Begone token auth
unset OS_TOKEN OS_URL
- # Set up password auth credentials now that Keystone is bootstrapped
- export OS_AUTH_URL=$KEYSTONE_AUTH_URI
- export OS_USERNAME=admin
- export OS_USER_DOMAIN_ID=default
- export OS_PASSWORD=$ADMIN_PASSWORD
- export OS_PROJECT_NAME=admin
- export OS_PROJECT_DOMAIN_ID=default
- export OS_REGION_NAME=$REGION_NAME
+ # Rather than just export these, we write them out to a
+ # intermediate userrc file that can also be used to debug if
+ # something goes wrong between here and running
+ # tools/create_userrc.sh (this script relies on services other
+ # than keystone being available, so we can't call it right now)
+ cat > $TOP_DIR/userrc_early <<EOF
+# Use this for debugging issues before files in accrc are created
+
+# Set up password auth credentials now that Keystone is bootstrapped
+export OS_AUTH_URL=$KEYSTONE_AUTH_URI
+export OS_USERNAME=admin
+export OS_USER_DOMAIN_ID=default
+export OS_PASSWORD=$ADMIN_PASSWORD
+export OS_PROJECT_NAME=admin
+export OS_PROJECT_DOMAIN_ID=default
+export OS_REGION_NAME=$REGION_NAME
+
+EOF
+
+ source $TOP_DIR/userrc_early
+
fi
# Write a clouds.yaml file
@@ -1357,6 +1374,8 @@
exec 1>&3
fi
+# Dump out the time totals
+time_totals
# Using the cloud
# ===============
diff --git a/stackrc b/stackrc
index c7c6313..4026ff8 100644
--- a/stackrc
+++ b/stackrc
@@ -103,6 +103,11 @@
# be disabled for automated testing by setting this value to False.
USE_SCREEN=True
+# Passwords generated by interactive devstack runs
+if [[ -r $RC_DIR/.localrc.password ]]; then
+ source $RC_DIR/.localrc.password
+fi
+
# allow local overrides of env variables, including repo config
if [[ -f $RC_DIR/localrc ]]; then
# Old-style user-supplied config
@@ -442,7 +447,7 @@
GITBRANCH["keystonemiddleware"]=${KEYSTONEMIDDLEWARE_BRANCH:-master}
# s3 support for swift
-SWIFT3_REPO=${SWIFT3_REPO:-${GIT_BASE}/stackforge/swift3.git}
+SWIFT3_REPO=${SWIFT3_REPO:-${GIT_BASE}/openstack/swift3.git}
SWIFT3_BRANCH=${SWIFT3_BRANCH:-master}
# ceilometer middleware
diff --git a/tests/test_truefalse.sh b/tests/test_truefalse.sh
index 2689589..03996ce 100755
--- a/tests/test_truefalse.sh
+++ b/tests/test_truefalse.sh
@@ -8,6 +8,14 @@
source $TOP/functions
source $TOP/tests/unittest.sh
+# common mistake is to use $FOO instead of "FOO"; in that case we
+# should die
+bash -c "source $TOP/functions-common; VAR=\$(trueorfalse False \$FOO)" &> /dev/null
+assert_equal 1 $? "missing test-value"
+
+VAL=$(trueorfalse False MISSING_VARIABLE)
+assert_equal "False" $VAL "blank test-value"
+
function test_trueorfalse {
local one=1
local captrue=True
diff --git a/tests/unittest.sh b/tests/unittest.sh
index 603652a..df7a8b4 100644
--- a/tests/unittest.sh
+++ b/tests/unittest.sh
@@ -20,8 +20,10 @@
# pass a test, printing out MSG
# usage: passed message
function passed {
- local lineno=$(caller 0 | awk '{print $1}')
- local function=$(caller 0 | awk '{print $2}')
+ local lineno
+ lineno=$(caller 0 | awk '{print $1}')
+ local function
+ function=$(caller 0 | awk '{print $2}')
local msg="$1"
if [ -z "$msg" ]; then
msg="OK"
@@ -33,8 +35,10 @@
# fail a test, printing out MSG
# usage: failed message
function failed {
- local lineno=$(caller 0 | awk '{print $1}')
- local function=$(caller 0 | awk '{print $2}')
+ local lineno
+ lineno=$(caller 0 | awk '{print $1}')
+ local function
+ function=$(caller 0 | awk '{print $2}')
local msg="$1"
FAILED_FUNCS+="$function:L$lineno\n"
echo "ERROR: $function:L$lineno!"
@@ -45,8 +49,10 @@
# assert string comparision of val1 equal val2, printing out msg
# usage: assert_equal val1 val2 msg
function assert_equal {
- local lineno=`caller 0 | awk '{print $1}'`
- local function=`caller 0 | awk '{print $2}'`
+ local lineno
+ lineno=`caller 0 | awk '{print $1}'`
+ local function
+ function=`caller 0 | awk '{print $2}'`
local msg=$3
if [ -z "$msg" ]; then
@@ -66,8 +72,10 @@
# assert variable is empty/blank, printing out msg
# usage: assert_empty VAR msg
function assert_empty {
- local lineno=`caller 0 | awk '{print $1}'`
- local function=`caller 0 | awk '{print $2}'`
+ local lineno
+ lineno=`caller 0 | awk '{print $1}'`
+ local function
+ function=`caller 0 | awk '{print $2}'`
local msg=$2
if [ -z "$msg" ]; then
diff --git a/tools/build_docs.sh b/tools/build_docs.sh
index fa84343..7dc492e 100755
--- a/tools/build_docs.sh
+++ b/tools/build_docs.sh
@@ -81,7 +81,7 @@
mkdir -p $FQ_HTML_BUILD/`dirname $f`;
$SHOCCO $f > $FQ_HTML_BUILD/$f.html
done
-for f in $(find functions functions-common inc lib pkg samples -type f -name \*); do
+for f in $(find functions functions-common inc lib pkg samples -type f -name \* ! -name *.md ! -name *.conf); do
echo $f
FILES+="$f "
mkdir -p $FQ_HTML_BUILD/`dirname $f`;
diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh
index de44abb..25f713c 100755
--- a/tools/create_userrc.sh
+++ b/tools/create_userrc.sh
@@ -190,7 +190,8 @@
local user_passwd=$5
# The admin user can see all user's secret AWS keys, it does not looks good
- local line=`openstack ec2 credentials list --user $user_id | grep " $project_id "`
+ local line
+ line=$(openstack ec2 credentials list --user $user_id | grep " $project_id " || true)
if [ -z "$line" ]; then
openstack ec2 credentials create --user $user_id --project $project_id 1>&2
line=`openstack ec2 credentials list --user $user_id | grep " $project_id "`
diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh
index a601cf2..9ae2ae7 100755
--- a/tools/fixup_stuff.sh
+++ b/tools/fixup_stuff.sh
@@ -108,7 +108,7 @@
sudo setenforce 0
fi
- FORCE_FIREWALLD=$(trueorfalse False $FORCE_FIREWALLD)
+ FORCE_FIREWALLD=$(trueorfalse False FORCE_FIREWALLD)
if [[ $FORCE_FIREWALLD == "False" ]]; then
# On Fedora 20 firewalld interacts badly with libvirt and
# slows things down significantly (this issue was fixed in
@@ -135,7 +135,7 @@
fi
fi
- if [[ "$os_RELEASE" -ge "21" ]]; then
+ if [[ "$os_VENDOR" == "Fedora" ]] && [[ "$os_RELEASE" -ge "21" ]]; then
# requests ships vendored version of chardet/urllib3, but on
# fedora these are symlinked back to the primary versions to
# avoid duplication of code on disk. This is fine when
@@ -152,9 +152,9 @@
# https://bugs.launchpad.net/glance/+bug/1476770
# https://bugzilla.redhat.com/show_bug.cgi?id=1253823
- base_path=/usr/lib/python2.7/site-packages/requests/packages
+ base_path=$(get_package_path requests)/packages
if [ -L $base_path/chardet -o -L $base_path/urllib3 ]; then
- sudo rm -f /usr/lib/python2.7/site-packages/requests/packages/{chardet,urllib3}
+ sudo rm -f $base_path/{chardet,urllib3}
# install requests with the bundled urllib3 to avoid conflicts
pip_install --upgrade --force-reinstall requests
fi
diff --git a/tools/install_pip.sh b/tools/install_pip.sh
index 7b42c8c..13c1786 100755
--- a/tools/install_pip.sh
+++ b/tools/install_pip.sh
@@ -42,6 +42,15 @@
function install_get_pip {
+ # If get-pip.py isn't python, delete it. This was probably an
+ # outage on the server.
+ if [[ -r $LOCAL_PIP ]]; then
+ if ! head -1 $LOCAL_PIP | grep -q '#!/usr/bin/env python'; then
+ echo "WARNING: Corrupt $LOCAL_PIP found removing"
+ rm $LOCAL_PIP
+ fi
+ fi
+
# The OpenStack gate and others put a cached version of get-pip.py
# for this to find, explicitly to avoid download issues.
#
@@ -53,8 +62,15 @@
# since and only download if a new version is out -- but only if
# it seems we downloaded the file originally.
if [[ ! -r $LOCAL_PIP || -r $LOCAL_PIP.downloaded ]]; then
- curl --retry 6 --retry-delay 5 \
- -z $LOCAL_PIP -o $LOCAL_PIP $PIP_GET_PIP_URL || \
+ # only test freshness if LOCAL_PIP is actually there,
+ # otherwise we generate a scary warning.
+ local timecond=""
+ if [[ -r $LOCAL_PIP ]]; then
+ timecond="-z $LOCAL_PIP"
+ fi
+
+ curl -f --retry 6 --retry-delay 5 \
+ $timecond -o $LOCAL_PIP $PIP_GET_PIP_URL || \
die $LINENO "Download of get-pip.py failed"
touch $LOCAL_PIP.downloaded
fi
diff --git a/tools/peakmem_tracker.sh b/tools/peakmem_tracker.sh
index 0d5728a..ecbd79a 100755
--- a/tools/peakmem_tracker.sh
+++ b/tools/peakmem_tracker.sh
@@ -41,10 +41,12 @@
# snapshot of current usage; i.e. checking the latest entry in the
# file will give the peak-memory usage
function tracker {
- local low_point=$(get_mem_available)
+ local low_point
+ low_point=$(get_mem_available)
while [ 1 ]; do
- local mem_available=$(get_mem_available)
+ local mem_available
+ mem_available=$(get_mem_available)
if [[ $mem_available -lt $low_point ]]; then
low_point=$mem_available
diff --git a/tools/xen/README.md b/tools/xen/README.md
index 6212cc5..a1adf59 100644
--- a/tools/xen/README.md
+++ b/tools/xen/README.md
@@ -77,7 +77,7 @@
# NOTE: these need to be specified, otherwise devstack will try
# to prompt for these passwords, blocking the install process.
- MYSQL_PASSWORD=my_super_secret
+ DATABASE_PASSWORD=my_super_secret
SERVICE_TOKEN=my_super_secret
ADMIN_PASSWORD=my_super_secret
SERVICE_PASSWORD=my_super_secret
diff --git a/tools/xen/scripts/install-os-vpx.sh b/tools/xen/scripts/install-os-vpx.sh
index 1ebbeaf..66f7ef4 100755
--- a/tools/xen/scripts/install-os-vpx.sh
+++ b/tools/xen/scripts/install-os-vpx.sh
@@ -100,7 +100,8 @@
{
local v="$1"
echo "Installing VM interface on [$BRIDGE]"
- local out_network_uuid=$(find_network "$BRIDGE")
+ local out_network_uuid
+ out_network_uuid=$(find_network "$BRIDGE")
xe vif-create vm-uuid="$v" network-uuid="$out_network_uuid" device="0"
}
diff --git a/tools/xen/scripts/uninstall-os-vpx.sh b/tools/xen/scripts/uninstall-os-vpx.sh
index 1ed2494..96dad7e 100755
--- a/tools/xen/scripts/uninstall-os-vpx.sh
+++ b/tools/xen/scripts/uninstall-os-vpx.sh
@@ -35,9 +35,12 @@
destroy_vdi()
{
local vbd_uuid="$1"
- local type=$(xe_min vbd-list uuid=$vbd_uuid params=type)
- local dev=$(xe_min vbd-list uuid=$vbd_uuid params=userdevice)
- local vdi_uuid=$(xe_min vbd-list uuid=$vbd_uuid params=vdi-uuid)
+ local type
+ type=$(xe_min vbd-list uuid=$vbd_uuid params=type)
+ local dev
+ dev=$(xe_min vbd-list uuid=$vbd_uuid params=userdevice)
+ local vdi_uuid
+ vdi_uuid=$(xe_min vbd-list uuid=$vbd_uuid params=vdi-uuid)
if [ "$type" == 'Disk' ] && [ "$dev" != 'xvda' ] && [ "$dev" != '0' ]; then
xe vdi-destroy uuid=$vdi_uuid
@@ -47,7 +50,8 @@
uninstall()
{
local vm_uuid="$1"
- local power_state=$(xe_min vm-list uuid=$vm_uuid params=power-state)
+ local power_state
+ power_state=$(xe_min vm-list uuid=$vm_uuid params=power-state)
if [ "$power_state" != "halted" ]; then
xe vm-shutdown vm=$vm_uuid force=true
diff --git a/tools/xen/test_functions.sh b/tools/xen/test_functions.sh
index 924e773..324e6a1 100755
--- a/tools/xen/test_functions.sh
+++ b/tools/xen/test_functions.sh
@@ -165,7 +165,8 @@
function test_get_local_sr {
setup_xe_response "uuid123"
- local RESULT=$(. mocks && get_local_sr)
+ local RESULT
+ RESULT=$(. mocks && get_local_sr)
[ "$RESULT" == "uuid123" ]
@@ -173,7 +174,8 @@
}
function test_get_local_sr_path {
- local RESULT=$(mock_out get_local_sr "uuid1" && get_local_sr_path)
+ local RESULT
+ RESULT=$(mock_out get_local_sr "uuid1" && get_local_sr_path)
[ "/var/run/sr-mount/uuid1" == "$RESULT" ]
}
diff --git a/tox.ini b/tox.ini
index 788fea9..0df9877 100644
--- a/tox.ini
+++ b/tox.ini
@@ -8,7 +8,8 @@
install_command = pip install {opts} {packages}
[testenv:bashate]
-deps = bashate
+deps =
+ {env:BASHATE_INSTALL_PATH:bashate==0.3.1}
whitelist_externals = bash
commands = bash -c "find {toxinidir} \
-not \( -type d -name .?\* -prune \) \ # prune all 'dot' dirs
@@ -24,7 +25,7 @@
-wholename \*/inc/\* -or \ # /inc files and
-wholename \*/lib/\* \ # /lib files are shell, but
\) \ # have no extension
- -print0 | xargs -0 bashate -v"
+ -print0 | xargs -0 bashate -v -iE006"
[testenv:docs]
deps =