Merge "Refactor error logging"
diff --git a/.gitignore b/.gitignore
index f9e2644..798b081 100644
--- a/.gitignore
+++ b/.gitignore
@@ -12,3 +12,4 @@
*.pem
accrc
.stackenv
+.prereqs
diff --git a/README.md b/README.md
index 483d1b0..a738554 100644
--- a/README.md
+++ b/README.md
@@ -85,19 +85,21 @@
# Swift
-Swift is not installed by default, you can enable easily by adding this to your `localrc`:
+Swift is enabled by default and configured with only one replica to avoid being IO/memory intensive on a small VM. When running with only one replica, the account, container and object services run directly in screen. The other services, such as the replicators, updaters and auditor, run in the background.
- enable_service swift
+If you would like to disable Swift, you can add this to your `localrc`:
+
+ disable_service s-proxy s-object s-container s-account
If you want a minimal Swift install with only Swift and Keystone you can have this instead in your `localrc`:
disable_all_services
- enable_service key mysql swift
+ enable_service key mysql s-proxy s-object s-container s-account
-If you use Swift with Keystone, Swift will authenticate against it. You will need to make sure to use the Keystone URL to auth against.
+If you want to test a more realistic Swift cluster with multiple replicas, you can do so by customizing the variable `SWIFT_REPLICAS` in your `localrc` (usually setting it to 3).
+
+# Swift S3
If you are enabling `swift3` in `ENABLED_SERVICES` devstack will install the swift3 middleware emulation. Swift will be configured to act as a S3 endpoint for Keystone so effectively replacing the `nova-objectstore`.
Only Swift proxy server is launched in the screen session all other services are started in background and managed by `swift-init` tool.
-
-By default Swift will configure 3 replicas (and one spare) which could be IO intensive on a small vm, if you only want to do some quick testing of the API you can choose to only have one replica by customizing the variable `SWIFT_REPLICAS` in your `localrc`.
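To tie the README changes above together, here is a minimal `localrc` sketch (illustrative only; the service names and variables are the ones introduced by this change):

    # Swift now runs by default with a single replica; to disable it:
    disable_service s-proxy s-object s-container s-account

    # Minimal Swift + Keystone install with a more realistic replica count:
    disable_all_services
    enable_service key mysql s-proxy s-object s-container s-account
    SWIFT_REPLICAS=3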
diff --git a/exercises/client-args.sh b/exercises/client-args.sh
index 894da74..1e92500 100755
--- a/exercises/client-args.sh
+++ b/exercises/client-args.sh
@@ -136,7 +136,7 @@
# Swift client
# ------------
-if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
+if [[ "$ENABLED_SERVICES" =~ "swift" || "$ENABLED_SERVICES" =~ "s-proxy" ]]; then
if [[ "$SKIP_EXERCISES" =~ "swift" ]] ; then
STATUS_SWIFT="Skipped"
else
diff --git a/exercises/client-env.sh b/exercises/client-env.sh
index c84e84e..dd8e56e 100755
--- a/exercises/client-env.sh
+++ b/exercises/client-env.sh
@@ -144,7 +144,8 @@
# Swift client
# ------------
-if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
+
+if [[ "$ENABLED_SERVICES" =~ "swift" || "$ENABLED_SERVICES" =~ "s-proxy" ]]; then
if [[ "$SKIP_EXERCISES" =~ "swift" ]] ; then
STATUS_SWIFT="Skipped"
else
diff --git a/exercises/swift.sh b/exercises/swift.sh
index 46ac2c5..c4ec3e9 100755
--- a/exercises/swift.sh
+++ b/exercises/swift.sh
@@ -35,7 +35,7 @@
# If swift is not enabled we exit with exitcode 55 which mean
# exercise is skipped.
-is_service_enabled swift || exit 55
+is_service_enabled s-proxy || exit 55
# Container name
CONTAINER=ex-swift
diff --git a/files/apts/nova b/files/apts/nova
index 39b4060..f4615c4 100644
--- a/files/apts/nova
+++ b/files/apts/nova
@@ -8,7 +8,6 @@
python-mysqldb
python-xattr # needed for glance which is needed for nova --- this shouldn't be here
python-lxml # needed for glance which is needed for nova --- this shouldn't be here
-kvm
gawk
iptables
ebtables
@@ -19,6 +18,7 @@
libjs-jquery-tablesorter # Needed for coverage html reports
vlan
curl
+genisoimage # required for config_drive
rabbitmq-server # NOPRIME
qpidd # dist:precise NOPRIME
socat # used by ajaxterm
diff --git a/files/keystone_data.sh b/files/keystone_data.sh
index 4c76c9b..2fc8915 100755
--- a/files/keystone_data.sh
+++ b/files/keystone_data.sh
@@ -52,7 +52,7 @@
# Services
# --------
-if [[ "$ENABLED_SERVICES" =~ "n-api" ]] && [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
+if [[ "$ENABLED_SERVICES" =~ "n-api" ]] && [[ "$ENABLED_SERVICES" =~ "s-proxy" || "$ENABLED_SERVICES" =~ "swift" ]]; then
NOVA_USER=$(keystone user-list | awk "/ nova / { print \$2 }")
# Nova needs ResellerAdmin role to download images when accessing
# swift through the s3 api.
@@ -123,7 +123,8 @@
fi
# Swift
-if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
+
+if [[ "$ENABLED_SERVICES" =~ "swift" || "$ENABLED_SERVICES" =~ "s-proxy" ]]; then
SWIFT_USER=$(get_id keystone user-create \
--name=swift \
--pass="$SERVICE_PASSWORD" \
@@ -190,7 +191,7 @@
fi
# S3
-if [[ "$ENABLED_SERVICES" =~ "n-obj" || "$ENABLED_SERVICES" =~ "swift" ]]; then
+if [[ "$ENABLED_SERVICES" =~ "n-obj" || "$ENABLED_SERVICES" =~ "swift3" ]]; then
if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
S3_SERVICE=$(get_id keystone service-create \
--name=s3 \
diff --git a/files/rpms/nova b/files/rpms/nova
index 568ee7f..7ff926b 100644
--- a/files/rpms/nova
+++ b/files/rpms/nova
@@ -3,6 +3,7 @@
dnsmasq-utils # for dhcp_release
ebtables
gawk
+genisoimage # required for config_drive
iptables
iputils
kpartx
diff --git a/functions b/functions
index 459aedd..9eecfc5 100644
--- a/functions
+++ b/functions
@@ -123,20 +123,32 @@
}
+function _get_package_dir() {
+ local pkg_dir
+ if is_ubuntu; then
+ pkg_dir=$FILES/apts
+ elif is_fedora; then
+ pkg_dir=$FILES/rpms
+ elif is_suse; then
+ pkg_dir=$FILES/rpms-suse
+ else
+ exit_distro_not_supported "list of packages"
+ fi
+ echo "$pkg_dir"
+}
+
# get_packages() collects a list of package names of any type from the
# prerequisite files in ``files/{apts|rpms}``. The list is intended
# to be passed to a package installer such as apt or yum.
#
-# Only packages required for the services in ``ENABLED_SERVICES`` will be
+# Only packages required for the services in the 1st argument will be
# included. Two bits of metadata are recognized in the prerequisite files:
# - ``# NOPRIME`` defers installation to be performed later in stack.sh
# - ``# dist:DISTRO`` or ``dist:DISTRO1,DISTRO2`` limits the selection
# of the package to the distros listed. The distro names are case insensitive.
-#
-# Uses globals ``ENABLED_SERVICES``
-# get_packages dir
function get_packages() {
- local package_dir=$1
+ local services=$1
+ local package_dir=$(_get_package_dir)
local file_to_parse
local service
@@ -147,7 +159,7 @@
if [[ -z "$DISTRO" ]]; then
GetDistro
fi
- for service in general ${ENABLED_SERVICES//,/ }; do
+ for service in general ${services//,/ }; do
# Allow individual services to specify dependencies
if [[ -e ${package_dir}/${service} ]]; then
file_to_parse="${file_to_parse} $service"
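With the refactor above, `get_packages` takes the service list as its first argument and `_get_package_dir` resolves the `files/apts`, `files/rpms` or `files/rpms-suse` directory internally. A short usage sketch (the same pattern the callers later in this diff switch to):

    # Install the prerequisites for everything in ENABLED_SERVICES
    install_package $(get_packages $ENABLED_SERVICES)

    # Or fetch the packages of a single service, e.g. the ryu agent
    install_package $(get_packages "ryu")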
@@ -554,6 +566,9 @@
# **ceilometer** returns true if any service enabled start with **ceilometer**
# **glance** returns true if any service enabled start with **g-**
# **quantum** returns true if any service enabled start with **q-**
+# **swift** returns true if any service enabled start with **s-**
+# For backward compatibility if we have **swift** in ENABLED_SERVICES all the
+# **s-** services will be enabled. This will be deprecated in the future.
#
# Uses global ``ENABLED_SERVICES``
# is_service_enabled service [service ...]
@@ -566,6 +581,8 @@
[[ ${service} == "ceilometer" && ${ENABLED_SERVICES} =~ "ceilometer-" ]] && return 0
[[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && return 0
[[ ${service} == "quantum" && ${ENABLED_SERVICES} =~ "q-" ]] && return 0
+ [[ ${service} == "swift" && ${ENABLED_SERVICES} =~ "s-" ]] && return 0
+ [[ ${service} == s-* && ${ENABLED_SERVICES} =~ "swift" ]] && return 0
done
return 1
}
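A quick sketch of the new matching rules, assuming the `stackrc` default of individual `s-*` services:

    # Any enabled s-* service satisfies a check for the "swift" alias
    is_service_enabled swift && echo "swift is considered enabled"

    # Legacy localrc files that still enable "swift" keep working (deprecated)
    ENABLED_SERVICES+=,swift
    is_service_enabled s-proxy && echo "s-* services are implied by swift"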
@@ -1019,6 +1036,7 @@
# No backends registered means this is likely called from ``localrc``
# This is now deprecated usage
DATABASE_TYPE=$1
+ DEPRECATED_TEXT="$DEPRECATED_TEXT\nThe database backend needs to be properly set in ENABLED_SERVICES; use_database is deprecated in localrc\n"
else
# This should no longer get called...here for posterity
use_exclusive_service DATABASE_BACKENDS DATABASE_TYPE $1
diff --git a/lib/ceilometer b/lib/ceilometer
index e890ff9..8772867 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -64,13 +64,7 @@
[ ! -d $CEILOMETER_API_LOG_DIR ] && sudo mkdir -m 755 -p $CEILOMETER_API_LOG_DIR
sudo chown $USER $CEILOMETER_API_LOG_DIR
- if is_service_enabled rabbit ; then
- iniset $CEILOMETER_CONF DEFAULT rpc_backend 'ceilometer.openstack.common.rpc.impl_kombu'
- iniset $CEILOMETER_CONF DEFAULT rabbit_host $RABBIT_HOST
- iniset $CEILOMETER_CONF DEFAULT rabbit_password $RABBIT_PASSWORD
- elif is_service_enabled qpid ; then
- iniset $CEILOMETER_CONF DEFAULT rpc_backend 'ceilometer.openstack.common.rpc.impl_qpid'
- fi
+ iniset_rpc_backend ceilometer $CEILOMETER_CONF DEFAULT
iniset $CEILOMETER_CONF DEFAULT notification_topics 'notifications,glance_notifications'
iniset $CEILOMETER_CONF DEFAULT verbose True
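`iniset_rpc_backend` (see the `lib/rpc_backend` hunk later in this diff) writes the `rpc_backend` setting plus the rabbit or qpid credentials into the given config section, so for the rabbit case the call above is roughly equivalent to the removed block:

    # Approximate expansion of: iniset_rpc_backend ceilometer $CEILOMETER_CONF DEFAULT
    iniset $CEILOMETER_CONF DEFAULT rpc_backend 'ceilometer.openstack.common.rpc.impl_kombu'
    iniset $CEILOMETER_CONF DEFAULT rabbit_host $RABBIT_HOST
    iniset $CEILOMETER_CONF DEFAULT rabbit_password $RABBIT_PASSWORD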
diff --git a/lib/cinder b/lib/cinder
index c8291a2..f487c8e 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -50,8 +50,13 @@
CINDER_BIN_DIR=$(get_python_exec_prefix)
fi
-# Name of the lvm volume group to use/create for iscsi volumes
+# Support for multi lvm backend configuration (default is no support)
+CINDER_MULTI_LVM_BACKEND=$(trueorfalse False $CINDER_MULTI_LVM_BACKEND)
+
+# Name of the lvm volume groups to use/create for iscsi volumes
+# VOLUME_GROUP2 is used only if CINDER_MULTI_LVM_BACKEND = True
VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes}
+VOLUME_GROUP2=${VOLUME_GROUP2:-stack-volumes2}
VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-}
# _clean_volume_group removes all cinder volumes from the specified volume group
@@ -106,6 +111,9 @@
# Campsite rule: leave behind a volume group at least as clean as we found it
_clean_volume_group $VOLUME_GROUP $VOLUME_NAME_PREFIX
+ if [ "$CINDER_MULTI_LVM_BACKEND" = "True" ]; then
+ _clean_volume_group $VOLUME_GROUP2 $VOLUME_NAME_PREFIX
+ fi
}
# configure_cinder() - Set config files, create data dirs, etc
@@ -164,8 +172,18 @@
cp $CINDER_DIR/etc/cinder/cinder.conf.sample $CINDER_CONF
iniset $CINDER_CONF DEFAULT auth_strategy keystone
iniset $CINDER_CONF DEFAULT verbose True
- iniset $CINDER_CONF DEFAULT volume_group $VOLUME_GROUP
- iniset $CINDER_CONF DEFAULT volume_name_template ${VOLUME_NAME_PREFIX}%s
+ if [ "$CINDER_MULTI_LVM_BACKEND" = "True" ]; then
+ iniset $CINDER_CONF DEFAULT enabled_backends lvmdriver-1,lvmdriver-2
+ iniset $CINDER_CONF lvmdriver-1 volume_group $VOLUME_GROUP
+ iniset $CINDER_CONF lvmdriver-1 volume_driver cinder.volume.drivers.lvm.LVMISCSIDriver
+ iniset $CINDER_CONF lvmdriver-1 volume_backend_name LVM_iSCSI
+ iniset $CINDER_CONF lvmdriver-2 volume_group $VOLUME_GROUP2
+ iniset $CINDER_CONF lvmdriver-2 volume_driver cinder.volume.drivers.lvm.LVMISCSIDriver
+ iniset $CINDER_CONF lvmdriver-2 volume_backend_name LVM_iSCSI
+ else
+ iniset $CINDER_CONF DEFAULT volume_group $VOLUME_GROUP
+ iniset $CINDER_CONF DEFAULT volume_name_template ${VOLUME_NAME_PREFIX}%s
+ fi
iniset $CINDER_CONF DEFAULT iscsi_helper tgtadm
local dburl
database_connection_url dburl cinder
@@ -263,12 +281,14 @@
}
create_cinder_volume_group() {
- # Configure a default volume group called '`stack-volumes`' for the volume
- # service if it does not yet exist. If you don't wish to use a file backed
- # volume group, create your own volume group called ``stack-volumes`` before
- # invoking ``stack.sh``.
+ # Depending on the value of CINDER_MULTI_LVM_BACKEND, configure one or two default
+ # volume groups called ``stack-volumes`` (and ``stack-volumes2``) for the volume
+ # service if they do not yet exist. If you don't wish to use a
+ # file backed volume group, create your own volume group called ``stack-volumes``
+ # (and ``stack-volumes2``) before invoking ``stack.sh``.
#
- # By default, the backing file is 5G in size, and is stored in ``/opt/stack/data``.
+ # By default, the two backing files are 5G in size, and are stored in
+ # ``/opt/stack/data``.
if ! sudo vgs $VOLUME_GROUP; then
VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DATA_DIR/${VOLUME_GROUP}-backing-file}
@@ -283,6 +303,23 @@
sudo vgcreate $VOLUME_GROUP $DEV
fi
fi
+ if [ "$CINDER_MULTI_LVM_BACKEND" = "True" ]; then
+ # Set up the second volume group if CINDER_MULTI_LVM_BACKEND is enabled
+
+ if ! sudo vgs $VOLUME_GROUP2; then
+ VOLUME_BACKING_FILE2=${VOLUME_BACKING_FILE2:-$DATA_DIR/${VOLUME_GROUP2}-backing-file}
+
+ # Only create if the file doesn't already exist
+ [[ -f $VOLUME_BACKING_FILE2 ]] || truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE2
+
+ DEV=`sudo losetup -f --show $VOLUME_BACKING_FILE2`
+
+ # Only create if the loopback device doesn't contain $VOLUME_GROUP2
+ if ! sudo vgs $VOLUME_GROUP2; then
+ sudo vgcreate $VOLUME_GROUP2 $DEV
+ fi
+ fi
+ fi
mkdir -p $CINDER_STATE_PATH/volumes
}
@@ -314,6 +351,9 @@
sudo tgtadm --op show --mode target | grep $VOLUME_NAME_PREFIX | grep Target | cut -f3 -d ' ' | sudo xargs -n1 tgt-admin --delete || true
# Start with a clean volume group
_clean_volume_group $VOLUME_GROUP $VOLUME_NAME_PREFIX
+ if [ "$CINDER_MULTI_LVM_BACKEND" = "True" ]; then
+ _clean_volume_group $VOLUME_GROUP2 $VOLUME_NAME_PREFIX
+ fi
fi
fi
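To exercise the multi-backend support, enabling it from `localrc` is enough; the `cinder type-*` commands below are an illustrative sketch, not part of this change, and note that both lvmdriver sections advertise the same `LVM_iSCSI` backend name:

    # localrc: create and use stack-volumes and stack-volumes2
    CINDER_MULTI_LVM_BACKEND=True

    # After stack.sh, a volume type can be tied to the LVM backends:
    cinder type-create lvm
    cinder type-key lvm set volume_backend_name=LVM_iSCSI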
diff --git a/lib/database b/lib/database
index 4fba7c2..ebab333 100644
--- a/lib/database
+++ b/lib/database
@@ -29,20 +29,6 @@
# Sourcing the database libs sets DATABASE_BACKENDS with the available list
for f in $TOP_DIR/lib/databases/*; do source $f; done
-# If ``DATABASE_TYPE`` is defined here it's because the user has it in ``localrc``
-# or has called ``use_database``. Both are deprecated so let's fix it up for now.
-if [[ -n $DATABASE_TYPE ]]; then
- # This is now deprecated usage, set up a warning and try to be
- # somewhat backward compatible for now.
- DEPRECATED_TEXT="$DEPRECATED_TEXT\nThe database backend needs to be properly set in ENABLED_SERVICES; DATABASE_TYPE or use_database is deprecated localrc\n"
- if [[ ! $ENABLED_SERVICES =~ $DATABASE_TYPE ]]; then
- # It's not in enabled services but user has attempted to select a
- # database, so just add it now
- ENABLED_SERVICES+=,$DATABASE_TYPE
- unset DATABASE_TYPE
- fi
-fi
-
# ``DATABASE_BACKENDS`` now contains a list of the supported databases
# Look in ``ENABLED_SERVICES`` to see if one has been selected
for db in $DATABASE_BACKENDS; do
diff --git a/lib/glance b/lib/glance
index cbe47fc..a6b698f 100644
--- a/lib/glance
+++ b/lib/glance
@@ -114,9 +114,8 @@
iniset $GLANCE_API_CONF DEFAULT notifier_strategy qpid
elif [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then
iniset $GLANCE_API_CONF DEFAULT notifier_strategy rabbit
- iniset $GLANCE_API_CONF DEFAULT rabbit_host $RABBIT_HOST
- iniset $GLANCE_API_CONF DEFAULT rabbit_password $RABBIT_PASSWORD
fi
+ iniset_rpc_backend glance $GLANCE_API_CONF DEFAULT
iniset $GLANCE_API_CONF keystone_authtoken signing_dir $GLANCE_AUTH_CACHE_DIR/api
cp -p $GLANCE_DIR/etc/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI
diff --git a/lib/keystone b/lib/keystone
index 2580351..eea2c4d 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -146,7 +146,7 @@
cp -p $FILES/default_catalog.templates $KEYSTONE_CATALOG
# Add swift endpoints to service catalog if swift is enabled
- if is_service_enabled swift; then
+ if is_service_enabled s-proxy; then
echo "catalog.RegionOne.object_store.publicURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG
echo "catalog.RegionOne.object_store.adminURL = http://%SERVICE_HOST%:8080/" >> $KEYSTONE_CATALOG
echo "catalog.RegionOne.object_store.internalURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG
diff --git a/lib/quantum b/lib/quantum
index 9068f62..3e41d8d 100644
--- a/lib/quantum
+++ b/lib/quantum
@@ -440,7 +440,6 @@
iniset $Q_DHCP_CONF_FILE DEFAULT verbose True
iniset $Q_DHCP_CONF_FILE DEFAULT debug True
iniset $Q_DHCP_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE
- iniset $Q_DHCP_CONF_FILE DEFAULT state_path $DATA_DIR/quantum
iniset $Q_DHCP_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND"
_quantum_setup_keystone $Q_DHCP_CONF_FILE DEFAULT set_auth_url
@@ -462,7 +461,6 @@
iniset $Q_L3_CONF_FILE DEFAULT verbose True
iniset $Q_L3_CONF_FILE DEFAULT debug True
iniset $Q_L3_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE
- iniset $Q_L3_CONF_FILE DEFAULT state_path $DATA_DIR/quantum
iniset $Q_L3_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND"
_quantum_setup_keystone $Q_L3_CONF_FILE DEFAULT set_auth_url
@@ -479,7 +477,6 @@
iniset $Q_META_CONF_FILE DEFAULT verbose True
iniset $Q_META_CONF_FILE DEFAULT debug True
- iniset $Q_META_CONF_FILE DEFAULT state_path $DATA_DIR/quantum
iniset $Q_META_CONF_FILE DEFAULT nova_metadata_ip $Q_META_DATA_IP
iniset $Q_META_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND"
@@ -517,6 +514,7 @@
iniset $QUANTUM_CONF DEFAULT verbose True
iniset $QUANTUM_CONF DEFAULT debug True
+ iniset $QUANTUM_CONF DEFAULT state_path $DATA_DIR/quantum
iniset $QUANTUM_CONF DEFAULT allow_overlapping_ips $Q_ALLOW_OVERLAPPING_IP
iniset $QUANTUM_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY
diff --git a/lib/quantum_plugins/linuxbridge b/lib/quantum_plugins/linuxbridge
index 6d5d4e0..0756de4 100644
--- a/lib/quantum_plugins/linuxbridge
+++ b/lib/quantum_plugins/linuxbridge
@@ -30,11 +30,12 @@
}
function quantum_plugin_configure_dhcp_agent() {
- :
+ iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager quantum.agent.dhcp_agent.DhcpAgentWithStateReport
}
function quantum_plugin_configure_l3_agent() {
iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge
+ iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager quantum.agent.l3_agent.L3NATAgentWithStateReport
}
function quantum_plugin_configure_plugin_agent() {
diff --git a/lib/quantum_plugins/openvswitch b/lib/quantum_plugins/openvswitch
index 288fa69..a57336e 100644
--- a/lib/quantum_plugins/openvswitch
+++ b/lib/quantum_plugins/openvswitch
@@ -36,6 +36,7 @@
function quantum_plugin_configure_l3_agent() {
_quantum_ovs_base_configure_l3_agent
+ iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager quantum.agent.l3_agent.L3NATAgentWithStateReport
}
function quantum_plugin_configure_plugin_agent() {
diff --git a/lib/quantum_plugins/ovs_base b/lib/quantum_plugins/ovs_base
index 8563674..915129e 100644
--- a/lib/quantum_plugins/ovs_base
+++ b/lib/quantum_plugins/ovs_base
@@ -24,10 +24,13 @@
if is_ubuntu; then
kernel_version=`cat /proc/version | cut -d " " -f3`
install_package make fakeroot dkms openvswitch-switch openvswitch-datapath-dkms linux-headers-$kernel_version
- else
- ### FIXME(dtroyer): Find RPMs for OpenVSwitch
+ elif is_fedora; then
+ install_package openvswitch
+ # Ensure that the service is started
+ restart_service openvswitch
+ elif is_suse; then
+ ### FIXME: Find RPMs for OpenVSwitch
echo "OpenVSwitch packages need to be located"
- # Fedora does not started OVS by default
restart_service openvswitch
fi
}
diff --git a/lib/quantum_plugins/ryu b/lib/quantum_plugins/ryu
index 2dfd4f7..d1d7382 100644
--- a/lib/quantum_plugins/ryu
+++ b/lib/quantum_plugins/ryu
@@ -17,7 +17,9 @@
_quantum_ovs_base_install_agent_packages
# quantum_ryu_agent requires ryu module
+ install_package $(get_packages "ryu")
install_ryu
+ configure_ryu
}
function quantum_plugin_configure_common() {
diff --git a/lib/quantum_thirdparty/ryu b/lib/quantum_thirdparty/ryu
index 7a01923..f1e9e7c 100644
--- a/lib/quantum_thirdparty/ryu
+++ b/lib/quantum_thirdparty/ryu
@@ -17,24 +17,15 @@
RYU_OFP_PORT=${RYU_OFP_PORT:-6633}
# Ryu Applications
RYU_APPS=${RYU_APPS:-ryu.app.simple_isolation,ryu.app.rest}
-# Ryu configuration
-RYU_CONF_CONTENTS=${RYU_CONF_CONTENTS:-"
---app_lists=$RYU_APPS
---wsapi_host=$RYU_API_HOST
---wsapi_port=$RYU_API_PORT
---ofp_listen_host=$RYU_OFP_HOST
---ofp_tcp_listen_port=$RYU_OFP_PORT
---quantum_url=http://$Q_HOST:$Q_PORT
---quantum_admin_username=$Q_ADMIN_USERNAME
---quantum_admin_password=$SERVICE_PASSWORD
---quantum_admin_tenant_name=$SERVICE_TENANT_NAME
---quantum_admin_auth_url=$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0
---quantum_auth_strategy=$Q_AUTH_STRATEGY
---quantum_controller_addr=tcp:$RYU_OFP_HOST:$RYU_OFP_PORT
-"}
+# configure_ryu can be called multiple times as quantum_plugins/ryu may call
+# this function for quantum-ryu-agent
+_RYU_CONFIGURED=${_RYU_CONFIGURED:-False}
function configure_ryu() {
- setup_develop $RYU_DIR
+ if [[ "$_RYU_CONFIGURED" == "False" ]]; then
+ setup_develop $RYU_DIR
+ _RYU_CONFIGURED=True
+ fi
}
function init_ryu() {
@@ -46,6 +37,21 @@
RYU_CONF=$RYU_CONF_DIR/ryu.conf
sudo rm -rf $RYU_CONF
+ # Ryu configuration
+ RYU_CONF_CONTENTS=${RYU_CONF_CONTENTS:-"[DEFAULT]
+app_lists=$RYU_APPS
+wsapi_host=$RYU_API_HOST
+wsapi_port=$RYU_API_PORT
+ofp_listen_host=$RYU_OFP_HOST
+ofp_tcp_listen_port=$RYU_OFP_PORT
+quantum_url=http://$Q_HOST:$Q_PORT
+quantum_admin_username=$Q_ADMIN_USERNAME
+quantum_admin_password=$SERVICE_PASSWORD
+quantum_admin_tenant_name=$SERVICE_TENANT_NAME
+quantum_admin_auth_url=$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0
+quantum_auth_strategy=$Q_AUTH_STRATEGY
+quantum_controller_addr=tcp:$RYU_OFP_HOST:$RYU_OFP_PORT
+"}
echo "${RYU_CONF_CONTENTS}" > $RYU_CONF
}
@@ -62,7 +68,7 @@
}
function start_ryu() {
- screen_it ryu "cd $RYU_DIR && $RYU_DIR/bin/ryu-manager --flagfile $RYU_CONF"
+ screen_it ryu "cd $RYU_DIR && $RYU_DIR/bin/ryu-manager --config-file $RYU_CONF"
}
function stop_ryu() {
diff --git a/lib/rpc_backend b/lib/rpc_backend
index 7ea71ee..02614ea 100644
--- a/lib/rpc_backend
+++ b/lib/rpc_backend
@@ -57,6 +57,8 @@
install_package qpid-cpp-server-daemon
elif is_ubuntu; then
install_package qpidd
+ sudo sed -i '/PLAIN/!s/mech_list: /mech_list: PLAIN /' /etc/sasl2/qpidd.conf
+ sudo chmod o+r /etc/qpid/qpidd.sasldb
else
exit_distro_not_supported "qpid installation"
fi
@@ -99,6 +101,11 @@
iniset $file $section rpc_backend ${package}.openstack.common.rpc.impl_zmq
elif is_service_enabled qpid; then
iniset $file $section rpc_backend ${package}.openstack.common.rpc.impl_qpid
+ if is_ubuntu; then
+ QPID_PASSWORD=`sudo strings /etc/qpid/qpidd.sasldb | grep -B1 admin | head -1`
+ iniset $file $section qpid_password $QPID_PASSWORD
+ iniset $file $section qpid_username admin
+ fi
elif is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then
iniset $file $section rpc_backend ${package}.openstack.common.rpc.impl_kombu
iniset $file $section rabbit_host $RABBIT_HOST
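The two Ubuntu-specific qpid lines above enable PLAIN authentication and make the packaged SASL database readable; a hedged sketch of their effect (the exact `mech_list` contents depend on the qpidd package):

    # e.g. "mech_list: DIGEST-MD5" becomes "mech_list: PLAIN DIGEST-MD5"
    sudo sed -i '/PLAIN/!s/mech_list: /mech_list: PLAIN /' /etc/sasl2/qpidd.conf

    # Recover the pre-seeded admin password so qpid_username/qpid_password can be set
    QPID_PASSWORD=`sudo strings /etc/qpid/qpidd.sasldb | grep -B1 admin | head -1`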
diff --git a/lib/swift b/lib/swift
index 5ba7e56..326c6f3 100644
--- a/lib/swift
+++ b/lib/swift
@@ -56,10 +56,11 @@
SWIFT_PARTITION_POWER_SIZE=${SWIFT_PARTITION_POWER_SIZE:-9}
# Set ``SWIFT_REPLICAS`` to configure how many replicas are to be
-# configured for your Swift cluster. By default the three replicas would need a
-# bit of IO and Memory on a VM you may want to lower that to 1 if you want to do
-# only some quick testing.
-SWIFT_REPLICAS=${SWIFT_REPLICAS:-3}
+# configured for your Swift cluster. By default we configure only
+# one replica since this is much less CPU and memory intensive. If
+# you are planning to test swift replication you may want to set
+# this to 3.
+SWIFT_REPLICAS=${SWIFT_REPLICAS:-1}
SWIFT_REPLICAS_SEQ=$(seq ${SWIFT_REPLICAS})
# Set ``OBJECT_PORT_BASE``, ``CONTAINER_PORT_BASE``, ``ACCOUNT_PORT_BASE``
@@ -359,13 +360,26 @@
sudo systemctl start xinetd.service
fi
- # First spawn all the swift services then kill the
- # proxy service so we can run it in foreground in screen.
- # ``swift-init ... {stop|restart}`` exits with '1' if no servers are running,
- # ignore it just in case
+ # By default, with only one replica, we launch the proxy, container,
+ # account and object servers in the foreground in screen and the other
+ # services in the background. If SWIFT_REPLICAS is set to something
+ # greater than one we first spawn all the swift services, then kill the
+ # proxy service so we can run it in the foreground in screen.
+ # ``swift-init ... {stop|restart}`` exits with '1' if no servers are
+ # running; ignore it just in case.
swift-init --run-dir=${SWIFT_DATA_DIR}/run all restart || true
- swift-init --run-dir=${SWIFT_DATA_DIR}/run proxy stop || true
- screen_it swift "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONFIG_DIR}/proxy-server.conf -v"
+ if [[ ${SWIFT_REPLICAS} == 1 ]];then
+ todo="object container account"
+ fi
+ for type in proxy ${todo};do
+ swift-init --run-dir=${SWIFT_DATA_DIR}/run ${type} stop || true
+ done
+ screen_it s-proxy "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONFIG_DIR}/proxy-server.conf -v"
+ if [[ ${SWIFT_REPLICAS} == 1 ]];then
+ for type in object container account;do
+ screen_it s-${type} "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-${type}-server ${SWIFT_CONFIG_DIR}/${type}-server/1.conf -v"
+ done
+ fi
}
# stop_swift() - Stop running processes (non-screen)
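With the default single replica, the proxy and the three storage servers each get their own screen window named after the new `s-*` services, while the replicators, updaters and auditors stay under `swift-init`. A rough way to verify, assuming the default devstack screen session name `stack`:

    # Look for the s-proxy, s-object, s-container and s-account windows
    screen -x stack

    # Background daemons managed by swift-init
    swift-init --run-dir=${SWIFT_DATA_DIR}/run all status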
diff --git a/lib/tempest b/lib/tempest
index d17b32d..9cc19ae 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -238,6 +238,9 @@
iniset $TEMPEST_CONF "compute-admin" password "$password" # DEPRECATED
# network
+ if is_service_enabled quantum; then
+ iniset $TEMPEST_CONF network quantum_available "True"
+ fi
iniset $TEMPEST_CONF network api_version 2.0
iniset $TEMPEST_CONF network tenant_networks_reachable "$tenant_networks_reachable"
iniset $TEMPEST_CONF network public_network_id "$public_network_id"
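For reference, the `iniset` calls above produce a `[network]` section in `tempest.conf` along these lines (values are placeholders):

    [network]
    quantum_available = True
    api_version = 2.0
    tenant_networks_reachable = false
    public_network_id = <uuid-of-the-external-network>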
diff --git a/stack.sh b/stack.sh
index a1af00b..d9fbb94 100755
--- a/stack.sh
+++ b/stack.sh
@@ -427,7 +427,7 @@
read_password RABBIT_PASSWORD "ENTER A PASSWORD TO USE FOR RABBIT."
fi
-if is_service_enabled swift; then
+if is_service_enabled s-proxy; then
# If we are using swift3, we can default the s3 port to swift instead
# of nova-objectstore
if is_service_enabled swift3;then
@@ -664,12 +664,12 @@
git_clone $OPENSTACKCLIENT_REPO $OPENSTACKCLIENT_DIR $OPENSTACKCLIENT_BRANCH
# glance, swift middleware and nova api needs keystone middleware
-if is_service_enabled key g-api n-api swift; then
+if is_service_enabled key g-api n-api s-proxy; then
# unified auth system (manages accounts/tokens)
install_keystone
fi
-if is_service_enabled swift; then
+if is_service_enabled s-proxy; then
install_swiftclient
install_swift
if is_service_enabled swift3; then
@@ -726,10 +726,10 @@
configure_keystoneclient
configure_novaclient
setup_develop $OPENSTACKCLIENT_DIR
-if is_service_enabled key g-api n-api swift; then
+if is_service_enabled key g-api n-api s-proxy; then
configure_keystone
fi
-if is_service_enabled swift; then
+if is_service_enabled s-proxy; then
configure_swift
configure_swiftclient
if is_service_enabled swift3; then
@@ -913,7 +913,7 @@
init_glance
# Store the images in swift if enabled.
- if is_service_enabled swift; then
+ if is_service_enabled s-proxy; then
iniset $GLANCE_API_CONF DEFAULT default_store swift
iniset $GLANCE_API_CONF DEFAULT swift_store_auth_address $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/
iniset $GLANCE_API_CONF DEFAULT swift_store_user $SERVICE_TENANT_NAME:glance
@@ -972,7 +972,7 @@
# Storage Service
# ---------------
-if is_service_enabled swift; then
+if is_service_enabled s-proxy; then
echo_summary "Configuring Swift"
init_swift
fi
@@ -1064,7 +1064,7 @@
elif [ "$VIRT_DRIVER" = 'openvz' ]; then
echo_summary "Using OpenVZ virtualization driver"
- iniset $NOVA_CONF DEFAULT compute_driver "openvz.driver.OpenVzDriver"
+ iniset $NOVA_CONF DEFAULT compute_driver "openvz.OpenVzDriver"
iniset $NOVA_CONF DEFAULT connection_type "openvz"
LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"}
iniset $NOVA_CONF DEFAULT firewall_driver "$LIBVIRT_FIREWALL_DRIVER"
@@ -1119,7 +1119,7 @@
# Only run the services specified in ``ENABLED_SERVICES``
# Launch Swift Services
-if is_service_enabled swift; then
+if is_service_enabled s-proxy; then
echo_summary "Starting Swift"
start_swift
fi
diff --git a/stackrc b/stackrc
index 008bc9c..f2c279f 100644
--- a/stackrc
+++ b/stackrc
@@ -21,7 +21,7 @@
# ``disable_service`` functions in ``localrc``.
# For example, to enable Swift add this to ``localrc``:
# enable_service swift
-ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,cinder,c-sch,c-api,c-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,rabbit,tempest,mysql
+ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,cinder,c-sch,c-api,c-vol,n-sch,n-novnc,n-xvnc,n-cauth,s-proxy,s-account,s-object,s-container,horizon,rabbit,tempest,mysql
# Set the default Nova APIs to enable
NOVA_ENABLED_APIS=ec2,osapi_compute,metadata
diff --git a/tools/info.sh b/tools/info.sh
index ef1f338..14ab8f6 100755
--- a/tools/info.sh
+++ b/tools/info.sh
@@ -88,17 +88,7 @@
# - We are going to check packages only for the services needed.
# - We are parsing the packages files and detecting metadatas.
-if is_ubuntu; then
- PKG_DIR=$FILES/apts
-elif is_fedora; then
- PKG_DIR=$FILES/rpms
-elif is_suse; then
- PKG_DIR=$FILES/rpms-suse
-else
- exit_distro_not_supported "list of packages"
-fi
-
-for p in $(get_packages $PKG_DIR); do
+for p in $(get_packages $ENABLED_SERVICES); do
if [[ "$os_PACKAGE" = "deb" ]]; then
ver=$(dpkg -s $p 2>/dev/null | grep '^Version: ' | cut -d' ' -f2)
elif [[ "$os_PACKAGE" = "rpm" ]]; then
diff --git a/tools/install_prereqs.sh b/tools/install_prereqs.sh
index 4d151db..7c4386f 100755
--- a/tools/install_prereqs.sh
+++ b/tools/install_prereqs.sh
@@ -54,15 +54,7 @@
# ================
# Install package requirements
-if is_ubuntu; then
- install_package $(get_packages $FILES/apts)
-elif is_fedora; then
- install_package $(get_packages $FILES/rpms)
-elif is_suse; then
- install_package $(get_packages $FILES/rpms-suse)
-else
- exit_distro_not_supported "list of packages"
-fi
+install_package $(get_packages $ENABLED_SERVICES)
if [[ -n "$SYSLOG" && "$SYSLOG" != "False" ]]; then
if is_ubuntu || is_fedora; then
diff --git a/unstack.sh b/unstack.sh
index a086d5c..3ac2985 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -63,7 +63,7 @@
fi
# Swift runs daemons
-if is_service_enabled swift; then
+if is_service_enabled s-proxy; then
stop_swift
cleanup_swift
fi