Merge "Replace screen_it() with run_process() throughout"
diff --git a/MAINTAINERS.rst b/MAINTAINERS.rst
index 1e915c7..d754c08 100644
--- a/MAINTAINERS.rst
+++ b/MAINTAINERS.rst
@@ -50,6 +50,18 @@
 
 * Kyle Mestery <kmestery@cisco.com>
 
+OpenFlow Agent (ofagent)
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+* YAMAMOTO Takashi <yamamoto@valinux.co.jp>
+* Fumihiko Kakuma <kakuma@valinux.co.jp>
+
+Ryu
+~~~
+
+* YAMAMOTO Takashi <yamamoto@valinux.co.jp>
+* Fumihiko Kakuma <kakuma@valinux.co.jp>
+
 Sahara
 ~~~~~~
 
diff --git a/lib/ceph b/lib/ceph
index 32a4760..8464042 100644
--- a/lib/ceph
+++ b/lib/ceph
@@ -36,7 +36,7 @@
 # Ceph data. Set ``CEPH_LOOPBACK_DISK_SIZE`` to the disk size in
-# kilobytes.
-# Default is 1 gigabyte.
-CEPH_LOOPBACK_DISK_SIZE_DEFAULT=2G
+# kilobytes, or with a unit suffix such as ``4G``.
+# Default is 4 gigabytes.
+CEPH_LOOPBACK_DISK_SIZE_DEFAULT=4G
 CEPH_LOOPBACK_DISK_SIZE=${CEPH_LOOPBACK_DISK_SIZE:-$CEPH_LOOPBACK_DISK_SIZE_DEFAULT}
 
 # Common
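
Note: the loopback size only matters when DevStack backs Ceph with a file
rather than a real block device. A rough sketch of what a 4G loopback disk
amounts to (illustrative only; the backing-file path is made up and the real
helper code in lib/ceph differs):

    truncate -s "$CEPH_LOOPBACK_DISK_SIZE" /tmp/ceph-backing.img  # sparse file
    losetup -f --show /tmp/ceph-backing.img                       # attach and print the loop device
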
diff --git a/lib/cinder b/lib/cinder
index 6032a9a..5c487a2 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -307,6 +307,9 @@
     iniset $CINDER_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
     iniset $CINDER_CONF keystone_authtoken admin_password $SERVICE_PASSWORD
 
+    if [ -n "$API_WORKERS" ]; then
+        iniset $CINDER_CONF DEFAULT osapi_volume_workers "$API_WORKERS"
+    fi
 }
 
 # create_cinder_accounts() - Set up common required cinder accounts
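
Note: the [ -n "$API_WORKERS" ] guard writes osapi_volume_workers only when
the operator has set API_WORKERS; otherwise Cinder keeps its own default
(typically one worker per CPU). A minimal sketch of the test's semantics:

    unset API_WORKERS
    [ -n "$API_WORKERS" ] && echo "would set workers"      # prints nothing
    API_WORKERS=4
    [ -n "$API_WORKERS" ] && echo "workers=$API_WORKERS"   # workers=4
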
diff --git a/lib/glance b/lib/glance
index 1ed0d31..d6d12ca 100644
--- a/lib/glance
+++ b/lib/glance
@@ -28,6 +28,7 @@
 
 # Set up default directories
 GLANCE_DIR=$DEST/glance
+GLANCE_STORE_DIR=$DEST/glance_store
 GLANCECLIENT_DIR=$DEST/python-glanceclient
 GLANCE_CACHE_DIR=${GLANCE_CACHE_DIR:=$DATA_DIR/glance/cache}
 GLANCE_IMAGE_DIR=${GLANCE_IMAGE_DIR:=$DATA_DIR/glance/images}
@@ -137,6 +138,10 @@
     # sections.
     iniset $GLANCE_API_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/
 
+    if [ -n "$API_WORKERS" ]; then
+        iniset $GLANCE_API_CONF DEFAULT workers "$API_WORKERS"
+    fi
+
     # Store the images in swift if enabled.
     if is_service_enabled s-proxy; then
         iniset $GLANCE_API_CONF DEFAULT default_store swift
@@ -263,6 +268,11 @@
 
 # install_glance() - Collect source and prepare
 function install_glance {
+    # Install glance_store from git to make sure we're testing
+    # the latest code.
+    git_clone $GLANCE_STORE_REPO $GLANCE_STORE_DIR $GLANCE_STORE_BRANCH
+    setup_develop $GLANCE_STORE_DIR
+
     git_clone $GLANCE_REPO $GLANCE_DIR $GLANCE_BRANCH
     setup_develop $GLANCE_DIR
 }
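
Note: roughly what git_clone and setup_develop boil down to here (a
simplified sketch, not the real implementations in DevStack's
functions-common), so the service exercises the in-development library
instead of the released package:

    git clone -b "$GLANCE_STORE_BRANCH" "$GLANCE_STORE_REPO" "$GLANCE_STORE_DIR"
    pip install -e "$GLANCE_STORE_DIR"   # editable install; local edits take effect immediately
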
diff --git a/lib/nova b/lib/nova
index 1c1ab8c..14d07b0 100644
--- a/lib/nova
+++ b/lib/nova
@@ -517,6 +517,12 @@
     iniset $NOVA_CONF DEFAULT ec2_dmz_host "$EC2_DMZ_HOST"
     iniset_rpc_backend nova $NOVA_CONF DEFAULT
     iniset $NOVA_CONF glance api_servers "$GLANCE_HOSTPORT"
+
+    if [ -n "$API_WORKERS" ]; then
+        iniset $NOVA_CONF DEFAULT osapi_compute_workers "$API_WORKERS"
+        iniset $NOVA_CONF DEFAULT ec2_workers "$API_WORKERS"
+        iniset $NOVA_CONF DEFAULT metadata_workers "$API_WORKERS"
+    fi
 }
 
 function init_nova_cells {
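
Note: a single API_WORKERS value caps all three Nova API flavors (compute,
EC2, metadata) at once. An illustrative sanity check after restarting the
services (process names assumed, counts approximate):

    pgrep -af nova-api   # expect the worker process count to track API_WORKERS
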
diff --git a/lib/swift b/lib/swift
index 13f4307..50e2482 100644
--- a/lib/swift
+++ b/lib/swift
@@ -426,7 +426,7 @@
     for node_number in ${SWIFT_REPLICAS_SEQ}; do
         local swift_node_config=${SWIFT_CONF_DIR}/object-server/${node_number}.conf
         cp ${SWIFT_DIR}/etc/object-server.conf-sample ${swift_node_config}
-        generate_swift_config ${swift_node_config} ${node_number} $[OBJECT_PORT_BASE + 10 * (node_number - 1)] object
+        generate_swift_config ${swift_node_config} ${node_number} $(( OBJECT_PORT_BASE + 10 * (node_number - 1) )) object
         iniset ${swift_node_config} filter:recon recon_cache_path  ${SWIFT_DATA_DIR}/cache
         # Using a sed and not iniset/iniuncomment because we want to do a global
         # modification and make sure it works for new sections.
@@ -434,14 +434,14 @@
 
         swift_node_config=${SWIFT_CONF_DIR}/container-server/${node_number}.conf
         cp ${SWIFT_DIR}/etc/container-server.conf-sample ${swift_node_config}
-        generate_swift_config ${swift_node_config} ${node_number} $[CONTAINER_PORT_BASE + 10 * (node_number - 1)] container
+        generate_swift_config ${swift_node_config} ${node_number} $(( CONTAINER_PORT_BASE + 10 * (node_number - 1) )) container
         iniuncomment ${swift_node_config} app:container-server allow_versions
         iniset ${swift_node_config} app:container-server allow_versions  "true"
         sed -i -e "s,#[ ]*recon_cache_path .*,recon_cache_path = ${SWIFT_DATA_DIR}/cache," ${swift_node_config}
 
         swift_node_config=${SWIFT_CONF_DIR}/account-server/${node_number}.conf
         cp ${SWIFT_DIR}/etc/account-server.conf-sample ${swift_node_config}
-        generate_swift_config ${swift_node_config} ${node_number} $[ACCOUNT_PORT_BASE + 10 * (node_number - 1)] account
+        generate_swift_config ${swift_node_config} ${node_number} $(( ACCOUNT_PORT_BASE + 10 * (node_number - 1) )) account
         sed -i -e "s,#[ ]*recon_cache_path .*,recon_cache_path = ${SWIFT_DATA_DIR}/cache," ${swift_node_config}
     done
 
@@ -614,9 +614,9 @@
         swift-ring-builder account.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1
 
         for node_number in ${SWIFT_REPLICAS_SEQ}; do
-            swift-ring-builder object.builder add z${node_number}-127.0.0.1:$[OBJECT_PORT_BASE + 10 * (node_number - 1)]/sdb1 1
-            swift-ring-builder container.builder add z${node_number}-127.0.0.1:$[CONTAINER_PORT_BASE + 10 * (node_number - 1)]/sdb1 1
-            swift-ring-builder account.builder add z${node_number}-127.0.0.1:$[ACCOUNT_PORT_BASE + 10 * (node_number - 1)]/sdb1 1
+            swift-ring-builder object.builder add z${node_number}-127.0.0.1:$(( OBJECT_PORT_BASE + 10 * (node_number - 1) ))/sdb1 1
+            swift-ring-builder container.builder add z${node_number}-127.0.0.1:$(( CONTAINER_PORT_BASE + 10 * (node_number - 1) ))/sdb1 1
+            swift-ring-builder account.builder add z${node_number}-127.0.0.1:$(( ACCOUNT_PORT_BASE + 10 * (node_number - 1) ))/sdb1 1
         done
         swift-ring-builder object.builder rebalance
         swift-ring-builder container.builder rebalance
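
Note: $(( )) is the POSIX arithmetic expansion; the $[ ] form it replaces is
an obsolete, undocumented bashism. A worked example of the port arithmetic,
assuming DevStack's usual OBJECT_PORT_BASE of 6613:

    OBJECT_PORT_BASE=6613
    for node_number in 1 2 3; do
        echo "object server $node_number -> port $(( OBJECT_PORT_BASE + 10 * (node_number - 1) ))"
    done
    # object server 1 -> port 6613, 2 -> port 6623, 3 -> port 6633
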
diff --git a/lib/tempest b/lib/tempest
index 2e8aa3e..933c059 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -294,6 +294,10 @@
     iniset $TEMPEST_CONFIG compute-feature-enabled live_migration ${LIVE_MIGRATION_AVAILABLE:-False}
     iniset $TEMPEST_CONFIG compute-feature-enabled change_password False
     iniset $TEMPEST_CONFIG compute-feature-enabled block_migration_for_live_migration ${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False}
+    iniset $TEMPEST_CONFIG compute-feature-enabled api_extensions "${COMPUTE_API_EXTENSIONS:-all}"
+    iniset $TEMPEST_CONFIG compute-feature-disabled api_extensions "${DISABLE_COMPUTE_API_EXTENSIONS}"
+    iniset $TEMPEST_CONFIG compute-feature-enabled api_v3_extensions "${COMPUTE_API_V3_EXTENSIONS:-all}"
+    iniset $TEMPEST_CONFIG compute-feature-disabled api_v3_extensions "${DISABLE_COMPUTE_API_V3_EXTENSIONS}"
 
     # Compute admin
     iniset $TEMPEST_CONFIG "compute-admin" username $ADMIN_USERNAME
@@ -308,6 +312,8 @@
     iniset $TEMPEST_CONFIG network default_network "$FIXED_RANGE"
     iniset $TEMPEST_CONFIG network-feature-enabled ipv6 "$IPV6_ENABLED"
     iniset $TEMPEST_CONFIG network-feature-enabled ipv6_subnet_attributes "$IPV6_SUBNET_ATTRIBUTES_ENABLED"
+    iniset $TEMPEST_CONFIG network-feature-enabled api_extensions "${NETWORK_API_EXTENSIONS:-all}"
+    iniset $TEMPEST_CONFIG network-feature-disabled api_extensions "${DISABLE_NETWORK_API_EXTENSIONS}"
 
     # boto
     iniset $TEMPEST_CONFIG boto ec2_url "http://$SERVICE_HOST:8773/services/Cloud"
@@ -348,7 +354,13 @@
     # Once Tempest retires support for icehouse this flag can be removed.
     iniset $TEMPEST_CONFIG telemetry too_slow_to_test "False"
 
+    # Object storage
+    iniset $TEMPEST_CONFIG object-storage-feature-enabled discoverable_apis "${OBJECT_STORAGE_API_EXTENSIONS:-all}"
+    iniset $TEMPEST_CONFIG object-storage-feature-disabled discoverable_apis "${OBJECT_STORAGE_DISABLE_API_EXTENSIONS}"
+
     # Volume
+    iniset $TEMPEST_CONFIG volume-feature-enabled api_extensions "${VOLUME_API_EXTENSIONS:-all}"
+    iniset $TEMPEST_CONFIG volume-feature-disabled api_extensions "${DISABLE_VOLUME_API_EXTENSIONS}"
     if ! is_service_enabled c-bak; then
         iniset $TEMPEST_CONFIG volume-feature-enabled backup False
     fi
@@ -371,9 +383,6 @@
     # cli
     iniset $TEMPEST_CONFIG cli cli_dir $NOVA_BIN_DIR
 
-    # Networking
-    iniset $TEMPEST_CONFIG network-feature-enabled api_extensions "${NETWORK_API_EXTENSIONS:-all}"
-
     # Baremetal
     if [ "$VIRT_DRIVER" = "ironic" ] ; then
         iniset $TEMPEST_CONFIG baremetal driver_enabled True
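
Note: each enabled list defaults to "all" while its disabled counterpart
stays empty unless the operator sets it, so by default every extension is
exercised. Hypothetical local.conf overrides these new iniset calls would
pick up (the extension aliases are made up for illustration):

    COMPUTE_API_EXTENSIONS=all
    DISABLE_NETWORK_API_EXTENSIONS=metering,lbaas
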
diff --git a/lib/zaqar b/lib/zaqar
index 33f1092..43fb5a1 100644
--- a/lib/zaqar
+++ b/lib/zaqar
@@ -175,6 +175,7 @@
 
 # stop_zaqar() - Stop running processes
 function stop_zaqar {
+    local serv
     # Kill the zaqar screen windows
     for serv in zaqar-server; do
         screen -S $SCREEN_NAME -p $serv -X kill
@@ -182,18 +183,18 @@
 }
 
 function create_zaqar_accounts {
-    SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+    local service_tenant=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
-    ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
+    local admin_role=$(openstack role list | awk "/ admin / { print \$2 }")
 
-    ZAQAR_USER=$(get_or_create_user "zaqar" \
-        "$SERVICE_PASSWORD" $SERVICE_TENANT)
-    get_or_add_user_role $ADMIN_ROLE $ZAQAR_USER $SERVICE_TENANT
+    local zaqar_user=$(get_or_create_user "zaqar" \
+        "$SERVICE_PASSWORD" $service_tenant)
+    get_or_add_user_role $admin_role $zaqar_user $service_tenant
 
     if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
 
-        ZAQAR_SERVICE=$(get_or_create_service "zaqar" \
+        local zaqar_service=$(get_or_create_service "zaqar" \
             "queuing" "Zaqar Service")
-        get_or_create_endpoint $ZAQAR_SERVICE \
+        get_or_create_endpoint $zaqar_service \
             "$REGION_NAME" \
             "$ZAQAR_SERVICE_PROTOCOL://$ZAQAR_SERVICE_HOST:$ZAQAR_SERVICE_PORT" \
             "$ZAQAR_SERVICE_PROTOCOL://$ZAQAR_SERVICE_HOST:$ZAQAR_SERVICE_PORT" \
diff --git a/stack.sh b/stack.sh
index 77e71d8..68eac7c 100755
--- a/stack.sh
+++ b/stack.sh
@@ -637,9 +637,9 @@
     if [[ $r -ne 0 ]]; then
         echo "Error on exit"
         if [[ -z $LOGDIR ]]; then
-            ./tools/worlddump.py
+            $TOP_DIR/tools/worlddump.py
         else
-            ./tools/worlddump.py -d $LOGDIR
+            $TOP_DIR/tools/worlddump.py -d $LOGDIR
         fi
     fi
 
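
Note: the error-on-exit handler can fire after a failing command has changed
the working directory, at which point a ./tools/... path no longer resolves;
anchoring on $TOP_DIR keeps worlddump reachable from anywhere:

    cd /tmp
    ./tools/worlddump.py            # No such file or directory
    $TOP_DIR/tools/worlddump.py     # still resolves
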
diff --git a/stackrc b/stackrc
index e071132..53c8579 100644
--- a/stackrc
+++ b/stackrc
@@ -144,6 +144,9 @@
 GLANCE_REPO=${GLANCE_REPO:-${GIT_BASE}/openstack/glance.git}
 GLANCE_BRANCH=${GLANCE_BRANCH:-master}
 
+GLANCE_STORE_REPO=${GLANCE_STORE_REPO:-${GIT_BASE}/openstack/glance_store.git}
+GLANCE_STORE_BRANCH=${GLANCE_STORE_BRANCH:-master}
+
 # python glance client library
 GLANCECLIENT_REPO=${GLANCECLIENT_REPO:-${GIT_BASE}/openstack/python-glanceclient.git}
 GLANCECLIENT_BRANCH=${GLANCECLIENT_BRANCH:-master}
@@ -505,6 +508,11 @@
 # Allow the use of an alternate protocol (such as https) for service endpoints
 SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http}
 
+# Set the maximum number of workers for various services. Many services
+# default to one worker per CPU, so setting this can cap memory use on
+# hosts with a large number of CPUs.
+# API_WORKERS=4
+
 # Local variables:
 # mode: shell-script
 # End:
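
Note: operators opt in from local.conf (the value 4 is illustrative):

    [[local|localrc]]
    API_WORKERS=4
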
diff --git a/tox.ini b/tox.ini
index c8a603b..325adae 100644
--- a/tox.ini
+++ b/tox.ini
@@ -11,3 +11,6 @@
 deps = bashate
 whitelist_externals = bash
 commands = bash -c "find {toxinidir} -not -wholename \*.tox/\* -and \( -name \*.sh -or -name \*rc -or -name functions\* -or \( -wholename lib/\* -and -not -name \*.md \) \) -print0 | xargs -0 bashate -v"
+
+[testenv:docs]
+commands = python setup.py build_sphinx
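
Note: with the new environment, the docs build locally via tox; build_sphinx
is Sphinx's setuptools command, so it assumes Sphinx is importable in the
created venv:

    tox -e docs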