Merge "Allow mongod service to launch on Fedora"
diff --git a/AUTHORS b/AUTHORS
index 35c0a52..718a760 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -13,6 +13,7 @@
 Devin Carlen <devin.carlen@gmail.com>
 Doug hellmann <doug.hellmann@dreamhost.com>
 Eddie Hebert <edhebert@gmail.com>
+Edgar Magana <emagana@gmail.com>
 Eoghan Glynn <eglynn@redhat.com>
 Eric Windisch <ewindisch@cloudscaling.com>
 Gabriel Hurley <gabriel@strikeawe.com>
diff --git a/README.md b/README.md
index a738554..483d1b0 100644
--- a/README.md
+++ b/README.md
@@ -85,21 +85,19 @@
 
 # Swift
 
-Swift is enabled by default configured with only one replica to avoid being IO/memory intensive on a small vm. When running with only one replica the account, container and object services will run directly in screen. The others services like replicator, updaters or auditor runs in background.
+Swift is not installed by default; you can enable it easily by adding this to your `localrc`:
 
-If you would like to disable Swift you can add this to your `localrc` :
-
-    disable_service s-proxy s-object s-container s-account
+    enable_service swift
 
 If you want a minimal Swift install with only Swift and Keystone, you can use this instead in your `localrc`:
 
     disable_all_services
-    enable_service key mysql s-proxy s-object s-container s-account
+    enable_service key mysql swift
 
-If you only want to do some testing of a real normal swift cluster with multiple replicas you can do so by customizing the variable `SWIFT_REPLICAS` in your `localrc` (usually to 3).
-
-# Swift S3
+If you use Swift with Keystone, Swift will authenticate against it. Make sure your clients authenticate against the Keystone URL.
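+
+For example, a minimal client environment for authenticating against Keystone might look like this (a sketch; the host and credentials are placeholders and the default Keystone port is assumed):
+
+    export OS_AUTH_URL=http://<keystone-host>:5000/v2.0
+    export OS_USERNAME=admin
+    export OS_PASSWORD=<admin-password>
+    export OS_TENANT_NAME=admin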
 
 If you are enabling `swift3` in `ENABLED_SERVICES`, devstack will install the swift3 middleware emulation. Swift will be configured to act as an S3 endpoint for Keystone, effectively replacing the `nova-objectstore`.
 
 Only the Swift proxy server is launched in the screen session; all other services are started in the background and managed by the `swift-init` tool.
+
+By default Swift is configured with 3 replicas (and one spare), which could be IO intensive on a small VM. If you only want to do some quick testing of the API, you can run with a single replica by customizing the variable `SWIFT_REPLICAS` in your `localrc`.
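+
+For example, in your `localrc`:
+
+    SWIFT_REPLICAS=1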
diff --git a/exercises/client-args.sh b/exercises/client-args.sh
index 1e92500..894da74 100755
--- a/exercises/client-args.sh
+++ b/exercises/client-args.sh
@@ -136,7 +136,7 @@
 # Swift client
 # ------------
 
-if [[ "$ENABLED_SERVICES" =~ "swift" || "$ENABLED_SERVICES" =~ "s-proxy" ]]; then
+if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
     if [[ "$SKIP_EXERCISES" =~ "swift" ]] ; then
         STATUS_SWIFT="Skipped"
     else
diff --git a/exercises/client-env.sh b/exercises/client-env.sh
index dd8e56e..c84e84e 100755
--- a/exercises/client-env.sh
+++ b/exercises/client-env.sh
@@ -144,8 +144,7 @@
 # Swift client
 # ------------
 
-
-if [[ "$ENABLED_SERVICES" =~ "swift" || "$ENABLED_SERVICES" =~ "s-proxy" ]]; then
+if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
     if [[ "$SKIP_EXERCISES" =~ "swift" ]] ; then
         STATUS_SWIFT="Skipped"
     else
diff --git a/exercises/swift.sh b/exercises/swift.sh
index c4ec3e9..46ac2c5 100755
--- a/exercises/swift.sh
+++ b/exercises/swift.sh
@@ -35,7 +35,7 @@
 
 # If swift is not enabled we exit with exit code 55, which means the
 # exercise is skipped.
-is_service_enabled s-proxy || exit 55
+is_service_enabled swift || exit 55
 
 # Container name
 CONTAINER=ex-swift
diff --git a/files/apts/ceilometer-collector b/files/apts/ceilometer-collector
index c67ade3..71007ba 100644
--- a/files/apts/ceilometer-collector
+++ b/files/apts/ceilometer-collector
@@ -1,2 +1,6 @@
 python-pymongo
 mongodb-server
+libnspr4-dev
+pkg-config
+libxml2-dev
+libxslt-dev
\ No newline at end of file
diff --git a/files/apts/general b/files/apts/general
index 0264066..a1fcf3c 100644
--- a/files/apts/general
+++ b/files/apts/general
@@ -9,6 +9,7 @@
 git
 lsof # useful when debugging
 openssh-server
+openssl
 vim-nox
 locate # useful when debugging
 python-virtualenv
diff --git a/files/keystone_data.sh b/files/keystone_data.sh
index 2fc8915..a6fab09 100755
--- a/files/keystone_data.sh
+++ b/files/keystone_data.sh
@@ -52,7 +52,7 @@
 # Services
 # --------
 
-if [[ "$ENABLED_SERVICES" =~ "n-api" ]] && [[ "$ENABLED_SERVICES" =~ "s-proxy" || "$ENABLED_SERVICES" =~ "swift" ]]; then
+if [[ "$ENABLED_SERVICES" =~ "n-api" ]] && [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
     NOVA_USER=$(keystone user-list | awk "/ nova / { print \$2 }")
     # Nova needs ResellerAdmin role to download images when accessing
     # swift through the s3 api.
@@ -123,8 +123,7 @@
 fi
 
 # Swift
-
-if [[ "$ENABLED_SERVICES" =~ "swift" || "$ENABLED_SERVICES" =~ "s-proxy" ]]; then
+if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
     SWIFT_USER=$(get_id keystone user-create \
         --name=swift \
         --pass="$SERVICE_PASSWORD" \
@@ -168,9 +167,9 @@
         keystone endpoint-create \
             --region RegionOne \
             --service_id $CEILOMETER_SERVICE \
-            --publicurl "http://$SERVICE_HOST:8777/" \
-            --adminurl "http://$SERVICE_HOST:8777/" \
-            --internalurl "http://$SERVICE_HOST:8777/"
+            --publicurl "http://$SERVICE_HOST:8777" \
+            --adminurl "http://$SERVICE_HOST:8777" \
+            --internalurl "http://$SERVICE_HOST:8777"
     fi
 fi
 
@@ -191,7 +190,7 @@
 fi
 
 # S3
-if [[ "$ENABLED_SERVICES" =~ "n-obj" || "$ENABLED_SERVICES" =~ "swift3" ]]; then
+if [[ "$ENABLED_SERVICES" =~ "n-obj" || "$ENABLED_SERVICES" =~ "swift" ]]; then
     if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
         S3_SERVICE=$(get_id keystone service-create \
             --name=s3 \
diff --git a/files/ldap/openstack.ldif b/files/ldap/openstack.ldif
index 00c9861..2b76372 100644
--- a/files/ldap/openstack.ldif
+++ b/files/ldap/openstack.ldif
@@ -20,6 +20,10 @@
 objectClass: organizationalUnit
 ou: Projects
 
+dn: ou=Domains,dc=openstack,dc=org
+objectClass: organizationalUnit
+ou: Domains
+
 dn: cn=9fe2ff9ee4384b1894a90878d3e92bab,ou=Roles,dc=openstack,dc=org
 objectClass: organizationalRole
 ou: _member_
diff --git a/files/rpms-suse/general b/files/rpms-suse/general
index 8ed74ec..b8ceeb7 100644
--- a/files/rpms-suse/general
+++ b/files/rpms-suse/general
@@ -4,6 +4,7 @@
 git-core
 iputils
 openssh
+openssl
 psmisc
 python-cmd2 # dist:opensuse-12.3
 python-netaddr
diff --git a/files/rpms/general b/files/rpms/general
index e4f143d..fc3412b 100644
--- a/files/rpms/general
+++ b/files/rpms/general
@@ -3,6 +3,7 @@
 euca2ools # only for testing client
 git-core
 openssh-server
+openssl
 psmisc
 pylint
 python-netaddr
diff --git a/functions b/functions
index 9eecfc5..8cb703c 100644
--- a/functions
+++ b/functions
@@ -566,9 +566,6 @@
 #   **ceilometer** returns true if any enabled service starts with **ceilometer**
 #   **glance** returns true if any enabled service starts with **g-**
 #   **quantum** returns true if any enabled service starts with **q-**
-#   **swift** returns true if any service enabled start with **s-**
-#   For backward compatibility if we have **swift** in ENABLED_SERVICES all the
-#   **s-** services will be enabled. This will be deprecated in the future.
 #
 # Uses global ``ENABLED_SERVICES``
 # is_service_enabled service [service ...]
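+#
+# Example after this change (a sketch): "swift" now only matches an
+# explicit "swift" entry in ENABLED_SERVICES; the old s-* aliases are gone:
+#
+#   if is_service_enabled swift; then
+#       start_swift
+#   fi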
@@ -581,8 +578,6 @@
         [[ ${service} == "ceilometer" && ${ENABLED_SERVICES} =~ "ceilometer-" ]] && return 0
         [[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && return 0
         [[ ${service} == "quantum" && ${ENABLED_SERVICES} =~ "q-" ]] && return 0
-        [[ ${service} == "swift" && ${ENABLED_SERVICES} =~ "s-" ]] && return 0
-        [[ ${service} == s-* && ${ENABLED_SERVICES} =~ "swift" ]] && return 0
     done
     return 1
 }
diff --git a/lib/baremetal b/lib/baremetal
index 2659386..57048a1 100644
--- a/lib/baremetal
+++ b/lib/baremetal
@@ -63,7 +63,7 @@
 
 # sub-driver to use for remote power management
 # - nova.virt.baremetal.fake.FakePowerManager, for manual power control
-# - nova.virt.baremetal.ipmi.Ipmi, for remote IPMI
+# - nova.virt.baremetal.ipmi.IPMI, for remote IPMI
 # - nova.virt.baremetal.tilera_pdu.Pdu, for TilePro hardware
 BM_POWER_MANAGER=${BM_POWER_MANAGER:-nova.virt.baremetal.fake.FakePowerManager}
 
@@ -258,9 +258,10 @@
     nova flavor-create $BM_FLAVOR_NAME $BM_FLAVOR_ID \
             $BM_FLAVOR_RAM $BM_FLAVOR_ROOT_DISK $BM_FLAVOR_CPU
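+    # Namespacing the deploy image keys with "baremetal:" (below) keeps the
+    # scheduler's ComputeCapabilitiesFilter from treating them as required
+    # capabilities; presumably this is why the filter workaround in stack.sh
+    # is dropped in this change.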
     nova flavor-key $BM_FLAVOR_NAME set \
-            cpu_arch=$BM_FLAVOR_ARCH \
-            deploy_kernel_id=$aki \
-            deploy_ramdisk_id=$ari
+            "cpu_arch"="$BM_FLAVOR_ARCH" \
+            "baremetal:deploy_kernel_id"="$aki" \
+            "baremetal:deploy_ramdisk_id"="$ari"
 }
 
 # pull run-time kernel/ramdisk out of disk image and load into glance
diff --git a/lib/keystone b/lib/keystone
index eea2c4d..2580351 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -146,7 +146,7 @@
         cp -p $FILES/default_catalog.templates $KEYSTONE_CATALOG
 
         # Add swift endpoints to service catalog if swift is enabled
-        if is_service_enabled s-proxy; then
+        if is_service_enabled swift; then
             echo "catalog.RegionOne.object_store.publicURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG
             echo "catalog.RegionOne.object_store.adminURL = http://%SERVICE_HOST%:8080/" >> $KEYSTONE_CATALOG
             echo "catalog.RegionOne.object_store.internalURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG
diff --git a/lib/quantum b/lib/quantum
index 3e41d8d..eebbabf 100644
--- a/lib/quantum
+++ b/lib/quantum
@@ -89,7 +89,7 @@
 # Meta data IP
 Q_META_DATA_IP=${Q_META_DATA_IP:-$HOST_IP}
 # Allow Overlapping IP among subnets
-Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-False}
+Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-True}
 # Use quantum-debug command
 Q_USE_DEBUG_COMMAND=${Q_USE_DEBUG_COMMAND:-False}
 # The name of the default q-l3 router
@@ -176,6 +176,11 @@
 # Please refer to lib/quantum_plugins/README.md for details.
 source $TOP_DIR/lib/quantum_plugins/$Q_PLUGIN
 
+# Agent loadbalancer service plugin functions
+# -------------------------------------------
+# Hardcoded to a single service plugin for now
+source $TOP_DIR/lib/quantum_plugins/agent_loadbalancer
+
 # Entry Points
 # ------------
 
@@ -185,6 +190,10 @@
     _configure_quantum_common
     iniset_rpc_backend quantum $QUANTUM_CONF DEFAULT
 
+    # Must run before q-svc so Q_SERVICE_PLUGIN_CLASSES is initialized first
+    if is_service_enabled q-lbaas; then
+        _configure_quantum_lbaas
+    fi
     if is_service_enabled q-svc; then
         _configure_quantum_service
     fi
@@ -362,6 +371,10 @@
     screen_it q-dhcp "python $AGENT_DHCP_BINARY --config-file $QUANTUM_CONF --config-file=$Q_DHCP_CONF_FILE"
     screen_it q-meta "python $AGENT_META_BINARY --config-file $QUANTUM_CONF --config-file=$Q_META_CONF_FILE"
     screen_it q-l3 "python $AGENT_L3_BINARY --config-file $QUANTUM_CONF --config-file=$Q_L3_CONF_FILE"
+
+    if is_service_enabled q-lbaas; then
+        screen_it q-lbaas "python $AGENT_LBAAS_BINARY --config-file $QUANTUM_CONF --config-file=$LBAAS_AGENT_CONF_FILENAME"
+    fi
 }
 
 # stop_quantum() - Stop running processes (non-screen)
@@ -483,6 +496,13 @@
     _quantum_setup_keystone $Q_META_CONF_FILE DEFAULT set_auth_url
 }
 
+function _configure_quantum_lbaas()
+{
+    quantum_agent_lbaas_install_agent_packages
+    quantum_agent_lbaas_configure_common
+    quantum_agent_lbaas_configure_agent
+}
+
 # _configure_quantum_plugin_agent() - Set config files for quantum plugin agent
 # It is called when q-agt is enabled.
 function _configure_quantum_plugin_agent() {
@@ -512,6 +532,10 @@
     # Update either configuration file with plugin
     iniset $QUANTUM_CONF DEFAULT core_plugin $Q_PLUGIN_CLASS
 
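+    # Q_SERVICE_PLUGIN_CLASSES is a comma-separated list assembled by the
+    # service plugin files sourced above (currently agent_loadbalancer).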
+    if [[ $Q_SERVICE_PLUGIN_CLASSES != '' ]]; then
+        iniset $QUANTUM_CONF DEFAULT service_plugins $Q_SERVICE_PLUGIN_CLASSES
+    fi
+
     iniset $QUANTUM_CONF DEFAULT verbose True
     iniset $QUANTUM_CONF DEFAULT debug True
     iniset $QUANTUM_CONF DEFAULT state_path $DATA_DIR/quantum
diff --git a/lib/quantum_plugins/agent_loadbalancer b/lib/quantum_plugins/agent_loadbalancer
new file mode 100644
index 0000000..a4d6dff
--- /dev/null
+++ b/lib/quantum_plugins/agent_loadbalancer
@@ -0,0 +1,45 @@
+# Quantum loadbalancer plugin
+# ---------------------------
+
+# Save trace setting
+MY_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+AGENT_LBAAS_BINARY="$QUANTUM_DIR/bin/quantum-lbaas-agent"
+
+function quantum_agent_lbaas_install_agent_packages() {
+    if is_ubuntu || is_fedora; then
+        install_package haproxy
+    fi
+}
+
+function quantum_agent_lbaas_configure_common() {
+    if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then
+        Q_SERVICE_PLUGIN_CLASSES="quantum.plugins.services.agent_loadbalancer.plugin.LoadBalancerPlugin"
+    else
+        Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,quantum.plugins.services.agent_loadbalancer.plugin.LoadBalancerPlugin"
+    fi
+}
+
+function quantum_agent_lbaas_configure_agent() {
+    LBAAS_AGENT_CONF_PATH=/etc/quantum/plugins/services/agent_loadbalancer
+    mkdir -p $LBAAS_AGENT_CONF_PATH
+
+    LBAAS_AGENT_CONF_FILENAME="$LBAAS_AGENT_CONF_PATH/lbaas_agent.ini"
+
+    cp $QUANTUM_DIR/etc/lbaas_agent.ini $LBAAS_AGENT_CONF_FILENAME
+
+    if [[ $Q_PLUGIN == 'linuxbridge' || $Q_PLUGIN == 'brocade' ]]; then
+        iniset $LBAAS_AGENT_CONF_FILENAME DEFAULT interface_driver "quantum.agent.linux.interface.BridgeInterfaceDriver"
+    else
+        iniset $LBAAS_AGENT_CONF_FILENAME DEFAULT interface_driver "quantum.agent.linux.interface.OVSInterfaceDriver"
+    fi
+
+    if is_fedora; then
+        iniset $LBAAS_AGENT_CONF_FILENAME DEFAULT user_group "nobody"
+    fi
+}
+
+# Restore xtrace
+$MY_XTRACE
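+
+# To try the agent, enabling the service in localrc should suffice
+# (a sketch; "q-lbaas" is the service name checked in lib/quantum):
+#
+#   enable_service q-lbaas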
diff --git a/lib/quantum_plugins/plumgrid b/lib/quantum_plugins/plumgrid
new file mode 100644
index 0000000..b49aa92
--- /dev/null
+++ b/lib/quantum_plugins/plumgrid
@@ -0,0 +1,37 @@
+# PLUMgrid Quantum Plugin
+# Edgar Magana emagana@plumgrid.com
+# ------------------------------------
+
+# Save trace settings
+MY_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+#source $TOP_DIR/lib/quantum_plugins/ovs_base
+
+function quantum_plugin_create_nova_conf() {
+    NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"}
+}
+
+function quantum_plugin_setup_interface_driver() {
+    :
+}
+
+function quantum_plugin_configure_common() {
+    Q_PLUGIN_CONF_PATH=etc/quantum/plugins/plumgrid
+    Q_PLUGIN_CONF_FILENAME=plumgrid.ini
+    Q_DB_NAME="plumgrid_quantum"
+    Q_PLUGIN_CLASS="quantum.plugins.plumgrid.plumgrid_nos_plugin.plumgrid_plugin.QuantumPluginPLUMgridV2"
+}
+
+function quantum_plugin_configure_service() {
+    iniset /$Q_PLUGIN_CONF_FILE PLUMgridNOS nos_server localhost
+    iniset /$Q_PLUGIN_CONF_FILE PLUMgridNOS nos_server_port 7766
+}
+
+function quantum_plugin_configure_debug_command() {
+    :
+}
+
+# Restore xtrace
+$MY_XTRACE
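+
+# To select this plugin, something like the following in localrc should work
+# (a sketch; lib/quantum sources lib/quantum_plugins/$Q_PLUGIN):
+#
+#   Q_PLUGIN=plumgrid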
diff --git a/lib/swift b/lib/swift
index 326c6f3..5ba7e56 100644
--- a/lib/swift
+++ b/lib/swift
@@ -56,11 +56,10 @@
 SWIFT_PARTITION_POWER_SIZE=${SWIFT_PARTITION_POWER_SIZE:-9}
 
 # Set ``SWIFT_REPLICAS`` to configure how many replicas are to be
-# configured for your Swift cluster. By default we are configuring
-# only one replica since this is way less CPU and memory intensive. If
-# you are planning to test swift replication you may want to set this
-# up to 3.
-SWIFT_REPLICAS=${SWIFT_REPLICAS:-1}
+# configured for your Swift cluster. The default of three replicas can be IO
+# and memory intensive on a small VM, so you may want to lower it to 1 if you
+# only want to do some quick testing.
+SWIFT_REPLICAS=${SWIFT_REPLICAS:-3}
 SWIFT_REPLICAS_SEQ=$(seq ${SWIFT_REPLICAS})
 
 # Set ``OBJECT_PORT_BASE``, ``CONTAINER_PORT_BASE``, ``ACCOUNT_PORT_BASE``
@@ -360,26 +359,13 @@
         sudo systemctl start xinetd.service
     fi
 
-   # By default with only one replica we are launching the proxy,
-   # container, account and object server in screen in foreground and
-   # other services in background. If we have SWIFT_REPLICAS set to something
-   # greater than one we first spawn all the swift services then kill the proxy
-   # service so we can run it in foreground in screen.  ``swift-init ...
-   # {stop|restart}`` exits with '1' if no servers are running, ignore it just
-   # in case
+   # First spawn all the swift services, then stop the proxy service so we
+   # can run it in the foreground in screen.  ``swift-init ...
+   # {stop|restart}`` exits with '1' if no servers are running; ignore it
+   # just in case.
    swift-init --run-dir=${SWIFT_DATA_DIR}/run all restart || true
-   if [[ ${SWIFT_REPLICAS} == 1 ]];then
-        todo="object container account"
-   fi
-   for type in proxy ${todo};do
-       swift-init --run-dir=${SWIFT_DATA_DIR}/run ${type} stop || true
-   done
-   screen_it s-proxy "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONFIG_DIR}/proxy-server.conf -v"
-   if [[ ${SWIFT_REPLICAS} == 1 ]];then
-       for type in object container account;do
-           screen_it s-${type} "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-${type}-server ${SWIFT_CONFIG_DIR}/${type}-server/1.conf -v"
-       done
-   fi
+   swift-init --run-dir=${SWIFT_DATA_DIR}/run proxy stop || true
+   screen_it swift "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONFIG_DIR}/proxy-server.conf -v"
 }
 
 # stop_swift() - Stop running processes (non-screen)
diff --git a/stack.sh b/stack.sh
index d9fbb94..0f5401a 100755
--- a/stack.sh
+++ b/stack.sh
@@ -193,9 +193,7 @@
 
 # Create the destination directory and ensure it is writable by the user
 sudo mkdir -p $DEST
-if [ ! -w $DEST ]; then
-    sudo chown $STACK_USER $DEST
-fi
+sudo chown -R $STACK_USER $DEST
 
 # Set ``OFFLINE`` to ``True`` to configure ``stack.sh`` to run cleanly without
 # Internet access. ``stack.sh`` must have been previously run with Internet
@@ -210,7 +208,7 @@
 # Destination path for service data
 DATA_DIR=${DATA_DIR:-${DEST}/data}
 sudo mkdir -p $DATA_DIR
-sudo chown $STACK_USER $DATA_DIR
+sudo chown -R $STACK_USER $DATA_DIR
 
 
 # Common Configuration
@@ -427,7 +425,7 @@
     read_password RABBIT_PASSWORD "ENTER A PASSWORD TO USE FOR RABBIT."
 fi
 
-if is_service_enabled s-proxy; then
+if is_service_enabled swift; then
     # If we are using swift3, we can default the s3 port to swift instead
     # of nova-objectstore
     if is_service_enabled swift3;then
@@ -664,12 +662,12 @@
 git_clone $OPENSTACKCLIENT_REPO $OPENSTACKCLIENT_DIR $OPENSTACKCLIENT_BRANCH
 
 # glance, swift middleware and nova api needs keystone middleware
-if is_service_enabled key g-api n-api s-proxy; then
+if is_service_enabled key g-api n-api swift; then
     # unified auth system (manages accounts/tokens)
     install_keystone
 fi
 
-if is_service_enabled s-proxy; then
+if is_service_enabled swift; then
     install_swiftclient
     install_swift
     if is_service_enabled swift3; then
@@ -726,10 +724,10 @@
 configure_keystoneclient
 configure_novaclient
 setup_develop $OPENSTACKCLIENT_DIR
-if is_service_enabled key g-api n-api s-proxy; then
+if is_service_enabled key g-api n-api swift; then
     configure_keystone
 fi
-if is_service_enabled s-proxy; then
+if is_service_enabled swift; then
     configure_swift
     configure_swiftclient
     if is_service_enabled swift3; then
@@ -913,7 +911,7 @@
     init_glance
 
     # Store the images in swift if enabled.
-    if is_service_enabled s-proxy; then
+    if is_service_enabled swift; then
         iniset $GLANCE_API_CONF DEFAULT default_store swift
         iniset $GLANCE_API_CONF DEFAULT swift_store_auth_address $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/
         iniset $GLANCE_API_CONF DEFAULT swift_store_user $SERVICE_TENANT_NAME:glance
@@ -972,7 +970,7 @@
 # Storage Service
 # ---------------
 
-if is_service_enabled s-proxy; then
+if is_service_enabled swift; then
     echo_summary "Configuring Swift"
     init_swift
 fi
@@ -1078,9 +1076,8 @@
         iniset $NOVA_CONF DEFAULT compute_driver nova.virt.baremetal.driver.BareMetalDriver
         iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER
         iniset $NOVA_CONF DEFAULT scheduler_host_manager nova.scheduler.baremetal_host_manager.BaremetalHostManager
-        # NOTE(deva): ComputeCapabilitiesFilter does not currently work with Baremetal. See bug # 1129485
-        #             As a work around, we disable CCFilter by explicitly enabling all the other default filters.
-        iniset $NOVA_CONF DEFAULT scheduler_default_filters ComputeFilter,RetryFilter,AvailabilityZoneFilter,ImagePropertiesFilter
+        iniset $NOVA_CONF DEFAULT ram_allocation_ratio 1.0
+        iniset $NOVA_CONF DEFAULT reserved_host_memory_mb 0
         iniset $NOVA_CONF baremetal instance_type_extra_specs cpu_arch:$BM_CPU_ARCH
         iniset $NOVA_CONF baremetal driver $BM_DRIVER
         iniset $NOVA_CONF baremetal power_manager $BM_POWER_MANAGER
@@ -1119,7 +1116,7 @@
 # Only run the services specified in ``ENABLED_SERVICES``
 
 # Launch Swift Services
-if is_service_enabled s-proxy; then
+if is_service_enabled swift; then
     echo_summary "Starting Swift"
     start_swift
 fi
diff --git a/stackrc b/stackrc
index f2c279f..008bc9c 100644
--- a/stackrc
+++ b/stackrc
@@ -21,7 +21,7 @@
 # ``disable_service`` functions in ``localrc``.
 # For example, to enable Swift add this to ``localrc``:
 # enable_service swift
-ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,cinder,c-sch,c-api,c-vol,n-sch,n-novnc,n-xvnc,n-cauth,s-proxy,s-account,s-object,s-container,horizon,rabbit,tempest,mysql
+ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,cinder,c-sch,c-api,c-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,rabbit,tempest,mysql
 
 # Set the default Nova APIs to enable
 NOVA_ENABLED_APIS=ec2,osapi_compute,metadata
diff --git a/unstack.sh b/unstack.sh
index 3ac2985..a086d5c 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -63,7 +63,7 @@
 fi
 
 # Swift runs daemons
-if is_service_enabled s-proxy; then
+if is_service_enabled swift; then
     stop_swift
     cleanup_swift
 fi